Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
author    David S. Miller <davem@davemloft.net>
Thu, 10 Mar 2011 22:26:00 +0000 (14:26 -0800)
committer David S. Miller <davem@davemloft.net>
Thu, 10 Mar 2011 22:26:00 +0000 (14:26 -0800)
Conflicts:
drivers/net/bnx2x/bnx2x_cmn.c

1055 files changed:
Documentation/feature-removal-schedule.txt
Documentation/networking/batman-adv.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/phonet.txt
MAINTAINERS
drivers/block/drbd/drbd_nl.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_ldisc.c
drivers/infiniband/core/addr.c
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/nes/nes.c
drivers/infiniband/hw/nes/nes_cm.c
drivers/md/dm-log-userspace-transfer.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/atl1c/atl1c_hw.c
drivers/net/atl1c/atl1c_hw.h
drivers/net/atl1c/atl1c_main.c
drivers/net/atl1e/atl1e_ethtool.c
drivers/net/atl1e/atl1e_hw.c
drivers/net/atl1e/atl1e_hw.h
drivers/net/atl1e/atl1e_main.c
drivers/net/atlx/atl1.c
drivers/net/atlx/atl2.c
drivers/net/ax88796.c
drivers/net/benet/be.h
drivers/net/benet/be_cmds.c
drivers/net/benet/be_cmds.h
drivers/net/benet/be_ethtool.c
drivers/net/benet/be_hw.h
drivers/net/benet/be_main.c
drivers/net/bna/bnad.c
drivers/net/bna/bnad.h
drivers/net/bnx2.c
drivers/net/bnx2.h
drivers/net/bnx2x/bnx2x.h
drivers/net/bnx2x/bnx2x_cmn.c
drivers/net/bnx2x/bnx2x_cmn.h
drivers/net/bnx2x/bnx2x_dcb.c
drivers/net/bnx2x/bnx2x_dcb.h
drivers/net/bnx2x/bnx2x_ethtool.c
drivers/net/bnx2x/bnx2x_hsi.h
drivers/net/bnx2x/bnx2x_link.c
drivers/net/bnx2x/bnx2x_link.h
drivers/net/bnx2x/bnx2x_main.c
drivers/net/bnx2x/bnx2x_reg.h
drivers/net/bonding/Makefile
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_procfs.c [new file with mode: 0644]
drivers/net/bonding/bond_sysfs.c
drivers/net/bonding/bonding.h
drivers/net/can/Kconfig
drivers/net/can/Makefile
drivers/net/can/c_can/Kconfig [new file with mode: 0644]
drivers/net/can/c_can/Makefile [new file with mode: 0644]
drivers/net/can/c_can/c_can.c [new file with mode: 0644]
drivers/net/can/c_can/c_can.h [new file with mode: 0644]
drivers/net/can/c_can/c_can_platform.c [new file with mode: 0644]
drivers/net/cnic.c
drivers/net/cnic.h
drivers/net/cnic_if.h
drivers/net/cxgb3/cxgb3_offload.c
drivers/net/cxgb4/cxgb4_main.c
drivers/net/dm9000.c
drivers/net/e1000e/e1000.h
drivers/net/e1000e/ethtool.c
drivers/net/e1000e/ich8lan.c
drivers/net/e1000e/lib.c
drivers/net/e1000e/netdev.c
drivers/net/e1000e/phy.c
drivers/net/enic/Makefile
drivers/net/enic/enic.h
drivers/net/enic/enic_dev.c [new file with mode: 0644]
drivers/net/enic/enic_dev.h [new file with mode: 0644]
drivers/net/enic/enic_main.c
drivers/net/enic/vnic_dev.c
drivers/net/enic/vnic_dev.h
drivers/net/enic/vnic_rq.h
drivers/net/eql.c
drivers/net/fec.c
drivers/net/ftmac100.c [new file with mode: 0644]
drivers/net/ftmac100.h [new file with mode: 0644]
drivers/net/hamradio/bpqether.c
drivers/net/igb/e1000_82575.c
drivers/net/igb/e1000_defines.h
drivers/net/igb/e1000_hw.h
drivers/net/igb/e1000_mbx.c
drivers/net/igb/e1000_regs.h
drivers/net/igb/igb.h
drivers/net/igb/igb_ethtool.c
drivers/net/igb/igb_main.c
drivers/net/igbvf/ethtool.c
drivers/net/igbvf/igbvf.h
drivers/net/igbvf/netdev.c
drivers/net/ipg.c
drivers/net/ixgbe/ixgbe.h
drivers/net/ixgbe/ixgbe_82598.c
drivers/net/ixgbe/ixgbe_82599.c
drivers/net/ixgbe/ixgbe_common.c
drivers/net/ixgbe/ixgbe_common.h
drivers/net/ixgbe/ixgbe_dcb.c
drivers/net/ixgbe/ixgbe_dcb.h
drivers/net/ixgbe/ixgbe_dcb_82598.c
drivers/net/ixgbe/ixgbe_dcb_82598.h
drivers/net/ixgbe/ixgbe_dcb_82599.c
drivers/net/ixgbe/ixgbe_dcb_82599.h
drivers/net/ixgbe/ixgbe_dcb_nl.c
drivers/net/ixgbe/ixgbe_ethtool.c
drivers/net/ixgbe/ixgbe_fcoe.c
drivers/net/ixgbe/ixgbe_fcoe.h
drivers/net/ixgbe/ixgbe_main.c
drivers/net/ixgbe/ixgbe_mbx.c
drivers/net/ixgbe/ixgbe_mbx.h
drivers/net/ixgbe/ixgbe_phy.c
drivers/net/ixgbe/ixgbe_phy.h
drivers/net/ixgbe/ixgbe_sriov.c
drivers/net/ixgbe/ixgbe_sriov.h
drivers/net/ixgbe/ixgbe_type.h
drivers/net/ixgbe/ixgbe_x540.c
drivers/net/ixgbevf/defines.h
drivers/net/ixgbevf/ixgbevf_main.c
drivers/net/jme.c
drivers/net/jme.h
drivers/net/loopback.c
drivers/net/macvtap.c
drivers/net/mii.c
drivers/net/mv643xx_eth.c
drivers/net/myri10ge/myri10ge.c
drivers/net/phy/Kconfig
drivers/net/phy/micrel.c
drivers/net/ppp_generic.c
drivers/net/pptp.c
drivers/net/qla3xxx.c
drivers/net/qlcnic/qlcnic.h
drivers/net/qlcnic/qlcnic_main.c
drivers/net/r8169.c
drivers/net/s2io.c
drivers/net/sfc/efx.c
drivers/net/sfc/efx.h
drivers/net/sfc/ethtool.c
drivers/net/sfc/falcon.c
drivers/net/sfc/falcon_boards.c
drivers/net/sfc/falcon_xmac.c
drivers/net/sfc/filter.c
drivers/net/sfc/io.h
drivers/net/sfc/mcdi.c
drivers/net/sfc/mcdi.h
drivers/net/sfc/mcdi_mac.c
drivers/net/sfc/mcdi_pcol.h
drivers/net/sfc/mcdi_phy.c
drivers/net/sfc/mdio_10g.c
drivers/net/sfc/mdio_10g.h
drivers/net/sfc/mtd.c
drivers/net/sfc/net_driver.h
drivers/net/sfc/nic.c
drivers/net/sfc/nic.h
drivers/net/sfc/phy.h
drivers/net/sfc/qt202x_phy.c
drivers/net/sfc/regs.h
drivers/net/sfc/rx.c
drivers/net/sfc/selftest.c
drivers/net/sfc/selftest.h
drivers/net/sfc/siena.c
drivers/net/sfc/spi.h
drivers/net/sfc/tenxpress.c
drivers/net/sfc/tx.c
drivers/net/sfc/txc43128_phy.c
drivers/net/sfc/workarounds.h
drivers/net/sh_eth.c
drivers/net/sis900.c
drivers/net/smc91x.c
drivers/net/sungem.c
drivers/net/sungem.h
drivers/net/tg3.c
drivers/net/tg3.h
drivers/net/tlan.c
drivers/net/tlan.h
drivers/net/tun.c
drivers/net/typhoon.c
drivers/net/veth.c
drivers/net/via-velocity.c
drivers/net/via-velocity.h
drivers/net/vxge/vxge-config.c
drivers/net/vxge/vxge-config.h
drivers/net/vxge/vxge-main.c
drivers/net/vxge/vxge-main.h
drivers/net/vxge/vxge-traffic.c
drivers/net/vxge/vxge-traffic.h
drivers/net/vxge/vxge-version.h
drivers/net/wireless/Kconfig
drivers/net/wireless/Makefile
drivers/net/wireless/adm8211.c
drivers/net/wireless/at76c50x-usb.c
drivers/net/wireless/ath/ar9170/Kconfig
drivers/net/wireless/ath/ar9170/ar9170.h
drivers/net/wireless/ath/ar9170/main.c
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath5k/Kconfig
drivers/net/wireless/ath/ath5k/ahb.c
drivers/net/wireless/ath/ath5k/ath5k.h
drivers/net/wireless/ath/ath5k/attach.c
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/base.h
drivers/net/wireless/ath/ath5k/caps.c
drivers/net/wireless/ath/ath5k/debug.c
drivers/net/wireless/ath/ath5k/debug.h
drivers/net/wireless/ath/ath5k/eeprom.c
drivers/net/wireless/ath/ath5k/eeprom.h
drivers/net/wireless/ath/ath5k/mac80211-ops.c
drivers/net/wireless/ath/ath5k/pci.c
drivers/net/wireless/ath/ath5k/qcu.c
drivers/net/wireless/ath/ath5k/reg.h
drivers/net/wireless/ath/ath5k/trace.h [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/Makefile
drivers/net/wireless/ath/ath9k/ahb.c
drivers/net/wireless/ath/ath9k/ar9002_calib.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ar9003_hw.c
drivers/net/wireless/ath/ath9k/ar9003_mac.c
drivers/net/wireless/ath/ath9k/ar9003_phy.h
drivers/net/wireless/ath/ath9k/ar9485_initvals.h
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/beacon.c
drivers/net/wireless/ath/ath9k/calib.c
drivers/net/wireless/ath/ath9k/common.c
drivers/net/wireless/ath/ath9k/common.h
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/debug.h
drivers/net/wireless/ath/ath9k/eeprom.c
drivers/net/wireless/ath/ath9k/eeprom.h
drivers/net/wireless/ath/ath9k/eeprom_4k.c
drivers/net/wireless/ath/ath9k/eeprom_9287.c
drivers/net/wireless/ath/ath9k/eeprom_def.c
drivers/net/wireless/ath/ath9k/gpio.c
drivers/net/wireless/ath/ath9k/hif_usb.c
drivers/net/wireless/ath/ath9k/htc.h
drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
drivers/net/wireless/ath/ath9k/htc_drv_init.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/ath/ath9k/mac.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/pci.c
drivers/net/wireless/ath/ath9k/rc.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/reg.h
drivers/net/wireless/ath/ath9k/virtual.c [deleted file]
drivers/net/wireless/ath/ath9k/wmi.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/carl9170.h
drivers/net/wireless/ath/carl9170/fw.c
drivers/net/wireless/ath/carl9170/fwcmd.h
drivers/net/wireless/ath/carl9170/fwdesc.h
drivers/net/wireless/ath/carl9170/hw.h
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/ath/carl9170/tx.c
drivers/net/wireless/ath/carl9170/version.h
drivers/net/wireless/ath/carl9170/wlan.h
drivers/net/wireless/ath/key.c
drivers/net/wireless/ath/regd.c
drivers/net/wireless/ath/regd.h
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/b43/tables_nphy.c
drivers/net/wireless/b43/tables_nphy.h
drivers/net/wireless/b43/xmit.c
drivers/net/wireless/b43/xmit.h
drivers/net/wireless/b43legacy/main.c
drivers/net/wireless/b43legacy/xmit.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/iwlegacy/Kconfig [new file with mode: 0644]
drivers/net/wireless/iwlegacy/Makefile [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-3945-fh.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-3945-hw.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-3945-led.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-3945-led.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-3945-rs.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-3945.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-3945.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-calib.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-calib.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-hw.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-led.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-led.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-lib.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-rs.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-rx.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-sta.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-tx.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965-ucode.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-4965.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-commands.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-core.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-core.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-csr.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-debug.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-debugfs.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-dev.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-devtrace.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-devtrace.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-eeprom.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-eeprom.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-fh.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-hcmd.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-helpers.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-io.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-led.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-led.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-legacy-rs.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-power.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-power.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-prph.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-rx.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-scan.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-spectrum.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-sta.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-sta.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-tx.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl3945-base.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl4965-base.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/Kconfig
drivers/net/wireless/iwlwifi/Makefile
drivers/net/wireless/iwlwifi/iwl-1000.c
drivers/net/wireless/iwlwifi/iwl-2000.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c [deleted file]
drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h [deleted file]
drivers/net/wireless/iwlwifi/iwl-3945-fh.h [deleted file]
drivers/net/wireless/iwlwifi/iwl-3945-hw.h [deleted file]
drivers/net/wireless/iwlwifi/iwl-3945-led.c [deleted file]
drivers/net/wireless/iwlwifi/iwl-3945-led.h [deleted file]
drivers/net/wireless/iwlwifi/iwl-3945-rs.c [deleted file]
drivers/net/wireless/iwlwifi/iwl-3945.c [deleted file]
drivers/net/wireless/iwlwifi/iwl-3945.h [deleted file]
drivers/net/wireless/iwlwifi/iwl-4965-hw.h [deleted file]
drivers/net/wireless/iwlwifi/iwl-4965.c [deleted file]
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-agn-calib.c
drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
drivers/net/wireless/iwlwifi/iwl-agn-led.c
drivers/net/wireless/iwlwifi/iwl-agn-led.h
drivers/net/wireless/iwlwifi/iwl-agn-lib.c
drivers/net/wireless/iwlwifi/iwl-agn-rs.c
drivers/net/wireless/iwlwifi/iwl-agn-rs.h
drivers/net/wireless/iwlwifi/iwl-agn-rx.c [deleted file]
drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-agn.h
drivers/net/wireless/iwlwifi/iwl-commands.h
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl-core.h
drivers/net/wireless/iwlwifi/iwl-csr.h
drivers/net/wireless/iwlwifi/iwl-debugfs.c
drivers/net/wireless/iwlwifi/iwl-dev.h
drivers/net/wireless/iwlwifi/iwl-eeprom.c
drivers/net/wireless/iwlwifi/iwl-eeprom.h
drivers/net/wireless/iwlwifi/iwl-hcmd.c
drivers/net/wireless/iwlwifi/iwl-led.c
drivers/net/wireless/iwlwifi/iwl-led.h
drivers/net/wireless/iwlwifi/iwl-legacy.c [deleted file]
drivers/net/wireless/iwlwifi/iwl-legacy.h [deleted file]
drivers/net/wireless/iwlwifi/iwl-power.c
drivers/net/wireless/iwlwifi/iwl-rx.c
drivers/net/wireless/iwlwifi/iwl-scan.c
drivers/net/wireless/iwlwifi/iwl-sta.c
drivers/net/wireless/iwlwifi/iwl-tx.c
drivers/net/wireless/iwlwifi/iwl3945-base.c [deleted file]
drivers/net/wireless/iwmc3200wifi/cfg80211.c
drivers/net/wireless/iwmc3200wifi/rx.c
drivers/net/wireless/libertas/cfg.c
drivers/net/wireless/libertas/cmd.c
drivers/net/wireless/libertas/dev.h
drivers/net/wireless/libertas/if_spi.c
drivers/net/wireless/libertas/main.c
drivers/net/wireless/libertas_tf/main.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/orinoco/scan.c
drivers/net/wireless/p54/eeprom.c
drivers/net/wireless/p54/eeprom.h
drivers/net/wireless/p54/fwio.c
drivers/net/wireless/p54/lmac.h
drivers/net/wireless/p54/main.c
drivers/net/wireless/p54/p54.h
drivers/net/wireless/p54/p54spi_eeprom.h
drivers/net/wireless/p54/txrx.c
drivers/net/wireless/rt2x00/Kconfig
drivers/net/wireless/rt2x00/rt2400pci.c
drivers/net/wireless/rt2x00/rt2500pci.c
drivers/net/wireless/rt2x00/rt2500usb.c
drivers/net/wireless/rt2x00/rt2800.h
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rt2x00/rt2800lib.h
drivers/net/wireless/rt2x00/rt2800pci.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt2x00.h
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rt2x00/rt2x00ht.c
drivers/net/wireless/rt2x00/rt2x00lib.h
drivers/net/wireless/rt2x00/rt2x00link.c
drivers/net/wireless/rt2x00/rt2x00mac.c
drivers/net/wireless/rt2x00/rt2x00pci.c
drivers/net/wireless/rt2x00/rt2x00queue.c
drivers/net/wireless/rt2x00/rt2x00reg.h
drivers/net/wireless/rt2x00/rt2x00usb.c
drivers/net/wireless/rt2x00/rt61pci.c
drivers/net/wireless/rt2x00/rt73usb.c
drivers/net/wireless/rtl818x/rtl8180/dev.c
drivers/net/wireless/rtl818x/rtl8187/dev.c
drivers/net/wireless/rtlwifi/Kconfig
drivers/net/wireless/rtlwifi/Makefile
drivers/net/wireless/rtlwifi/base.c
drivers/net/wireless/rtlwifi/base.h
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/debug.h
drivers/net/wireless/rtlwifi/efuse.c
drivers/net/wireless/rtlwifi/efuse.h
drivers/net/wireless/rtlwifi/pci.c
drivers/net/wireless/rtlwifi/pci.h
drivers/net/wireless/rtlwifi/ps.c
drivers/net/wireless/rtlwifi/rtl8192c/Makefile [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192c/main.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192ce/Makefile
drivers/net/wireless/rtlwifi/rtl8192ce/def.h
drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
drivers/net/wireless/rtlwifi/rtl8192ce/fw.c [deleted file]
drivers/net/wireless/rtlwifi/rtl8192ce/fw.h [deleted file]
drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
drivers/net/wireless/rtlwifi/rtl8192ce/led.c
drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
drivers/net/wireless/rtlwifi/rtl8192cu/Makefile [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/def.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/dm.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/dm.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/hw.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/led.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/led.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/mac.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/mac.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/phy.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/phy.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/reg.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/rf.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/rf.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/sw.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/table.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/table.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/trx.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8192cu/trx.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/usb.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/usb.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/wifi.h
drivers/net/wireless/wl1251/acx.c
drivers/net/wireless/wl1251/acx.h
drivers/net/wireless/wl1251/event.c
drivers/net/wireless/wl1251/main.c
drivers/net/wireless/wl1251/ps.c
drivers/net/wireless/wl1251/rx.c
drivers/net/wireless/wl1251/tx.c
drivers/net/wireless/wl1251/wl1251.h
drivers/net/wireless/wl12xx/Kconfig
drivers/net/wireless/wl12xx/acx.c
drivers/net/wireless/wl12xx/acx.h
drivers/net/wireless/wl12xx/boot.c
drivers/net/wireless/wl12xx/cmd.c
drivers/net/wireless/wl12xx/cmd.h
drivers/net/wireless/wl12xx/conf.h
drivers/net/wireless/wl12xx/debugfs.c
drivers/net/wireless/wl12xx/event.c
drivers/net/wireless/wl12xx/event.h
drivers/net/wireless/wl12xx/init.c
drivers/net/wireless/wl12xx/init.h
drivers/net/wireless/wl12xx/main.c
drivers/net/wireless/wl12xx/ps.c
drivers/net/wireless/wl12xx/ps.h
drivers/net/wireless/wl12xx/rx.c
drivers/net/wireless/wl12xx/rx.h
drivers/net/wireless/wl12xx/sdio.c
drivers/net/wireless/wl12xx/spi.c
drivers/net/wireless/wl12xx/tx.c
drivers/net/wireless/wl12xx/tx.h
drivers/net/wireless/wl12xx/wl12xx.h
drivers/net/wireless/wl12xx/wl12xx_80211.h
drivers/net/wireless/zd1211rw/zd_chip.c
drivers/net/wireless/zd1211rw/zd_chip.h
drivers/net/wireless/zd1211rw/zd_def.h
drivers/net/wireless/zd1211rw/zd_mac.c
drivers/net/wireless/zd1211rw/zd_mac.h
drivers/net/wireless/zd1211rw/zd_usb.c
drivers/net/wireless/zd1211rw/zd_usb.h
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/cxgbi/cxgb3i/Kconfig
drivers/scsi/cxgbi/cxgb4i/Kconfig
drivers/scsi/cxgbi/libcxgbi.c
drivers/ssb/main.c
drivers/ssb/pci.c
drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
drivers/staging/brcm80211/sys/wl_mac80211.c
drivers/staging/brcm80211/sys/wlc_mac80211.c
drivers/staging/pohmelfs/config.c
drivers/staging/winbond/wbusb.c
drivers/video/uvesafb.c
include/linux/audit.h
include/linux/cpu_rmap.h [new file with mode: 0644]
include/linux/dcbnl.h
include/linux/dccp.h
include/linux/ethtool.h
include/linux/if.h
include/linux/if_link.h
include/linux/inetdevice.h
include/linux/interrupt.h
include/linux/ip_vs.h
include/linux/irqdesc.h
include/linux/micrel_phy.h [new file with mode: 0644]
include/linux/net.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/netfilter/Kbuild
include/linux/netfilter/ipset/Kbuild [new file with mode: 0644]
include/linux/netfilter/ipset/ip_set.h [new file with mode: 0644]
include/linux/netfilter/ipset/ip_set_ahash.h [new file with mode: 0644]
include/linux/netfilter/ipset/ip_set_bitmap.h [new file with mode: 0644]
include/linux/netfilter/ipset/ip_set_getport.h [new file with mode: 0644]
include/linux/netfilter/ipset/ip_set_hash.h [new file with mode: 0644]
include/linux/netfilter/ipset/ip_set_list.h [new file with mode: 0644]
include/linux/netfilter/ipset/ip_set_timeout.h [new file with mode: 0644]
include/linux/netfilter/ipset/pfxlen.h [new file with mode: 0644]
include/linux/netfilter/nf_conntrack_snmp.h [new file with mode: 0644]
include/linux/netfilter/nfnetlink.h
include/linux/netfilter/nfnetlink_conntrack.h
include/linux/netfilter/x_tables.h
include/linux/netfilter/xt_AUDIT.h [new file with mode: 0644]
include/linux/netfilter/xt_CT.h
include/linux/netfilter/xt_NFQUEUE.h
include/linux/netfilter/xt_TCPOPTSTRIP.h
include/linux/netfilter/xt_TPROXY.h
include/linux/netfilter/xt_cluster.h
include/linux/netfilter/xt_comment.h
include/linux/netfilter/xt_connlimit.h
include/linux/netfilter/xt_conntrack.h
include/linux/netfilter/xt_devgroup.h [new file with mode: 0644]
include/linux/netfilter/xt_quota.h
include/linux/netfilter/xt_set.h [new file with mode: 0644]
include/linux/netfilter/xt_socket.h
include/linux/netfilter/xt_time.h
include/linux/netfilter/xt_u32.h
include/linux/netfilter_bridge/ebt_802_3.h
include/linux/netfilter_bridge/ebt_among.h
include/linux/netfilter_bridge/ebt_arp.h
include/linux/netfilter_bridge/ebt_ip.h
include/linux/netfilter_bridge/ebt_ip6.h
include/linux/netfilter_bridge/ebt_limit.h
include/linux/netfilter_bridge/ebt_log.h
include/linux/netfilter_bridge/ebt_mark_m.h
include/linux/netfilter_bridge/ebt_nflog.h
include/linux/netfilter_bridge/ebt_pkttype.h
include/linux/netfilter_bridge/ebt_stp.h
include/linux/netfilter_bridge/ebt_ulog.h
include/linux/netfilter_bridge/ebt_vlan.h
include/linux/netfilter_ipv4/ipt_CLUSTERIP.h
include/linux/netfilter_ipv4/ipt_ECN.h
include/linux/netfilter_ipv4/ipt_SAME.h
include/linux/netfilter_ipv4/ipt_TTL.h
include/linux/netfilter_ipv4/ipt_addrtype.h
include/linux/netfilter_ipv4/ipt_ah.h
include/linux/netfilter_ipv4/ipt_ecn.h
include/linux/netfilter_ipv4/ipt_ttl.h
include/linux/netfilter_ipv6/ip6t_HL.h
include/linux/netfilter_ipv6/ip6t_REJECT.h
include/linux/netfilter_ipv6/ip6t_ah.h
include/linux/netfilter_ipv6/ip6t_frag.h
include/linux/netfilter_ipv6/ip6t_hl.h
include/linux/netfilter_ipv6/ip6t_ipv6header.h
include/linux/netfilter_ipv6/ip6t_mh.h
include/linux/netfilter_ipv6/ip6t_opts.h
include/linux/netfilter_ipv6/ip6t_rt.h
include/linux/netlink.h
include/linux/nl80211.h
include/linux/pci.h
include/linux/phonet.h
include/linux/pkt_sched.h
include/linux/security.h
include/linux/skbuff.h
include/linux/sockios.h
include/linux/ssb/ssb_regs.h
include/linux/tipc.h
include/linux/tipc_config.h
include/linux/xfrm.h
include/net/bluetooth/bluetooth.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/l2cap.h
include/net/bluetooth/mgmt.h
include/net/bluetooth/smp.h [new file with mode: 0644]
include/net/cfg80211.h
include/net/dcbnl.h
include/net/dst.h
include/net/dst_ops.h
include/net/flow.h
include/net/icmp.h
include/net/ieee80211_radiotap.h
include/net/inet_sock.h
include/net/inetpeer.h
include/net/ip.h
include/net/ip6_fib.h
include/net/ip_fib.h
include/net/ip_vs.h
include/net/ipv6.h
include/net/mac80211.h
include/net/net_namespace.h
include/net/netevent.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_conntrack_ecache.h
include/net/netfilter/nf_conntrack_extend.h
include/net/netfilter/nf_conntrack_helper.h
include/net/netfilter/nf_conntrack_l3proto.h
include/net/netfilter/nf_conntrack_timestamp.h [new file with mode: 0644]
include/net/netfilter/nf_nat.h
include/net/netfilter/nf_nat_core.h
include/net/netlink.h
include/net/netns/conntrack.h
include/net/netns/ip_vs.h [new file with mode: 0644]
include/net/netns/ipv4.h
include/net/phonet/pep.h
include/net/phonet/phonet.h
include/net/protocol.h
include/net/route.h
include/net/sch_generic.h
include/net/sock.h
include/net/tcp.h
include/net/udp.h
include/net/udplite.h
include/net/xfrm.h
kernel/audit.c
kernel/auditfilter.c
kernel/irq/manage.c
lib/Kconfig
lib/Makefile
lib/cpu_rmap.c [new file with mode: 0644]
net/8021q/vlan.c
net/8021q/vlan_dev.c
net/9p/trans_rdma.c
net/Kconfig
net/atm/clip.c
net/batman-adv/Makefile
net/batman-adv/aggregation.c
net/batman-adv/aggregation.h
net/batman-adv/bat_debugfs.c
net/batman-adv/bat_debugfs.h
net/batman-adv/bat_sysfs.c
net/batman-adv/bat_sysfs.h
net/batman-adv/bitarray.c
net/batman-adv/bitarray.h
net/batman-adv/gateway_client.c
net/batman-adv/gateway_client.h
net/batman-adv/gateway_common.c
net/batman-adv/gateway_common.h
net/batman-adv/hard-interface.c
net/batman-adv/hard-interface.h
net/batman-adv/hash.c
net/batman-adv/hash.h
net/batman-adv/icmp_socket.c
net/batman-adv/icmp_socket.h
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/originator.c
net/batman-adv/originator.h
net/batman-adv/packet.h
net/batman-adv/ring_buffer.c
net/batman-adv/ring_buffer.h
net/batman-adv/routing.c
net/batman-adv/routing.h
net/batman-adv/send.c
net/batman-adv/send.h
net/batman-adv/soft-interface.c
net/batman-adv/soft-interface.h
net/batman-adv/translation-table.c
net/batman-adv/translation-table.h
net/batman-adv/types.h
net/batman-adv/unicast.c
net/batman-adv/unicast.h
net/batman-adv/vis.c
net/batman-adv/vis.h
net/bluetooth/Kconfig
net/bluetooth/Makefile
net/bluetooth/af_bluetooth.c
net/bluetooth/bnep/core.c
net/bluetooth/bnep/sock.c
net/bluetooth/cmtp/capi.c
net/bluetooth/cmtp/core.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sock.c
net/bluetooth/hci_sysfs.c
net/bluetooth/hidp/core.c
net/bluetooth/l2cap.c [deleted file]
net/bluetooth/l2cap_core.c [new file with mode: 0644]
net/bluetooth/l2cap_sock.c [new file with mode: 0644]
net/bluetooth/mgmt.c
net/bluetooth/rfcomm/core.c
net/bluetooth/sco.c
net/bridge/br_device.c
net/bridge/br_if.c
net/bridge/br_netfilter.c
net/bridge/br_private.h
net/bridge/netfilter/ebt_ip6.c
net/bridge/netfilter/ebtables.c
net/caif/cfcnfg.c
net/caif/cfdgml.c
net/caif/cfserl.c
net/caif/cfutill.c
net/caif/cfveil.c
net/core/dev.c
net/core/dst.c
net/core/ethtool.c
net/core/filter.c
net/core/flow.c
net/core/neighbour.c
net/core/net-sysfs.c
net/core/netpoll.c
net/core/pktgen.c
net/core/rtnetlink.c
net/core/skbuff.c
net/dcb/dcbnl.c
net/dccp/ccids/ccid2.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/decnet/dn_route.c
net/decnet/dn_table.c
net/dsa/mv88e6060.c
net/ipv4/Kconfig
net/ipv4/Makefile
net/ipv4/af_inet.c
net/ipv4/ah4.c
net/ipv4/arp.c
net/ipv4/datagram.c
net/ipv4/devinet.c
net/ipv4/fib_frontend.c
net/ipv4/fib_hash.c [deleted file]
net/ipv4/fib_lookup.h
net/ipv4/fib_rules.c
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inetpeer.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_output.c
net/ipv4/ipip.c
net/ipv4/ipmr.c
net/ipv4/netfilter.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/ipt_LOG.c
net/ipv4/netfilter/iptable_mangle.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
net/ipv4/netfilter/nf_nat_amanda.c
net/ipv4/netfilter/nf_nat_core.c
net/ipv4/netfilter/nf_nat_snmp_basic.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/tcp.c
net/ipv4/tcp_bic.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_highspeed.c
net/ipv4/tcp_htcp.c
net/ipv4/tcp_hybla.c
net/ipv4/tcp_illinois.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_lp.c
net/ipv4/tcp_scalable.c
net/ipv4/tcp_timer.c
net/ipv4/tcp_vegas.c
net/ipv4/tcp_veno.c
net/ipv4/tcp_westwood.c
net/ipv4/tcp_yeah.c
net/ipv4/udp.c
net/ipv4/xfrm4_policy.c
net/ipv4/xfrm4_state.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/datagram.c
net/ipv6/icmp.c
net/ipv6/inet6_connection_sock.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6mr.c
net/ipv6/mcast.c
net/ipv6/mip6.c
net/ipv6/ndisc.c
net/ipv6/netfilter.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_LOG.c
net/ipv6/netfilter/ip6t_REJECT.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/syncookies.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipv6/xfrm6_policy.c
net/ipv6/xfrm6_state.c
net/key/af_key.c
net/l2tp/l2tp_ip.c
net/llc/llc_input.c
net/mac80211/Kconfig
net/mac80211/agg-rx.c
net/mac80211/agg-tx.c
net/mac80211/cfg.c
net/mac80211/debugfs.c
net/mac80211/debugfs_netdev.c
net/mac80211/driver-ops.h
net/mac80211/driver-trace.h
net/mac80211/ht.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/status.c
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/work.c
net/mac80211/wpa.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/core.c
net/netfilter/ipset/Kconfig [new file with mode: 0644]
net/netfilter/ipset/Makefile [new file with mode: 0644]
net/netfilter/ipset/ip_set_bitmap_ip.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_bitmap_ipmac.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_bitmap_port.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_core.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_getport.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_hash_ip.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_hash_ipport.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_hash_ipportip.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_hash_ipportnet.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_hash_net.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_hash_netport.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_list_set.c [new file with mode: 0644]
net/netfilter/ipset/pfxlen.c [new file with mode: 0644]
net/netfilter/ipvs/ip_vs_app.c
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_est.c
net/netfilter/ipvs/ip_vs_ftp.c
net/netfilter/ipvs/ip_vs_lblc.c
net/netfilter/ipvs/ip_vs_lblcr.c
net/netfilter/ipvs/ip_vs_lc.c
net/netfilter/ipvs/ip_vs_nfct.c
net/netfilter/ipvs/ip_vs_nq.c
net/netfilter/ipvs/ip_vs_pe.c
net/netfilter/ipvs/ip_vs_pe_sip.c
net/netfilter/ipvs/ip_vs_proto.c
net/netfilter/ipvs/ip_vs_proto_ah_esp.c
net/netfilter/ipvs/ip_vs_proto_sctp.c
net/netfilter/ipvs/ip_vs_proto_tcp.c
net/netfilter/ipvs/ip_vs_proto_udp.c
net/netfilter/ipvs/ip_vs_rr.c
net/netfilter/ipvs/ip_vs_sched.c
net/netfilter/ipvs/ip_vs_sed.c
net/netfilter/ipvs/ip_vs_sh.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/ipvs/ip_vs_wlc.c
net/netfilter/ipvs/ip_vs_wrr.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_broadcast.c [new file with mode: 0644]
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_extend.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_netbios_ns.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_snmp.c [new file with mode: 0644]
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_conntrack_timestamp.c [new file with mode: 0644]
net/netfilter/nf_log.c
net/netfilter/nf_queue.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue.c
net/netfilter/x_tables.c
net/netfilter/xt_AUDIT.c [new file with mode: 0644]
net/netfilter/xt_CLASSIFY.c
net/netfilter/xt_IDLETIMER.c
net/netfilter/xt_LED.c
net/netfilter/xt_NFQUEUE.c
net/netfilter/xt_TEE.c
net/netfilter/xt_connlimit.c
net/netfilter/xt_conntrack.c
net/netfilter/xt_cpu.c
net/netfilter/xt_devgroup.c [new file with mode: 0644]
net/netfilter/xt_iprange.c
net/netfilter/xt_ipvs.c
net/netfilter/xt_set.c [new file with mode: 0644]
net/netlabel/netlabel_user.h
net/netlink/af_netlink.c
net/packet/af_packet.c
net/phonet/Kconfig
net/phonet/af_phonet.c
net/phonet/pep.c
net/phonet/socket.c
net/rds/rds.h
net/rose/af_rose.c
net/rose/rose_route.c
net/rxrpc/ar-peer.c
net/sched/Kconfig
net/sched/Makefile
net/sched/act_api.c
net/sched/act_csum.c
net/sched/act_gact.c
net/sched/act_ipt.c
net/sched/act_mirred.c
net/sched/act_nat.c
net/sched/act_pedit.c
net/sched/act_police.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/cls_api.c
net/sched/cls_basic.c
net/sched/cls_cgroup.c
net/sched/cls_flow.c
net/sched/cls_fw.c
net/sched/cls_route.c
net/sched/cls_rsvp.h
net/sched/cls_tcindex.c
net/sched/cls_u32.c
net/sched/em_cmp.c
net/sched/em_meta.c
net/sched/em_nbyte.c
net/sched/em_text.c
net/sched/em_u32.c
net/sched/ematch.c
net/sched/sch_api.c
net/sched/sch_atm.c
net/sched/sch_cbq.c
net/sched/sch_choke.c [new file with mode: 0644]
net/sched/sch_dsmark.c
net/sched/sch_fifo.c
net/sched/sch_generic.c
net/sched/sch_gred.c
net/sched/sch_hfsc.c
net/sched/sch_htb.c
net/sched/sch_mq.c
net/sched/sch_mqprio.c [new file with mode: 0644]
net/sched/sch_multiq.c
net/sched/sch_netem.c
net/sched/sch_prio.c
net/sched/sch_red.c
net/sched/sch_sfb.c [new file with mode: 0644]
net/sched/sch_sfq.c
net/sched/sch_tbf.c
net/sched/sch_teql.c
net/sctp/associola.c
net/sctp/input.c
net/sctp/outqueue.c
net/sctp/protocol.c
net/sctp/sm_make_chunk.c
net/sctp/socket.c
net/sctp/tsnmap.c
net/sctp/ulpqueue.c
net/socket.c
net/sunrpc/svcsock.c
net/tipc/bcast.c
net/tipc/bcast.h
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/core.c
net/tipc/core.h
net/tipc/discover.c
net/tipc/discover.h
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/msg.h
net/tipc/node.c
net/tipc/port.c
net/tipc/port.h
net/tipc/socket.c
net/tipc/subscr.c
net/unix/af_unix.c
net/wanrouter/wanmain.c
net/wireless/core.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/util.c
net/wireless/wext-compat.c
net/xfrm/xfrm_algo.c
net/xfrm/xfrm_hash.h
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
security/capability.c
security/commoncap.c
security/security.c
security/selinux/hooks.c
security/selinux/include/xfrm.h
security/selinux/xfrm.c

diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index b3f35e5f9c95470c96dce361251c3c2275bc2207..d6f5255ca54780f2aa8982445a15809a31ccd90c 100644 (file)
@@ -35,6 +35,17 @@ Who: Luis R. Rodriguez <lrodriguez@atheros.com>
 
 ---------------------------
 
+What:  AR9170USB
+When:  2.6.40
+
+Why:   This driver is deprecated and the firmware is no longer
+       maintained. The replacement driver "carl9170" has been
+       around for a while, so the devices are still supported.
+
+Who:   Christian Lamparter <chunkeey@googlemail.com>
+
+---------------------------
+
 What:  IRQF_SAMPLE_RANDOM
 Check: IRQF_SAMPLE_RANDOM
 When:  July 2009
@@ -604,6 +615,13 @@ Who:       Jean Delvare <khali@linux-fr.org>
 
 ----------------------------
 
+What:  xt_connlimit rev 0
+When:  2012
+Who:   Jan Engelhardt <jengelh@medozas.de>
+Files: net/netfilter/xt_connlimit.c
+
+----------------------------
+
 What:  noswapaccount kernel command line parameter
 When:  2.6.40
 Why:   The original implementation of memsw feature enabled by
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index 77f0cdd5b0dd4fc2352d8b0b590d1afe2a29881b..18afcd8afd5132f3050b9e7d31d2a57be3496c4e 100644 (file)
@@ -1,4 +1,4 @@
-[state: 21-11-2010]
+[state: 27-01-2011]
 
 BATMAN-ADV
 ----------
@@ -67,15 +67,16 @@ All  mesh  wide  settings  can be found in batman's own interface
 folder:
 
 #  ls  /sys/class/net/bat0/mesh/
-#  aggregated_ogms  bonding  fragmentation  orig_interval
-#  vis_mode
+#  aggregated_ogms  gw_bandwidth  hop_penalty
+#  bonding          gw_mode       orig_interval
+#  fragmentation    gw_sel_class  vis_mode
 
 
 There is a special folder for debugging informations:
 
 #  ls /sys/kernel/debug/batman_adv/bat0/
-#  originators  socket  transtable_global  transtable_local
-#  vis_data
+#  gateways     socket        transtable_global  vis_data
+#  originators  softif_neigh  transtable_local
 
 
 Some of the files contain all sort of status information  regard-
@@ -230,9 +231,8 @@ CONTACT
 Please send us comments, experiences, questions, anything :)
 
 IRC:            #batman   on   irc.freenode.org
-Mailing-list:   b.a.t.m.a.n@b.a.t.m.a.n@lists.open-mesh.org
-                (optional   subscription   at
-                 https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
+Mailing-list:   b.a.t.m.a.n@open-mesh.org (optional  subscription
+          at https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
 
 You can also contact the Authors:
 
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index ac3b4a726a1a70b439abf72922d1db0791e78ae8..d3d653a5f9b923be1ab518cba040e9ccb3868f3e 100644 (file)
@@ -280,6 +280,17 @@ tcp_max_orphans - INTEGER
        more aggressively. Let me to remind again: each orphan eats
        up to ~64K of unswappable memory.
 
+tcp_max_ssthresh - INTEGER
+       Limited Slow-Start for TCP with large congestion windows (cwnd) defined in
+       RFC3742. Limited slow-start is a mechanism to limit growth of the cwnd
+       on the region where cwnd is larger than tcp_max_ssthresh. TCP increases cwnd
+       by at most tcp_max_ssthresh segments, and by at least tcp_max_ssthresh/2
+       segments per RTT when the cwnd is above tcp_max_ssthresh.
+       If TCP connection increased cwnd to thousands (or tens of thousands) segments,
+       and thousands of packets were being dropped during slow-start, you can set
+       tcp_max_ssthresh to improve performance for new TCP connection.
+       Default: 0 (off)
+
 tcp_max_syn_backlog - INTEGER
        Maximal number of remembered connection requests, which are
        still did not receive an acknowledgment from connecting client.
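The tcp_max_ssthresh text added above is the limited slow-start rule of RFC 3742: below the threshold cwnd still doubles every round trip, above it growth is capped at roughly tcp_max_ssthresh/2 segments per RTT. The standalone C sketch below models only that per-RTT growth rule; it is an illustration of the description above, not the kernel's implementation, and the starting cwnd and threshold values are arbitrary.

/*
 * Rough model of limited slow start (RFC 3742) as described by the
 * tcp_max_ssthresh entry above.  Values are in segments.
 */
#include <stdio.h>

int main(void)
{
        unsigned int cwnd = 10;                 /* illustrative initial cwnd */
        const unsigned int max_ssthresh = 100;  /* sysctl value; 0 disables  */
        int rtt;

        for (rtt = 0; rtt < 12; rtt++) {
                printf("RTT %2d: cwnd = %u\n", rtt, cwnd);
                if (max_ssthresh == 0 || cwnd <= max_ssthresh)
                        cwnd *= 2;                      /* classic slow start */
                else
                        cwnd += max_ssthresh / 2;       /* limited slow start */
        }
        return 0;
}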
diff --git a/Documentation/networking/phonet.txt b/Documentation/networking/phonet.txt
index 24ad2adba6e5fe0d067d7882ce275048e56bd655..81003581f47a90ef2dd581fee3924885cd80802b 100644 (file)
@@ -154,9 +154,28 @@ connections, one per accept()'d socket.
     write(cfd, msg, msglen);
   }
 
-Connections are established between two endpoints by a "third party"
-application. This means that both endpoints are passive; so connect()
-is not possible.
+Connections are traditionally established between two endpoints by a
+"third party" application. This means that both endpoints are passive.
+
+
+As of Linux kernel version 2.6.39, it is also possible to connect
+two endpoints directly, using connect() on the active side. This is
+intended to support the newer Nokia Wireless Modem API, as found in
+e.g. the Nokia Slim Modem in the ST-Ericsson U8500 platform:
+
+  struct sockaddr_spn spn;
+  int fd;
+
+  fd = socket(PF_PHONET, SOCK_SEQPACKET, PN_PROTO_PIPE);
+  memset(&spn, 0, sizeof(spn));
+  spn.spn_family = AF_PHONET;
+  spn.spn_obj = ...;
+  spn.spn_dev = ...;
+  spn.spn_resource = 0xD9;
+  connect(fd, (struct sockaddr *)&spn, sizeof(spn));
+  /* normal I/O here ... */
+  close(fd);
+
 
 WARNING:
 When polling a connected pipe socket for writability, there is an
@@ -181,45 +200,9 @@ The pipe protocol provides two socket options at the SOL_PNPIPE level:
     interface index of the network interface created by PNPIPE_ENCAP,
     or zero if encapsulation is off.
 
-
-Phonet Pipe-controller Implementation
--------------------------------------
-
-Phonet Pipe-controller is enabled by selecting the CONFIG_PHONET_PIPECTRLR Kconfig
-option. It is useful when communicating with those Nokia Modems which do not
-implement Pipe controller in them e.g. Nokia Slim Modem used in ST-Ericsson
-U8500 platform.
-
-The implementation is based on the Data Connection Establishment Sequence
-depicted in 'Nokia Wireless Modem API - Wireless_modem_user_guide.pdf'
-document.
-
-It allows a phonet sequenced socket (host-pep) to initiate a Pipe connection
-between itself and a remote pipe-end point (e.g. modem).
-
-The implementation adds socket options at SOL_PNPIPE level:
-
- PNPIPE_PIPE_HANDLE
-       It accepts an integer argument for setting value of pipe handle.
-
-  PNPIPE_ENABLE accepts one integer value (int). If set to zero, the pipe
-    is disabled. If the value is non-zero, the pipe is enabled. If the pipe
-    is not (yet) connected, ENOTCONN is error is returned.
-
-The implementation also adds socket 'connect'. On calling the 'connect', pipe
-will be created between the source socket and the destination, and the pipe
-state will be set to PIPE_DISABLED.
-
-After a pipe has been created and enabled successfully, the Pipe data can be
-exchanged between the host-pep and remote-pep (modem).
-
-User-space would typically follow below sequence with Pipe controller:-
--socket
--bind
--setsockopt for PNPIPE_PIPE_HANDLE
--connect
--setsockopt for PNPIPE_ENCAP_IP
--setsockopt for PNPIPE_ENABLE
+  PNPIPE_HANDLE is a read-only integer value. It contains the underlying
+    identifier ("pipe handle") of the pipe. This is only defined for
+    socket descriptors that are already connected or being connected.
 
 
 Authors
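The PNPIPE_HANDLE paragraph above describes a read-only integer option at the SOL_PNPIPE level that is only meaningful once the pipe socket is connected or being connected. The following hedged sketch shows how user space could query it; it assumes PNPIPE_HANDLE and SOL_PNPIPE are visible after including <linux/phonet.h> (on some setups SOL_PNPIPE is defined in <linux/socket.h> instead), and it does nothing beyond reading the option.

/*
 * Hedged sketch, not from the patch: read the pipe handle of a connected
 * Phonet pipe socket via the read-only PNPIPE_HANDLE option documented above.
 */
#include <sys/socket.h>
#include <linux/phonet.h>
#include <stdio.h>

static int print_pipe_handle(int fd)
{
        int handle;
        socklen_t len = sizeof(handle);

        /* Only defined for sockets that are connected or being connected. */
        if (getsockopt(fd, SOL_PNPIPE, PNPIPE_HANDLE, &handle, &len) < 0) {
                perror("getsockopt(PNPIPE_HANDLE)");
                return -1;
        }
        printf("pipe handle: %d\n", handle);
        return 0;
}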
diff --git a/MAINTAINERS b/MAINTAINERS
index f1bc3dc6b3699de3c072bfce91b3ec25417e41cb..4765c679c23b577651d314bfb245ea26ae231885 100644 (file)
@@ -1214,7 +1214,7 @@ ATHEROS AR9170 WIRELESS DRIVER
 M:     Christian Lamparter <chunkeey@web.de>
 L:     linux-wireless@vger.kernel.org
 W:     http://wireless.kernel.org/en/users/Drivers/ar9170
-S:     Maintained
+S:     Obsolete
 F:     drivers/net/wireless/ath/ar9170/
 
 CARL9170 LINUX COMMUNITY WIRELESS DRIVER
@@ -1710,6 +1710,7 @@ S:        Maintained
 F:     Documentation/zh_CN/
 
 CISCO VIC ETHERNET NIC DRIVER
+M:     Christian Benvenuti <benve@cisco.com>
 M:     Vasanthy Kolluri <vkolluri@cisco.com>
 M:     Roopa Prabhu <roprabhu@cisco.com>
 M:     David Wang <dwang2@cisco.com>
@@ -5158,6 +5159,7 @@ RALINK RT2X00 WIRELESS LAN DRIVER
 P:     rt2x00 project
 M:     Ivo van Doorn <IvDoorn@gmail.com>
 M:     Gertjan van Wingerde <gwingerde@gmail.com>
+M:     Helmut Schaa <helmut.schaa@googlemail.com>
 L:     linux-wireless@vger.kernel.org
 L:     users@rt2x00.serialmonkey.com (moderated for non-subscribers)
 W:     http://rt2x00.serialmonkey.com/
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 8cbfaa687d723152b27955cfbc4a43c1af858121..fe81c851ca8800fdce14bccc3c7be032b7263669 100644 (file)
@@ -2177,7 +2177,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
                return;
        }
 
-       if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) {
+       if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) {
                retcode = ERR_PERM;
                goto fail;
        }
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 6dcd55a74c0abbdc6a7d00e2b9896b1cb9acc4b9..5577ed656e2f108e565e1d4fee1db4365eeef6e6 100644 (file)
 
 #define VERSION "1.0"
 
+#define ATH3K_DNLOAD                           0x01
+#define ATH3K_GETSTATE                         0x05
+#define ATH3K_SET_NORMAL_MODE                  0x07
+#define ATH3K_GETVERSION                       0x09
+#define USB_REG_SWITCH_VID_PID                 0x0a
+
+#define ATH3K_MODE_MASK                                0x3F
+#define ATH3K_NORMAL_MODE                      0x0E
+
+#define ATH3K_PATCH_UPDATE                     0x80
+#define ATH3K_SYSCFG_UPDATE                    0x40
+
+#define ATH3K_XTAL_FREQ_26M                    0x00
+#define ATH3K_XTAL_FREQ_40M                    0x01
+#define ATH3K_XTAL_FREQ_19P2                   0x02
+#define ATH3K_NAME_LEN                         0xFF
+
+struct ath3k_version {
+       unsigned int    rom_version;
+       unsigned int    build_version;
+       unsigned int    ram_version;
+       unsigned char   ref_clock;
+       unsigned char   reserved[0x07];
+};
 
 static struct usb_device_id ath3k_table[] = {
        /* Atheros AR3011 */
@@ -42,15 +66,31 @@ static struct usb_device_id ath3k_table[] = {
        /* Atheros AR9285 Malbec with sflash firmware */
        { USB_DEVICE(0x03F0, 0x311D) },
 
+       /* Atheros AR3012 with sflash firmware*/
+       { USB_DEVICE(0x0CF3, 0x3004) },
+
        /* Atheros AR5BBU12 with sflash firmware */
        { USB_DEVICE(0x0489, 0xE02C) },
+
        { }     /* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE(usb, ath3k_table);
 
+#define BTUSB_ATH3012          0x80
+/* This table is to load patch and sysconfig files
+ * for AR3012 */
+static struct usb_device_id ath3k_blist_tbl[] = {
+
+       /* Atheros AR3012 with sflash firmware*/
+       { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+
+       { }     /* Terminating entry */
+};
+
 #define USB_REQ_DFU_DNLOAD     1
 #define BULK_SIZE              4096
+#define FW_HDR_SIZE            20
 
 static int ath3k_load_firmware(struct usb_device *udev,
                                const struct firmware *firmware)
@@ -106,28 +146,265 @@ error:
        return err;
 }
 
+static int ath3k_get_state(struct usb_device *udev, unsigned char *state)
+{
+       int pipe = 0;
+
+       pipe = usb_rcvctrlpipe(udev, 0);
+       return usb_control_msg(udev, pipe, ATH3K_GETSTATE,
+                       USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
+                       state, 0x01, USB_CTRL_SET_TIMEOUT);
+}
+
+static int ath3k_get_version(struct usb_device *udev,
+                       struct ath3k_version *version)
+{
+       int pipe = 0;
+
+       pipe = usb_rcvctrlpipe(udev, 0);
+       return usb_control_msg(udev, pipe, ATH3K_GETVERSION,
+                       USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version,
+                       sizeof(struct ath3k_version),
+                       USB_CTRL_SET_TIMEOUT);
+}
+
+static int ath3k_load_fwfile(struct usb_device *udev,
+               const struct firmware *firmware)
+{
+       u8 *send_buf;
+       int err, pipe, len, size, count, sent = 0;
+       int ret;
+
+       count = firmware->size;
+
+       send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC);
+       if (!send_buf) {
+               BT_ERR("Can't allocate memory chunk for firmware");
+               return -ENOMEM;
+       }
+
+       size = min_t(uint, count, FW_HDR_SIZE);
+       memcpy(send_buf, firmware->data, size);
+
+       pipe = usb_sndctrlpipe(udev, 0);
+       ret = usb_control_msg(udev, pipe, ATH3K_DNLOAD,
+                       USB_TYPE_VENDOR, 0, 0, send_buf,
+                       size, USB_CTRL_SET_TIMEOUT);
+       if (ret < 0) {
+               BT_ERR("Can't change to loading configuration err");
+               kfree(send_buf);
+               return ret;
+       }
+
+       sent += size;
+       count -= size;
+
+       while (count) {
+               size = min_t(uint, count, BULK_SIZE);
+               pipe = usb_sndbulkpipe(udev, 0x02);
+
+               memcpy(send_buf, firmware->data + sent, size);
+
+               err = usb_bulk_msg(udev, pipe, send_buf, size,
+                                       &len, 3000);
+               if (err || (len != size)) {
+                       BT_ERR("Error in firmware loading err = %d,"
+                               "len = %d, size = %d", err, len, size);
+                       kfree(send_buf);
+                       return err;
+               }
+               sent  += size;
+               count -= size;
+       }
+
+       kfree(send_buf);
+       return 0;
+}
+
+static int ath3k_switch_pid(struct usb_device *udev)
+{
+       int pipe = 0;
+
+       pipe = usb_sndctrlpipe(udev, 0);
+       return usb_control_msg(udev, pipe, USB_REG_SWITCH_VID_PID,
+                       USB_TYPE_VENDOR, 0, 0,
+                       NULL, 0, USB_CTRL_SET_TIMEOUT);
+}
+
+static int ath3k_set_normal_mode(struct usb_device *udev)
+{
+       unsigned char fw_state;
+       int pipe = 0, ret;
+
+       ret = ath3k_get_state(udev, &fw_state);
+       if (ret < 0) {
+               BT_ERR("Can't get state to change to normal mode err");
+               return ret;
+       }
+
+       if ((fw_state & ATH3K_MODE_MASK) == ATH3K_NORMAL_MODE) {
+               BT_DBG("firmware was already in normal mode");
+               return 0;
+       }
+
+       pipe = usb_sndctrlpipe(udev, 0);
+       return usb_control_msg(udev, pipe, ATH3K_SET_NORMAL_MODE,
+                       USB_TYPE_VENDOR, 0, 0,
+                       NULL, 0, USB_CTRL_SET_TIMEOUT);
+}
+
+static int ath3k_load_patch(struct usb_device *udev)
+{
+       unsigned char fw_state;
+       char filename[ATH3K_NAME_LEN] = {0};
+       const struct firmware *firmware;
+       struct ath3k_version fw_version, pt_version;
+       int ret;
+
+       ret = ath3k_get_state(udev, &fw_state);
+       if (ret < 0) {
+               BT_ERR("Can't get state to change to load ram patch err");
+               return ret;
+       }
+
+       if (fw_state & ATH3K_PATCH_UPDATE) {
+               BT_DBG("Patch was already downloaded");
+               return 0;
+       }
+
+       ret = ath3k_get_version(udev, &fw_version);
+       if (ret < 0) {
+               BT_ERR("Can't get version to change to load ram patch err");
+               return ret;
+       }
+
+       snprintf(filename, ATH3K_NAME_LEN, "ar3k/AthrBT_0x%08x.dfu",
+               fw_version.rom_version);
+
+       ret = request_firmware(&firmware, filename, &udev->dev);
+       if (ret < 0) {
+               BT_ERR("Patch file not found %s", filename);
+               return ret;
+       }
+
+       pt_version.rom_version = *(int *)(firmware->data + firmware->size - 8);
+       pt_version.build_version = *(int *)
+               (firmware->data + firmware->size - 4);
+
+       if ((pt_version.rom_version != fw_version.rom_version) ||
+               (pt_version.build_version <= fw_version.build_version)) {
+               BT_ERR("Patch file version did not match with firmware");
+               release_firmware(firmware);
+               return -EINVAL;
+       }
+
+       ret = ath3k_load_fwfile(udev, firmware);
+       release_firmware(firmware);
+
+       return ret;
+}
+
+static int ath3k_load_syscfg(struct usb_device *udev)
+{
+       unsigned char fw_state;
+       char filename[ATH3K_NAME_LEN] = {0};
+       const struct firmware *firmware;
+       struct ath3k_version fw_version;
+       int clk_value, ret;
+
+       ret = ath3k_get_state(udev, &fw_state);
+       if (ret < 0) {
+               BT_ERR("Can't get state to change to load configration err");
+               return -EBUSY;
+       }
+
+       ret = ath3k_get_version(udev, &fw_version);
+       if (ret < 0) {
+               BT_ERR("Can't get version to change to load ram patch err");
+               return ret;
+       }
+
+       switch (fw_version.ref_clock) {
+
+       case ATH3K_XTAL_FREQ_26M:
+               clk_value = 26;
+               break;
+       case ATH3K_XTAL_FREQ_40M:
+               clk_value = 40;
+               break;
+       case ATH3K_XTAL_FREQ_19P2:
+               clk_value = 19;
+               break;
+       default:
+               clk_value = 0;
+               break;
+       }
+
+       snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s",
+               fw_version.rom_version, clk_value, ".dfu");
+
+       ret = request_firmware(&firmware, filename, &udev->dev);
+       if (ret < 0) {
+               BT_ERR("Configuration file not found %s", filename);
+               return ret;
+       }
+
+       ret = ath3k_load_fwfile(udev, firmware);
+       release_firmware(firmware);
+
+       return ret;
+}
+
 static int ath3k_probe(struct usb_interface *intf,
                        const struct usb_device_id *id)
 {
        const struct firmware *firmware;
        struct usb_device *udev = interface_to_usbdev(intf);
+       int ret;
 
        BT_DBG("intf %p id %p", intf, id);
 
        if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
                return -ENODEV;
 
-       if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) {
-               return -EIO;
+       /* match device ID in ath3k blacklist table */
+       if (!id->driver_info) {
+               const struct usb_device_id *match;
+               match = usb_match_id(intf, ath3k_blist_tbl);
+               if (match)
+                       id = match;
        }
 
-       if (ath3k_load_firmware(udev, firmware)) {
-               release_firmware(firmware);
+       /* load patch and sysconfig files for AR3012 */
+       if (id->driver_info & BTUSB_ATH3012) {
+               ret = ath3k_load_patch(udev);
+               if (ret < 0) {
+                       BT_ERR("Loading patch file failed");
+                       return ret;
+               }
+               ret = ath3k_load_syscfg(udev);
+               if (ret < 0) {
+                       BT_ERR("Loading sysconfig file failed");
+                       return ret;
+               }
+               ret = ath3k_set_normal_mode(udev);
+               if (ret < 0) {
+                       BT_ERR("Set normal mode failed");
+                       return ret;
+               }
+               ath3k_switch_pid(udev);
+               return 0;
+       }
+
+       if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) {
+               BT_ERR("Error loading firmware");
                return -EIO;
        }
+
+       ret = ath3k_load_firmware(udev, firmware);
        release_firmware(firmware);
 
-       return 0;
+       return ret;
 }
 
 static void ath3k_disconnect(struct usb_interface *intf)
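For readers following ath3k_load_patch() above: the patch image carries its own ROM and build version in its final 8 bytes, which the driver reads as two native-endian 32-bit integers and compares against the controller's reported version before downloading, then requests the file named "ar3k/AthrBT_0x%08x.dfu" for the controller's ROM version. The sketch below restates that footer layout in standalone C using memcpy instead of the driver's pointer casts; the sample bytes and version numbers are invented for illustration.

/*
 * Illustration of the firmware footer layout used by ath3k_load_patch():
 * the last 8 bytes hold rom_version then build_version (native-endian).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fw_footer {
        uint32_t rom_version;
        uint32_t build_version;
};

static int read_fw_footer(const uint8_t *data, size_t size, struct fw_footer *out)
{
        if (size < 8)
                return -1;
        memcpy(&out->rom_version, data + size - 8, 4);
        memcpy(&out->build_version, data + size - 4, 4);
        return 0;
}

int main(void)
{
        /* Fake 16-byte image; on a little-endian host the footer decodes to
         * ROM version 0x01020200 and build version 5 (made-up values). */
        uint8_t img[16] = { 0 };
        struct fw_footer ft;
        char name[64];

        img[8]  = 0x00; img[9]  = 0x02; img[10] = 0x02; img[11] = 0x01;
        img[12] = 0x05;

        if (read_fw_footer(img, sizeof(img), &ft) == 0) {
                snprintf(name, sizeof(name), "ar3k/AthrBT_0x%08x.dfu",
                         (unsigned)ft.rom_version);
                printf("rom=0x%08x build=%u -> %s\n",
                       (unsigned)ft.rom_version, (unsigned)ft.build_version,
                       name);
        }
        return 0;
}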
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 700a3840fddc2e8dec17e63f7910b128463c9225..7e0ebd4a1a7481bfb127c18d56f5e8b839fa4a58 100644 (file)
@@ -105,6 +105,9 @@ static struct usb_device_id blacklist_table[] = {
        /* Atheros AR9285 Malbec with sflash firmware */
        { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
 
+       /* Atheros 3012 with sflash firmware */
+       { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_IGNORE },
+
        /* Atheros AR5BBU12 with sflash firmware */
        { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
 
@@ -714,15 +717,11 @@ static int btusb_send_frame(struct sk_buff *skb)
                pipe = usb_sndisocpipe(data->udev,
                                        data->isoc_tx_ep->bEndpointAddress);
 
-               urb->dev      = data->udev;
-               urb->pipe     = pipe;
-               urb->context  = skb;
-               urb->complete = btusb_isoc_tx_complete;
-               urb->interval = data->isoc_tx_ep->bInterval;
+               usb_fill_int_urb(urb, data->udev, pipe,
+                               skb->data, skb->len, btusb_isoc_tx_complete,
+                               skb, data->isoc_tx_ep->bInterval);
 
                urb->transfer_flags  = URB_ISO_ASAP;
-               urb->transfer_buffer = skb->data;
-               urb->transfer_buffer_length = skb->len;
 
                __fill_isoc_descriptor(urb, skb->len,
                                le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize));
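Note: usb_fill_int_urb() used above is only a convenience initializer; transfer flags such as URB_ISO_ASAP still have to be set by hand. A small usage sketch with hypothetical names, assuming the caller supplies the endpoint descriptor:

	#include <linux/usb.h>

	static void example_tx_complete(struct urb *urb) { }

	static void example_fill_tx_urb(struct urb *urb, struct usb_device *udev,
					struct usb_endpoint_descriptor *ep,
					void *buf, int len, void *ctx)
	{
		unsigned int pipe = usb_sndisocpipe(udev, ep->bEndpointAddress);

		/* dev, pipe, buffer, length, completion, context and interval
		 * are all filled in by the single helper call */
		usb_fill_int_urb(urb, udev, pipe, buf, len,
				 example_tx_complete, ctx, ep->bInterval);
		urb->transfer_flags = URB_ISO_ASAP;
	}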
index 3c6cabcb7d84b0428cdeae91b3f9a98f2bc85715..48ad2a7ab080ba9a58c425da7cc959dba0e1e3de 100644 (file)
@@ -398,6 +398,7 @@ static int hci_uart_register_dev(struct hci_uart *hu)
        hdev->flush = hci_uart_flush;
        hdev->send  = hci_uart_send_frame;
        hdev->destruct = hci_uart_destruct;
+       hdev->parent = hu->tty->dev;
 
        hdev->owner = THIS_MODULE;
 
index 8aba0ba57de50ff6fdc2dd3bdce24f0266f67cc6..2d749937a969bb862549b555e70618e802dc6d58 100644 (file)
@@ -193,10 +193,11 @@ static int addr4_resolve(struct sockaddr_in *src_in,
        fl.nl_u.ip4_u.saddr = src_ip;
        fl.oif = addr->bound_dev_if;
 
-       ret = ip_route_output_key(&init_net, &rt, &fl);
-       if (ret)
+       rt = ip_route_output_key(&init_net, &fl);
+       if (IS_ERR(rt)) {
+               ret = PTR_ERR(rt);
                goto out;
-
+       }
        src_in->sin_family = AF_INET;
        src_in->sin_addr.s_addr = rt->rt_src;
 
index d02dcc6e5963bf99f35020525d0c4c2c306c5464..e0ccbc53fbcc36cca850fad944da4fb0c0e7fdcf 100644 (file)
@@ -354,7 +354,8 @@ static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
                          }
        };
 
-       if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
+       rt = ip_route_output_flow(&init_net, &fl, NULL);
+       if (IS_ERR(rt))
                return NULL;
        return rt;
 }
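Note: the conversions above (and the ones that follow) share the same calling convention: the routing lookup now returns a struct rtable pointer, ERR_PTR-encoded on failure, instead of filling in an output argument. A minimal sketch of the pattern, with the flow key assumed to be already populated by the caller:

	#include <linux/err.h>
	#include <net/net_namespace.h>
	#include <net/route.h>

	static int example_route_lookup(struct flowi *fl, struct rtable **prt)
	{
		struct rtable *rt;

		rt = ip_route_output_key(&init_net, fl);
		if (IS_ERR(rt))
			return PTR_ERR(rt);	/* e.g. -ENETUNREACH */

		*prt = rt;
		return 0;
	}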
index 8b00e6c46f01db0e71f69e2ba0dd6c07e3a1cb59..77b0eef2aad9a436db41b6ade0ff8afffe82e969 100644 (file)
@@ -331,7 +331,8 @@ static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
                          }
        };
 
-       if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
+       rt = ip_route_output_flow(&init_net, &fl, NULL);
+       if (IS_ERR(rt))
                return NULL;
        return rt;
 }
index 3b4ec3238ceb60576de7d6fb567f6cf14fe7961c..3d7f3664b67b9f301ec1fb80cb27df1cbcd3b11f 100644 (file)
@@ -153,7 +153,8 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
                                nesdev, nesdev->netdev[0]->name);
                netdev = nesdev->netdev[0];
                nesvnic = netdev_priv(netdev);
-               is_bonded = (netdev->master == event_netdev);
+               is_bonded = netif_is_bond_slave(netdev) &&
+                           (netdev->master == event_netdev);
                if ((netdev == event_netdev) || is_bonded) {
                        if (nesvnic->rdma_enabled == 0) {
                                nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since"
index 009ec814d517b69c19a02ca7baa003a126441160..e81599cb1fe655d778322cb255aca6cf08d91ad4 100644 (file)
@@ -1112,13 +1112,14 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
 
        memset(&fl, 0, sizeof fl);
        fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-       if (ip_route_output_key(&init_net, &rt, &fl)) {
+       rt = ip_route_output_key(&init_net, &fl);
+       if (IS_ERR(rt)) {
                printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n",
                                __func__, dst_ip);
                return rc;
        }
 
-       if (nesvnic->netdev->master)
+       if (netif_is_bond_slave(nesvnic->netdev))
                netdev = nesvnic->netdev->master;
        else
                netdev = nesvnic->netdev;
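Note: both nes changes test netif_is_bond_slave() before trusting ->master. A compressed sketch of that guard, with a placeholder device pointer:

	#include <linux/netdevice.h>

	/* Follow ->master only when the device is really a bonding slave. */
	static struct net_device *example_effective_netdev(struct net_device *ndev)
	{
		if (netif_is_bond_slave(ndev) && ndev->master)
			return ndev->master;
		return ndev;
	}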
index 049eaf12aaab93465889a3dd5fe01e78955fb514..1f23e048f07713846531d7e1e5ca62588a572d42 100644 (file)
@@ -134,7 +134,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
        struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
 
-       if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
+       if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
                return;
 
        spin_lock(&receiving_list_lock);
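Note: the callback above now checks the calling task's own effective capability set rather than capabilities carried with the message. A minimal sketch of that check; the helper name is hypothetical:

	#include <linux/capability.h>
	#include <linux/cred.h>
	#include <linux/types.h>

	static bool example_may_configure(void)
	{
		/* true only if the current task has CAP_SYS_ADMIN in effect */
		return cap_raised(current_cap(), CAP_SYS_ADMIN);
	}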
index 03823327db25236c09199968baa6441458f70017..925c25c295f0adbd449266e3816d5c9f36a887b0 100644 (file)
@@ -238,8 +238,8 @@ source "drivers/net/arm/Kconfig"
 config AX88796
        tristate "ASIX AX88796 NE2000 clone support"
        depends on ARM || MIPS || SUPERH
-       select CRC32
-       select MII
+       select PHYLIB
+       select MDIO_BITBANG
        help
          AX88796 driver, using platform bus to provide
          chip detection and resources
@@ -1944,7 +1944,8 @@ config 68360_ENET
 config FEC
        bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
        depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
-               MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28
+               IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC
+       default IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC if ARM
        select PHYLIB
        help
          Say Y here if you want to use the built-in 10/100 Fast ethernet
@@ -2007,6 +2008,15 @@ config BCM63XX_ENET
          This driver supports the ethernet MACs in the Broadcom 63xx
          MIPS chipset family (BCM63XX).
 
+config FTMAC100
+       tristate "Faraday FTMAC100 10/100 Ethernet support"
+       depends on ARM
+       select MII
+       help
+         This driver supports the FTMAC100 10/100 Ethernet controller
+         from Faraday. It is used on Faraday A320, Andes AG101 and some
+         other ARM/NDS32 SoCs.
+
 source "drivers/net/fs_enet/Kconfig"
 
 source "drivers/net/octeon/Kconfig"
@@ -2099,6 +2109,7 @@ config E1000
 config E1000E
        tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
        depends on PCI && (!SPARC32 || BROKEN)
+       select CRC32
        ---help---
          This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
          ethernet family of adapters. For PCI or PCI-X e1000 adapters,
@@ -2235,15 +2246,6 @@ config R8169
          To compile this driver as a module, choose M here: the module
          will be called r8169.  This is recommended.
 
-config R8169_VLAN
-       bool "VLAN support"
-       depends on R8169 && VLAN_8021Q
-       ---help---
-         Say Y here for the r8169 driver to support the functions required
-         by the kernel 802.1Q code.
-
-         If in doubt, say Y.
-
 config SB1250_MAC
        tristate "SB1250 Gigabit Ethernet support"
        depends on SIBYTE_SB1xxx_SOC
@@ -2594,14 +2596,9 @@ config CHELSIO_T1_1G
          Enables support for Chelsio's gigabit Ethernet PCI cards.  If you
          are using only 10G cards say 'N' here.
 
-config CHELSIO_T3_DEPENDS
-       tristate
-       depends on PCI && INET
-       default y
-
 config CHELSIO_T3
        tristate "Chelsio Communications T3 10Gb Ethernet support"
-       depends on CHELSIO_T3_DEPENDS
+       depends on PCI && INET
        select FW_LOADER
        select MDIO
        help
@@ -2619,14 +2616,9 @@ config CHELSIO_T3
          To compile this driver as a module, choose M here: the module
          will be called cxgb3.
 
-config CHELSIO_T4_DEPENDS
-       tristate
-       depends on PCI && INET
-       default y
-
 config CHELSIO_T4
        tristate "Chelsio Communications T4 Ethernet support"
-       depends on CHELSIO_T4_DEPENDS
+       depends on PCI
        select FW_LOADER
        select MDIO
        help
@@ -2644,14 +2636,9 @@ config CHELSIO_T4
          To compile this driver as a module choose M here; the module
          will be called cxgb4.
 
-config CHELSIO_T4VF_DEPENDS
-       tristate
-       depends on PCI && INET
-       default y
-
 config CHELSIO_T4VF
        tristate "Chelsio Communications T4 Virtual Function Ethernet support"
-       depends on CHELSIO_T4VF_DEPENDS
+       depends on PCI
        help
          This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
          adapters with PCI-E SR-IOV Virtual Functions.
index b90738d13994318dcf2a06442cd7eba52629b9a7..7c2171179f97ce1efff1410edbdc322138237474 100644 (file)
@@ -147,6 +147,7 @@ obj-$(CONFIG_FORCEDETH) += forcedeth.o
 obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
 obj-$(CONFIG_AX88796) += ax88796.o
 obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
+obj-$(CONFIG_FTMAC100) += ftmac100.o
 
 obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
index 1bf67200994827fd76250e718b3bdf4a67a609e8..23f2ab0f2fa84148890d634b9fe935de75929a41 100644 (file)
@@ -345,7 +345,7 @@ int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data)
  */
 static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
 {
-       u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_SPEED_MASK;
+       u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_ALL;
        u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP &
                                ~GIGA_CR_1000T_SPEED_MASK;
 
@@ -373,7 +373,7 @@ static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
        }
 
        if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 ||
-           atl1c_write_phy_reg(hw, MII_GIGA_CR, mii_giga_ctrl_data) != 0)
+           atl1c_write_phy_reg(hw, MII_CTRL1000, mii_giga_ctrl_data) != 0)
                return -1;
        return 0;
 }
@@ -517,19 +517,18 @@ int atl1c_phy_init(struct atl1c_hw *hw)
                                        "Error Setting up Auto-Negotiation\n");
                        return ret_val;
                }
-               mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
+               mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
                break;
        case MEDIA_TYPE_100M_FULL:
-               mii_bmcr_data |= BMCR_SPEED_100 | BMCR_FULL_DUPLEX;
+               mii_bmcr_data |= BMCR_SPEED100 | BMCR_FULLDPLX;
                break;
        case MEDIA_TYPE_100M_HALF:
-               mii_bmcr_data |= BMCR_SPEED_100;
+               mii_bmcr_data |= BMCR_SPEED100;
                break;
        case MEDIA_TYPE_10M_FULL:
-               mii_bmcr_data |= BMCR_SPEED_10 | BMCR_FULL_DUPLEX;
+               mii_bmcr_data |= BMCR_FULLDPLX;
                break;
        case MEDIA_TYPE_10M_HALF:
-               mii_bmcr_data |= BMCR_SPEED_10;
                break;
        default:
                if (netif_msg_link(adapter))
@@ -657,7 +656,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw)
        err = atl1c_phy_setup_adv(hw);
        if (err)
                return err;
-       mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
+       mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
 
        return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
 }
index 3dd675979aa17ebfcfeda3d345981355a924c497..655fc6c4a8a49aa75ddff31f599938cc3d4138a5 100644 (file)
@@ -736,55 +736,16 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
 #define REG_DEBUG_DATA0                0x1900
 #define REG_DEBUG_DATA1                0x1904
 
-/* PHY Control Register */
-#define MII_BMCR                       0x00
-#define BMCR_SPEED_SELECT_MSB          0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define BMCR_COLL_TEST_ENABLE          0x0080  /* Collision test enable */
-#define BMCR_FULL_DUPLEX               0x0100  /* FDX =1, half duplex =0 */
-#define BMCR_RESTART_AUTO_NEG          0x0200  /* Restart auto negotiation */
-#define BMCR_ISOLATE                   0x0400  /* Isolate PHY from MII */
-#define BMCR_POWER_DOWN                        0x0800  /* Power down */
-#define BMCR_AUTO_NEG_EN               0x1000  /* Auto Neg Enable */
-#define BMCR_SPEED_SELECT_LSB          0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define BMCR_LOOPBACK                  0x4000  /* 0 = normal, 1 = loopback */
-#define BMCR_RESET                     0x8000  /* 0 = normal, 1 = PHY reset */
-#define BMCR_SPEED_MASK                        0x2040
-#define BMCR_SPEED_1000                        0x0040
-#define BMCR_SPEED_100                 0x2000
-#define BMCR_SPEED_10                  0x0000
-
-/* PHY Status Register */
-#define MII_BMSR                       0x01
-#define BMMSR_EXTENDED_CAPS            0x0001  /* Extended register capabilities */
-#define BMSR_JABBER_DETECT             0x0002  /* Jabber Detected */
-#define BMSR_LINK_STATUS               0x0004  /* Link Status 1 = link */
-#define BMSR_AUTONEG_CAPS              0x0008  /* Auto Neg Capable */
-#define BMSR_REMOTE_FAULT              0x0010  /* Remote Fault Detect */
-#define BMSR_AUTONEG_COMPLETE          0x0020  /* Auto Neg Complete */
-#define BMSR_PREAMBLE_SUPPRESS         0x0040  /* Preamble may be suppressed */
-#define BMSR_EXTENDED_STATUS           0x0100  /* Ext. status info in Reg 0x0F */
-#define BMSR_100T2_HD_CAPS             0x0200  /* 100T2 Half Duplex Capable */
-#define BMSR_100T2_FD_CAPS             0x0400  /* 100T2 Full Duplex Capable */
-#define BMSR_10T_HD_CAPS               0x0800  /* 10T   Half Duplex Capable */
-#define BMSR_10T_FD_CAPS               0x1000  /* 10T   Full Duplex Capable */
-#define BMSR_100X_HD_CAPS              0x2000  /* 100X  Half Duplex Capable */
-#define BMMII_SR_100X_FD_CAPS          0x4000  /* 100X  Full Duplex Capable */
-#define BMMII_SR_100T4_CAPS            0x8000  /* 100T4 Capable */
-
-#define MII_PHYSID1                    0x02
-#define MII_PHYSID2                    0x03
 #define L1D_MPW_PHYID1                 0xD01C  /* V7 */
 #define L1D_MPW_PHYID2                 0xD01D  /* V1-V6 */
 #define L1D_MPW_PHYID3                 0xD01E  /* V8 */
 
 
 /* Autoneg Advertisement Register */
-#define MII_ADVERTISE                  0x04
-#define ADVERTISE_SPEED_MASK           0x01E0
-#define ADVERTISE_DEFAULT_CAP          0x0DE0
+#define ADVERTISE_DEFAULT_CAP \
+       (ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)
 
 /* 1000BASE-T Control Register */
-#define MII_GIGA_CR                    0x09
 #define GIGA_CR_1000T_REPEATER_DTE     0x0400  /* 1=Repeater/switch device port 0=DTE device */
 
 #define GIGA_CR_1000T_MS_VALUE         0x0800  /* 1=Configure PHY as Master 0=Configure PHY as Slave */
index 3824382faecc1b1f18d42b9e9fac14094d5d2f95..7d9d5067a65ce1bb368bd2128d75c5ef87446828 100644 (file)
@@ -1102,10 +1102,10 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter)
        AT_READ_REG(hw, REG_DEVICE_CTRL, &dev_ctrl_data);
        max_pay_load  = (dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT) &
                        DEVICE_CTRL_MAX_PAYLOAD_MASK;
-       hw->dmaw_block = min(max_pay_load, hw->dmaw_block);
+       hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
        max_pay_load  = (dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT) &
                        DEVICE_CTRL_MAX_RREQ_SZ_MASK;
-       hw->dmar_block = min(max_pay_load, hw->dmar_block);
+       hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
 
        txq_ctrl_data = (hw->tpd_burst & TXQ_NUM_TPD_BURST_MASK) <<
                        TXQ_NUM_TPD_BURST_SHIFT;
@@ -2718,7 +2718,6 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
                goto err_reset;
        }
 
-       device_init_wakeup(&pdev->dev, 1);
        /* reset the controller to
         * put the device in a known good starting state */
        err = atl1c_phy_init(&adapter->hw);
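Note: the min() to min_t() changes in this file (and in atl1e below) exist because the kernel's min() macro warns when its operands have different types. A tiny sketch of the cast-to-named-type form, with illustrative field names:

	#include <linux/kernel.h>

	enum example_dma_block { EXAMPLE_DMA_128B, EXAMPLE_DMA_256B };

	struct example_hw {
		enum example_dma_block dmaw_block;	/* illustrative */
	};

	static void example_clamp(struct example_hw *hw, u32 reg)
	{
		u32 max_pay_load = (reg >> 5) & 0x7;

		/* min(max_pay_load, hw->dmaw_block) would trip min()'s
		 * typeof-based type check; min_t(u32, ...) casts both
		 * operands to u32 before comparing. */
		hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
	}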
index 6943a6c3b948cf56a93c0412b4b57ffc17a15b40..1209297433b828083bb8892f9118809593237835 100644 (file)
@@ -95,18 +95,18 @@ static int atl1e_set_settings(struct net_device *netdev,
                ecmd->advertising = hw->autoneg_advertised |
                                    ADVERTISED_TP | ADVERTISED_Autoneg;
 
-               adv4 = hw->mii_autoneg_adv_reg & ~MII_AR_SPEED_MASK;
+               adv4 = hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL;
                adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK;
                if (hw->autoneg_advertised & ADVERTISE_10_HALF)
-                       adv4 |= MII_AR_10T_HD_CAPS;
+                       adv4 |= ADVERTISE_10HALF;
                if (hw->autoneg_advertised & ADVERTISE_10_FULL)
-                       adv4 |= MII_AR_10T_FD_CAPS;
+                       adv4 |= ADVERTISE_10FULL;
                if (hw->autoneg_advertised & ADVERTISE_100_HALF)
-                       adv4 |= MII_AR_100TX_HD_CAPS;
+                       adv4 |= ADVERTISE_100HALF;
                if (hw->autoneg_advertised & ADVERTISE_100_FULL)
-                       adv4 |= MII_AR_100TX_FD_CAPS;
+                       adv4 |= ADVERTISE_100FULL;
                if (hw->autoneg_advertised & ADVERTISE_1000_FULL)
-                       adv9 |= MII_AT001_CR_1000T_FD_CAPS;
+                       adv9 |= ADVERTISE_1000FULL;
 
                if (adv4 != hw->mii_autoneg_adv_reg ||
                                adv9 != hw->mii_1000t_ctrl_reg) {
index 76cc043def8c481bcb2e479170cf0d5abbdc63a0..923063d2e5bbcd90a04fffe0642e8ca5bab32da6 100644 (file)
@@ -318,7 +318,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
         * Advertisement Register (Address 4) and the 1000 mb speed bits in
         * the  1000Base-T control Register (Address 9).
         */
-       mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
+       mii_autoneg_adv_reg &= ~ADVERTISE_ALL;
        mii_1000t_ctrl_reg  &= ~MII_AT001_CR_1000T_SPEED_MASK;
 
        /*
@@ -327,44 +327,37 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
         */
        switch (hw->media_type) {
        case MEDIA_TYPE_AUTO_SENSOR:
-               mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS   |
-                                       MII_AR_10T_FD_CAPS   |
-                                       MII_AR_100TX_HD_CAPS |
-                                       MII_AR_100TX_FD_CAPS);
-               hw->autoneg_advertised = ADVERTISE_10_HALF  |
-                                        ADVERTISE_10_FULL  |
-                                        ADVERTISE_100_HALF |
-                                        ADVERTISE_100_FULL;
+               mii_autoneg_adv_reg |= ADVERTISE_ALL;
+               hw->autoneg_advertised = ADVERTISE_ALL;
                if (hw->nic_type == athr_l1e) {
-                       mii_1000t_ctrl_reg |=
-                               MII_AT001_CR_1000T_FD_CAPS;
+                       mii_1000t_ctrl_reg |= ADVERTISE_1000FULL;
                        hw->autoneg_advertised |= ADVERTISE_1000_FULL;
                }
                break;
 
        case MEDIA_TYPE_100M_FULL:
-               mii_autoneg_adv_reg   |= MII_AR_100TX_FD_CAPS;
+               mii_autoneg_adv_reg   |= ADVERTISE_100FULL;
                hw->autoneg_advertised = ADVERTISE_100_FULL;
                break;
 
        case MEDIA_TYPE_100M_HALF:
-               mii_autoneg_adv_reg   |= MII_AR_100TX_HD_CAPS;
+               mii_autoneg_adv_reg   |= ADVERTISE_100HALF;
                hw->autoneg_advertised = ADVERTISE_100_HALF;
                break;
 
        case MEDIA_TYPE_10M_FULL:
-               mii_autoneg_adv_reg   |= MII_AR_10T_FD_CAPS;
+               mii_autoneg_adv_reg   |= ADVERTISE_10FULL;
                hw->autoneg_advertised = ADVERTISE_10_FULL;
                break;
 
        default:
-               mii_autoneg_adv_reg   |= MII_AR_10T_HD_CAPS;
+               mii_autoneg_adv_reg   |= ADVERTISE_10HALF;
                hw->autoneg_advertised = ADVERTISE_10_HALF;
                break;
        }
 
        /* flow control fixed to enable all */
-       mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
+       mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
 
        hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
        hw->mii_1000t_ctrl_reg  = mii_1000t_ctrl_reg;
@@ -374,7 +367,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
                return ret_val;
 
        if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
-               ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR,
+               ret_val = atl1e_write_phy_reg(hw, MII_CTRL1000,
                                           mii_1000t_ctrl_reg);
                if (ret_val)
                        return ret_val;
@@ -397,7 +390,7 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
        int ret_val;
        u16 phy_data;
 
-       phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
+       phy_data = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
 
        ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data);
        if (ret_val) {
@@ -645,15 +638,14 @@ int atl1e_restart_autoneg(struct atl1e_hw *hw)
                return err;
 
        if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
-               err = atl1e_write_phy_reg(hw, MII_AT001_CR,
+               err = atl1e_write_phy_reg(hw, MII_CTRL1000,
                                       hw->mii_1000t_ctrl_reg);
                if (err)
                        return err;
        }
 
        err = atl1e_write_phy_reg(hw, MII_BMCR,
-                       MII_CR_RESET | MII_CR_AUTO_NEG_EN |
-                       MII_CR_RESTART_AUTO_NEG);
+                       BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
        return err;
 }
 
index 5ea2f4d86cface898329d40969f1365dba9ca1cc..74df16aef7933871fb87c29a511ddc5b19182a01 100644 (file)
@@ -629,127 +629,24 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
 
 /***************************** MII definition ***************************************/
 /* PHY Common Register */
-#define MII_BMCR                        0x00
-#define MII_BMSR                        0x01
-#define MII_PHYSID1                     0x02
-#define MII_PHYSID2                     0x03
-#define MII_ADVERTISE                   0x04
-#define MII_LPA                         0x05
-#define MII_EXPANSION                   0x06
-#define MII_AT001_CR                    0x09
-#define MII_AT001_SR                    0x0A
-#define MII_AT001_ESR                   0x0F
 #define MII_AT001_PSCR                  0x10
 #define MII_AT001_PSSR                  0x11
 #define MII_INT_CTRL                    0x12
 #define MII_INT_STATUS                  0x13
 #define MII_SMARTSPEED                  0x14
-#define MII_RERRCOUNTER                 0x15
-#define MII_SREVISION                   0x16
-#define MII_RESV1                       0x17
 #define MII_LBRERROR                    0x18
-#define MII_PHYADDR                     0x19
 #define MII_RESV2                       0x1a
-#define MII_TPISTATUS                   0x1b
-#define MII_NCONFIG                     0x1c
 
 #define MII_DBG_ADDR                   0x1D
 #define MII_DBG_DATA                   0x1E
 
-
-/* PHY Control Register */
-#define MII_CR_SPEED_SELECT_MSB                  0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_COLL_TEST_ENABLE                  0x0080  /* Collision test enable */
-#define MII_CR_FULL_DUPLEX                       0x0100  /* FDX =1, half duplex =0 */
-#define MII_CR_RESTART_AUTO_NEG                  0x0200  /* Restart auto negotiation */
-#define MII_CR_ISOLATE                           0x0400  /* Isolate PHY from MII */
-#define MII_CR_POWER_DOWN                        0x0800  /* Power down */
-#define MII_CR_AUTO_NEG_EN                       0x1000  /* Auto Neg Enable */
-#define MII_CR_SPEED_SELECT_LSB                  0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_LOOPBACK                          0x4000  /* 0 = normal, 1 = loopback */
-#define MII_CR_RESET                             0x8000  /* 0 = normal, 1 = PHY reset */
-#define MII_CR_SPEED_MASK                        0x2040
-#define MII_CR_SPEED_1000                        0x0040
-#define MII_CR_SPEED_100                         0x2000
-#define MII_CR_SPEED_10                          0x0000
-
-
-/* PHY Status Register */
-#define MII_SR_EXTENDED_CAPS                     0x0001  /* Extended register capabilities */
-#define MII_SR_JABBER_DETECT                     0x0002  /* Jabber Detected */
-#define MII_SR_LINK_STATUS                       0x0004  /* Link Status 1 = link */
-#define MII_SR_AUTONEG_CAPS                      0x0008  /* Auto Neg Capable */
-#define MII_SR_REMOTE_FAULT                      0x0010  /* Remote Fault Detect */
-#define MII_SR_AUTONEG_COMPLETE                  0x0020  /* Auto Neg Complete */
-#define MII_SR_PREAMBLE_SUPPRESS                 0x0040  /* Preamble may be suppressed */
-#define MII_SR_EXTENDED_STATUS                   0x0100  /* Ext. status info in Reg 0x0F */
-#define MII_SR_100T2_HD_CAPS                     0x0200  /* 100T2 Half Duplex Capable */
-#define MII_SR_100T2_FD_CAPS                     0x0400  /* 100T2 Full Duplex Capable */
-#define MII_SR_10T_HD_CAPS                       0x0800  /* 10T   Half Duplex Capable */
-#define MII_SR_10T_FD_CAPS                       0x1000  /* 10T   Full Duplex Capable */
-#define MII_SR_100X_HD_CAPS                      0x2000  /* 100X  Half Duplex Capable */
-#define MII_SR_100X_FD_CAPS                      0x4000  /* 100X  Full Duplex Capable */
-#define MII_SR_100T4_CAPS                        0x8000  /* 100T4 Capable */
-
-/* Link partner ability register. */
-#define MII_LPA_SLCT                             0x001f  /* Same as advertise selector  */
-#define MII_LPA_10HALF                           0x0020  /* Can do 10mbps half-duplex   */
-#define MII_LPA_10FULL                           0x0040  /* Can do 10mbps full-duplex   */
-#define MII_LPA_100HALF                          0x0080  /* Can do 100mbps half-duplex  */
-#define MII_LPA_100FULL                          0x0100  /* Can do 100mbps full-duplex  */
-#define MII_LPA_100BASE4                         0x0200  /* 100BASE-T4  */
-#define MII_LPA_PAUSE                            0x0400  /* PAUSE */
-#define MII_LPA_ASYPAUSE                         0x0800  /* Asymmetrical PAUSE */
-#define MII_LPA_RFAULT                           0x2000  /* Link partner faulted        */
-#define MII_LPA_LPACK                            0x4000  /* Link partner acked us       */
-#define MII_LPA_NPAGE                            0x8000  /* Next page bit               */
-
 /* Autoneg Advertisement Register */
-#define MII_AR_SELECTOR_FIELD                   0x0001  /* indicates IEEE 802.3 CSMA/CD */
-#define MII_AR_10T_HD_CAPS                      0x0020  /* 10T   Half Duplex Capable */
-#define MII_AR_10T_FD_CAPS                      0x0040  /* 10T   Full Duplex Capable */
-#define MII_AR_100TX_HD_CAPS                    0x0080  /* 100TX Half Duplex Capable */
-#define MII_AR_100TX_FD_CAPS                    0x0100  /* 100TX Full Duplex Capable */
-#define MII_AR_100T4_CAPS                       0x0200  /* 100T4 Capable */
-#define MII_AR_PAUSE                            0x0400  /* Pause operation desired */
-#define MII_AR_ASM_DIR                          0x0800  /* Asymmetric Pause Direction bit */
-#define MII_AR_REMOTE_FAULT                     0x2000  /* Remote Fault detected */
-#define MII_AR_NEXT_PAGE                        0x8000  /* Next Page ability supported */
-#define MII_AR_SPEED_MASK                       0x01E0
-#define MII_AR_DEFAULT_CAP_MASK                 0x0DE0
+#define MII_AR_DEFAULT_CAP_MASK                 0
 
 /* 1000BASE-T Control Register */
-#define MII_AT001_CR_1000T_HD_CAPS              0x0100  /* Advertise 1000T HD capability */
-#define MII_AT001_CR_1000T_FD_CAPS              0x0200  /* Advertise 1000T FD capability  */
-#define MII_AT001_CR_1000T_REPEATER_DTE         0x0400  /* 1=Repeater/switch device port */
-/* 0=DTE device */
-#define MII_AT001_CR_1000T_MS_VALUE             0x0800  /* 1=Configure PHY as Master */
-/* 0=Configure PHY as Slave */
-#define MII_AT001_CR_1000T_MS_ENABLE            0x1000  /* 1=Master/Slave manual config value */
-/* 0=Automatic Master/Slave config */
-#define MII_AT001_CR_1000T_TEST_MODE_NORMAL     0x0000  /* Normal Operation */
-#define MII_AT001_CR_1000T_TEST_MODE_1          0x2000  /* Transmit Waveform test */
-#define MII_AT001_CR_1000T_TEST_MODE_2          0x4000  /* Master Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_3          0x6000  /* Slave Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_4          0x8000  /* Transmitter Distortion test */
-#define MII_AT001_CR_1000T_SPEED_MASK           0x0300
-#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK     0x0300
-
-/* 1000BASE-T Status Register */
-#define MII_AT001_SR_1000T_LP_HD_CAPS           0x0400  /* LP is 1000T HD capable */
-#define MII_AT001_SR_1000T_LP_FD_CAPS           0x0800  /* LP is 1000T FD capable */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS     0x1000  /* Remote receiver OK */
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS      0x2000  /* Local receiver OK */
-#define MII_AT001_SR_1000T_MS_CONFIG_RES        0x4000  /* 1=Local TX is Master, 0=Slave */
-#define MII_AT001_SR_1000T_MS_CONFIG_FAULT      0x8000  /* Master/Slave config fault */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT   12
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT    13
-
-/* Extended Status Register */
-#define MII_AT001_ESR_1000T_HD_CAPS             0x1000  /* 1000T HD capable */
-#define MII_AT001_ESR_1000T_FD_CAPS             0x2000  /* 1000T FD capable */
-#define MII_AT001_ESR_1000X_HD_CAPS             0x4000  /* 1000X HD capable */
-#define MII_AT001_ESR_1000X_FD_CAPS             0x8000  /* 1000X FD capable */
+#define MII_AT001_CR_1000T_SPEED_MASK \
+       (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
+#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK    MII_AT001_CR_1000T_SPEED_MASK
 
 /* AT001 PHY Specific Control Register */
 #define MII_AT001_PSCR_JABBER_DISABLE           0x0001  /* 1=Jabber Function disabled */
index e28f8baf394ec6b55049b934156a4aab6e84996c..1ff001a8270cec79343be82983220a6e04e36e1f 100644 (file)
@@ -547,8 +547,8 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
        hw->device_id = pdev->device;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_id = pdev->subsystem_device;
+       hw->revision_id  = pdev->revision;
 
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
        pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 
        phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
@@ -932,11 +932,11 @@ static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
        max_pay_load  = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) &
                        DEVICE_CTRL_MAX_PAYLOAD_MASK;
 
-       hw->dmaw_block = min(max_pay_load, hw->dmaw_block);
+       hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
 
        max_pay_load  = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) &
                        DEVICE_CTRL_MAX_RREQ_SZ_MASK;
-       hw->dmar_block = min(max_pay_load, hw->dmar_block);
+       hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
 
        if (hw->nic_type != athr_l2e_revB)
                AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2,
@@ -2051,9 +2051,9 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
                atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
                atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
 
-               mii_advertise_data = MII_AR_10T_HD_CAPS;
+               mii_advertise_data = ADVERTISE_10HALF;
 
-               if ((atl1e_write_phy_reg(hw, MII_AT001_CR, 0) != 0) ||
+               if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) ||
                    (atl1e_write_phy_reg(hw,
                           MII_ADVERTISE, mii_advertise_data) != 0) ||
                    (atl1e_phy_commit(hw)) != 0) {
index 3b527687c28fa9ba3509defb224ac08ca32665e9..67f40b9c16edb0a15f70f3af6d0efbf57a55c56b 100644 (file)
@@ -950,6 +950,7 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
        hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
        adapter->wol = 0;
+       device_set_wakeup_enable(&adapter->pdev->dev, false);
        adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
        adapter->ict = 50000;           /* 100ms */
        adapter->link_speed = SPEED_0;  /* hardware init */
@@ -2735,15 +2736,15 @@ static int atl1_close(struct net_device *netdev)
 }
 
 #ifdef CONFIG_PM
-static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
+static int atl1_suspend(struct device *dev)
 {
+       struct pci_dev *pdev = to_pci_dev(dev);
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct atl1_adapter *adapter = netdev_priv(netdev);
        struct atl1_hw *hw = &adapter->hw;
        u32 ctrl = 0;
        u32 wufc = adapter->wol;
        u32 val;
-       int retval;
        u16 speed;
        u16 duplex;
 
@@ -2751,17 +2752,15 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
        if (netif_running(netdev))
                atl1_down(adapter);
 
-       retval = pci_save_state(pdev);
-       if (retval)
-               return retval;
-
        atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
        atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
        val = ctrl & BMSR_LSTATUS;
        if (val)
                wufc &= ~ATLX_WUFC_LNKC;
+       if (!wufc)
+               goto disable_wol;
 
-       if (val && wufc) {
+       if (val) {
                val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
                if (val) {
                        if (netif_msg_ifdown(adapter))
@@ -2798,23 +2797,18 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
                ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
                iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
                ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
-
-               pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
-               goto exit;
-       }
-
-       if (!val && wufc) {
+       } else {
                ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
                iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
                ioread32(hw->hw_addr + REG_WOL_CTRL);
                iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
                ioread32(hw->hw_addr + REG_MAC_CTRL);
                hw->phy_configured = false;
-               pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
-               goto exit;
        }
 
-disable_wol:
+       return 0;
+
+ disable_wol:
        iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
        ioread32(hw->hw_addr + REG_WOL_CTRL);
        ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
@@ -2822,37 +2816,17 @@ disable_wol:
        iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
        ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
        hw->phy_configured = false;
-       pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
-exit:
-       if (netif_running(netdev))
-               pci_disable_msi(adapter->pdev);
-       pci_disable_device(pdev);
-       pci_set_power_state(pdev, pci_choose_state(pdev, state));
 
        return 0;
 }
 
-static int atl1_resume(struct pci_dev *pdev)
+static int atl1_resume(struct device *dev)
 {
+       struct pci_dev *pdev = to_pci_dev(dev);
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct atl1_adapter *adapter = netdev_priv(netdev);
-       u32 err;
 
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
-
-       err = pci_enable_device(pdev);
-       if (err) {
-               if (netif_msg_ifup(adapter))
-                       dev_printk(KERN_DEBUG, &pdev->dev,
-                               "error enabling pci device\n");
-               return err;
-       }
-
-       pci_set_master(pdev);
        iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
-       pci_enable_wake(pdev, PCI_D3hot, 0);
-       pci_enable_wake(pdev, PCI_D3cold, 0);
 
        atl1_reset_hw(&adapter->hw);
 
@@ -2864,16 +2838,25 @@ static int atl1_resume(struct pci_dev *pdev)
 
        return 0;
 }
+
+static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume);
+#define ATL1_PM_OPS    (&atl1_pm_ops)
+
 #else
-#define atl1_suspend NULL
-#define atl1_resume NULL
+
+static int atl1_suspend(struct device *dev) { return 0; }
+
+#define ATL1_PM_OPS    NULL
 #endif
 
 static void atl1_shutdown(struct pci_dev *pdev)
 {
-#ifdef CONFIG_PM
-       atl1_suspend(pdev, PMSG_SUSPEND);
-#endif
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct atl1_adapter *adapter = netdev_priv(netdev);
+
+       atl1_suspend(&pdev->dev);
+       pci_wake_from_d3(pdev, adapter->wol);
+       pci_set_power_state(pdev, PCI_D3hot);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3117,9 +3100,8 @@ static struct pci_driver atl1_driver = {
        .id_table = atl1_pci_tbl,
        .probe = atl1_probe,
        .remove = __devexit_p(atl1_remove),
-       .suspend = atl1_suspend,
-       .resume = atl1_resume,
-       .shutdown = atl1_shutdown
+       .shutdown = atl1_shutdown,
+       .driver.pm = ATL1_PM_OPS,
 };
 
 /*
@@ -3409,6 +3391,9 @@ static int atl1_set_wol(struct net_device *netdev,
        adapter->wol = 0;
        if (wol->wolopts & WAKE_MAGIC)
                adapter->wol |= ATLX_WUFC_MAG;
+
+       device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
        return 0;
 }
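Note: the atl1 changes above move from the legacy pci_driver .suspend/.resume hooks to dev_pm_ops, letting the PCI core handle config-space save/restore and power-state transitions. A minimal sketch of that shape for a hypothetical driver; the callbacks are placeholders:

	#include <linux/pci.h>
	#include <linux/pm.h>

	static int example_suspend(struct device *dev)
	{
		/* quiesce the hardware; no pci_save_state()/pci_set_power_state() here */
		return 0;
	}

	static int example_resume(struct device *dev)
	{
		/* re-init the hardware; config space is already restored by the core */
		return 0;
	}

	/* Wires both callbacks into the system-sleep slots of dev_pm_ops. */
	static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

	static struct pci_driver example_driver = {
		.name      = "example",
		.driver.pm = &example_pm_ops,
		/* .probe, .remove and .id_table omitted from this sketch */
	};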
 
index 4e6f4e95a5a03342320ee24cbe70bfa1365c7b3a..e637e9f28fd4456612aa8b2b1b47c8cf670acb81 100644 (file)
@@ -93,8 +93,8 @@ static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
        hw->device_id = pdev->device;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_id = pdev->subsystem_device;
+       hw->revision_id  = pdev->revision;
 
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
        pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 
        adapter->wol = 0;
index 4bebff3faeab4c35e54680ed958ccd558ffda833..e7cb8c8b9776441a58040c1e2b8ec9bb17cd1f21 100644 (file)
@@ -9,7 +9,7 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
-*/
+ */
 
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/isapnp.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/timer.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
-#include <linux/mii.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/phy.h>
 #include <linux/eeprom_93cx6.h>
 #include <linux/slab.h>
 
 #include <net/ax88796.h>
 
 #include <asm/system.h>
-#include <asm/io.h>
-
-static int phy_debug = 0;
 
 /* Rename the lib8390.c functions to show that they are in this driver */
-#define __ei_open       ax_ei_open
-#define __ei_close      ax_ei_close
-#define __ei_poll      ax_ei_poll
+#define __ei_open ax_ei_open
+#define __ei_close ax_ei_close
+#define __ei_poll ax_ei_poll
 #define __ei_start_xmit ax_ei_start_xmit
 #define __ei_tx_timeout ax_ei_tx_timeout
-#define __ei_get_stats  ax_ei_get_stats
+#define __ei_get_stats ax_ei_get_stats
 #define __ei_set_multicast_list ax_ei_set_multicast_list
-#define __ei_interrupt  ax_ei_interrupt
+#define __ei_interrupt ax_ei_interrupt
 #define ____alloc_ei_netdev ax__alloc_ei_netdev
-#define __NS8390_init   ax_NS8390_init
+#define __NS8390_init ax_NS8390_init
 
 /* force unsigned long back to 'void __iomem *' */
 #define ax_convert_addr(_a) ((void __force __iomem *)(_a))
 
-#define ei_inb(_a)     readb(ax_convert_addr(_a))
+#define ei_inb(_a) readb(ax_convert_addr(_a))
 #define ei_outb(_v, _a) writeb(_v, ax_convert_addr(_a))
 
-#define ei_inb_p(_a)   ei_inb(_a)
+#define ei_inb_p(_a) ei_inb(_a)
 #define ei_outb_p(_v, _a) ei_outb(_v, _a)
 
 /* define EI_SHIFT() to take into account our register offsets */
-#define EI_SHIFT(x)     (ei_local->reg_offset[(x)])
+#define EI_SHIFT(x) (ei_local->reg_offset[(x)])
 
 /* Ensure we have our RCR base value */
 #define AX88796_PLATFORM
@@ -74,43 +73,46 @@ static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electron
 #define NE_DATAPORT    EI_SHIFT(0x10)
 
 #define NE1SM_START_PG 0x20    /* First page of TX buffer */
-#define NE1SM_STOP_PG  0x40    /* Last page +1 of RX ring */
+#define NE1SM_STOP_PG  0x40    /* Last page +1 of RX ring */
 #define NESM_START_PG  0x40    /* First page of TX buffer */
 #define NESM_STOP_PG   0x80    /* Last page +1 of RX ring */
 
+#define AX_GPOC_PPDSET BIT(6)
+
 /* device private data */
 
 struct ax_device {
-       struct timer_list        mii_timer;
-       spinlock_t               mii_lock;
-       struct mii_if_info       mii;
-
-       u32                      msg_enable;
-       void __iomem            *map2;
-       struct platform_device  *dev;
-       struct resource         *mem;
-       struct resource         *mem2;
-       struct ax_plat_data     *plat;
-
-       unsigned char            running;
-       unsigned char            resume_open;
-       unsigned int             irqflags;
-
-       u32                      reg_offsets[0x20];
+       struct mii_bus *mii_bus;
+       struct mdiobb_ctrl bb_ctrl;
+       struct phy_device *phy_dev;
+       void __iomem *addr_memr;
+       u8 reg_memr;
+       int link;
+       int speed;
+       int duplex;
+
+       void __iomem *map2;
+       const struct ax_plat_data *plat;
+
+       unsigned char running;
+       unsigned char resume_open;
+       unsigned int irqflags;
+
+       u32 reg_offsets[0x20];
 };
 
 static inline struct ax_device *to_ax_dev(struct net_device *dev)
 {
        struct ei_device *ei_local = netdev_priv(dev);
-       return (struct ax_device *)(ei_local+1);
+       return (struct ax_device *)(ei_local + 1);
 }
 
-/* ax_initial_check
+/*
+ * ax_initial_check
  *
 * do an initial probe for the card to check whether it exists
  * and is functional
  */
-
 static int ax_initial_check(struct net_device *dev)
 {
        struct ei_device *ei_local = netdev_priv(dev);
@@ -122,10 +124,10 @@ static int ax_initial_check(struct net_device *dev)
        if (reg0 == 0xFF)
                return -ENODEV;
 
-       ei_outb(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
+       ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD);
        regd = ei_inb(ioaddr + 0x0d);
        ei_outb(0xff, ioaddr + 0x0d);
-       ei_outb(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
+       ei_outb(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD);
        ei_inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
        if (ei_inb(ioaddr + EN0_COUNTER0) != 0) {
                ei_outb(reg0, ioaddr);
@@ -136,29 +138,28 @@ static int ax_initial_check(struct net_device *dev)
        return 0;
 }
 
-/* Hard reset the card.  This used to pause for the same period that a
-   8390 reset command required, but that shouldn't be necessary. */
-
+/*
+ * Hard reset the card. This used to pause for the same period that an
+ * 8390 reset command required, but that shouldn't be necessary.
+ */
 static void ax_reset_8390(struct net_device *dev)
 {
        struct ei_device *ei_local = netdev_priv(dev);
-       struct ax_device  *ax = to_ax_dev(dev);
        unsigned long reset_start_time = jiffies;
        void __iomem *addr = (void __iomem *)dev->base_addr;
 
        if (ei_debug > 1)
-               dev_dbg(&ax->dev->dev, "resetting the 8390 t=%ld\n", jiffies);
+               netdev_dbg(dev, "resetting the 8390 t=%ld\n", jiffies);
 
        ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
 
-       ei_status.txing = 0;
-       ei_status.dmaing = 0;
+       ei_local->txing = 0;
+       ei_local->dmaing = 0;
 
        /* This check _should_not_ be necessary, omit eventually. */
        while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
-               if (jiffies - reset_start_time > 2*HZ/100) {
-                       dev_warn(&ax->dev->dev, "%s: %s did not complete.\n",
-                              __func__, dev->name);
+               if (jiffies - reset_start_time > 2 * HZ / 100) {
+                       netdev_warn(dev, "%s: did not complete.\n", __func__);
                        break;
                }
        }
@@ -171,70 +172,72 @@ static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
                            int ring_page)
 {
        struct ei_device *ei_local = netdev_priv(dev);
-       struct ax_device  *ax = to_ax_dev(dev);
        void __iomem *nic_base = ei_local->mem;
 
        /* This *shouldn't* happen. If it does, it's the last thing you'll see */
-       if (ei_status.dmaing) {
-               dev_err(&ax->dev->dev, "%s: DMAing conflict in %s "
+       if (ei_local->dmaing) {
+               netdev_err(dev, "DMAing conflict in %s "
                        "[DMAstat:%d][irqlock:%d].\n",
-                       dev->name, __func__,
-                       ei_status.dmaing, ei_status.irqlock);
+                       __func__,
+                       ei_local->dmaing, ei_local->irqlock);
                return;
        }
 
-       ei_status.dmaing |= 0x01;
-       ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+       ei_local->dmaing |= 0x01;
+       ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
        ei_outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
        ei_outb(0, nic_base + EN0_RCNTHI);
        ei_outb(0, nic_base + EN0_RSARLO);              /* On page boundary */
        ei_outb(ring_page, nic_base + EN0_RSARHI);
        ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
 
-       if (ei_status.word16)
-               readsw(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+       if (ei_local->word16)
+               readsw(nic_base + NE_DATAPORT, hdr,
+                      sizeof(struct e8390_pkt_hdr) >> 1);
        else
-               readsb(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
+               readsb(nic_base + NE_DATAPORT, hdr,
+                      sizeof(struct e8390_pkt_hdr));
 
        ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
-       ei_status.dmaing &= ~0x01;
+       ei_local->dmaing &= ~0x01;
 
        le16_to_cpus(&hdr->count);
 }
 
 
-/* Block input and output, similar to the Crynwr packet driver.  If you
-   are porting to a new ethercard, look at the packet driver source for hints.
-   The NEx000 doesn't share the on-board packet memory -- you have to put
-   the packet out through the "remote DMA" dataport using ei_outb. */
-
+/*
+ * Block input and output, similar to the Crynwr packet driver. If
+ * you are porting to a new ethercard, look at the packet driver
+ * source for hints. The NEx000 doesn't share the on-board packet
+ * memory -- you have to put the packet out through the "remote DMA"
+ * dataport using ei_outb.
+ */
 static void ax_block_input(struct net_device *dev, int count,
                           struct sk_buff *skb, int ring_offset)
 {
        struct ei_device *ei_local = netdev_priv(dev);
-       struct ax_device  *ax = to_ax_dev(dev);
        void __iomem *nic_base = ei_local->mem;
        char *buf = skb->data;
 
-       if (ei_status.dmaing) {
-               dev_err(&ax->dev->dev,
-                       "%s: DMAing conflict in %s "
+       if (ei_local->dmaing) {
+               netdev_err(dev,
+                       "DMAing conflict in %s "
                        "[DMAstat:%d][irqlock:%d].\n",
-                       dev->name, __func__,
-                       ei_status.dmaing, ei_status.irqlock);
+                       __func__,
+                       ei_local->dmaing, ei_local->irqlock);
                return;
        }
 
-       ei_status.dmaing |= 0x01;
+       ei_local->dmaing |= 0x01;
 
-       ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+       ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + NE_CMD);
        ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
        ei_outb(count >> 8, nic_base + EN0_RCNTHI);
        ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
        ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI);
        ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
 
-       if (ei_status.word16) {
+       if (ei_local->word16) {
                readsw(nic_base + NE_DATAPORT, buf, count >> 1);
                if (count & 0x01)
                        buf[count-1] = ei_inb(nic_base + NE_DATAPORT);
@@ -243,34 +246,34 @@ static void ax_block_input(struct net_device *dev, int count,
                readsb(nic_base + NE_DATAPORT, buf, count);
        }
 
-       ei_status.dmaing &= ~1;
+       ei_local->dmaing &= ~1;
 }
 
 static void ax_block_output(struct net_device *dev, int count,
                            const unsigned char *buf, const int start_page)
 {
        struct ei_device *ei_local = netdev_priv(dev);
-       struct ax_device  *ax = to_ax_dev(dev);
        void __iomem *nic_base = ei_local->mem;
        unsigned long dma_start;
 
-       /* Round the count up for word writes.  Do we need to do this?
-          What effect will an odd byte count have on the 8390?
-          I should check someday. */
-
-       if (ei_status.word16 && (count & 0x01))
+       /*
+        * Round the count up for word writes. Do we need to do this?
+        * What effect will an odd byte count have on the 8390?  I
+        * should check someday.
+        */
+       if (ei_local->word16 && (count & 0x01))
                count++;
 
        /* This *shouldn't* happen. If it does, it's the last thing you'll see */
-       if (ei_status.dmaing) {
-               dev_err(&ax->dev->dev, "%s: DMAing conflict in %s."
+       if (ei_local->dmaing) {
+               netdev_err(dev, "DMAing conflict in %s."
                        "[DMAstat:%d][irqlock:%d]\n",
-                       dev->name, __func__,
-                      ei_status.dmaing, ei_status.irqlock);
+                       __func__,
+                      ei_local->dmaing, ei_local->irqlock);
                return;
        }
 
-       ei_status.dmaing |= 0x01;
+       ei_local->dmaing |= 0x01;
        /* We should already be in page 0, but to be safe... */
        ei_outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
 
@@ -278,250 +281,170 @@ static void ax_block_output(struct net_device *dev, int count,
 
        /* Now the normal output. */
        ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
-       ei_outb(count >> 8,   nic_base + EN0_RCNTHI);
+       ei_outb(count >> 8, nic_base + EN0_RCNTHI);
        ei_outb(0x00, nic_base + EN0_RSARLO);
        ei_outb(start_page, nic_base + EN0_RSARHI);
 
        ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
-       if (ei_status.word16) {
-               writesw(nic_base + NE_DATAPORT, buf, count>>1);
-       } else {
+       if (ei_local->word16)
+               writesw(nic_base + NE_DATAPORT, buf, count >> 1);
+       else
                writesb(nic_base + NE_DATAPORT, buf, count);
-       }
 
        dma_start = jiffies;
 
        while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
-               if (jiffies - dma_start > 2*HZ/100) {           /* 20ms */
-                       dev_warn(&ax->dev->dev,
-                                "%s: timeout waiting for Tx RDC.\n", dev->name);
+               if (jiffies - dma_start > 2 * HZ / 100) {               /* 20ms */
+                       netdev_warn(dev, "timeout waiting for Tx RDC.\n");
                        ax_reset_8390(dev);
-                       ax_NS8390_init(dev,1);
+                       ax_NS8390_init(dev, 1);
                        break;
                }
        }
 
        ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
-       ei_status.dmaing &= ~0x01;
+       ei_local->dmaing &= ~0x01;
 }
 
 /* definitions for accessing MII/EEPROM interface */
 
 #define AX_MEMR                        EI_SHIFT(0x14)
-#define AX_MEMR_MDC            (1<<0)
-#define AX_MEMR_MDIR           (1<<1)
-#define AX_MEMR_MDI            (1<<2)
-#define AX_MEMR_MDO            (1<<3)
-#define AX_MEMR_EECS           (1<<4)
-#define AX_MEMR_EEI            (1<<5)
-#define AX_MEMR_EEO            (1<<6)
-#define AX_MEMR_EECLK          (1<<7)
-
-/* ax_mii_ei_outbits
- *
- * write the specified set of bits to the phy
-*/
-
-static void
-ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
+#define AX_MEMR_MDC            BIT(0)
+#define AX_MEMR_MDIR           BIT(1)
+#define AX_MEMR_MDI            BIT(2)
+#define AX_MEMR_MDO            BIT(3)
+#define AX_MEMR_EECS           BIT(4)
+#define AX_MEMR_EEI            BIT(5)
+#define AX_MEMR_EEO            BIT(6)
+#define AX_MEMR_EECLK          BIT(7)
+
+static void ax_handle_link_change(struct net_device *dev)
 {
-       struct ei_device *ei_local = netdev_priv(dev);
-       void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
-       unsigned int memr;
-
-       /* clock low, data to output mode */
-       memr = ei_inb(memr_addr);
-       memr &= ~(AX_MEMR_MDC | AX_MEMR_MDIR);
-       ei_outb(memr, memr_addr);
-
-       for (len--; len >= 0; len--) {
-               if (bits & (1 << len))
-                       memr |= AX_MEMR_MDO;
-               else
-                       memr &= ~AX_MEMR_MDO;
-
-               ei_outb(memr, memr_addr);
-
-               /* clock high */
+       struct ax_device  *ax = to_ax_dev(dev);
+       struct phy_device *phy_dev = ax->phy_dev;
+       int status_change = 0;
 
-               ei_outb(memr | AX_MEMR_MDC, memr_addr);
-               udelay(1);
+       if (phy_dev->link && ((ax->speed != phy_dev->speed) ||
+                            (ax->duplex != phy_dev->duplex))) {
 
-               /* clock low */
-               ei_outb(memr, memr_addr);
+               ax->speed = phy_dev->speed;
+               ax->duplex = phy_dev->duplex;
+               status_change = 1;
        }
 
-       /* leaves the clock line low, mdir input */
-       memr |= AX_MEMR_MDIR;
-       ei_outb(memr, (void __iomem *)dev->base_addr + AX_MEMR);
-}
-
-/* ax_phy_ei_inbits
- *
- * read a specified number of bits from the phy
-*/
-
-static unsigned int
-ax_phy_ei_inbits(struct net_device *dev, int no)
-{
-       struct ei_device *ei_local = netdev_priv(dev);
-       void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
-       unsigned int memr;
-       unsigned int result = 0;
-
-       /* clock low, data to input mode */
-       memr = ei_inb(memr_addr);
-       memr &= ~AX_MEMR_MDC;
-       memr |= AX_MEMR_MDIR;
-       ei_outb(memr, memr_addr);
-
-       for (no--; no >= 0; no--) {
-               ei_outb(memr | AX_MEMR_MDC, memr_addr);
-
-               udelay(1);
-
-               if (ei_inb(memr_addr) & AX_MEMR_MDI)
-                       result |= (1<<no);
+       if (phy_dev->link != ax->link) {
+               if (!phy_dev->link) {
+                       ax->speed = 0;
+                       ax->duplex = -1;
+               }
+               ax->link = phy_dev->link;
 
-               ei_outb(memr, memr_addr);
+               status_change = 1;
        }
 
-       return result;
-}
-
-/* ax_phy_issueaddr
- *
- * use the low level bit shifting routines to send the address
- * and command to the specified phy
-*/
-
-static void
-ax_phy_issueaddr(struct net_device *dev, int phy_addr, int reg, int opc)
-{
-       if (phy_debug)
-               pr_debug("%s: dev %p, %04x, %04x, %d\n",
-                       __func__, dev, phy_addr, reg, opc);
-
-       ax_mii_ei_outbits(dev, 0x3f, 6);        /* pre-amble */
-       ax_mii_ei_outbits(dev, 1, 2);           /* frame-start */
-       ax_mii_ei_outbits(dev, opc, 2);         /* op code */
-       ax_mii_ei_outbits(dev, phy_addr, 5);    /* phy address */
-       ax_mii_ei_outbits(dev, reg, 5);         /* reg address */
+       if (status_change)
+               phy_print_status(phy_dev);
 }
 
-static int
-ax_phy_read(struct net_device *dev, int phy_addr, int reg)
+static int ax_mii_probe(struct net_device *dev)
 {
-       struct ei_device *ei_local = netdev_priv(dev);
-       unsigned long flags;
-       unsigned int result;
+       struct ax_device  *ax = to_ax_dev(dev);
+       struct phy_device *phy_dev = NULL;
+       int ret;
 
-       spin_lock_irqsave(&ei_local->page_lock, flags);
+       /* find the first phy */
+       phy_dev = phy_find_first(ax->mii_bus);
+       if (!phy_dev) {
+               netdev_err(dev, "no PHY found\n");
+               return -ENODEV;
+       }
 
-       ax_phy_issueaddr(dev, phy_addr, reg, 2);
+       ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change, 0,
+                                PHY_INTERFACE_MODE_MII);
+       if (ret) {
+               netdev_err(dev, "Could not attach to PHY\n");
+               return ret;
+       }
 
-       result = ax_phy_ei_inbits(dev, 17);
-       result &= ~(3<<16);
+       /* mask with MAC supported features */
+       phy_dev->supported &= PHY_BASIC_FEATURES;
+       phy_dev->advertising = phy_dev->supported;
 
-       spin_unlock_irqrestore(&ei_local->page_lock, flags);
+       ax->phy_dev = phy_dev;
 
-       if (phy_debug)
-               pr_debug("%s: %04x.%04x => read %04x\n", __func__,
-                        phy_addr, reg, result);
+       netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+                   phy_dev->drv->name, dev_name(&phy_dev->dev), phy_dev->irq);
 
-       return result;
+       return 0;
 }
 
-static void
-ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
+static void ax_phy_switch(struct net_device *dev, int on)
 {
-       struct ei_device *ei = netdev_priv(dev);
-       struct ax_device  *ax = to_ax_dev(dev);
-       unsigned long flags;
-
-       dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n",
-               __func__, dev, phy_addr, reg, value);
-
-       spin_lock_irqsave(&ei->page_lock, flags);
-
-       ax_phy_issueaddr(dev, phy_addr, reg, 1);
-       ax_mii_ei_outbits(dev, 2, 2);           /* send TA */
-       ax_mii_ei_outbits(dev, value, 16);
-
-       spin_unlock_irqrestore(&ei->page_lock, flags);
-}
+       struct ei_device *ei_local = netdev_priv(dev);
+       struct ax_device *ax = to_ax_dev(dev);
 
-static void ax_mii_expiry(unsigned long data)
-{
-       struct net_device *dev = (struct net_device *)data;
-       struct ax_device  *ax = to_ax_dev(dev);
-       unsigned long flags;
+       u8 reg_gpoc =  ax->plat->gpoc_val;
 
-       spin_lock_irqsave(&ax->mii_lock, flags);
-       mii_check_media(&ax->mii, netif_msg_link(ax), 0);
-       spin_unlock_irqrestore(&ax->mii_lock, flags);
+       if (!!on)
+               reg_gpoc &= ~AX_GPOC_PPDSET;
+       else
+               reg_gpoc |= AX_GPOC_PPDSET;
 
-       if (ax->running) {
-               ax->mii_timer.expires = jiffies + HZ*2;
-               add_timer(&ax->mii_timer);
-       }
+       ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17));
 }
 
 static int ax_open(struct net_device *dev)
 {
-       struct ax_device  *ax = to_ax_dev(dev);
-       struct ei_device *ei_local = netdev_priv(dev);
+       struct ax_device *ax = to_ax_dev(dev);
        int ret;
 
-       dev_dbg(&ax->dev->dev, "%s: open\n", dev->name);
+       netdev_dbg(dev, "open\n");
 
        ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags,
                          dev->name, dev);
        if (ret)
-               return ret;
-
-       ret = ax_ei_open(dev);
-       if (ret) {
-               free_irq(dev->irq, dev);
-               return ret;
-       }
+               goto failed_request_irq;
 
        /* turn the phy on (if turned off) */
+       ax_phy_switch(dev, 1);
 
-       ei_outb(ax->plat->gpoc_val, ei_local->mem + EI_SHIFT(0x17));
-       ax->running = 1;
-
-       /* start the MII timer */
-
-       init_timer(&ax->mii_timer);
+       ret = ax_mii_probe(dev);
+       if (ret)
+               goto failed_mii_probe;
+       phy_start(ax->phy_dev);
 
-       ax->mii_timer.expires  = jiffies+1;
-       ax->mii_timer.data     = (unsigned long) dev;
-       ax->mii_timer.function = ax_mii_expiry;
+       ret = ax_ei_open(dev);
+       if (ret)
+               goto failed_ax_ei_open;
 
-       add_timer(&ax->mii_timer);
+       ax->running = 1;
 
        return 0;
+
+ failed_ax_ei_open:
+       phy_disconnect(ax->phy_dev);
+ failed_mii_probe:
+       ax_phy_switch(dev, 0);
+       free_irq(dev->irq, dev);
+ failed_request_irq:
+       return ret;
 }
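
The new error path in ax_open() above unwinds in strict reverse order of setup (IRQ request, PHY power-up, PHY attach, 8390 open), largely mirroring what ax_close() tears down. A minimal standalone illustration of that goto-unwind shape, using placeholder functions (acquire_a()/acquire_b()/release_a() stand in for the driver's request_irq()/ax_mii_probe()/ax_ei_open(); they are not driver code):

#include <stdio.h>

/* Placeholders standing in for the driver's setup/teardown steps. */
static int acquire_a(void) { return 0; }	/* e.g. request_irq() */
static int acquire_b(void) { return -1; }	/* pretend this step fails */
static void release_a(void) { puts("undo step a"); }

static int example_open(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto fail_a;

	ret = acquire_b();
	if (ret)
		goto fail_b;

	return 0;

 fail_b:
	release_a();		/* undo only what succeeded, in reverse order */
 fail_a:
	return ret;
}

int main(void)
{
	return example_open() ? 1 : 0;
}
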
 
 static int ax_close(struct net_device *dev)
 {
        struct ax_device *ax = to_ax_dev(dev);
-       struct ei_device *ei_local = netdev_priv(dev);
 
-       dev_dbg(&ax->dev->dev, "%s: close\n", dev->name);
-
-       /* turn the phy off */
-
-       ei_outb(ax->plat->gpoc_val | (1<<6),
-              ei_local->mem + EI_SHIFT(0x17));
+       netdev_dbg(dev, "close\n");
 
        ax->running = 0;
        wmb();
 
-       del_timer_sync(&ax->mii_timer);
        ax_ei_close(dev);
 
+       /* turn the phy off */
+       ax_phy_switch(dev, 0);
+       phy_disconnect(ax->phy_dev);
+
        free_irq(dev->irq, dev);
        return 0;
 }
@@ -529,17 +452,15 @@ static int ax_close(struct net_device *dev)
 static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 {
        struct ax_device *ax = to_ax_dev(dev);
-       unsigned long flags;
-       int rc;
+       struct phy_device *phy_dev = ax->phy_dev;
 
        if (!netif_running(dev))
                return -EINVAL;
 
-       spin_lock_irqsave(&ax->mii_lock, flags);
-       rc = generic_mii_ioctl(&ax->mii, if_mii(req), cmd, NULL);
-       spin_unlock_irqrestore(&ax->mii_lock, flags);
+       if (!phy_dev)
+               return -ENODEV;
 
-       return rc;
+       return phy_mii_ioctl(phy_dev, req, cmd);
 }
 
 /* ethtool ops */
@@ -547,56 +468,40 @@ static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 static void ax_get_drvinfo(struct net_device *dev,
                           struct ethtool_drvinfo *info)
 {
-       struct ax_device *ax = to_ax_dev(dev);
+       struct platform_device *pdev = to_platform_device(dev->dev.parent);
 
        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, ax->dev->name);
+       strcpy(info->bus_info, pdev->name);
 }
 
 static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
        struct ax_device *ax = to_ax_dev(dev);
-       unsigned long flags;
+       struct phy_device *phy_dev = ax->phy_dev;
 
-       spin_lock_irqsave(&ax->mii_lock, flags);
-       mii_ethtool_gset(&ax->mii, cmd);
-       spin_unlock_irqrestore(&ax->mii_lock, flags);
+       if (!phy_dev)
+               return -ENODEV;
 
-       return 0;
+       return phy_ethtool_gset(phy_dev, cmd);
 }
 
 static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
        struct ax_device *ax = to_ax_dev(dev);
-       unsigned long flags;
-       int rc;
+       struct phy_device *phy_dev = ax->phy_dev;
 
-       spin_lock_irqsave(&ax->mii_lock, flags);
-       rc = mii_ethtool_sset(&ax->mii, cmd);
-       spin_unlock_irqrestore(&ax->mii_lock, flags);
-
-       return rc;
-}
-
-static int ax_nway_reset(struct net_device *dev)
-{
-       struct ax_device *ax = to_ax_dev(dev);
-       return mii_nway_restart(&ax->mii);
-}
+       if (!phy_dev)
+               return -ENODEV;
 
-static u32 ax_get_link(struct net_device *dev)
-{
-       struct ax_device *ax = to_ax_dev(dev);
-       return mii_link_ok(&ax->mii);
+       return phy_ethtool_sset(phy_dev, cmd);
 }
 
 static const struct ethtool_ops ax_ethtool_ops = {
        .get_drvinfo            = ax_get_drvinfo,
        .get_settings           = ax_get_settings,
        .set_settings           = ax_set_settings,
-       .nway_reset             = ax_nway_reset,
-       .get_link               = ax_get_link,
+       .get_link               = ethtool_op_get_link,
 };
 
 #ifdef CONFIG_AX88796_93CX6
@@ -640,37 +545,131 @@ static const struct net_device_ops ax_netdev_ops = {
        .ndo_get_stats          = ax_ei_get_stats,
        .ndo_set_multicast_list = ax_ei_set_multicast_list,
        .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_set_mac_address    = eth_mac_addr,
        .ndo_change_mtu         = eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ax_ei_poll,
 #endif
 };
 
+static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level)
+{
+       struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+       if (level)
+               ax->reg_memr |= AX_MEMR_MDC;
+       else
+               ax->reg_memr &= ~AX_MEMR_MDC;
+
+       ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output)
+{
+       struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+       if (output)
+               ax->reg_memr &= ~AX_MEMR_MDIR;
+       else
+               ax->reg_memr |= AX_MEMR_MDIR;
+
+       ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value)
+{
+       struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+       if (value)
+               ax->reg_memr |= AX_MEMR_MDO;
+       else
+               ax->reg_memr &= ~AX_MEMR_MDO;
+
+       ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static int ax_bb_get_data(struct mdiobb_ctrl *ctrl)
+{
+       struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+       int reg_memr = ei_inb(ax->addr_memr);
+
+       return reg_memr & AX_MEMR_MDI ? 1 : 0;
+}
+
+static struct mdiobb_ops bb_ops = {
+       .owner = THIS_MODULE,
+       .set_mdc = ax_bb_mdc,
+       .set_mdio_dir = ax_bb_dir,
+       .set_mdio_data = ax_bb_set_data,
+       .get_mdio_data = ax_bb_get_data,
+};
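
These four hooks are all the generic bit-banging MDIO layer needs; it clocks the management frames itself, which is why the hand-rolled ax_mii_ei_outbits()/ax_phy_ei_inbits() helpers removed above can go. As a rough illustration only (not the in-tree drivers/net/phy/mdio-bitbang.c code; example_mdiobb_read(), the frame constants and the clock phasing are simplified assumptions), a clause-22 read driven through such hooks looks roughly like this:

#include <linux/mdio-bitbang.h>

/*
 * Illustrative sketch only: how a bit-banging MDIO layer might drive the
 * callbacks above for a clause-22 read.  Not the real mdio-bitbang
 * implementation; delays between clock edges are omitted.
 */
static u16 example_mdiobb_read(struct mdiobb_ctrl *ctrl, int phy, int reg)
{
	/* ST=01, OP=10 (read), 5-bit PHY address, 5-bit register address */
	u32 frame = (0x6 << 10) | ((phy & 0x1f) << 5) | (reg & 0x1f);
	u16 val = 0;
	int i;

	ctrl->ops->set_mdio_dir(ctrl, 1);	/* drive MDIO */
	for (i = 0; i < 32; i++) {		/* preamble: 32 ones */
		ctrl->ops->set_mdio_data(ctrl, 1);
		ctrl->ops->set_mdc(ctrl, 0);
		ctrl->ops->set_mdc(ctrl, 1);
	}
	for (i = 13; i >= 0; i--) {		/* ST, OP, PHY, REG, MSB first */
		ctrl->ops->set_mdio_data(ctrl, (frame >> i) & 1);
		ctrl->ops->set_mdc(ctrl, 0);
		ctrl->ops->set_mdc(ctrl, 1);
	}
	ctrl->ops->set_mdio_dir(ctrl, 0);	/* turnaround: release MDIO */
	for (i = 0; i < 2; i++) {
		ctrl->ops->set_mdc(ctrl, 0);
		ctrl->ops->set_mdc(ctrl, 1);
	}
	for (i = 15; i >= 0; i--) {		/* 16 data bits, MSB first */
		ctrl->ops->set_mdc(ctrl, 0);
		ctrl->ops->set_mdc(ctrl, 1);
		val = (val << 1) | ctrl->ops->get_mdio_data(ctrl);
	}
	return val;
}

In the driver itself none of this is open-coded any more: alloc_mdio_bitbang() in ax_mii_init() below wires bb_ops into a standard struct mii_bus that phylib then uses.
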
+
 /* setup code */
 
+static int ax_mii_init(struct net_device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev->dev.parent);
+       struct ei_device *ei_local = netdev_priv(dev);
+       struct ax_device *ax = to_ax_dev(dev);
+       int err, i;
+
+       ax->bb_ctrl.ops = &bb_ops;
+       ax->addr_memr = ei_local->mem + AX_MEMR;
+       ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl);
+       if (!ax->mii_bus) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       ax->mii_bus->name = "ax88796_mii_bus";
+       ax->mii_bus->parent = dev->dev.parent;
+       snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+
+       ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+       if (!ax->mii_bus->irq) {
+               err = -ENOMEM;
+               goto out_free_mdio_bitbang;
+       }
+
+       for (i = 0; i < PHY_MAX_ADDR; i++)
+               ax->mii_bus->irq[i] = PHY_POLL;
+
+       err = mdiobus_register(ax->mii_bus);
+       if (err)
+               goto out_free_irq;
+
+       return 0;
+
+ out_free_irq:
+       kfree(ax->mii_bus->irq);
+ out_free_mdio_bitbang:
+       free_mdio_bitbang(ax->mii_bus);
+ out:
+       return err;
+}
+
 static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local)
 {
        void __iomem *ioaddr = ei_local->mem;
        struct ax_device *ax = to_ax_dev(dev);
 
-       /* Select page 0*/
-       ei_outb(E8390_NODMA+E8390_PAGE0+E8390_STOP, ioaddr + E8390_CMD);
+       /* Select page 0 */
+       ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_STOP, ioaddr + E8390_CMD);
 
        /* set to byte access */
        ei_outb(ax->plat->dcr_val & ~1, ioaddr + EN0_DCFG);
        ei_outb(ax->plat->gpoc_val, ioaddr + EI_SHIFT(0x17));
 }
 
-/* ax_init_dev
+/*
+ * ax_init_dev
  *
  * initialise the specified device, taking care to note the MAC
  * address it may already have (if configured), ensuring
  * the device is ready to be used by lib8390.c and registered with
  * the network layer.
  */
-
-static int ax_init_dev(struct net_device *dev, int first_init)
+static int ax_init_dev(struct net_device *dev)
 {
        struct ei_device *ei_local = netdev_priv(dev);
        struct ax_device *ax = to_ax_dev(dev);
@@ -690,23 +689,23 @@ static int ax_init_dev(struct net_device *dev, int first_init)
 
        /* read the mac from the card prom if we need it */
 
-       if (first_init && ax->plat->flags & AXFLG_HAS_EEPROM) {
+       if (ax->plat->flags & AXFLG_HAS_EEPROM) {
                unsigned char SA_prom[32];
 
-               for(i = 0; i < sizeof(SA_prom); i+=2) {
+               for (i = 0; i < sizeof(SA_prom); i += 2) {
                        SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT);
-                       SA_prom[i+1] = ei_inb(ioaddr + NE_DATAPORT);
+                       SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT);
                }
 
                if (ax->plat->wordlength == 2)
                        for (i = 0; i < 16; i++)
                                SA_prom[i] = SA_prom[i+i];
 
-               memcpy(dev->dev_addr,  SA_prom, 6);
+               memcpy(dev->dev_addr, SA_prom, 6);
        }
 
 #ifdef CONFIG_AX88796_93CX6
-       if (first_init && ax->plat->flags & AXFLG_HAS_93CX6) {
+       if (ax->plat->flags & AXFLG_HAS_93CX6) {
                unsigned char mac_addr[6];
                struct eeprom_93cx6 eeprom;
 
@@ -719,7 +718,7 @@ static int ax_init_dev(struct net_device *dev, int first_init)
                                       (__le16 __force *)mac_addr,
                                       sizeof(mac_addr) >> 1);
 
-               memcpy(dev->dev_addr,  mac_addr, 6);
+               memcpy(dev->dev_addr, mac_addr, 6);
        }
 #endif
        if (ax->plat->wordlength == 2) {
@@ -732,67 +731,56 @@ static int ax_init_dev(struct net_device *dev, int first_init)
                stop_page = NE1SM_STOP_PG;
        }
 
-       /* load the mac-address from the device if this is the
-        * first time we've initialised */
-
-       if (first_init) {
-               if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
-                       ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
-                               ei_local->mem + E8390_CMD); /* 0x61 */
-                       for (i = 0; i < ETHER_ADDR_LEN; i++)
-                               dev->dev_addr[i] =
-                                       ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
-               }
-
-               if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
-                    ax->plat->mac_addr)
-                       memcpy(dev->dev_addr, ax->plat->mac_addr,
-                               ETHER_ADDR_LEN);
+       /* load the mac-address from the device */
+       if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
+               ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
+                       ei_local->mem + E8390_CMD); /* 0x61 */
+               for (i = 0; i < ETHER_ADDR_LEN; i++)
+                       dev->dev_addr[i] =
+                               ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
        }
 
+       if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
+           ax->plat->mac_addr)
+               memcpy(dev->dev_addr, ax->plat->mac_addr,
+                      ETHER_ADDR_LEN);
+
        ax_reset_8390(dev);
 
-       ei_status.name = "AX88796";
-       ei_status.tx_start_page = start_page;
-       ei_status.stop_page = stop_page;
-       ei_status.word16 = (ax->plat->wordlength == 2);
-       ei_status.rx_start_page = start_page + TX_PAGES;
+       ei_local->name = "AX88796";
+       ei_local->tx_start_page = start_page;
+       ei_local->stop_page = stop_page;
+       ei_local->word16 = (ax->plat->wordlength == 2);
+       ei_local->rx_start_page = start_page + TX_PAGES;
 
 #ifdef PACKETBUF_MEMSIZE
-        /* Allow the packet buffer size to be overridden by know-it-alls. */
-       ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
+       /* Allow the packet buffer size to be overridden by know-it-alls. */
+       ei_local->stop_page = ei_local->tx_start_page + PACKETBUF_MEMSIZE;
 #endif
 
-       ei_status.reset_8390    = &ax_reset_8390;
-       ei_status.block_input   = &ax_block_input;
-       ei_status.block_output  = &ax_block_output;
-       ei_status.get_8390_hdr  = &ax_get_8390_hdr;
-       ei_status.priv = 0;
-
-       dev->netdev_ops         = &ax_netdev_ops;
-       dev->ethtool_ops        = &ax_ethtool_ops;
-
-       ax->msg_enable          = NETIF_MSG_LINK;
-       ax->mii.phy_id_mask     = 0x1f;
-       ax->mii.reg_num_mask    = 0x1f;
-       ax->mii.phy_id          = 0x10;         /* onboard phy */
-       ax->mii.force_media     = 0;
-       ax->mii.full_duplex     = 0;
-       ax->mii.mdio_read       = ax_phy_read;
-       ax->mii.mdio_write      = ax_phy_write;
-       ax->mii.dev             = dev;
+       ei_local->reset_8390 = &ax_reset_8390;
+       ei_local->block_input = &ax_block_input;
+       ei_local->block_output = &ax_block_output;
+       ei_local->get_8390_hdr = &ax_get_8390_hdr;
+       ei_local->priv = 0;
 
-       ax_NS8390_init(dev, 0);
+       dev->netdev_ops = &ax_netdev_ops;
+       dev->ethtool_ops = &ax_ethtool_ops;
 
-       if (first_init)
-               dev_info(&ax->dev->dev, "%dbit, irq %d, %lx, MAC: %pM\n",
-                        ei_status.word16 ? 16:8, dev->irq, dev->base_addr,
-                        dev->dev_addr);
+       ret = ax_mii_init(dev);
+       if (ret)
+               goto out_irq;
+
+       ax_NS8390_init(dev, 0);
 
        ret = register_netdev(dev);
        if (ret)
                goto out_irq;
 
+       netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n",
+                   ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr,
+                   dev->dev_addr);
+
        return 0;
 
  out_irq:
@@ -802,24 +790,24 @@ static int ax_init_dev(struct net_device *dev, int first_init)
        return ret;
 }
 
-static int ax_remove(struct platform_device *_dev)
+static int ax_remove(struct platform_device *pdev)
 {
-       struct net_device *dev = platform_get_drvdata(_dev);
-       struct ax_device  *ax;
-
-       ax = to_ax_dev(dev);
+       struct net_device *dev = platform_get_drvdata(pdev);
+       struct ei_device *ei_local = netdev_priv(dev);
+       struct ax_device *ax = to_ax_dev(dev);
+       struct resource *mem;
 
        unregister_netdev(dev);
        free_irq(dev->irq, dev);
 
-       iounmap(ei_status.mem);
-       release_resource(ax->mem);
-       kfree(ax->mem);
+       iounmap(ei_local->mem);
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       release_mem_region(mem->start, resource_size(mem));
 
        if (ax->map2) {
                iounmap(ax->map2);
-               release_resource(ax->mem2);
-               kfree(ax->mem2);
+               mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+               release_mem_region(mem->start, resource_size(mem));
        }
 
        free_netdev(dev);
@@ -827,19 +815,20 @@ static int ax_remove(struct platform_device *_dev)
        return 0;
 }
 
-/* ax_probe
+/*
+ * ax_probe
  *
  * This is the entry point that the platform device system uses to
- * notify us of a new device to attach to. Allocate memory, find
- * the resources and information passed, and map the necessary registers.
-*/
-
+ * notify us of a new device to attach to. Allocate memory, find the
+ * resources and information passed, and map the necessary registers.
+ */
 static int ax_probe(struct platform_device *pdev)
 {
        struct net_device *dev;
-       struct ax_device  *ax;
-       struct resource   *res;
-       size_t size;
+       struct ei_device *ei_local;
+       struct ax_device *ax;
+       struct resource *irq, *mem, *mem2;
+       resource_size_t mem_size, mem2_size = 0;
        int ret = 0;
 
        dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
@@ -847,120 +836,107 @@ static int ax_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        /* ok, let's setup our device */
+       SET_NETDEV_DEV(dev, &pdev->dev);
+       ei_local = netdev_priv(dev);
        ax = to_ax_dev(dev);
 
-       memset(ax, 0, sizeof(struct ax_device));
-
-       spin_lock_init(&ax->mii_lock);
-
-       ax->dev = pdev;
        ax->plat = pdev->dev.platform_data;
        platform_set_drvdata(pdev, dev);
 
-       ei_status.rxcr_base  = ax->plat->rcr_val;
+       ei_local->rxcr_base = ax->plat->rcr_val;
 
        /* find the platform resources */
-
-       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (res == NULL) {
+       irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!irq) {
                dev_err(&pdev->dev, "no IRQ specified\n");
                ret = -ENXIO;
                goto exit_mem;
        }
 
-       dev->irq = res->start;
-       ax->irqflags = res->flags & IRQF_TRIGGER_MASK;
+       dev->irq = irq->start;
+       ax->irqflags = irq->flags & IRQF_TRIGGER_MASK;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (res == NULL) {
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!mem) {
                dev_err(&pdev->dev, "no MEM specified\n");
                ret = -ENXIO;
                goto exit_mem;
        }
 
-       size = (res->end - res->start) + 1;
-
-       /* setup the register offsets from either the platform data
-        * or by using the size of the resource provided */
+       mem_size = resource_size(mem);
 
+       /*
+        * setup the register offsets from either the platform data or
+        * by using the size of the resource provided
+        */
        if (ax->plat->reg_offsets)
-               ei_status.reg_offset = ax->plat->reg_offsets;
+               ei_local->reg_offset = ax->plat->reg_offsets;
        else {
-               ei_status.reg_offset = ax->reg_offsets;
+               ei_local->reg_offset = ax->reg_offsets;
                for (ret = 0; ret < 0x18; ret++)
-                       ax->reg_offsets[ret] = (size / 0x18) * ret;
+                       ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
        }
 
-       ax->mem = request_mem_region(res->start, size, pdev->name);
-       if (ax->mem == NULL) {
+       if (!request_mem_region(mem->start, mem_size, pdev->name)) {
                dev_err(&pdev->dev, "cannot reserve registers\n");
-               ret = -ENXIO;
+               ret = -ENXIO;
                goto exit_mem;
        }
 
-       ei_status.mem = ioremap(res->start, size);
-       dev->base_addr = (unsigned long)ei_status.mem;
+       ei_local->mem = ioremap(mem->start, mem_size);
+       dev->base_addr = (unsigned long)ei_local->mem;
 
-       if (ei_status.mem == NULL) {
-               dev_err(&pdev->dev, "Cannot ioremap area (%08llx,%08llx)\n",
-                       (unsigned long long)res->start,
-                       (unsigned long long)res->end);
+       if (ei_local->mem == NULL) {
+               dev_err(&pdev->dev, "Cannot ioremap area %pR\n", mem);
 
-               ret = -ENXIO;
+               ret = -ENXIO;
                goto exit_req;
        }
 
        /* look for reset area */
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (res == NULL) {
+       mem2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (!mem2) {
                if (!ax->plat->reg_offsets) {
                        for (ret = 0; ret < 0x20; ret++)
-                               ax->reg_offsets[ret] = (size / 0x20) * ret;
+                               ax->reg_offsets[ret] = (mem_size / 0x20) * ret;
                }
-
-               ax->map2 = NULL;
        } else {
-               size = (res->end - res->start) + 1;
+               mem2_size = resource_size(mem2);
 
-               ax->mem2 = request_mem_region(res->start, size, pdev->name);
-               if (ax->mem2 == NULL) {
+               if (!request_mem_region(mem2->start, mem2_size, pdev->name)) {
                        dev_err(&pdev->dev, "cannot reserve registers\n");
                        ret = -ENXIO;
                        goto exit_mem1;
                }
 
-               ax->map2 = ioremap(res->start, size);
-               if (ax->map2 == NULL) {
+               ax->map2 = ioremap(mem2->start, mem2_size);
+               if (!ax->map2) {
                        dev_err(&pdev->dev, "cannot map reset register\n");
                        ret = -ENXIO;
                        goto exit_mem2;
                }
 
-               ei_status.reg_offset[0x1f] = ax->map2 - ei_status.mem;
+               ei_local->reg_offset[0x1f] = ax->map2 - ei_local->mem;
        }
 
        /* got resources, now initialise and register device */
-
-       ret = ax_init_dev(dev, 1);
+       ret = ax_init_dev(dev);
        if (!ret)
                return 0;
 
-       if (ax->map2 == NULL)
+       if (!ax->map2)
                goto exit_mem1;
 
        iounmap(ax->map2);
 
  exit_mem2:
-       release_resource(ax->mem2);
-       kfree(ax->mem2);
+       release_mem_region(mem2->start, mem2_size);
 
  exit_mem1:
-       iounmap(ei_status.mem);
+       iounmap(ei_local->mem);
 
  exit_req:
-       release_resource(ax->mem);
-       kfree(ax->mem);
+       release_mem_region(mem->start, mem_size);
 
  exit_mem:
        free_netdev(dev);
@@ -974,7 +950,7 @@ static int ax_probe(struct platform_device *pdev)
 static int ax_suspend(struct platform_device *dev, pm_message_t state)
 {
        struct net_device *ndev = platform_get_drvdata(dev);
-       struct ax_device  *ax = to_ax_dev(ndev);
+       struct ax_device *ax = to_ax_dev(ndev);
 
        ax->resume_open = ax->running;
 
@@ -987,7 +963,7 @@ static int ax_suspend(struct platform_device *dev, pm_message_t state)
 static int ax_resume(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
-       struct ax_device  *ax = to_ax_dev(ndev);
+       struct ax_device *ax = to_ax_dev(ndev);
 
        ax_initial_setup(ndev, netdev_priv(ndev));
        ax_NS8390_init(ndev, ax->resume_open);
@@ -1001,7 +977,7 @@ static int ax_resume(struct platform_device *pdev)
 
 #else
 #define ax_suspend NULL
-#define ax_resume  NULL
+#define ax_resume NULL
 #endif
 
 static struct platform_driver axdrv = {
index add0b93350dd9fb14e73706cdf33c986f100c650..4ac0d72660fe9fca225a0a77b19b61001def4ae0 100644 (file)
@@ -220,9 +220,11 @@ struct be_rx_obj {
        struct be_rx_stats stats;
        u8 rss_id;
        bool rx_post_starved;   /* Zero rx frags have been posted to BE */
-       u16 last_frag_index;
-       u16 rsvd;
-       u32 cache_line_barrier[15];
+       u32 cache_line_barrier[16];
+};
+
+struct be_drv_stats {
+       u8 be_on_die_temperature;
 };
 
 struct be_vf_cfg {
@@ -234,6 +236,7 @@ struct be_vf_cfg {
 };
 
 #define BE_INVALID_PMAC_ID             0xffffffff
+
 struct be_adapter {
        struct pci_dev *pdev;
        struct net_device *netdev;
@@ -269,6 +272,7 @@ struct be_adapter {
        u32 big_page_size;      /* Compounded page size shared by rx wrbs */
 
        u8 msix_vec_next_idx;
+       struct be_drv_stats drv_stats;
 
        struct vlan_group *vlan_grp;
        u16 vlans_added;
@@ -281,6 +285,7 @@ struct be_adapter {
        struct be_dma_mem stats_cmd;
        /* Work queue used to perform periodic tasks like getting statistics */
        struct delayed_work work;
+       u16 work_counter;
 
        /* Ethtool knobs and info */
        bool rx_csum;           /* BE card must perform rx-checksumming */
@@ -298,7 +303,7 @@ struct be_adapter {
        u32 rx_fc;              /* Rx flow control */
        u32 tx_fc;              /* Tx flow control */
        bool ue_detected;
-       bool stats_ioctl_sent;
+       bool stats_cmd_sent;
        int link_speed;
        u8 port_type;
        u8 transceiver;
@@ -311,6 +316,8 @@ struct be_adapter {
        struct be_vf_cfg vf_cfg[BE_MAX_VF];
        u8 is_virtfn;
        u32 sli_family;
+       u8 hba_port_num;
+       u16 pvid;
 };
 
 #define be_physfn(adapter) (!adapter->is_virtfn)
@@ -450,9 +457,8 @@ static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
        mac[5] = (u8)(addr & 0xFF);
        mac[4] = (u8)((addr >> 8) & 0xFF);
        mac[3] = (u8)((addr >> 16) & 0xFF);
-       mac[2] = 0xC9;
-       mac[1] = 0x00;
-       mac[0] = 0x00;
+       /* Use the OUI from the current MAC address */
+       memcpy(mac, adapter->netdev->dev_addr, 3);
 }
 
 extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
index a179cc6d79f2e636aad7ac5ca4899855f8ec8de0..cc3a235475bc0a96ac52b048f615eb54821f1ce0 100644 (file)
 #include "be.h"
 #include "be_cmds.h"
 
+/* Must be a power of 2 or else MODULO will BUG_ON */
+static int be_get_temp_freq = 32;
+
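The power-of-two requirement matters because the temperature query is rate-limited by a modulo test on the periodic worker counter (see the MODULO() check added in be_cmd_get_stats() further down), and a power-of-two period is exactly what makes such a test reducible to a bit mask. A small standalone check of that equivalence; is_nth_tick() is an illustration only, not driver code and not the driver's MODULO() helper:

#include <assert.h>

/* period must be a power of two for the mask form to match "% period" */
static int is_nth_tick(unsigned int counter, unsigned int period)
{
	return (counter & (period - 1)) == 0;
}

int main(void)
{
	unsigned int c;

	for (c = 0; c < 1024; c++)
		assert(is_nth_tick(c, 32) == (c % 32 == 0));
	return 0;
}
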
 static void be_mcc_notify(struct be_adapter *adapter)
 {
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        u32 val = 0;
 
+       if (adapter->eeh_err) {
+               dev_info(&adapter->pdev->dev,
+                       "Error in Card Detected! Cannot issue commands\n");
+               return;
+       }
+
        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
 
@@ -75,7 +84,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
                        be_dws_le_to_cpu(&resp->hw_stats,
                                                sizeof(resp->hw_stats));
                        netdev_stats_update(adapter);
-                       adapter->stats_ioctl_sent = false;
+                       adapter->stats_cmd_sent = false;
                }
        } else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
                   (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
@@ -102,6 +111,7 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
 {
        if (evt->valid) {
                adapter->vlan_prio_bmap = evt->available_priority_bmap;
+               adapter->recommended_prio &= ~VLAN_PRIO_MASK;
                adapter->recommended_prio =
                        evt->reco_default_priority << VLAN_PRIO_SHIFT;
        }
@@ -117,6 +127,16 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
        }
 }
 
+/*Grp5 PVID evt*/
+static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
+               struct be_async_event_grp5_pvid_state *evt)
+{
+       if (evt->enabled)
+               adapter->pvid = evt->tag;
+       else
+               adapter->pvid = 0;
+}
+
 static void be_async_grp5_evt_process(struct be_adapter *adapter,
                u32 trailer, struct be_mcc_compl *evt)
 {
@@ -134,6 +154,10 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
                be_async_grp5_qos_speed_process(adapter,
                (struct be_async_event_grp5_qos_link_speed *)evt);
        break;
+       case ASYNC_EVENT_PVID_STATE:
+               be_async_grp5_pvid_state_process(adapter,
+               (struct be_async_event_grp5_pvid_state *)evt);
+       break;
        default:
                dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
                break;
@@ -216,6 +240,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
        int i, num, status = 0;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 
+       if (adapter->eeh_err)
+               return -EIO;
+
        for (i = 0; i < mcc_timeout; i++) {
                num = be_process_mcc(adapter, &status);
                if (num)
@@ -245,6 +272,12 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
        int msecs = 0;
        u32 ready;
 
+       if (adapter->eeh_err) {
+               dev_err(&adapter->pdev->dev,
+                       "Error detected in card. Cannot issue commands\n");
+               return -EIO;
+       }
+
        do {
                ready = ioread32(db);
                if (ready == 0xffffffff) {
@@ -598,7 +631,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 
 /* Uses synchronous MCCQ */
 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
-               u32 if_id, u32 *pmac_id)
+               u32 if_id, u32 *pmac_id, u32 domain)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_add *req;
@@ -619,6 +652,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
 
+       req->hdr.domain = domain;
        req->if_id = cpu_to_le32(if_id);
        memcpy(req->mac_address, mac_addr, ETH_ALEN);
 
@@ -634,7 +668,7 @@ err:
 }
 
 /* Uses synchronous MCCQ */
-int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_del *req;
@@ -655,6 +689,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
 
+       req->hdr.domain = dom;
        req->if_id = cpu_to_le32(if_id);
        req->pmac_id = cpu_to_le32(pmac_id);
 
@@ -691,7 +726,7 @@ int be_cmd_cq_create(struct be_adapter *adapter,
 
        req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
        if (lancer_chip(adapter)) {
-               req->hdr.version = 1;
+               req->hdr.version = 2;
                req->page_size = 1; /* 1 for 4K */
                AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
                                                                coalesce_wm);
@@ -827,6 +862,12 @@ int be_cmd_txq_create(struct be_adapter *adapter,
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
                sizeof(*req));
 
+       if (lancer_chip(adapter)) {
+               req->hdr.version = 1;
+               AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
+                                       adapter->if_handle);
+       }
+
        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        req->ulp_num = BE_ULP1_NUM;
        req->type = BE_ETH_TX_RING_TYPE_STANDARD;
@@ -995,7 +1036,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
 }
 
 /* Uses mbox */
-int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
+int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_if_destroy *req;
@@ -1016,6 +1057,7 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
 
+       req->hdr.domain = domain;
        req->interface_id = cpu_to_le32(interface_id);
 
        status = be_mbox_notify_wait(adapter);
@@ -1036,6 +1078,9 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
        struct be_sge *sge;
        int status = 0;
 
+       if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
+               be_cmd_get_die_temperature(adapter);
+
        spin_lock_bh(&adapter->mcc_lock);
 
        wrb = wrb_from_mccq(adapter);
@@ -1056,7 +1101,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
        sge->len = cpu_to_le32(nonemb_cmd->size);
 
        be_mcc_notify(adapter);
-       adapter->stats_ioctl_sent = true;
+       adapter->stats_cmd_sent = true;
 
 err:
        spin_unlock_bh(&adapter->mcc_lock);
@@ -1103,6 +1148,44 @@ err:
        return status;
 }
 
+/* Uses synchronous mcc */
+int be_cmd_get_die_temperature(struct be_adapter *adapter)
+{
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_req_get_cntl_addnl_attribs *req;
+       int status;
+
+       spin_lock_bh(&adapter->mcc_lock);
+
+       wrb = wrb_from_mccq(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err;
+       }
+       req = embedded_payload(wrb);
+
+       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+                       OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
+
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+               OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
+
+       status = be_mcc_notify_wait(adapter);
+       if (!status) {
+               struct be_cmd_resp_get_cntl_addnl_attribs *resp =
+                                               embedded_payload(wrb);
+               adapter->drv_stats.be_on_die_temperature =
+                                               resp->on_die_temperature;
+       }
+       /* If IOCTL fails once, do not bother issuing it again */
+       else
+               be_get_temp_freq = 0;
+
+err:
+       spin_unlock_bh(&adapter->mcc_lock);
+       return status;
+}
+
 /* Uses Mbox */
 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
 {
@@ -1868,8 +1951,8 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
                        OPCODE_COMMON_SET_QOS, sizeof(*req));
 
        req->hdr.domain = domain;
-       req->valid_bits = BE_QOS_BITS_NIC;
-       req->max_bps_nic = bps;
+       req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
+       req->max_bps_nic = cpu_to_le32(bps);
 
        status = be_mcc_notify_wait(adapter);
 
@@ -1877,3 +1960,57 @@ err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
 }
+
+int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
+{
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_req_cntl_attribs *req;
+       struct be_cmd_resp_cntl_attribs *resp;
+       struct be_sge *sge;
+       int status;
+       int payload_len = max(sizeof(*req), sizeof(*resp));
+       struct mgmt_controller_attrib *attribs;
+       struct be_dma_mem attribs_cmd;
+
+       memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
+       attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
+       attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
+                                               &attribs_cmd.dma);
+       if (!attribs_cmd.va) {
+               dev_err(&adapter->pdev->dev,
+                               "Memory allocation failure\n");
+               return -ENOMEM;
+       }
+
+	if (mutex_lock_interruptible(&adapter->mbox_lock)) {
+		pci_free_consistent(adapter->pdev, attribs_cmd.size,
+					attribs_cmd.va, attribs_cmd.dma);
+		return -1;
+	}
+
+       wrb = wrb_from_mbox(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err;
+       }
+       req = attribs_cmd.va;
+       sge = nonembedded_sgl(wrb);
+
+       be_wrb_hdr_prepare(wrb, payload_len, false, 1,
+                       OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+                        OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
+       sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
+       sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
+       sge->len = cpu_to_le32(attribs_cmd.size);
+
+       status = be_mbox_notify_wait(adapter);
+       if (!status) {
+               attribs = (struct mgmt_controller_attrib *)( attribs_cmd.va +
+                                       sizeof(struct be_cmd_resp_hdr));
+               adapter->hba_port_num = attribs->hba_attribs.phy_port;
+       }
+
+err:
+       mutex_unlock(&adapter->mbox_lock);
+       pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
+                                       attribs_cmd.dma);
+       return status;
+}
index 83d15c8a9fa30e1bf5e28a371c7cf7fade8e0580..b4ac3938b298a046269bddf5daa0bc784d7d3321 100644 (file)
@@ -88,6 +88,7 @@ struct be_mcc_compl {
 #define ASYNC_EVENT_CODE_GRP_5         0x5
 #define ASYNC_EVENT_QOS_SPEED          0x1
 #define ASYNC_EVENT_COS_PRIORITY       0x2
+#define ASYNC_EVENT_PVID_STATE         0x3
 struct be_async_event_trailer {
        u32 code;
 };
@@ -134,6 +135,18 @@ struct be_async_event_grp5_cos_priority {
        struct be_async_event_trailer trailer;
 } __packed;
 
+/* When the event code of an async trailer is GRP5 and event type is
+ * PVID state, the mcc_compl must be interpreted as follows
+ */
+struct be_async_event_grp5_pvid_state {
+       u8 enabled;
+       u8 rsvd0;
+       u16 tag;
+       u32 event_tag;
+       u32 rsvd1;
+       struct be_async_event_trailer trailer;
+} __packed;
+
 struct be_mcc_mailbox {
        struct be_mcc_wrb wrb;
        struct be_mcc_compl compl;
@@ -156,6 +169,7 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_SET_QOS                          28
 #define OPCODE_COMMON_MCC_CREATE_EXT                   90
 #define OPCODE_COMMON_SEEPROM_READ                     30
+#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES               32
 #define OPCODE_COMMON_NTWK_RX_FILTER                   34
 #define OPCODE_COMMON_GET_FW_VERSION                   35
 #define OPCODE_COMMON_SET_FLOW_CONTROL                 36
@@ -176,6 +190,7 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_GET_BEACON_STATE                 70
 #define OPCODE_COMMON_READ_TRANSRECV_DATA              73
 #define OPCODE_COMMON_GET_PHY_DETAILS                  102
+#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES   121
 
 #define OPCODE_ETH_RSS_CONFIG                          1
 #define OPCODE_ETH_ACPI_CONFIG                         2
@@ -415,7 +430,7 @@ struct be_cmd_resp_mcc_create {
 /* Pseudo amap definition in which each bit of the actual structure is defined
  * as a byte: used to calculate offset/shift/mask of each field */
 struct amap_tx_context {
-       u8 rsvd0[16];           /* dword 0 */
+       u8 if_id[16];           /* dword 0 */
        u8 tx_ring_size[4];     /* dword 0 */
        u8 rsvd1[26];           /* dword 0 */
        u8 pci_func_id[8];      /* dword 1 */
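
The "pseudo amap" comment above is the key to the AMAP_SET_BITS() calls seen earlier in this patch (for example when setting if_id on the Lancer TX context): each bit of the real hardware layout is declared as one byte, so offsetof() yields the bit position and sizeof() the bit width. A simplified standalone sketch of the idea; the EX_* macros and the ex_amap_tx struct are illustrative assumptions only (they ignore fields that straddle a 32-bit word, which the driver's real helpers handle):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Byte-per-bit pseudo layout for the first 32 bits only (simplified). */
struct ex_amap_tx {
	uint8_t if_id[16];
	uint8_t tx_ring_size[4];
	uint8_t rsvd[12];
};

#define EX_BIT_OFF(s, f)  offsetof(struct s, f)		/* bytes == bit offset */
#define EX_BIT_LEN(s, f)  sizeof(((struct s *)0)->f)	/* bytes == bit width  */

/* Illustrative only: no support for fields crossing a 32-bit boundary. */
#define EX_AMAP_SET(s, f, dw, val)					\
	((dw)[EX_BIT_OFF(s, f) / 32] |=					\
	 (((uint32_t)(val)) & ((1u << EX_BIT_LEN(s, f)) - 1))		\
		<< (EX_BIT_OFF(s, f) % 32))

int main(void)
{
	uint32_t ctxt[1] = { 0 };

	EX_AMAP_SET(ex_amap_tx, if_id, ctxt, 0x7);	/* bits 0..15  */
	EX_AMAP_SET(ex_amap_tx, tx_ring_size, ctxt, 2);	/* bits 16..19 */
	printf("dword0 = 0x%08x\n", (unsigned int)ctxt[0]);	/* 0x00020007 */
	return 0;
}
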
@@ -503,7 +518,8 @@ enum be_if_flags {
        BE_IF_FLAGS_VLAN = 0x100,
        BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
        BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
-       BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800
+       BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
+       BE_IF_FLAGS_MULTICAST = 0x1000
 };
 
 /* An RX interface is an object with one or more MAC addresses and
@@ -619,7 +635,10 @@ struct be_rxf_stats {
        u32 rx_drops_invalid_ring;      /* dword 145*/
        u32 forwarded_packets;  /* dword 146*/
        u32 rx_drops_mtu;       /* dword 147*/
-       u32 rsvd0[15];
+       u32 rsvd0[7];
+       u32 port0_jabber_events;
+       u32 port1_jabber_events;
+       u32 rsvd1[6];
 };
 
 struct be_erx_stats {
@@ -630,11 +649,16 @@ struct be_erx_stats {
        u32 debug_pmem_pbuf_dealloc;       /* dword 47*/
 };
 
+struct be_pmem_stats {
+       u32 eth_red_drops;
+       u32 rsvd[4];
+};
+
 struct be_hw_stats {
        struct be_rxf_stats rxf;
        u32 rsvd[48];
        struct be_erx_stats erx;
-       u32 rsvd1[6];
+       struct be_pmem_stats pmem;
 };
 
 struct be_cmd_req_get_stats {
@@ -647,6 +671,20 @@ struct be_cmd_resp_get_stats {
        struct be_hw_stats hw_stats;
 };
 
+struct be_cmd_req_get_cntl_addnl_attribs {
+       struct be_cmd_req_hdr hdr;
+       u8 rsvd[8];
+};
+
+struct be_cmd_resp_get_cntl_addnl_attribs {
+       struct be_cmd_resp_hdr hdr;
+       u16 ipl_file_number;
+       u8 ipl_file_version;
+       u8 rsvd0;
+	u8 on_die_temperature; /* in degrees centigrade */
+       u8 rsvd1[3];
+};
+
 struct be_cmd_req_vlan_config {
        struct be_cmd_req_hdr hdr;
        u8 interface_id;
@@ -994,17 +1032,29 @@ struct be_cmd_resp_set_qos {
        u32 rsvd;
 };
 
+/*********************** Controller Attributes ***********************/
+struct be_cmd_req_cntl_attribs {
+       struct be_cmd_req_hdr hdr;
+};
+
+struct be_cmd_resp_cntl_attribs {
+       struct be_cmd_resp_hdr hdr;
+       struct mgmt_controller_attrib attribs;
+};
+
 extern int be_pci_fnum_get(struct be_adapter *adapter);
 extern int be_cmd_POST(struct be_adapter *adapter);
 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
                        u8 type, bool permanent, u32 if_handle);
 extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
-                       u32 if_id, u32 *pmac_id);
-extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
+                       u32 if_id, u32 *pmac_id, u32 domain);
+extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
+                       u32 pmac_id, u32 domain);
 extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
                        u32 en_flags, u8 *mac, bool pmac_invalid,
                        u32 *if_handle, u32 *pmac_id, u32 domain);
-extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
+extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
+                       u32 domain);
 extern int be_cmd_eq_create(struct be_adapter *adapter,
                        struct be_queue_info *eq, int eq_delay);
 extern int be_cmd_cq_create(struct be_adapter *adapter,
@@ -1076,4 +1126,6 @@ extern int be_cmd_get_phy_info(struct be_adapter *adapter,
                struct be_dma_mem *cmd);
 extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
 extern void be_detect_dump_ue(struct be_adapter *adapter);
+extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
+extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
 
index b4be0271efe06b88eed62358c56f3a0e9baeeaad..6e5e43380c2aa7b14fc54297d22aa33e137b3076 100644 (file)
@@ -26,7 +26,8 @@ struct be_ethtool_stat {
        int offset;
 };
 
-enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT};
+enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT,
+                       PMEMSTAT, DRVSTAT};
 #define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
                                        offsetof(_struct, field)
 #define NETSTAT_INFO(field)    #field, NETSTAT,\
@@ -43,6 +44,11 @@ enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT};
                                                field)
 #define ERXSTAT_INFO(field)    #field, ERXSTAT,\
                                        FIELDINFO(struct be_erx_stats, field)
+#define PMEMSTAT_INFO(field)   #field, PMEMSTAT,\
+                                       FIELDINFO(struct be_pmem_stats, field)
+#define DRVSTAT_INFO(field)    #field, DRVSTAT,\
+                                       FIELDINFO(struct be_drv_stats, \
+                                               field)
 
 static const struct be_ethtool_stat et_stats[] = {
        {NETSTAT_INFO(rx_packets)},
@@ -99,7 +105,11 @@ static const struct be_ethtool_stat et_stats[] = {
        {MISCSTAT_INFO(rx_drops_too_many_frags)},
        {MISCSTAT_INFO(rx_drops_invalid_ring)},
        {MISCSTAT_INFO(forwarded_packets)},
-       {MISCSTAT_INFO(rx_drops_mtu)}
+       {MISCSTAT_INFO(rx_drops_mtu)},
+       {MISCSTAT_INFO(port0_jabber_events)},
+       {MISCSTAT_INFO(port1_jabber_events)},
+       {PMEMSTAT_INFO(eth_red_drops)},
+       {DRVSTAT_INFO(be_on_die_temperature)}
 };
 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
 
@@ -121,7 +131,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
        "MAC Loopback test",
        "PHY Loopback test",
        "External Loopback test",
-       "DDR DMA test"
+       "DDR DMA test",
        "Link test"
 };
 
@@ -276,6 +286,12 @@ be_get_ethtool_stats(struct net_device *netdev,
                case MISCSTAT:
                        p = &hw_stats->rxf;
                        break;
+               case PMEMSTAT:
+                       p = &hw_stats->pmem;
+                       break;
+               case DRVSTAT:
+                       p = &adapter->drv_stats;
+                       break;
                }
 
                p = (u8 *)p + et_stats[i].offset;
@@ -376,8 +392,9 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                }
 
                phy_cmd.size = sizeof(struct be_cmd_req_get_phy_info);
-               phy_cmd.va = pci_alloc_consistent(adapter->pdev, phy_cmd.size,
-                                       &phy_cmd.dma);
+               phy_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+                                               phy_cmd.size, &phy_cmd.dma,
+                                               GFP_KERNEL);
                if (!phy_cmd.va) {
                        dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
                        return -ENOMEM;
@@ -416,8 +433,8 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                adapter->port_type = ecmd->port;
                adapter->transceiver = ecmd->transceiver;
                adapter->autoneg = ecmd->autoneg;
-               pci_free_consistent(adapter->pdev, phy_cmd.size,
-                                       phy_cmd.va, phy_cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, phy_cmd.size, phy_cmd.va,
+                                 phy_cmd.dma);
        } else {
                ecmd->speed = adapter->link_speed;
                ecmd->port = adapter->port_type;
@@ -496,7 +513,7 @@ be_phys_id(struct net_device *netdev, u32 data)
        int status;
        u32 cur;
 
-       be_cmd_get_beacon_state(adapter, adapter->port_num, &cur);
+       be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &cur);
 
        if (cur == BEACON_STATE_ENABLED)
                return 0;
@@ -504,23 +521,34 @@ be_phys_id(struct net_device *netdev, u32 data)
        if (data < 2)
                data = 2;
 
-       status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
+       status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
                        BEACON_STATE_ENABLED);
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(data*HZ);
 
-       status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
+       status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
                        BEACON_STATE_DISABLED);
 
        return status;
 }
 
+static bool
+be_is_wol_supported(struct be_adapter *adapter)
+{
+       if (!be_physfn(adapter))
+               return false;
+       else
+               return true;
+}
+
 static void
 be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
-       wol->supported = WAKE_MAGIC;
+       if (be_is_wol_supported(adapter))
+               wol->supported = WAKE_MAGIC;
+
        if (adapter->wol)
                wol->wolopts = WAKE_MAGIC;
        else
@@ -536,7 +564,7 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        if (wol->wolopts & ~WAKE_MAGIC)
                return -EINVAL;
 
-       if (wol->wolopts & WAKE_MAGIC)
+       if ((wol->wolopts & WAKE_MAGIC) && be_is_wol_supported(adapter))
                adapter->wol = true;
        else
                adapter->wol = false;
@@ -554,8 +582,8 @@ be_test_ddr_dma(struct be_adapter *adapter)
        };
 
        ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
-       ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
-                                       &ddrdma_cmd.dma);
+       ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
+                                          &ddrdma_cmd.dma, GFP_KERNEL);
        if (!ddrdma_cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
                return -ENOMEM;
@@ -569,20 +597,20 @@ be_test_ddr_dma(struct be_adapter *adapter)
        }
 
 err:
-       pci_free_consistent(adapter->pdev, ddrdma_cmd.size,
-                       ddrdma_cmd.va, ddrdma_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
+                         ddrdma_cmd.dma);
        return ret;
 }
 
 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
                                u64 *status)
 {
-       be_cmd_set_loopback(adapter, adapter->port_num,
+       be_cmd_set_loopback(adapter, adapter->hba_port_num,
                                loopback_type, 1);
-       *status = be_cmd_loopback_test(adapter, adapter->port_num,
+       *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
                                loopback_type, 1500,
                                2, 0xabc);
-       be_cmd_set_loopback(adapter, adapter->port_num,
+       be_cmd_set_loopback(adapter, adapter->hba_port_num,
                                BE_NO_LOOPBACK, 1);
        return *status;
 }
@@ -621,7 +649,8 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
                                &qos_link_speed) != 0) {
                test->flags |= ETH_TEST_FL_FAILED;
                data[4] = -1;
-       } else if (mac_speed) {
+       } else if (!mac_speed) {
+               test->flags |= ETH_TEST_FL_FAILED;
                data[4] = 1;
        }
 }
@@ -662,8 +691,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
 
        memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
        eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
-       eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
-                               &eeprom_cmd.dma);
+       eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
+                                          &eeprom_cmd.dma, GFP_KERNEL);
 
        if (!eeprom_cmd.va) {
                dev_err(&adapter->pdev->dev,
@@ -677,8 +706,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
                resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
                memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
        }
-       pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
-                       eeprom_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
+                         eeprom_cmd.dma);
 
        return status;
 }
index 4096d9778234384d5b1237d3ae82f4e3dee9f67b..dbe67f353e8f3c2d22e074578237539bbef24d1e 100644 (file)
 #define POST_STAGE_BE_RESET            0x3 /* Host wants to reset chip */
 #define POST_STAGE_ARMFW_RDY           0xc000  /* FW is done with POST */
 
+
+/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
+#define SLIPORT_STATUS_OFFSET          0x404
+#define SLIPORT_CONTROL_OFFSET         0x408
+
+#define SLIPORT_STATUS_ERR_MASK                0x80000000
+#define SLIPORT_STATUS_RN_MASK         0x01000000
+#define SLIPORT_STATUS_RDY_MASK                0x00800000
+
+
+#define SLI_PORT_CONTROL_IP_MASK       0x08000000
+
 /********* Memory BAR register ************/
 #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET     0xfc
 /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
@@ -327,6 +339,53 @@ struct be_eth_rx_compl {
        u32 dw[4];
 };
 
+struct mgmt_hba_attribs {
+       u8 flashrom_version_string[32];
+       u8 manufacturer_name[32];
+       u32 supported_modes;
+       u32 rsvd0[3];
+       u8 ncsi_ver_string[12];
+       u32 default_extended_timeout;
+       u8 controller_model_number[32];
+       u8 controller_description[64];
+       u8 controller_serial_number[32];
+       u8 ip_version_string[32];
+       u8 firmware_version_string[32];
+       u8 bios_version_string[32];
+       u8 redboot_version_string[32];
+       u8 driver_version_string[32];
+       u8 fw_on_flash_version_string[32];
+       u32 functionalities_supported;
+       u16 max_cdblength;
+       u8 asic_revision;
+       u8 generational_guid[16];
+       u8 hba_port_count;
+       u16 default_link_down_timeout;
+       u8 iscsi_ver_min_max;
+       u8 multifunction_device;
+       u8 cache_valid;
+       u8 hba_status;
+       u8 max_domains_supported;
+       u8 phy_port;
+       u32 firmware_post_status;
+       u32 hba_mtu[8];
+       u32 rsvd1[4];
+};
+
+struct mgmt_controller_attrib {
+       struct mgmt_hba_attribs hba_attribs;
+       u16 pci_vendor_id;
+       u16 pci_device_id;
+       u16 pci_sub_vendor_id;
+       u16 pci_sub_system_id;
+       u8 pci_bus_number;
+       u8 pci_device_number;
+       u8 pci_function_number;
+       u8 interface_type;
+       u64 unique_identifier;
+       u32 rsvd0[5];
+};
+
 struct controller_id {
        u32 vendor;
        u32 device;
index 28a32a6c8bf135fe043a607379a65c9b60b599a0..68f107817326790f2d9e662199ffa6a6843e4103 100644 (file)
@@ -125,8 +125,8 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
 {
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
-               pci_free_consistent(adapter->pdev, mem->size,
-                       mem->va, mem->dma);
+               dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+                                 mem->dma);
 }
 
 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
@@ -138,7 +138,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
-       mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
+       mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
+                                    GFP_KERNEL);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
@@ -235,12 +236,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
        if (!be_physfn(adapter))
                goto netdev_addr;
 
-       status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
+       status = be_cmd_pmac_del(adapter, adapter->if_handle,
+                               adapter->pmac_id, 0);
        if (status)
                return status;
 
        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
-                       adapter->if_handle, &adapter->pmac_id);
+                               adapter->if_handle, &adapter->pmac_id, 0);
 netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -484,7 +486,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
 }
 
-static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
+static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
 {
        dma_addr_t dma;
@@ -494,11 +496,10 @@ static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
-                       pci_unmap_single(pdev, dma, wrb->frag_len,
-                               PCI_DMA_TODEVICE);
+                       dma_unmap_single(dev, dma, wrb->frag_len,
+                                        DMA_TO_DEVICE);
                else
-                       pci_unmap_page(pdev, dma, wrb->frag_len,
-                               PCI_DMA_TODEVICE);
+                       dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
 }
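The hunks above are part of a pattern repeated throughout this merge: benet drops the legacy pci_map_*/pci_unmap_* wrappers in favour of the generic DMA API keyed on &pdev->dev. A minimal sketch of the streaming-DMA map/check/unmap sequence the driver now follows (buffer, length and function name are illustrative):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

/* Sketch only: map a CPU buffer for device reads, check the mapping,
 * and unmap with the same device and direction. */
static int example_map_for_tx(struct pci_dev *pdev, void *buf, size_t len)
{
        struct device *dev = &pdev->dev;
        dma_addr_t busaddr;

        busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, busaddr))
                return -ENOMEM;

        /* ... program 'busaddr' into the TX descriptor, kick the ring ... */

        dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
        return 0;
}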
 
@@ -507,7 +508,7 @@ static int make_tx_wrbs(struct be_adapter *adapter,
 {
        dma_addr_t busaddr;
        int i, copied = 0;
-       struct pci_dev *pdev = adapter->pdev;
+       struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
@@ -521,9 +522,8 @@ static int make_tx_wrbs(struct be_adapter *adapter,
 
        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
-               busaddr = pci_map_single(pdev, skb->data, len,
-                                        PCI_DMA_TODEVICE);
-               if (pci_dma_mapping_error(pdev, busaddr))
+               busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
+               if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
@@ -536,10 +536,9 @@ static int make_tx_wrbs(struct be_adapter *adapter,
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
-               busaddr = pci_map_page(pdev, frag->page,
-                                      frag->page_offset,
-                                      frag->size, PCI_DMA_TODEVICE);
-               if (pci_dma_mapping_error(pdev, busaddr))
+               busaddr = dma_map_page(dev, frag->page, frag->page_offset,
+                                      frag->size, DMA_TO_DEVICE);
+               if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
@@ -563,7 +562,7 @@ dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
-               unmap_tx_frag(pdev, wrb, map_single);
+               unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
@@ -743,11 +742,11 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
-                                       adapter->vf_cfg[vf].vf_pmac_id);
+                                       adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
 
        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
-                               &adapter->vf_cfg[vf].vf_pmac_id);
+                               &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
 
        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
@@ -822,7 +821,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
                rate = 10000;
 
        adapter->vf_cfg[vf].vf_tx_rate = rate;
-       status = be_cmd_set_qos(adapter, rate / 10, vf);
+       status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
 
        if (status)
                dev_info(&adapter->pdev->dev,
@@ -866,14 +865,17 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,
 
 static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
 {
-       u8 l4_cksm, ipv6, ipcksm;
+       u8 l4_cksm, ipv6, ipcksm, tcpf, udpf;
 
        l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
        ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
        ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
+       tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
+       udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
 
-       /* Ignore ipcksm for ipv6 pkts */
-       return l4_cksm && (ipcksm || ipv6);
+       /* L4 checksum is not reliable for non TCP/UDP packets.
+        * Also ignore ipcksm for ipv6 pkts */
+       return (tcpf || udpf) && l4_cksm && (ipcksm || ipv6);
 }
 
 static struct be_rx_page_info *
@@ -888,8 +890,9 @@ get_rx_page_info(struct be_adapter *adapter,
        BUG_ON(!rx_page_info->page);
 
        if (rx_page_info->last_page_user) {
-               pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
-                       adapter->big_page_size, PCI_DMA_FROMDEVICE);
+               dma_unmap_page(&adapter->pdev->dev,
+                              dma_unmap_addr(rx_page_info, bus),
+                              adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }
 
@@ -909,17 +912,11 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
 
-        /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
-       if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {
-
-               rxo->last_frag_index = rxq_idx;
-
-               for (i = 0; i < num_rcvd; i++) {
-                       page_info = get_rx_page_info(adapter, rxo, rxq_idx);
-                       put_page(page_info->page);
-                       memset(page_info, 0, sizeof(*page_info));
-                       index_inc(&rxq_idx, rxq->len);
-               }
+       for (i = 0; i < num_rcvd; i++) {
+               page_info = get_rx_page_info(adapter, rxo, rxq_idx);
+               put_page(page_info->page);
+               memset(page_info, 0, sizeof(*page_info));
+               index_inc(&rxq_idx, rxq->len);
        }
 }
 
@@ -1047,6 +1044,9 @@ static void be_rx_compl_process(struct be_adapter *adapter,
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;
 
+       if ((adapter->pvid == vlanf) && !adapter->vlan_tag[vlanf])
+               vlanf = 0;
+
        if (unlikely(vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
@@ -1087,6 +1087,9 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;
 
+       if ((adapter->pvid == vlanf) && !adapter->vlan_tag[vlanf])
+               vlanf = 0;
+
        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
@@ -1163,20 +1166,20 @@ static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
        rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
 }
 
-static inline struct page *be_alloc_pages(u32 size)
+static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
 {
-       gfp_t alloc_flags = GFP_ATOMIC;
        u32 order = get_order(size);
+
        if (order > 0)
-               alloc_flags |= __GFP_COMP;
-       return  alloc_pages(alloc_flags, order);
+               gfp |= __GFP_COMP;
+       return  alloc_pages(gfp, order);
 }
 
 /*
  * Allocate a page, split it to fragments of size rx_frag_size and post as
  * receive buffers to BE
  */
-static void be_post_rx_frags(struct be_rx_obj *rxo)
+static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
 {
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
@@ -1190,14 +1193,14 @@ static void be_post_rx_frags(struct be_rx_obj *rxo)
        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
-                       pagep = be_alloc_pages(adapter->big_page_size);
+                       pagep = be_alloc_pages(adapter->big_page_size, gfp);
                        if (unlikely(!pagep)) {
                                rxo->stats.rx_post_fail++;
                                break;
                        }
-                       page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
-                                               adapter->big_page_size,
-                                               PCI_DMA_FROMDEVICE);
+                       page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
+                                                   0, adapter->big_page_size,
+                                                   DMA_FROM_DEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
@@ -1270,8 +1273,8 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
-               unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
-                                       skb_headlen(sent_skb)));
+               unmap_tx_frag(&adapter->pdev->dev, wrb,
+                             (unmap_skb_hdr && skb_headlen(sent_skb)));
                unmap_skb_hdr = false;
 
                num_wrbs++;
@@ -1573,9 +1576,6 @@ static int be_rx_queues_create(struct be_adapter *adapter)
        adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
        for_all_rx_queues(adapter, rxo, i) {
                rxo->adapter = adapter;
-               /* Init last_frag_index so that the frag index in the first
-                * completion will never match */
-               rxo->last_frag_index = 0xffff;
                rxo->rx_eq.max_eqd = BE_MAX_EQD;
                rxo->rx_eq.enable_aic = true;
 
@@ -1716,7 +1716,7 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_eth_rx_compl *rxcp;
        u32 work_done;
-       u16 frag_index, num_rcvd;
+       u16 num_rcvd;
        u8 err;
 
        rxo->stats.rx_polls++;
@@ -1726,16 +1726,10 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
                        break;
 
                err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
-               frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
-                                                               rxcp);
                num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
                                                                rxcp);
-
-               /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
-               if (likely(frag_index != rxo->last_frag_index &&
-                               num_rcvd != 0)) {
-                       rxo->last_frag_index = frag_index;
-
+               /* Ignore flush completions */
+               if (num_rcvd) {
                        if (do_gro(rxo, rxcp, err))
                                be_rx_compl_process_gro(adapter, rxo, rxcp);
                        else
@@ -1747,7 +1741,7 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
 
        /* Refill the queue */
        if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
-               be_post_rx_frags(rxo);
+               be_post_rx_frags(rxo, GFP_ATOMIC);
 
        /* All consumed */
        if (work_done < budget) {
@@ -1827,6 +1821,7 @@ void be_detect_dump_ue(struct be_adapter *adapter)
 
        if (ue_status_lo || ue_status_hi) {
                adapter->ue_detected = true;
+               adapter->eeh_err = true;
                dev_err(&adapter->pdev->dev, "UE Detected!!\n");
        }
 
@@ -1865,10 +1860,14 @@ static void be_worker(struct work_struct *work)
                        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
                        be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
                }
+
+               if (!adapter->ue_detected && !lancer_chip(adapter))
+                       be_detect_dump_ue(adapter);
+
                goto reschedule;
        }
 
-       if (!adapter->stats_ioctl_sent)
+       if (!adapter->stats_cmd_sent)
                be_cmd_get_stats(adapter, &adapter->stats_cmd);
 
        be_tx_rate_update(adapter);
@@ -1879,7 +1878,7 @@ static void be_worker(struct work_struct *work)
 
                if (rxo->rx_post_starved) {
                        rxo->rx_post_starved = false;
-                       be_post_rx_frags(rxo);
+                       be_post_rx_frags(rxo, GFP_KERNEL);
                }
        }
        if (!adapter->ue_detected && !lancer_chip(adapter))
@@ -2083,13 +2082,24 @@ static int be_close(struct net_device *netdev)
 
        be_async_mcc_disable(adapter);
 
-       netif_stop_queue(netdev);
        netif_carrier_off(netdev);
        adapter->link_up = false;
 
        if (!lancer_chip(adapter))
                be_intr_set(adapter, false);
 
+       for_all_rx_queues(adapter, rxo, i)
+               napi_disable(&rxo->rx_eq.napi);
+
+       napi_disable(&tx_eq->napi);
+
+       if (lancer_chip(adapter)) {
+               be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
+               be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
+               for_all_rx_queues(adapter, rxo, i)
+                        be_cq_notify(adapter, rxo->cq.id, false, 0);
+       }
+
        if (adapter->msix_enabled) {
                vec = be_msix_vec_get(adapter, tx_eq);
                synchronize_irq(vec);
@@ -2103,11 +2113,6 @@ static int be_close(struct net_device *netdev)
        }
        be_irq_unregister(adapter);
 
-       for_all_rx_queues(adapter, rxo, i)
-               napi_disable(&rxo->rx_eq.napi);
-
-       napi_disable(&tx_eq->napi);
-
        /* Wait for all pending tx completions to arrive so that
         * all tx skbs are freed.
         */
@@ -2127,7 +2132,7 @@ static int be_open(struct net_device *netdev)
        u16 link_speed;
 
        for_all_rx_queues(adapter, rxo, i) {
-               be_post_rx_frags(rxo);
+               be_post_rx_frags(rxo, GFP_KERNEL);
                napi_enable(&rxo->rx_eq.napi);
        }
        napi_enable(&tx_eq->napi);
@@ -2179,7 +2184,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
        memset(mac, 0, ETH_ALEN);
 
        cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                   GFP_KERNEL);
        if (cmd.va == NULL)
                return -1;
        memset(cmd.va, 0, cmd.size);
@@ -2190,8 +2196,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
                if (status) {
                        dev_err(&adapter->pdev->dev,
                                "Could not enable Wake-on-lan\n");
-                       pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
-                                       cmd.dma);
+                       dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+                                         cmd.dma);
                        return status;
                }
                status = be_cmd_enable_magic_wol(adapter,
@@ -2204,7 +2210,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
                pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
        }
 
-       pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
        return status;
 }
 
@@ -2225,7 +2231,8 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
        for (vf = 0; vf < num_vfs; vf++) {
                status = be_cmd_pmac_add(adapter, mac,
                                        adapter->vf_cfg[vf].vf_if_handle,
-                                       &adapter->vf_cfg[vf].vf_pmac_id);
+                                       &adapter->vf_cfg[vf].vf_pmac_id,
+                                       vf + 1);
                if (status)
                        dev_err(&adapter->pdev->dev,
                                "Mac address add failed for VF %d\n", vf);
@@ -2245,7 +2252,7 @@ static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
                if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                        be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
-                                       adapter->vf_cfg[vf].vf_pmac_id);
+                                       adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
        }
 }
 
@@ -2256,7 +2263,9 @@ static int be_setup(struct be_adapter *adapter)
        int status;
        u8 mac[ETH_ALEN];
 
-       cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
+       cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
+                               BE_IF_FLAGS_BROADCAST |
+                               BE_IF_FLAGS_MULTICAST;
 
        if (be_physfn(adapter)) {
                cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
@@ -2277,22 +2286,26 @@ static int be_setup(struct be_adapter *adapter)
                goto do_none;
 
        if (be_physfn(adapter)) {
-               while (vf < num_vfs) {
-                       cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
-                                       | BE_IF_FLAGS_BROADCAST;
-                       status = be_cmd_if_create(adapter, cap_flags, en_flags,
-                                       mac, true,
+               if (adapter->sriov_enabled) {
+                       while (vf < num_vfs) {
+                               cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
+                                                       BE_IF_FLAGS_BROADCAST;
+                               status = be_cmd_if_create(adapter, cap_flags,
+                                       en_flags, mac, true,
                                        &adapter->vf_cfg[vf].vf_if_handle,
                                        NULL, vf+1);
-                       if (status) {
-                               dev_err(&adapter->pdev->dev,
-                               "Interface Create failed for VF %d\n", vf);
-                               goto if_destroy;
+                               if (status) {
+                                       dev_err(&adapter->pdev->dev,
+                                       "Interface Create failed for VF %d\n",
+                                       vf);
+                                       goto if_destroy;
+                               }
+                               adapter->vf_cfg[vf].vf_pmac_id =
+                                                       BE_INVALID_PMAC_ID;
+                               vf++;
                        }
-                       adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
-                       vf++;
                }
-       } else if (!be_physfn(adapter)) {
+       } else {
                status = be_cmd_mac_addr_query(adapter, mac,
                        MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
                if (!status) {
@@ -2313,44 +2326,46 @@ static int be_setup(struct be_adapter *adapter)
        if (status != 0)
                goto rx_qs_destroy;
 
-       if (be_physfn(adapter)) {
-               status = be_vf_eth_addr_config(adapter);
-               if (status)
-                       goto mcc_q_destroy;
-       }
-
        adapter->link_speed = -1;
 
        return 0;
 
-mcc_q_destroy:
-       if (be_physfn(adapter))
-               be_vf_eth_addr_rem(adapter);
        be_mcc_queues_destroy(adapter);
 rx_qs_destroy:
        be_rx_queues_destroy(adapter);
 tx_qs_destroy:
        be_tx_queues_destroy(adapter);
 if_destroy:
-       for (vf = 0; vf < num_vfs; vf++)
-               if (adapter->vf_cfg[vf].vf_if_handle)
-                       be_cmd_if_destroy(adapter,
-                                       adapter->vf_cfg[vf].vf_if_handle);
-       be_cmd_if_destroy(adapter, adapter->if_handle);
+       if (be_physfn(adapter) && adapter->sriov_enabled)
+               for (vf = 0; vf < num_vfs; vf++)
+                       if (adapter->vf_cfg[vf].vf_if_handle)
+                               be_cmd_if_destroy(adapter,
+                                       adapter->vf_cfg[vf].vf_if_handle,
+                                       vf + 1);
+       be_cmd_if_destroy(adapter, adapter->if_handle, 0);
 do_none:
        return status;
 }
 
 static int be_clear(struct be_adapter *adapter)
 {
-       if (be_physfn(adapter))
+       int vf;
+
+       if (be_physfn(adapter) && adapter->sriov_enabled)
                be_vf_eth_addr_rem(adapter);
 
        be_mcc_queues_destroy(adapter);
        be_rx_queues_destroy(adapter);
        be_tx_queues_destroy(adapter);
 
-       be_cmd_if_destroy(adapter, adapter->if_handle);
+       if (be_physfn(adapter) && adapter->sriov_enabled)
+               for (vf = 0; vf < num_vfs; vf++)
+                       if (adapter->vf_cfg[vf].vf_if_handle)
+                               be_cmd_if_destroy(adapter,
+                                       adapter->vf_cfg[vf].vf_if_handle,
+                                       vf + 1);
+
+       be_cmd_if_destroy(adapter, adapter->if_handle,  0);
 
        /* tell fw we're done with firing cmds */
        be_cmd_fw_clean(adapter);
@@ -2453,8 +2468,8 @@ static int be_flash_data(struct be_adapter *adapter,
                        continue;
                if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
                        (!be_flash_redboot(adapter, fw->data,
-                        pflashcomp[i].offset, pflashcomp[i].size,
-                        filehdr_size)))
+                       pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
+                       (num_of_images * sizeof(struct image_hdr)))))
                        continue;
                p = fw->data;
                p += filehdr_size + pflashcomp[i].offset
@@ -2528,8 +2543,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
        dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
 
        flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
-       flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
-                                       &flash_cmd.dma);
+       flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
+                                         &flash_cmd.dma, GFP_KERNEL);
        if (!flash_cmd.va) {
                status = -ENOMEM;
                dev_err(&adapter->pdev->dev,
@@ -2558,8 +2573,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
                status = -1;
        }
 
-       pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
-                               flash_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
+                         flash_cmd.dma);
        if (status) {
                dev_err(&adapter->pdev->dev, "Firmware load error\n");
                goto fw_exit;
@@ -2700,13 +2715,13 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
        be_unmap_pci_bars(adapter);
 
        if (mem->va)
-               pci_free_consistent(adapter->pdev, mem->size,
-                       mem->va, mem->dma);
+               dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+                                 mem->dma);
 
        mem = &adapter->mc_cmd_mem;
        if (mem->va)
-               pci_free_consistent(adapter->pdev, mem->size,
-                       mem->va, mem->dma);
+               dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+                                 mem->dma);
 }
 
 static int be_ctrl_init(struct be_adapter *adapter)
@@ -2721,8 +2736,10 @@ static int be_ctrl_init(struct be_adapter *adapter)
                goto done;
 
        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
-       mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
-                               mbox_mem_alloc->size, &mbox_mem_alloc->dma);
+       mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
+                                               mbox_mem_alloc->size,
+                                               &mbox_mem_alloc->dma,
+                                               GFP_KERNEL);
        if (!mbox_mem_alloc->va) {
                status = -ENOMEM;
                goto unmap_pci_bars;
@@ -2734,8 +2751,9 @@ static int be_ctrl_init(struct be_adapter *adapter)
        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
 
        mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
-       mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
-                       &mc_cmd_mem->dma);
+       mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
+                                           mc_cmd_mem->size, &mc_cmd_mem->dma,
+                                           GFP_KERNEL);
        if (mc_cmd_mem->va == NULL) {
                status = -ENOMEM;
                goto free_mbox;
@@ -2751,8 +2769,8 @@ static int be_ctrl_init(struct be_adapter *adapter)
        return 0;
 
 free_mbox:
-       pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
-               mbox_mem_alloc->va, mbox_mem_alloc->dma);
+       dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
+                         mbox_mem_alloc->va, mbox_mem_alloc->dma);
 
 unmap_pci_bars:
        be_unmap_pci_bars(adapter);
@@ -2766,8 +2784,8 @@ static void be_stats_cleanup(struct be_adapter *adapter)
        struct be_dma_mem *cmd = &adapter->stats_cmd;
 
        if (cmd->va)
-               pci_free_consistent(adapter->pdev, cmd->size,
-                       cmd->va, cmd->dma);
+               dma_free_coherent(&adapter->pdev->dev, cmd->size,
+                                 cmd->va, cmd->dma);
 }
 
 static int be_stats_init(struct be_adapter *adapter)
@@ -2775,7 +2793,8 @@ static int be_stats_init(struct be_adapter *adapter)
        struct be_dma_mem *cmd = &adapter->stats_cmd;
 
        cmd->size = sizeof(struct be_cmd_req_get_stats);
-       cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
+       cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
+                                    GFP_KERNEL);
        if (cmd->va == NULL)
                return -1;
        memset(cmd->va, 0, cmd->size);
@@ -2845,6 +2864,10 @@ static int be_get_config(struct be_adapter *adapter)
        else
                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
 
+       status = be_cmd_get_cntl_attributes(adapter);
+       if (status)
+               return status;
+
        return 0;
 }
 
@@ -2886,6 +2909,54 @@ static int be_dev_family_check(struct be_adapter *adapter)
        return 0;
 }
 
+static int lancer_wait_ready(struct be_adapter *adapter)
+{
+#define SLIPORT_READY_TIMEOUT 500
+       u32 sliport_status;
+       int status = 0, i;
+
+       for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
+               sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+               if (sliport_status & SLIPORT_STATUS_RDY_MASK)
+                       break;
+
+               msleep(20);
+       }
+
+       if (i == SLIPORT_READY_TIMEOUT)
+               status = -1;
+
+       return status;
+}
+
+static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
+{
+       int status;
+       u32 sliport_status, err, reset_needed;
+       status = lancer_wait_ready(adapter);
+       if (!status) {
+               sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+               err = sliport_status & SLIPORT_STATUS_ERR_MASK;
+               reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
+               if (err && reset_needed) {
+                       iowrite32(SLI_PORT_CONTROL_IP_MASK,
+                                       adapter->db + SLIPORT_CONTROL_OFFSET);
+
+                       /* check adapter has corrected the error */
+                       status = lancer_wait_ready(adapter);
+                       sliport_status = ioread32(adapter->db +
+                                                       SLIPORT_STATUS_OFFSET);
+                       sliport_status &= (SLIPORT_STATUS_ERR_MASK |
+                                               SLIPORT_STATUS_RN_MASK);
+                       if (status || sliport_status)
+                               status = -1;
+               } else if (err || reset_needed) {
+                       status = -1;
+               }
+       }
+       return status;
+}
+
 static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
 {
@@ -2918,11 +2989,11 @@ static int __devinit be_probe(struct pci_dev *pdev,
        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);
 
-       status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
-               status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
@@ -2935,6 +3006,14 @@ static int __devinit be_probe(struct pci_dev *pdev,
        if (status)
                goto free_netdev;
 
+       if (lancer_chip(adapter)) {
+               status = lancer_test_and_set_rdy_state(adapter);
+               if (status) {
+                       dev_err(&pdev->dev, "Adapter in non recoverable error\n");
+                       goto free_netdev;
+               }
+       }
+
        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_cmd_POST(adapter);
@@ -2947,11 +3026,9 @@ static int __devinit be_probe(struct pci_dev *pdev,
        if (status)
                goto ctrl_clean;
 
-       if (be_physfn(adapter)) {
-               status = be_cmd_reset_function(adapter);
-               if (status)
-                       goto ctrl_clean;
-       }
+       status = be_cmd_reset_function(adapter);
+       if (status)
+               goto ctrl_clean;
 
        status = be_stats_init(adapter);
        if (status)
@@ -2975,10 +3052,18 @@ static int __devinit be_probe(struct pci_dev *pdev,
                goto unsetup;
        netif_carrier_off(netdev);
 
+       if (be_physfn(adapter) && adapter->sriov_enabled) {
+               status = be_vf_eth_addr_config(adapter);
+               if (status)
+                       goto unreg_netdev;
+       }
+
        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;
 
+unreg_netdev:
+       unregister_netdev(netdev);
 unsetup:
        be_clear(adapter);
 msix_disable:
@@ -3005,6 +3090,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev =  adapter->netdev;
 
+       cancel_delayed_work_sync(&adapter->work);
        if (adapter->wol)
                be_setup_wol(adapter, true);
 
@@ -3017,6 +3103,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
        be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
        be_clear(adapter);
 
+       be_msix_disable(adapter);
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -3038,6 +3125,7 @@ static int be_resume(struct pci_dev *pdev)
        pci_set_power_state(pdev, 0);
        pci_restore_state(pdev);
 
+       be_msix_enable(adapter);
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
@@ -3053,6 +3141,8 @@ static int be_resume(struct pci_dev *pdev)
 
        if (adapter->wol)
                be_setup_wol(adapter, false);
+
+       schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;
 }
 
@@ -3064,6 +3154,9 @@ static void be_shutdown(struct pci_dev *pdev)
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev =  adapter->netdev;
 
+       if (netif_running(netdev))
+               cancel_delayed_work_sync(&adapter->work);
+
        netif_device_detach(netdev);
 
        be_cmd_reset_function(adapter);
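Most of the be_main.c churn above is the same conversion applied to coherent memory: pci_alloc_consistent()/pci_free_consistent() become dma_alloc_coherent()/dma_free_coherent() with an explicit GFP_KERNEL allocation context. A minimal sketch of the resulting lifecycle (helper names are illustrative; the driver itself keeps the va/dma/size triple in its be_dma_mem wrapper):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/string.h>

/* Sketch only: allocate coherent DMA memory in process context and
 * free it through the same struct device. */
static void *example_coherent_alloc(struct pci_dev *pdev, size_t size,
                                    dma_addr_t *dma)
{
        void *va = dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);

        if (va)
                memset(va, 0, size);    /* the converted code still zeroes by hand */
        return va;
}

static void example_coherent_free(struct pci_dev *pdev, size_t size,
                                  void *va, dma_addr_t dma)
{
        if (va)
                dma_free_coherent(&pdev->dev, size, va, dma);
}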
index fad912656fe4007f0c0a27edc480749bec485d17..9f356d5d0f3318c42630cd0ce2b479e73a28b75b 100644 (file)
@@ -126,22 +126,22 @@ bnad_free_all_txbufs(struct bnad *bnad,
                }
                unmap_array[unmap_cons].skb = NULL;
 
-               pci_unmap_single(bnad->pcidev,
-                                pci_unmap_addr(&unmap_array[unmap_cons],
+               dma_unmap_single(&bnad->pcidev->dev,
+                                dma_unmap_addr(&unmap_array[unmap_cons],
                                                dma_addr), skb_headlen(skb),
-                                               PCI_DMA_TODEVICE);
+                                               DMA_TO_DEVICE);
 
-               pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+               dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
                if (++unmap_cons >= unmap_q->q_depth)
                        break;
 
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                       pci_unmap_page(bnad->pcidev,
-                                      pci_unmap_addr(&unmap_array[unmap_cons],
+                       dma_unmap_page(&bnad->pcidev->dev,
+                                      dma_unmap_addr(&unmap_array[unmap_cons],
                                                      dma_addr),
                                       skb_shinfo(skb)->frags[i].size,
-                                      PCI_DMA_TODEVICE);
-                       pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+                                      DMA_TO_DEVICE);
+                       dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
                                           0);
                        if (++unmap_cons >= unmap_q->q_depth)
                                break;
@@ -199,23 +199,23 @@ bnad_free_txbufs(struct bnad *bnad,
                sent_bytes += skb->len;
                wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
 
-               pci_unmap_single(bnad->pcidev,
-                                pci_unmap_addr(&unmap_array[unmap_cons],
+               dma_unmap_single(&bnad->pcidev->dev,
+                                dma_unmap_addr(&unmap_array[unmap_cons],
                                                dma_addr), skb_headlen(skb),
-                                PCI_DMA_TODEVICE);
-               pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+                                DMA_TO_DEVICE);
+               dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
                BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
 
                prefetch(&unmap_array[unmap_cons + 1]);
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        prefetch(&unmap_array[unmap_cons + 1]);
 
-                       pci_unmap_page(bnad->pcidev,
-                                      pci_unmap_addr(&unmap_array[unmap_cons],
+                       dma_unmap_page(&bnad->pcidev->dev,
+                                      dma_unmap_addr(&unmap_array[unmap_cons],
                                                      dma_addr),
                                       skb_shinfo(skb)->frags[i].size,
-                                      PCI_DMA_TODEVICE);
-                       pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+                                      DMA_TO_DEVICE);
+                       dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
                                           0);
                        BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
                }
@@ -340,19 +340,22 @@ static void
 bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
 {
        struct bnad_unmap_q *unmap_q;
+       struct bnad_skb_unmap *unmap_array;
        struct sk_buff *skb;
        int unmap_cons;
 
        unmap_q = rcb->unmap_q;
+       unmap_array = unmap_q->unmap_array;
        for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
-               skb = unmap_q->unmap_array[unmap_cons].skb;
+               skb = unmap_array[unmap_cons].skb;
                if (!skb)
                        continue;
-               unmap_q->unmap_array[unmap_cons].skb = NULL;
-               pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
-                                       unmap_array[unmap_cons],
-                                       dma_addr), rcb->rxq->buffer_size,
-                                       PCI_DMA_FROMDEVICE);
+               unmap_array[unmap_cons].skb = NULL;
+               dma_unmap_single(&bnad->pcidev->dev,
+                                dma_unmap_addr(&unmap_array[unmap_cons],
+                                               dma_addr),
+                                rcb->rxq->buffer_size,
+                                DMA_FROM_DEVICE);
                dev_kfree_skb(skb);
        }
        bnad_reset_rcb(bnad, rcb);
@@ -391,9 +394,10 @@ bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
                skb->dev = bnad->netdev;
                skb_reserve(skb, NET_IP_ALIGN);
                unmap_array[unmap_prod].skb = skb;
-               dma_addr = pci_map_single(bnad->pcidev, skb->data,
-                       rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
-               pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+               dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+                                         rcb->rxq->buffer_size,
+                                         DMA_FROM_DEVICE);
+               dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
                                   dma_addr);
                BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
                BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -434,8 +438,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
        struct bna_rcb *rcb = NULL;
        unsigned int wi_range, packets = 0, wis = 0;
        struct bnad_unmap_q *unmap_q;
+       struct bnad_skb_unmap *unmap_array;
        struct sk_buff *skb;
-       u32 flags;
+       u32 flags, unmap_cons;
        u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
        struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
 
@@ -456,17 +461,17 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
                        rcb = ccb->rcb[1];
 
                unmap_q = rcb->unmap_q;
+               unmap_array = unmap_q->unmap_array;
+               unmap_cons = unmap_q->consumer_index;
 
-               skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+               skb = unmap_array[unmap_cons].skb;
                BUG_ON(!(skb));
-               unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
-               pci_unmap_single(bnad->pcidev,
-                                pci_unmap_addr(&unmap_q->
-                                               unmap_array[unmap_q->
-                                                           consumer_index],
+               unmap_array[unmap_cons].skb = NULL;
+               dma_unmap_single(&bnad->pcidev->dev,
+                                dma_unmap_addr(&unmap_array[unmap_cons],
                                                dma_addr),
-                                               rcb->rxq->buffer_size,
-                                               PCI_DMA_FROMDEVICE);
+                                rcb->rxq->buffer_size,
+                                DMA_FROM_DEVICE);
                BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
 
                /* Should be more efficient ? Performance ? */
@@ -1015,9 +1020,9 @@ bnad_mem_free(struct bnad *bnad,
                        if (mem_info->mem_type == BNA_MEM_T_DMA) {
                                BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
                                                dma_pa);
-                               pci_free_consistent(bnad->pcidev,
-                                               mem_info->mdl[i].len,
-                                               mem_info->mdl[i].kva, dma_pa);
+                               dma_free_coherent(&bnad->pcidev->dev,
+                                                 mem_info->mdl[i].len,
+                                                 mem_info->mdl[i].kva, dma_pa);
                        } else
                                kfree(mem_info->mdl[i].kva);
                }
@@ -1047,8 +1052,9 @@ bnad_mem_alloc(struct bnad *bnad,
                for (i = 0; i < mem_info->num; i++) {
                        mem_info->mdl[i].len = mem_info->len;
                        mem_info->mdl[i].kva =
-                               pci_alloc_consistent(bnad->pcidev,
-                                               mem_info->len, &dma_pa);
+                               dma_alloc_coherent(&bnad->pcidev->dev,
+                                               mem_info->len, &dma_pa,
+                                               GFP_KERNEL);
 
                        if (mem_info->mdl[i].kva == NULL)
                                goto err_return;
@@ -2600,9 +2606,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        unmap_q->unmap_array[unmap_prod].skb = skb;
        BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
        txqent->vector[vect_id].length = htons(skb_headlen(skb));
-       dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
-               PCI_DMA_TODEVICE);
-       pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+       dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+                                 skb_headlen(skb), DMA_TO_DEVICE);
+       dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
                           dma_addr);
 
        BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
@@ -2630,11 +2636,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 
                BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
                txqent->vector[vect_id].length = htons(size);
-               dma_addr =
-                       pci_map_page(bnad->pcidev, frag->page,
-                                    frag->page_offset, size,
-                                    PCI_DMA_TODEVICE);
-               pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+               dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
+                                       frag->page_offset, size, DMA_TO_DEVICE);
+               dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
                                   dma_addr);
                BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
                BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -3022,14 +3026,14 @@ bnad_pci_init(struct bnad *bnad,
        err = pci_request_regions(pdev, BNAD_NAME);
        if (err)
                goto disable_device;
-       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
-           !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+           !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                *using_dac = 1;
        } else {
-               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       err = pci_set_consistent_dma_mask(pdev,
-                                               DMA_BIT_MASK(32));
+                       err = dma_set_coherent_mask(&pdev->dev,
+                                                   DMA_BIT_MASK(32));
                        if (err)
                                goto release_regions;
                }
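bnad_pci_init() above shows the probe-time half of the conversion: DMA addressing capability is now negotiated with dma_set_mask()/dma_set_coherent_mask() instead of the pci_set_*_dma_mask() wrappers. A condensed sketch of the 64-bit-first, 32-bit-fallback idiom (function name is illustrative):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/types.h>

/* Sketch only: try 64-bit streaming and coherent masks, fall back to
 * 32-bit; report whether high (>4GB) DMA is usable. */
static int example_set_dma_masks(struct pci_dev *pdev, bool *using_dac)
{
        if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
            !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                *using_dac = true;
                return 0;
        }

        *using_dac = false;
        if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
                return -EIO;
        return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}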
index 8b1d51557defb8cd57257ef2b24cf26b4f157c46..a89117fa4970d6f4bf56827d52fe8b0d44831120 100644 (file)
@@ -181,7 +181,7 @@ struct bnad_rx_info {
 /* Unmap queues for Tx / Rx cleanup */
 struct bnad_skb_unmap {
        struct sk_buff          *skb;
-       DECLARE_PCI_UNMAP_ADDR(dma_addr)
+       DEFINE_DMA_UNMAP_ADDR(dma_addr);
 };
 
 struct bnad_unmap_q {
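The bnad.h change above swaps DECLARE_PCI_UNMAP_ADDR for DEFINE_DMA_UNMAP_ADDR, the generic helper that only reserves space for the DMA handle when the architecture needs it at unmap time. A minimal sketch of the store-at-map / read-at-unmap bookkeeping that goes with it (struct and function names are illustrative; error checking omitted):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Sketch only: remember the DMA handle next to the skb so it can be
 * unmapped later without re-deriving the address. */
struct example_unmap {
        struct sk_buff *skb;
        DEFINE_DMA_UNMAP_ADDR(dma_addr);
};

static void example_map_skb(struct device *dev, struct example_unmap *u,
                            struct sk_buff *skb)
{
        dma_addr_t addr = dma_map_single(dev, skb->data, skb_headlen(skb),
                                         DMA_TO_DEVICE);

        u->skb = skb;
        dma_unmap_addr_set(u, dma_addr, addr);
}

static void example_unmap_skb(struct device *dev, struct example_unmap *u)
{
        dma_unmap_single(dev, dma_unmap_addr(u, dma_addr),
                         skb_headlen(u->skb), DMA_TO_DEVICE);
        u->skb = NULL;
}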
index 0ba59d5aeb7f54253171f56dc711b2a3e47f0f89..2a961b7f7e17ee313131544528ba82a2f23a38c7 100644 (file)
@@ -435,7 +435,8 @@ bnx2_cnic_stop(struct bnx2 *bp)
        struct cnic_ctl_info info;
 
        mutex_lock(&bp->cnic_lock);
-       c_ops = bp->cnic_ops;
+       c_ops = rcu_dereference_protected(bp->cnic_ops,
+                                         lockdep_is_held(&bp->cnic_lock));
        if (c_ops) {
                info.cmd = CNIC_CTL_STOP_CMD;
                c_ops->cnic_ctl(bp->cnic_data, &info);
@@ -450,7 +451,8 @@ bnx2_cnic_start(struct bnx2 *bp)
        struct cnic_ctl_info info;
 
        mutex_lock(&bp->cnic_lock);
-       c_ops = bp->cnic_ops;
+       c_ops = rcu_dereference_protected(bp->cnic_ops,
+                                         lockdep_is_held(&bp->cnic_lock));
        if (c_ops) {
                if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
                        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
@@ -8315,7 +8317,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
 #endif
 };
 
-static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
+static void inline vlan_features_add(struct net_device *dev, u32 flags)
 {
        dev->vlan_features |= flags;
 }
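bnx2 now reads bp->cnic_ops through rcu_dereference_protected() because the pointer is annotated __rcu in bnx2.h below; the lockdep_is_held(&bp->cnic_lock) condition documents (and, with lockdep enabled, verifies) that these are update-side accesses already serialized by the mutex rather than RCU read-side ones. A hedged, generic sketch of that pattern (names are illustrative):

#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct ops;     /* opaque callback table for the sketch */

struct example_dev {
        struct mutex lock;
        struct ops __rcu *ops;  /* published to RCU readers elsewhere */
};

/* Update-side access: the mutex, not rcu_read_lock(), is what makes
 * this dereference safe, and lockdep can check the claim. */
static struct ops *example_get_ops_locked(struct example_dev *d)
{
        return rcu_dereference_protected(d->ops,
                                         lockdep_is_held(&d->lock));
}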
index f459fb2f9add61ac8c16063104578f3e21e48e66..7a5e88f831f68aaaa8c684be440c6fd431e6e3f9 100644 (file)
@@ -6207,6 +6207,8 @@ struct l2_fhdr {
 
 #define BNX2_CP_SCRATCH                                        0x001a0000
 
+#define BNX2_FW_MAX_ISCSI_CONN                          0x001a0080
+
 
 /*
  *  mcp_reg definition
@@ -6759,7 +6761,7 @@ struct bnx2 {
        u32             tx_wake_thresh;
 
 #ifdef BCM_CNIC
-       struct cnic_ops         *cnic_ops;
+       struct cnic_ops __rcu   *cnic_ops;
        void                    *cnic_data;
 #endif
 
index 8849699c66c42764511f0ec3a6852b4ee9cc3757..b7ff87b35fbb38eb42a217ec0a8ac22257b71b23 100644 (file)
@@ -22,8 +22,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.62.00-6"
-#define DRV_MODULE_RELDATE      "2011/01/30"
+#define DRV_MODULE_VERSION      "1.62.11-0"
+#define DRV_MODULE_RELDATE      "2011/01/31"
 #define BNX2X_BC_VER            0x040200
 
 #define BNX2X_MULTI_QUEUE
@@ -31,7 +31,7 @@
 #define BNX2X_NEW_NAPI
 
 #if defined(CONFIG_DCB)
-#define BCM_DCB
+#define BCM_DCBNL
 #endif
 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
 #define BCM_CNIC 1
@@ -129,6 +129,7 @@ void bnx2x_panic_dump(struct bnx2x *bp);
 #endif
 
 #define bnx2x_mc_addr(ha)      ((ha)->addr)
+#define bnx2x_uc_addr(ha)      ((ha)->addr)
 
 #define U64_LO(x)                      (u32)(((u64)(x)) & 0xffffffff)
 #define U64_HI(x)                      (u32)(((u64)(x)) >> 32)
@@ -341,6 +342,8 @@ struct bnx2x_fastpath {
        /* chip independed shortcut into rx_prods_offset memory */
        u32                     ustorm_rx_prods_offset;
 
+       u32                     rx_buf_size;
+
        dma_addr_t              status_blk_mapping;
 
        struct sw_tx_bd         *tx_buf_ring;
@@ -428,6 +431,10 @@ struct bnx2x_fastpath {
 };
 
 #define bnx2x_fp(bp, nr, var)          (bp->fp[nr].var)
+
+/* Use 2500 as a mini-jumbo MTU for FCoE */
+#define BNX2X_FCOE_MINI_JUMBO_MTU      2500
+
 #ifdef BCM_CNIC
 /* FCoE L2 `fastpath' is right after the eth entries */
 #define FCOE_IDX                       BNX2X_NUM_ETH_QUEUES(bp)
@@ -810,6 +817,7 @@ struct bnx2x_slowpath {
        struct eth_stats_query          fw_stats;
        struct mac_configuration_cmd    mac_config;
        struct mac_configuration_cmd    mcast_config;
+       struct mac_configuration_cmd    uc_mac_config;
        struct client_init_ramrod_data  client_init_data;
 
        /* used by dmae command executer */
@@ -911,7 +919,6 @@ struct bnx2x {
        int                     tx_ring_size;
 
        u32                     rx_csum;
-       u32                     rx_buf_size;
 /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
 #define ETH_OVREHEAD           (ETH_HLEN + 8 + 8)
 #define ETH_MIN_PACKET_SIZE            60
@@ -939,7 +946,7 @@ struct bnx2x {
        struct eth_spe          *spq_prod_bd;
        struct eth_spe          *spq_last_bd;
        __le16                  *dsb_sp_prod;
-       atomic_t                spq_left; /* serialize spq */
+       atomic_t                cq_spq_left; /* ETH_XXX ramrods credit */
        /* used to synchronize spq accesses */
        spinlock_t              spq_lock;
 
@@ -949,6 +956,7 @@ struct bnx2x {
        u16                     eq_prod;
        u16                     eq_cons;
        __le16                  *eq_cons_sb;
+       atomic_t                eq_spq_left; /* COMMON_XXX ramrods credit */
 
        /* Flags for marking that there is a STAT_QUERY or
           SET_MAC ramrod pending */
@@ -976,8 +984,12 @@ struct bnx2x {
 #define MF_FUNC_DIS                    0x1000
 #define FCOE_MACS_SET                  0x2000
 #define NO_FCOE_FLAG                   0x4000
+#define NO_ISCSI_OOO_FLAG              0x8000
+#define NO_ISCSI_FLAG                  0x10000
 
 #define NO_FCOE(bp)            ((bp)->flags & NO_FCOE_FLAG)
+#define NO_ISCSI(bp)           ((bp)->flags & NO_ISCSI_FLAG)
+#define NO_ISCSI_OOO(bp)       ((bp)->flags & NO_ISCSI_OOO_FLAG)
 
        int                     pf_num; /* absolute PF number */
        int                     pfid;   /* per-path PF number */
@@ -1064,6 +1076,7 @@ struct bnx2x {
        int                     num_queues;
        int                     disable_tpa;
        int                     int_mode;
+       u32                     *rx_indir_table;
 
        struct tstorm_eth_mac_filter_config     mac_filters;
 #define BNX2X_ACCEPT_NONE              0x0000
@@ -1110,7 +1123,7 @@ struct bnx2x {
 #define BNX2X_CNIC_FLAG_MAC_SET                1
        void                    *t2;
        dma_addr_t              t2_mapping;
-       struct cnic_ops         *cnic_ops;
+       struct cnic_ops __rcu   *cnic_ops;
        void                    *cnic_data;
        u32                     cnic_tag;
        struct cnic_eth_dev     cnic_eth_dev;
@@ -1125,13 +1138,12 @@ struct bnx2x {
        u16                     cnic_kwq_pending;
        u16                     cnic_spq_pending;
        struct mutex            cnic_mutex;
-       u8                      iscsi_mac[ETH_ALEN];
        u8                      fip_mac[ETH_ALEN];
 #endif
 
        int                     dmae_ready;
        /* used to synchronize dmae accesses */
-       struct mutex            dmae_mutex;
+       spinlock_t              dmae_lock;
 
        /* used to protect the FW mail box */
        struct mutex            fw_mb_mutex;
@@ -1448,6 +1460,12 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
 void bnx2x_calc_fc_adv(struct bnx2x *bp);
 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
                  u32 data_hi, u32 data_lo, int common);
+
+/* Clears multicast and unicast list configuration in the chip. */
+void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp);
+void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp);
+void bnx2x_invalidate_uc_list(struct bnx2x *bp);
+
 void bnx2x_update_coalesce(struct bnx2x *bp);
 int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
 
@@ -1787,5 +1805,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */
 
 extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_push_indir_table(struct bnx2x *bp);
 
 #endif /* bnx2x.h */
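The header above splits the single spq_left credit into cq_spq_left (ETH ramrods) and eq_spq_left (COMMON ramrods); the accounting itself lives in bnx2x_main.c and is not part of this excerpt. As a hedged, generic illustration of why an atomic_t works as a ramrod credit counter shared between the posting path and the completion path (not bnx2x's exact logic):

#include <linux/atomic.h>
#include <linux/types.h>

/* Generic sketch: take one credit before posting a slow-path command,
 * return it when the corresponding completion is seen. */
static bool credit_get(atomic_t *credits)
{
        /* decrement only if the pool is not already empty */
        return atomic_add_unless(credits, -1, 0);
}

static void credit_put(atomic_t *credits)
{
        atomic_inc(credits);
}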
index a71b329405335b4a93c0b55e9fbb45808b7d39e9..e83ac6dd6fc076b85b752957b9d7a88985cb0412 100644 (file)
@@ -232,7 +232,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
-                                bp->rx_buf_size, DMA_FROM_DEVICE);
+                                fp->rx_buf_size, DMA_FROM_DEVICE);
        dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 
        /* move partial skb from cons to pool (don't unmap yet) */
@@ -367,13 +367,13 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
-       struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+       struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
 
        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
-                        bp->rx_buf_size, DMA_FROM_DEVICE);
+                        fp->rx_buf_size, DMA_FROM_DEVICE);
 
        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
@@ -385,10 +385,10 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                prefetch(((char *)(skb)) + L1_CACHE_BYTES);
 
 #ifdef BNX2X_STOP_ON_ERROR
-               if (pad + len > bp->rx_buf_size) {
+               if (pad + len > fp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
-                                 pad, len, bp->rx_buf_size);
+                                 pad, len, fp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
@@ -618,7 +618,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
                        if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
                                dma_unmap_single(&bp->pdev->dev,
                                        dma_unmap_addr(rx_buf, mapping),
-                                                bp->rx_buf_size,
+                                                fp->rx_buf_size,
                                                 DMA_FROM_DEVICE);
                                skb_reserve(skb, pad);
                                skb_put(skb, len);
@@ -858,19 +858,16 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
        u16 ring_prod;
        int i, j;
 
-       bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
-               IP_HEADER_ALIGNMENT_PADDING;
-
-       DP(NETIF_MSG_IFUP,
-          "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
-
        for_each_rx_queue(bp, j) {
                struct bnx2x_fastpath *fp = &bp->fp[j];
 
+               DP(NETIF_MSG_IFUP,
+                  "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
+
                if (!fp->disable_tpa) {
                        for (i = 0; i < max_agg_queues; i++) {
                                fp->tpa_pool[i].skb =
-                                  netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+                                  netdev_alloc_skb(bp->dev, fp->rx_buf_size);
                                if (!fp->tpa_pool[i].skb) {
                                        BNX2X_ERR("Failed to allocate TPA "
                                                  "skb pool for queue[%d] - "
@@ -978,7 +975,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 
                        dma_unmap_single(&bp->pdev->dev,
                                         dma_unmap_addr(rx_buf, mapping),
-                                        bp->rx_buf_size, DMA_FROM_DEVICE);
+                                        fp->rx_buf_size, DMA_FROM_DEVICE);
 
                        rx_buf->skb = NULL;
                        dev_kfree_skb(skb);
@@ -1303,6 +1300,31 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
        return rc;
 }
 
+static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
+{
+       int i;
+
+       for_each_queue(bp, i) {
+               struct bnx2x_fastpath *fp = &bp->fp[i];
+
+               /* Always use a mini-jumbo MTU for the FCoE L2 ring */
+               if (IS_FCOE_IDX(i))
+                       /*
+                        * Although no IP frames are expected to arrive on
+                        * this ring, we still want to add an
+                        * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
+                        * overrun attack.
+                        */
+                       fp->rx_buf_size =
+                               BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
+                               BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+               else
+                       fp->rx_buf_size =
+                               bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
+                               IP_HEADER_ALIGNMENT_PADDING;
+       }
+}
+
 /* must be called with rtnl_lock */
 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 {
@@ -1326,6 +1348,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
        /* must be called before memory allocation and HW init */
        bnx2x_ilt_set_info(bp);
 
+       /* Set the receive queues buffer size */
+       bnx2x_set_rx_buf_size(bp);
+
        if (bnx2x_alloc_mem(bp))
                return -ENOMEM;
 
@@ -1481,6 +1506,15 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
        bnx2x_set_eth_mac(bp, 1);
 
+       /* Clear MC configuration */
+       if (CHIP_IS_E1(bp))
+               bnx2x_invalidate_e1_mc_list(bp);
+       else
+               bnx2x_invalidate_e1h_mc_list(bp);
+
+       /* Clear UC lists configuration */
+       bnx2x_invalidate_uc_list(bp);
+
        if (bp->pending_max) {
                bnx2x_update_max_mf_config(bp, bp->pending_max);
                bp->pending_max = 0;
@@ -1489,25 +1523,23 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
        if (bp->port.pmf)
                bnx2x_initial_phy_init(bp, load_mode);
 
+       /* Initialize Rx filtering */
+       bnx2x_set_rx_mode(bp->dev);
+
        /* Start fast path */
        switch (load_mode) {
        case LOAD_NORMAL:
                /* Tx queue should be only reenabled */
                netif_tx_wake_all_queues(bp->dev);
                /* Initialize the receive filter. */
-               bnx2x_set_rx_mode(bp->dev);
                break;
 
        case LOAD_OPEN:
                netif_tx_start_all_queues(bp->dev);
                smp_mb__after_clear_bit();
-               /* Initialize the receive filter. */
-               bnx2x_set_rx_mode(bp->dev);
                break;
 
        case LOAD_DIAG:
-               /* Initialize the receive filter. */
-               bnx2x_set_rx_mode(bp->dev);
                bp->state = BNX2X_STATE_DIAG;
                break;
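
The hunks above move the receive buffer size from a single bp->rx_buf_size to a per-fastpath fp->rx_buf_size, filled in by the new bnx2x_set_rx_buf_size() before memory allocation, so the FCoE ring can use a mini-jumbo MTU while the ethernet rings follow the netdev MTU. Below is a stand-alone sketch of that arithmetic: ETH_OVREHEAD follows the ETH_HLEN + 8 + 8 definition that appears later in this diff, while the values used for BNX2X_RX_ALIGN, IP_HEADER_ALIGNMENT_PADDING and BNX2X_FCOE_MINI_JUMBO_MTU are assumptions for illustration only.

#include <stdio.h>

#define ETH_HLEN                     14
#define ETH_OVREHEAD                 (ETH_HLEN + 8 + 8)  /* L2 + 2 VLANs + LLC SNAP */
#define BNX2X_RX_ALIGN               64                  /* assumed cache-line alignment */
#define IP_HEADER_ALIGNMENT_PADDING  2                   /* assumed NET_IP_ALIGN-style pad */
#define BNX2X_FCOE_MINI_JUMBO_MTU    2500                /* assumed mini-jumbo MTU */

/* Mirrors the per-ring sizing done in bnx2x_set_rx_buf_size(). */
static unsigned int rx_buf_size(unsigned int mtu, int is_fcoe_ring)
{
	unsigned int base = is_fcoe_ring ? BNX2X_FCOE_MINI_JUMBO_MTU : mtu;

	return base + ETH_OVREHEAD + BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
}

int main(void)
{
	printf("eth ring, mtu 1500 -> %u bytes per rx buffer\n", rx_buf_size(1500, 0));
	printf("fcoe ring           -> %u bytes per rx buffer\n", rx_buf_size(1500, 1));
	return 0;
}
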
 
index 85ea7f26b51f19778e8137d224bd1a245ab9d643..ef37b98d614694ebb972046c52a253fe28e7d846 100644 (file)
@@ -831,11 +831,11 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;
 
-       skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+       skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;
 
-       mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+       mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
                                 DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
@@ -901,7 +901,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
                if (fp->tpa_state[i] == BNX2X_TPA_START)
                        dma_unmap_single(&bp->pdev->dev,
                                         dma_unmap_addr(rx_buf, mapping),
-                                        bp->rx_buf_size, DMA_FROM_DEVICE);
+                                        fp->rx_buf_size, DMA_FROM_DEVICE);
 
                dev_kfree_skb(skb);
                rx_buf->skb = NULL;
index fb60021f81fb77ae90e3a5f87220b4741a28f0d4..9a24d79c71d93fc17f7ec60393cd3311156e4877 100644 (file)
@@ -19,6 +19,9 @@
 #include <linux/netdevice.h>
 #include <linux/types.h>
 #include <linux/errno.h>
+#ifdef BCM_DCBNL
+#include <linux/dcbnl.h>
+#endif
 
 #include "bnx2x.h"
 #include "bnx2x_cmn.h"
@@ -508,13 +511,75 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
        return 0;
 }
 
+
+#ifdef BCM_DCBNL
+static inline
+u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
+{
+       u8 pri;
+
+       /* Choose the highest priority */
+       for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
+               if (ent->pri_bitmap & (1 << pri))
+                       break;
+       return pri;
+}
+
+static inline
+u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
+{
+       return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) ==
+               DCBX_APP_SF_PORT) ? DCB_APP_IDTYPE_PORTNUM :
+               DCB_APP_IDTYPE_ETHTYPE;
+}
+
+static inline
+void bnx2x_dcbx_invalidate_local_apps(struct bnx2x *bp)
+{
+       int i;
+       for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
+               bp->dcbx_local_feat.app.app_pri_tbl[i].appBitfield &=
+                                                       ~DCBX_APP_ENTRY_VALID;
+}
+
+int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
+{
+       int i, err = 0;
+
+       for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) {
+               struct dcbx_app_priority_entry *ent =
+                       &bp->dcbx_local_feat.app.app_pri_tbl[i];
+
+               if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
+                       u8 up = bnx2x_dcbx_dcbnl_app_up(ent);
+
+                       /* avoid invalid user-priority */
+                       if (up) {
+                               struct dcb_app app;
+                               app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
+                               app.protocol = ent->app_id;
+                               app.priority = delall ? 0 : up;
+                               err = dcb_setapp(bp->dev, &app);
+                       }
+               }
+       }
+       return err;
+}
+#endif
+
 void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 {
        switch (state) {
        case BNX2X_DCBX_STATE_NEG_RECEIVED:
                {
                        DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
-
+#ifdef BCM_DCBNL
+                       /**
+                        * Delete app tlvs from dcbnl before reading new
+                        * negotiation results
+                        */
+                       bnx2x_dcbnl_update_applist(bp, true);
+#endif
                        /* Read neg results if dcbx is in the FW */
                        if (bnx2x_dcbx_read_shmem_neg_results(bp))
                                return;
@@ -526,10 +591,24 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
                                                 bp->dcbx_error);
 
                        if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
+#ifdef BCM_DCBNL
+                               /**
+                                * Add new app tlvs to dcbnl
+                                */
+                               bnx2x_dcbnl_update_applist(bp, false);
+#endif
                                bnx2x_dcbx_stop_hw_tx(bp);
                                return;
                        }
                        /* fall through */
+#ifdef BCM_DCBNL
+                       /**
+                        * Invalidate the local app tlvs if they are not added
+                        * to the dcbnl app list to avoid deleting them from
+                        * the list later on
+                        */
+                       bnx2x_dcbx_invalidate_local_apps(bp);
+#endif
                }
        case BNX2X_DCBX_STATE_TX_PAUSED:
                DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
@@ -1505,8 +1584,7 @@ static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
        bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg);
 }
 /* DCB netlink */
-#ifdef BCM_DCB
-#include <linux/dcbnl.h>
+#ifdef BCM_DCBNL
 
 #define BNX2X_DCBX_CAPS                (DCB_CAP_DCBX_LLD_MANAGED | \
                                DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC)
@@ -1816,32 +1894,6 @@ static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state)
        bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0);
 }
 
-static bool bnx2x_app_is_equal(struct dcbx_app_priority_entry *app_ent,
-                              u8 idtype, u16 idval)
-{
-       if (!(app_ent->appBitfield & DCBX_APP_ENTRY_VALID))
-               return false;
-
-       switch (idtype) {
-       case DCB_APP_IDTYPE_ETHTYPE:
-               if ((app_ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) !=
-                       DCBX_APP_SF_ETH_TYPE)
-                       return false;
-               break;
-       case DCB_APP_IDTYPE_PORTNUM:
-               if ((app_ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) !=
-                       DCBX_APP_SF_PORT)
-                       return false;
-               break;
-       default:
-               return false;
-       }
-       if (app_ent->app_id != idval)
-               return false;
-
-       return true;
-}
-
 static void bnx2x_admin_app_set_ent(
        struct bnx2x_admin_priority_app_table *app_ent,
        u8 idtype, u16 idval, u8 up)
@@ -1943,30 +1995,6 @@ static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
        return bnx2x_set_admin_app_up(bp, idtype, idval, up);
 }
 
-static u8 bnx2x_dcbnl_get_app_up(struct net_device *netdev, u8 idtype,
-                                u16 idval)
-{
-       int i;
-       u8 up = 0;
-
-       struct bnx2x *bp = netdev_priv(netdev);
-       DP(NETIF_MSG_LINK, "app_type %d, app_id 0x%x\n", idtype, idval);
-
-       /* iterate over the app entries looking for idtype and idval */
-       for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
-               if (bnx2x_app_is_equal(&bp->dcbx_local_feat.app.app_pri_tbl[i],
-                                      idtype, idval))
-                       break;
-
-       if (i < DCBX_MAX_APP_PROTOCOL)
-               /* if found return up */
-               up = bp->dcbx_local_feat.app.app_pri_tbl[i].pri_bitmap;
-       else
-               DP(NETIF_MSG_LINK, "app not found\n");
-
-       return up;
-}
-
 static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev)
 {
        struct bnx2x *bp = netdev_priv(netdev);
@@ -2107,7 +2135,6 @@ const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
        .setnumtcs      = bnx2x_dcbnl_set_numtcs,
        .getpfcstate    = bnx2x_dcbnl_get_pfc_state,
        .setpfcstate    = bnx2x_dcbnl_set_pfc_state,
-       .getapp         = bnx2x_dcbnl_get_app_up,
        .setapp         = bnx2x_dcbnl_set_app_up,
        .getdcbx        = bnx2x_dcbnl_get_dcbx,
        .setdcbx        = bnx2x_dcbnl_set_dcbx,
@@ -2115,4 +2142,4 @@ const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
        .setfeatcfg     = bnx2x_dcbnl_set_featcfg,
 };
 
-#endif /* BCM_DCB */
+#endif /* BCM_DCBNL */
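
The bnx2x_dcbx_dcbnl_app_up() helper added above reports the highest user priority set in an app entry's priority bitmap (falling back to 0 when only priority 0, or nothing, is set), and bnx2x_dcbnl_update_applist() skips entries that resolve to priority 0. A stand-alone sketch of that scan follows; MAX_PFC_PRIORITIES is assumed to be 8 (one bit per 802.1p user priority) for the example.

#include <stdio.h>
#include <stdint.h>

#define MAX_PFC_PRIORITIES 8	/* assumed: one bit per 802.1p user priority */

/* Same scan as bnx2x_dcbx_dcbnl_app_up(): highest set bit wins. */
static uint8_t highest_app_priority(uint8_t pri_bitmap)
{
	uint8_t pri;

	for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
		if (pri_bitmap & (1u << pri))
			break;
	return pri;
}

int main(void)
{
	/* bits 3 and 5 set -> 5; only bit 0 set -> 0 */
	printf("bitmap 0x28 -> priority %u\n", (unsigned)highest_app_priority(0x28));
	printf("bitmap 0x01 -> priority %u\n", (unsigned)highest_app_priority(0x01));
	return 0;
}
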
index f650f98e4092d0e2956bab1d2a7a909577d6cb5d..71b8eda43bd0f27739cf33264104ad473a2fd0d8 100644 (file)
@@ -189,8 +189,9 @@ enum {
 void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
 
 /* DCB netlink */
-#ifdef BCM_DCB
+#ifdef BCM_DCBNL
 extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
-#endif /* BCM_DCB */
+int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
+#endif /* BCM_DCBNL */
 
 #endif /* BNX2X_DCB_H */
index 7e92f9d0dcfdd49cb946bfd5ce112ae15b545af1..f5050155c6b5e04d87a62be1f8f22a78dc953d9e 100644 (file)
@@ -1617,7 +1617,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
        /* prepare the loopback packet */
        pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
                     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
-       skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+       skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
        if (!skb) {
                rc = -ENOMEM;
                goto test_loopback_exit;
@@ -2131,6 +2131,59 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
        return 0;
 }
 
+static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+                          void *rules __always_unused)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       switch (info->cmd) {
+       case ETHTOOL_GRXRINGS:
+               info->data = BNX2X_NUM_ETH_QUEUES(bp);
+               return 0;
+
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int bnx2x_get_rxfh_indir(struct net_device *dev,
+                               struct ethtool_rxfh_indir *indir)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       size_t copy_size =
+               min_t(size_t, indir->size, TSTORM_INDIRECTION_TABLE_SIZE);
+
+       if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+               return -EOPNOTSUPP;
+
+       indir->size = TSTORM_INDIRECTION_TABLE_SIZE;
+       memcpy(indir->ring_index, bp->rx_indir_table,
+              copy_size * sizeof(bp->rx_indir_table[0]));
+       return 0;
+}
+
+static int bnx2x_set_rxfh_indir(struct net_device *dev,
+                               const struct ethtool_rxfh_indir *indir)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       size_t i;
+
+       if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+               return -EOPNOTSUPP;
+
+       /* Validate size and indices */
+       if (indir->size != TSTORM_INDIRECTION_TABLE_SIZE)
+               return -EINVAL;
+       for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
+               if (indir->ring_index[i] >= BNX2X_NUM_ETH_QUEUES(bp))
+                       return -EINVAL;
+
+       memcpy(bp->rx_indir_table, indir->ring_index,
+              indir->size * sizeof(bp->rx_indir_table[0]));
+       bnx2x_push_indir_table(bp);
+       return 0;
+}
+
 static const struct ethtool_ops bnx2x_ethtool_ops = {
        .get_settings           = bnx2x_get_settings,
        .set_settings           = bnx2x_set_settings,
@@ -2167,6 +2220,9 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
        .get_strings            = bnx2x_get_strings,
        .phys_id                = bnx2x_phys_id,
        .get_ethtool_stats      = bnx2x_get_ethtool_stats,
+       .get_rxnfc              = bnx2x_get_rxnfc,
+       .get_rxfh_indir         = bnx2x_get_rxfh_indir,
+       .set_rxfh_indir         = bnx2x_set_rxfh_indir,
 };
 
 void bnx2x_set_ethtool_ops(struct net_device *netdev)
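
The new ethtool handlers above expose bp->rx_indir_table through the RXFH indirection API: the get path reports the table size and contents, while the set path checks the size and that every entry points at an existing ethernet queue before copying it and pushing it to the chip with bnx2x_push_indir_table() (declared in bnx2x.h earlier in this diff). Below is a stand-alone sketch of the set-path validation and copy; TABLE_SIZE is a stand-in for TSTORM_INDIRECTION_TABLE_SIZE, whose value is not visible in this hunk.

#include <stdio.h>
#include <stdint.h>

#define TABLE_SIZE 128	/* assumed stand-in for TSTORM_INDIRECTION_TABLE_SIZE */

/* Mirrors the checks in bnx2x_set_rxfh_indir(): exact table size and every
 * ring index below the number of ethernet rx queues. */
static int set_indir_table(uint8_t *table, const uint32_t *ring_index,
			   size_t size, unsigned int num_queues)
{
	size_t i;

	if (size != TABLE_SIZE)
		return -1;			/* -EINVAL in the driver */
	for (i = 0; i < TABLE_SIZE; i++)
		if (ring_index[i] >= num_queues)
			return -1;		/* -EINVAL in the driver */
	for (i = 0; i < TABLE_SIZE; i++)
		table[i] = (uint8_t)ring_index[i];
	/* the driver then writes the table to hardware (bnx2x_push_indir_table) */
	return 0;
}

int main(void)
{
	static uint8_t table[TABLE_SIZE];
	static uint32_t map[TABLE_SIZE];
	size_t i;

	for (i = 0; i < TABLE_SIZE; i++)
		map[i] = i % 4;		/* spread hash buckets over 4 rx queues */
	printf("set_indir_table -> %d\n", set_indir_table(table, map, TABLE_SIZE, 4));
	return 0;
}
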
index 548f5631c0dc1e56a2b7df3c5e3596e75639e2e5..be503cc0a50baf8a9985d64096a91e756811098a 100644 (file)
 
 #include "bnx2x_fw_defs.h"
 
+#define FW_ENCODE_32BIT_PATTERN                0x1e1e1e1e
+
 struct license_key {
        u32 reserved[6];
 
-#if defined(__BIG_ENDIAN)
-       u16 max_iscsi_init_conn;
-       u16 max_iscsi_trgt_conn;
-#elif defined(__LITTLE_ENDIAN)
-       u16 max_iscsi_trgt_conn;
-       u16 max_iscsi_init_conn;
-#endif
+       u32 max_iscsi_conn;
+#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK 0xFFFF
+#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT        0
+#define BNX2X_MAX_ISCSI_INIT_CONN_MASK 0xFFFF0000
+#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT        16
 
-       u32 reserved_a[6];
-};
+       u32 reserved_a;
+
+       u32 max_fcoe_conn;
+#define BNX2X_MAX_FCOE_TRGT_CONN_MASK  0xFFFF
+#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT 0
+#define BNX2X_MAX_FCOE_INIT_CONN_MASK  0xFFFF0000
+#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT 16
 
+       u32 reserved_b[4];
+};
 
 #define PORT_0                         0
 #define PORT_1                         1
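
The reworked license_key replaces the endian-dependent u16 pairs with single u32 words that are decoded with the MASK/SHIFT pairs above (the new max_fcoe_conn word follows the same pattern). A user-space sketch of the decode, using an invented example value:

#include <stdio.h>
#include <stdint.h>

/* Masks/shifts copied from the new license_key layout above. */
#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK	0xFFFF
#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT	0
#define BNX2X_MAX_ISCSI_INIT_CONN_MASK	0xFFFF0000
#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT	16

int main(void)
{
	uint32_t max_iscsi_conn = 0x00400080;	/* example only: 64 initiator / 128 target */
	unsigned int trgt = (max_iscsi_conn & BNX2X_MAX_ISCSI_TRGT_CONN_MASK) >>
			    BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT;
	unsigned int init = (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
			    BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;

	printf("target connections:    %u\n", trgt);	/* 128 */
	printf("initiator connections: %u\n", init);	/* 64 */
	return 0;
}
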
@@ -237,8 +244,26 @@ struct port_hw_cfg {                           /* port 0: 0x12c  port 1: 0x2bc */
 #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT            16
 
 
-       u32 Reserved0[16];                                  /* 0x158 */
-
+       u32 Reserved0[3];                                   /* 0x158 */
+       /*      Controls the TX laser of the SFP+ module */
+       u32 sfp_ctrl;                                   /* 0x164 */
+#define PORT_HW_CFG_TX_LASER_MASK                            0x000000FF
+#define PORT_HW_CFG_TX_LASER_SHIFT                           0
+#define PORT_HW_CFG_TX_LASER_MDIO                            0x00000000
+#define PORT_HW_CFG_TX_LASER_GPIO0                           0x00000001
+#define PORT_HW_CFG_TX_LASER_GPIO1                           0x00000002
+#define PORT_HW_CFG_TX_LASER_GPIO2                           0x00000003
+#define PORT_HW_CFG_TX_LASER_GPIO3                           0x00000004
+
+    /* Controls the fault module LED of the SFP+ */
+#define PORT_HW_CFG_FAULT_MODULE_LED_MASK                    0x0000FF00
+#define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT                   8
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0                   0x00000000
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1                   0x00000100
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2                   0x00000200
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3                   0x00000300
+#define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED                0x00000400
+       u32 Reserved01[12];                                 /* 0x168 */
        /*  for external PHY, or forced mode or during AN */
        u16 xgxs_config_rx[4];                              /* 0x198 */
 
@@ -246,12 +271,78 @@ struct port_hw_cfg {                          /* port 0: 0x12c  port 1: 0x2bc */
 
        u32 Reserved1[56];                                  /* 0x1A8 */
        u32 default_cfg;                                    /* 0x288 */
+#define PORT_HW_CFG_GPIO0_CONFIG_MASK                        0x00000003
+#define PORT_HW_CFG_GPIO0_CONFIG_SHIFT                       0
+#define PORT_HW_CFG_GPIO0_CONFIG_NA                          0x00000000
+#define PORT_HW_CFG_GPIO0_CONFIG_LOW                         0x00000001
+#define PORT_HW_CFG_GPIO0_CONFIG_HIGH                        0x00000002
+#define PORT_HW_CFG_GPIO0_CONFIG_INPUT                       0x00000003
+
+#define PORT_HW_CFG_GPIO1_CONFIG_MASK                        0x0000000C
+#define PORT_HW_CFG_GPIO1_CONFIG_SHIFT                       2
+#define PORT_HW_CFG_GPIO1_CONFIG_NA                          0x00000000
+#define PORT_HW_CFG_GPIO1_CONFIG_LOW                         0x00000004
+#define PORT_HW_CFG_GPIO1_CONFIG_HIGH                        0x00000008
+#define PORT_HW_CFG_GPIO1_CONFIG_INPUT                       0x0000000c
+
+#define PORT_HW_CFG_GPIO2_CONFIG_MASK                        0x00000030
+#define PORT_HW_CFG_GPIO2_CONFIG_SHIFT                       4
+#define PORT_HW_CFG_GPIO2_CONFIG_NA                          0x00000000
+#define PORT_HW_CFG_GPIO2_CONFIG_LOW                         0x00000010
+#define PORT_HW_CFG_GPIO2_CONFIG_HIGH                        0x00000020
+#define PORT_HW_CFG_GPIO2_CONFIG_INPUT                       0x00000030
+
+#define PORT_HW_CFG_GPIO3_CONFIG_MASK                        0x000000C0
+#define PORT_HW_CFG_GPIO3_CONFIG_SHIFT                       6
+#define PORT_HW_CFG_GPIO3_CONFIG_NA                          0x00000000
+#define PORT_HW_CFG_GPIO3_CONFIG_LOW                         0x00000040
+#define PORT_HW_CFG_GPIO3_CONFIG_HIGH                        0x00000080
+#define PORT_HW_CFG_GPIO3_CONFIG_INPUT                       0x000000c0
+
+       /*
+        * When the KR link must be forced to a speed that is not
+        * KR-compliant, this parameter determines the trigger for it.
+        * When a GPIO is selected, a low input forces the speed. Currently
+        * the default speed is 1G. In the future this may be widened to
+        * select the forced speed with another parameter. Note that when
+        * force-1G is enabled, it overrides option 56: Link Speed option.
+        */
+#define PORT_HW_CFG_FORCE_KR_ENABLER_MASK                    0x00000F00
+#define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT                   8
+#define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED                      0x00000000
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0                0x00000100
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0                0x00000200
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0                0x00000300
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0                0x00000400
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1                0x00000500
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1                0x00000600
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1                0x00000700
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1                0x00000800
+#define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED                  0x00000900
+    /* Determines which GPIO (if any) is used to reset the external PHY */
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK                    0x000F0000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT                   16
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE                0x00000000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0                0x00010000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0                0x00020000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0                0x00030000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0                0x00040000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1                0x00050000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1                0x00060000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1                0x00070000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1                0x00080000
        /*  Enable BAM on KR */
 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK                    0x00100000
 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT                   20
 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED                0x00000000
 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED                 0x00100000
 
+       /*  Enable Common Mode Sense */
+#define PORT_HW_CFG_ENABLE_CMS_MASK                          0x00200000
+#define PORT_HW_CFG_ENABLE_CMS_SHIFT                         21
+#define PORT_HW_CFG_ENABLE_CMS_DISABLED                              0x00000000
+#define PORT_HW_CFG_ENABLE_CMS_ENABLED                       0x00200000
+
        u32 speed_capability_mask2;                         /* 0x28C */
 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK                0x0000FFFF
 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT               0
@@ -381,6 +472,7 @@ struct port_hw_cfg {                            /* port 0: 0x12c  port 1: 0x2bc */
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727      0x00000900
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC   0x00000a00
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823     0x00000b00
+#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833     0x00000d00
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE      0x0000fd00
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN     0x0000ff00
 
index dd1210fddfffafec1ef3c1b8ca13344715be4f7f..f2f367d4e74dab400f7b40ee927614ec5504de9b 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2008-2009 Broadcom Corporation
+/* Copyright 2008-2011 Broadcom Corporation
  *
  * Unless you and Broadcom execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
 
 /********************************************************/
 #define ETH_HLEN                       14
-#define ETH_OVREHEAD           (ETH_HLEN + 8 + 8)/* 16 for CRC + VLAN + LLC */
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVREHEAD                   (ETH_HLEN + 8 + 8)
 #define ETH_MIN_PACKET_SIZE            60
 #define ETH_MAX_PACKET_SIZE            1500
 #define ETH_MAX_JUMBO_PACKET_SIZE      9600
 #define MDIO_ACCESS_TIMEOUT            1000
-#define BMAC_CONTROL_RX_ENABLE 2
+#define BMAC_CONTROL_RX_ENABLE         2
 
 /***********************************************************/
 /*                     Shortcut definitions               */
@@ -79,7 +80,7 @@
 
 #define AUTONEG_CL37           SHARED_HW_CFG_AN_ENABLE_CL37
 #define AUTONEG_CL73           SHARED_HW_CFG_AN_ENABLE_CL73
-#define AUTONEG_BAM            SHARED_HW_CFG_AN_ENABLE_BAM
+#define AUTONEG_BAM            SHARED_HW_CFG_AN_ENABLE_BAM
 #define AUTONEG_PARALLEL \
                                SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
 #define AUTONEG_SGMII_FIBER_AUTODET \
 #define GP_STATUS_10G_KX4 \
                        MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
 
-#define LINK_10THD                     LINK_STATUS_SPEED_AND_DUPLEX_10THD
-#define LINK_10TFD                     LINK_STATUS_SPEED_AND_DUPLEX_10TFD
+#define LINK_10THD             LINK_STATUS_SPEED_AND_DUPLEX_10THD
+#define LINK_10TFD             LINK_STATUS_SPEED_AND_DUPLEX_10TFD
 #define LINK_100TXHD           LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
-#define LINK_100T4                     LINK_STATUS_SPEED_AND_DUPLEX_100T4
+#define LINK_100T4             LINK_STATUS_SPEED_AND_DUPLEX_100T4
 #define LINK_100TXFD           LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
 #define LINK_1000THD           LINK_STATUS_SPEED_AND_DUPLEX_1000THD
 #define LINK_1000TFD           LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
 #define LINK_2500THD           LINK_STATUS_SPEED_AND_DUPLEX_2500THD
 #define LINK_2500TFD           LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
 #define LINK_2500XFD           LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
-#define LINK_10GTFD                    LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
-#define LINK_10GXFD                    LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
-#define LINK_12GTFD                    LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
-#define LINK_12GXFD                    LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
+#define LINK_10GTFD            LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
+#define LINK_10GXFD            LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
+#define LINK_12GTFD            LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
+#define LINK_12GXFD            LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
 #define LINK_12_5GTFD          LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD
 #define LINK_12_5GXFD          LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD
-#define LINK_13GTFD                    LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
-#define LINK_13GXFD                    LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
-#define LINK_15GTFD                    LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
-#define LINK_15GXFD                    LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
-#define LINK_16GTFD                    LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
-#define LINK_16GXFD                    LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
+#define LINK_13GTFD            LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
+#define LINK_13GXFD            LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
+#define LINK_15GTFD            LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
+#define LINK_15GXFD            LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
+#define LINK_16GTFD            LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
+#define LINK_16GXFD            LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
 
 #define PHY_XGXS_FLAG                  0x1
 #define PHY_SGMII_FLAG                 0x2
 
 /* */
 #define SFP_EEPROM_CON_TYPE_ADDR               0x2
-       #define SFP_EEPROM_CON_TYPE_VAL_LC              0x7
+       #define SFP_EEPROM_CON_TYPE_VAL_LC      0x7
        #define SFP_EEPROM_CON_TYPE_VAL_COPPER  0x21
 
 
 
 #define SFP_EEPROM_FC_TX_TECH_ADDR             0x8
        #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
-       #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE      0x8
+       #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE  0x8
 
-#define SFP_EEPROM_OPTIONS_ADDR                0x40
+#define SFP_EEPROM_OPTIONS_ADDR                        0x40
        #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
-#define SFP_EEPROM_OPTIONS_SIZE                2
+#define SFP_EEPROM_OPTIONS_SIZE                        2
 
-#define EDC_MODE_LINEAR                                0x0022
-#define EDC_MODE_LIMITING                              0x0044
-#define EDC_MODE_PASSIVE_DAC                   0x0055
+#define EDC_MODE_LINEAR                                0x0022
+#define EDC_MODE_LIMITING                              0x0044
+#define EDC_MODE_PASSIVE_DAC                   0x0055
 
 
 #define ETS_BW_LIMIT_CREDIT_UPPER_BOUND                (0x5000)
 /*                     INTERFACE                          */
 /**********************************************************/
 
-#define CL45_WR_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
+#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
        bnx2x_cl45_write(_bp, _phy, \
                (_phy)->def_md_devad, \
                (_bank + (_addr & 0xf)), \
                _val)
 
-#define CL45_RD_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
+#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
        bnx2x_cl45_read(_bp, _phy, \
                (_phy)->def_md_devad, \
                (_bank + (_addr & 0xf)), \
                _val)
 
-static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
-                         u8 devad, u16 reg, u16 *ret_val);
-
-static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
-                          u8 devad, u16 reg, u16 val);
-
 static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
 {
        u32 val = REG_RD(bp, reg);
@@ -216,7 +211,7 @@ void bnx2x_ets_disabled(struct link_params *params)
 
        DP(NETIF_MSG_LINK, "ETS disabled configuration\n");
 
-       /**
+       /*
         * mapping between entry  priority to client number (0,1,2 -debug and
         * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
         * 3bits client num.
@@ -225,7 +220,7 @@ void bnx2x_ets_disabled(struct link_params *params)
         */
 
        REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
-       /**
+       /*
         * Bitmap of 5bits length. Each bit specifies whether the entry behaves
         * as strict.  Bits 0,1,2 - debug and management entries, 3 -
         * COS0 entry, 4 - COS1 entry.
@@ -237,12 +232,12 @@ void bnx2x_ets_disabled(struct link_params *params)
        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
        /* defines which entries (clients) are subjected to WFQ arbitration */
        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
-       /**
-       * For strict priority entries defines the number of consecutive
-       * slots for the highest priority.
-       */
+       /*
+        * For strict priority entries defines the number of consecutive
+        * slots for the highest priority.
+        */
        REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
-       /**
+       /*
         * mapping between the CREDIT_WEIGHT registers and actual client
         * numbers
         */
@@ -255,7 +250,7 @@ void bnx2x_ets_disabled(struct link_params *params)
        REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
        /* ETS mode disable */
        REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
-       /**
+       /*
         * If ETS mode is enabled (there is no strict priority) defines a WFQ
         * weight for COS0/COS1.
         */
@@ -268,24 +263,24 @@ void bnx2x_ets_disabled(struct link_params *params)
        REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
 }
 
-void bnx2x_ets_bw_limit_common(const struct link_params *params)
+static void bnx2x_ets_bw_limit_common(const struct link_params *params)
 {
        /* ETS disabled configuration */
        struct bnx2x *bp = params->bp;
        DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
-       /**
-       * defines which entries (clients) are subjected to WFQ arbitration
-       * COS0 0x8
-       * COS1 0x10
-       */
+       /*
+        * defines which entries (clients) are subjected to WFQ arbitration
+        * COS0 0x8
+        * COS1 0x10
+        */
        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
-       /**
-       * mapping between the ARB_CREDIT_WEIGHT registers and actual
-       * client numbers (WEIGHT_0 does not actually have to represent
-       * client 0)
-       *    PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
-       *  cos1-001     cos0-000     dbg1-100     dbg0-011     MCP-010
-       */
+       /*
+        * mapping between the ARB_CREDIT_WEIGHT registers and actual
+        * client numbers (WEIGHT_0 does not actually have to represent
+        * client 0)
+        *    PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
+        *  cos1-001     cos0-000     dbg1-100     dbg0-011     MCP-010
+        */
        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
 
        REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
@@ -298,14 +293,14 @@ void bnx2x_ets_bw_limit_common(const struct link_params *params)
 
        /* Defines the number of consecutive slots for the strict priority */
        REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
-       /**
-       * Bitmap of 5bits length. Each bit specifies whether the entry behaves
-       * as strict.  Bits 0,1,2 - debug and management entries, 3 - COS0
-       * entry, 4 - COS1 entry.
-       * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
-       * bit4   bit3     bit2     bit1    bit0
-       * MCP and debug are strict
-       */
+       /*
+        * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+        * as strict.  Bits 0,1,2 - debug and management entries, 3 - COS0
+        * entry, 4 - COS1 entry.
+        * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
+        * bit4   bit3    bit2     bit1    bit0
+        * MCP and debug are strict
+        */
        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
 
        /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
@@ -329,8 +324,7 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
        if ((0 == total_bw) ||
            (0 == cos0_bw) ||
            (0 == cos1_bw)) {
-               DP(NETIF_MSG_LINK,
-                  "bnx2x_ets_bw_limit: Total BW can't be zero\n");
+               DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
                return;
        }
 
@@ -355,7 +349,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
        u32 val = 0;
 
        DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
-       /**
+       /*
         * Bitmap of 5bits length. Each bit specifies whether the entry behaves
         * as strict.  Bits 0,1,2 - debug and management entries,
         * 3 - COS0 entry, 4 - COS1 entry.
@@ -364,7 +358,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
         * MCP and debug are strict
         */
        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
-       /**
+       /*
         * For strict priority entries defines the number of consecutive slots
         * for the highest priority.
         */
@@ -377,14 +371,14 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
        /* Defines the number of consecutive slots for the strict priority */
        REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
 
-       /**
-       * mapping between entry  priority to client number (0,1,2 -debug and
-       * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
-       * 3bits client num.
-       *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
-       * dbg0-010     dbg1-001     cos1-100     cos0-011     MCP-000
-       * dbg0-010     dbg1-001     cos0-011     cos1-100     MCP-000
-       */
+       /*
+        * mapping between entry  priority to client number (0,1,2 -debug and
+        * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
+        * 3bits client num.
+        *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
+        * dbg0-010     dbg1-001     cos1-100     cos0-011     MCP-000
+        * dbg0-010     dbg1-001     cos0-011     cos1-100     MCP-000
+        */
        val = (0 == strict_cos) ? 0x2318 : 0x22E0;
        REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
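
The two NIG_REG_P0_TX_ARB_PRIORITY_CLIENT values used here encode five 3-bit client numbers, one per arbitration slot PRI0-PRI4, as described in the comment above. A small user-space sketch (illustration only, not driver code) packs the two mapping rows from that comment and reproduces 0x2318 and 0x22E0:

#include <stdio.h>
#include <stdint.h>

/* PRI0 occupies bits 0-2, PRI1 bits 3-5, ... PRI4 bits 12-14. */
static uint32_t pack_priority_clients(const uint8_t client[5])
{
	uint32_t val = 0;
	int pri;

	for (pri = 0; pri < 5; pri++)
		val |= (uint32_t)(client[pri] & 0x7) << (3 * pri);
	return val;
}

int main(void)
{
	/* client numbers: 0=MCP, 1=dbg1, 2=dbg0, 3=COS0, 4=COS1 (per the comment above) */
	uint8_t map_strict_cos0[5] = { 0, 3, 4, 1, 2 };	/* PRI0..PRI4: MCP, cos0, cos1, dbg1, dbg0 */
	uint8_t map_strict_cos1[5] = { 0, 4, 3, 1, 2 };	/* PRI0..PRI4: MCP, cos1, cos0, dbg1, dbg0 */

	printf("strict_cos == 0 -> 0x%04x\n", (unsigned)pack_priority_clients(map_strict_cos0));
	printf("strict_cos != 0 -> 0x%04x\n", (unsigned)pack_priority_clients(map_strict_cos1));
	return 0;
}
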
 
@@ -471,7 +465,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
 /*                     MAC/PBF section                           */
 /******************************************************************/
 static void bnx2x_emac_init(struct link_params *params,
-                          struct link_vars *vars)
+                           struct link_vars *vars)
 {
        /* reset and unreset the emac core */
        struct bnx2x *bp = params->bp;
@@ -481,10 +475,10 @@ static void bnx2x_emac_init(struct link_params *params,
        u16 timeout;
 
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
-                  (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+              (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
        udelay(5);
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
-                  (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+              (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
 
        /* init emac - use read-modify-write */
        /* self clear reset */
@@ -515,7 +509,7 @@ static void bnx2x_emac_init(struct link_params *params,
 }
 
 static u8 bnx2x_emac_enable(struct link_params *params,
-                         struct link_vars *vars, u8 lb)
+                           struct link_vars *vars, u8 lb)
 {
        struct bnx2x *bp = params->bp;
        u8 port = params->port;
@@ -527,55 +521,33 @@ static u8 bnx2x_emac_enable(struct link_params *params,
        /* enable emac and not bmac */
        REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
 
-       /* for paladium */
-       if (CHIP_REV_IS_EMUL(bp)) {
-               /* Use lane 1 (of lanes 0-3) */
-               REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
-               REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
-                           port*4, 1);
-       }
-       /* for fpga */
-       else
-
-       if (CHIP_REV_IS_FPGA(bp)) {
-               /* Use lane 1 (of lanes 0-3) */
-               DP(NETIF_MSG_LINK, "bnx2x_emac_enable: Setting FPGA\n");
-
-               REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
-               REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4,
-                           0);
-       } else
        /* ASIC */
        if (vars->phy_flags & PHY_XGXS_FLAG) {
                u32 ser_lane = ((params->lane_config &
-                           PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
-                           PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+                                PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+                               PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
 
                DP(NETIF_MSG_LINK, "XGXS\n");
                /* select the master lanes (out of 0-3) */
-               REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 +
-                          port*4, ser_lane);
+               REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
                /* select XGXS */
-               REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
-                          port*4, 1);
+               REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
 
        } else { /* SerDes */
                DP(NETIF_MSG_LINK, "SerDes\n");
                /* select SerDes */
-               REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
-                          port*4, 0);
+               REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
        }
 
        bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
-                   EMAC_RX_MODE_RESET);
+                     EMAC_RX_MODE_RESET);
        bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
-                   EMAC_TX_MODE_RESET);
+                     EMAC_TX_MODE_RESET);
 
        if (CHIP_REV_IS_SLOW(bp)) {
                /* config GMII mode */
                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
-               EMAC_WR(bp, EMAC_REG_EMAC_MODE,
-                           (val | EMAC_MODE_PORT_GMII));
+               EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
        } else { /* ASIC */
                /* pause enable/disable */
                bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
@@ -605,14 +577,14 @@ static u8 bnx2x_emac_enable(struct link_params *params,
        val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
        val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
 
-       /**
-       * Setting this bit causes MAC control frames (except for pause
-       * frames) to be passed on for processing. This setting has no
-       * affect on the operation of the pause frames. This bit effects
-       * all packets regardless of RX Parser packet sorting logic.
-       * Turn the PFC off to make sure we are in Xon state before
-       * enabling it.
-       */
+       /*
+        * Setting this bit causes MAC control frames (except for pause
+        * frames) to be passed on for processing. This setting has no
+        * effect on the operation of the pause frames. This bit affects
+        * all packets regardless of RX Parser packet sorting logic.
+        * Turn the PFC off to make sure we are in Xon state before
+        * enabling it.
+        */
        EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
        if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
                DP(NETIF_MSG_LINK, "PFC is enabled\n");
@@ -666,16 +638,7 @@ static u8 bnx2x_emac_enable(struct link_params *params,
        REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
        REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
 
-       if (CHIP_REV_IS_EMUL(bp)) {
-               /* take the BigMac out of reset */
-               REG_WR(bp,
-                          GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
-                          (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
-
-               /* enable access for bmac registers */
-               REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
-       } else
-               REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
+       REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
 
        vars->mac_type = MAC_TYPE_EMAC;
        return 0;
@@ -731,8 +694,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
                val |= (1<<5);
        wb_data[0] = val;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL,
-                       wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
        udelay(30);
 
        /* Tx control */
@@ -768,12 +730,12 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
 
        REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
 
-       /**
-       * Set Time (based unit is 512 bit time) between automatic
-       * re-sending of PP packets amd enable automatic re-send of
-       * Per-Priroity Packet as long as pp_gen is asserted and
-       * pp_disable is low.
-       */
+       /*
+        * Set the time (base unit is 512 bit times) between automatic
+        * re-sending of PP packets and enable automatic re-send of
+        * Per-Priority Packets as long as pp_gen is asserted and
+        * pp_disable is low.
+        */
        val = 0x8000;
        if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
                val |= (1<<16); /* enable automatic re-send */
@@ -781,7 +743,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
        wb_data[0] = val;
        wb_data[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
-                       wb_data, 2);
+                   wb_data, 2);
 
        /* mac control */
        val = 0x3; /* Enable RX and TX */
@@ -795,8 +757,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
 
        wb_data[0] = val;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
-                       wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
 }
 
 static void bnx2x_update_pfc_brb(struct link_params *params,
@@ -825,17 +786,25 @@ static void bnx2x_update_pfc_brb(struct link_params *params,
                        full_xon_th =
                          PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
                }
-       /* The number of free blocks below which the pause signal to class 0
-          of MAC #n is asserted. n=0,1 */
+       /*
+        * The number of free blocks below which the pause signal to class 0
+        * of MAC #n is asserted. n=0,1
+        */
        REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th);
-       /* The number of free blocks above which the pause signal to class 0
-          of MAC #n is de-asserted. n=0,1 */
+       /*
+        * The number of free blocks above which the pause signal to class 0
+        * of MAC #n is de-asserted. n=0,1
+        */
        REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th);
-       /* The number of free blocks below which the full signal to class 0
-          of MAC #n is asserted. n=0,1 */
+       /*
+        * The number of free blocks below which the full signal to class 0
+        * of MAC #n is asserted. n=0,1
+        */
        REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th);
-       /* The number of free blocks above which the full signal to class 0
-          of MAC #n is de-asserted. n=0,1 */
+       /*
+        * The number of free blocks above which the full signal to class 0
+        * of MAC #n is de-asserted. n=0,1
+        */
        REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th);
 
        if (set_pfc && pfc_params) {
@@ -859,25 +828,25 @@ static void bnx2x_update_pfc_brb(struct link_params *params,
                        full_xon_th =
                          PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
                }
-               /**
+               /*
                 * The number of free blocks below which the pause signal to
                 * class 1 of MAC #n is asserted. n=0,1
-                **/
+                */
                REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th);
-               /**
+               /*
                 * The number of free blocks above which the pause signal to
                 * class 1 of MAC #n is de-asserted. n=0,1
-                **/
+                */
                REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th);
-               /**
+               /*
                 * The number of free blocks below which the full signal to
                 * class 1 of MAC #n is asserted. n=0,1
-                **/
+                */
                REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th);
-               /**
+               /*
                 * The number of free blocks above which the full signal to
                 * class 1 of MAC #n is de-asserted. n=0,1
-                **/
+                */
                REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th);
        }
 }
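
The four BRB thresholds programmed above implement an XOFF/XON hysteresis per traffic class: the pause (or full) signal is asserted when the free-block count drops below the XOFF threshold and released only once the count climbs back above the XON threshold, with the pauseable and non-pauseable variants merely selecting different constants. The sketch below illustrates only that decision logic; the threshold values and sample sequence are invented for the example and do not come from the driver.

#include <stdio.h>
#include <stdbool.h>

/* Assert below the XOFF (low-water) threshold, release above the XON
 * (high-water) threshold, keep the previous state in between. */
static bool pause_asserted(unsigned int free_blocks, unsigned int xoff_th,
			   unsigned int xon_th, bool currently_paused)
{
	if (free_blocks < xoff_th)
		return true;
	if (free_blocks > xon_th)
		return false;
	return currently_paused;
}

int main(void)
{
	bool paused = false;
	unsigned int samples[] = { 200, 120, 80, 120, 200 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		paused = pause_asserted(samples[i], 100, 150, paused);
		printf("free=%u -> pause %s\n", samples[i], paused ? "on" : "off");
	}
	return 0;
}
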
@@ -896,7 +865,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
                FEATURE_CONFIG_PFC_ENABLED;
        DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
 
-       /**
+       /*
         * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
         * MAC control frames (that are not pause packets)
         * will be forwarded to the XCM.
@@ -904,7 +873,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
        xcm_mask = REG_RD(bp,
                                port ? NIG_REG_LLH1_XCM_MASK :
                                NIG_REG_LLH0_XCM_MASK);
-       /**
+       /*
         * nig params will override non PFC params, since it's possible to
         * do transition from PFC to SAFC
         */
@@ -994,7 +963,7 @@ void bnx2x_update_pfc(struct link_params *params,
                      struct link_vars *vars,
                      struct bnx2x_nig_brb_pfc_port_params *pfc_params)
 {
-       /**
+       /*
         * The PFC and pause are orthogonal to one another, meaning when
         * PFC is enabled, the pause are disabled, and when PFC is
         * disabled, pause are set according to the pause result.
@@ -1035,7 +1004,7 @@ void bnx2x_update_pfc(struct link_params *params,
 
 static u8 bnx2x_bmac1_enable(struct link_params *params,
                             struct link_vars *vars,
-                         u8 is_lb)
+                            u8 is_lb)
 {
        struct bnx2x *bp = params->bp;
        u8 port = params->port;
@@ -1049,9 +1018,8 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
        /* XGXS control */
        wb_data[0] = 0x3c;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr +
-                     BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
-                     wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
+                   wb_data, 2);
 
        /* tx MAC SA */
        wb_data[0] = ((params->mac_addr[2] << 24) |
@@ -1060,8 +1028,7 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
                        params->mac_addr[5]);
        wb_data[1] = ((params->mac_addr[0] << 8) |
                        params->mac_addr[1]);
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
-                   wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
 
        /* mac control */
        val = 0x3;
@@ -1071,43 +1038,30 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
        }
        wb_data[0] = val;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
-                   wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
 
        /* set rx mtu */
        wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE,
-                       wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
 
        bnx2x_update_pfc_bmac1(params, vars);
 
        /* set tx mtu */
        wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE,
-                       wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
 
        /* set cnt max size */
        wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
-                   wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
 
        /* configure safc */
        wb_data[0] = 0x1000200;
        wb_data[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
                    wb_data, 2);
-       /* fix for emulation */
-       if (CHIP_REV_IS_EMUL(bp)) {
-               wb_data[0] = 0xf000;
-               wb_data[1] = 0;
-               REG_WR_DMAE(bp,
-                           bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
-                           wb_data, 2);
-       }
-
 
        return 0;
 }
@@ -1126,16 +1080,14 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
 
        wb_data[0] = 0;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
-                       wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
        udelay(30);
 
        /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
        wb_data[0] = 0x3c;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr +
-                       BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
-                       wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
+                   wb_data, 2);
 
        udelay(30);
 
@@ -1147,7 +1099,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
        wb_data[1] = ((params->mac_addr[0] << 8) |
                        params->mac_addr[1]);
        REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
-                       wb_data, 2);
+                   wb_data, 2);
 
        udelay(30);
 
@@ -1155,27 +1107,24 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
        wb_data[0] = 0x1000200;
        wb_data[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
-                       wb_data, 2);
+                   wb_data, 2);
        udelay(30);
 
        /* set rx mtu */
        wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE,
-                       wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
        udelay(30);
 
        /* set tx mtu */
        wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE,
-                       wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
        udelay(30);
        /* set cnt max size */
        wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
        wb_data[1] = 0;
-       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE,
-                       wb_data, 2);
+       REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
        udelay(30);
        bnx2x_update_pfc_bmac2(params, vars, is_lb);
 
@@ -1191,11 +1140,11 @@ static u8 bnx2x_bmac_enable(struct link_params *params,
        u32 val;
        /* reset and unreset the BigMac */
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
-                    (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+              (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
        msleep(1);
 
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
-                    (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+              (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
 
        /* enable access for bmac registers */
        REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
@@ -1230,15 +1179,14 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
        struct bnx2x *bp = params->bp;
 
        REG_WR(bp, params->shmem_base +
-                  offsetof(struct shmem_region,
-                           port_mb[params->port].link_status),
-                       link_status);
+              offsetof(struct shmem_region,
+                       port_mb[params->port].link_status), link_status);
 }
 
 static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
 {
        u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
-               NIG_REG_INGRESS_BMAC0_MEM;
+                       NIG_REG_INGRESS_BMAC0_MEM;
        u32 wb_data[2];
        u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
 
@@ -1250,12 +1198,12 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
                if (CHIP_IS_E2(bp)) {
                        /* Clear Rx Enable bit in BMAC_CONTROL register */
                        REG_RD_DMAE(bp, bmac_addr +
-                                       BIGMAC2_REGISTER_BMAC_CONTROL,
-                                       wb_data, 2);
+                                   BIGMAC2_REGISTER_BMAC_CONTROL,
+                                   wb_data, 2);
                        wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
                        REG_WR_DMAE(bp, bmac_addr +
-                                       BIGMAC2_REGISTER_BMAC_CONTROL,
-                                       wb_data, 2);
+                                   BIGMAC2_REGISTER_BMAC_CONTROL,
+                                   wb_data, 2);
                } else {
                        /* Clear Rx Enable bit in BMAC_CONTROL register */
                        REG_RD_DMAE(bp, bmac_addr +
@@ -1271,7 +1219,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
 }
 
 static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
-                        u32 line_speed)
+                          u32 line_speed)
 {
        struct bnx2x *bp = params->bp;
        u8 port = params->port;
@@ -1308,7 +1256,7 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
                /* update threshold */
                REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
                /* update init credit */
-               init_crd = 778;         /* (800-18-4) */
+               init_crd = 778;         /* (800-18-4) */
 
        } else {
                u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
@@ -1353,6 +1301,23 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
        return 0;
 }
 
+/*
+ * bnx2x_get_emac_base
+ *
+ * @param bp
+ * @param mdc_mdio_access
+ * @param port
+ *
+ * @return u32
+ *
+ * This function selects the MDC/MDIO access (through emac0 or
+ * emac1) depending on mdc_mdio_access, the port and whether the
+ * ports are swapped. Each phy has a default access mode, which
+ * can also be overridden by nvram configuration. This parameter,
+ * whether it is the default phy configuration or the nvram
+ * override, is passed here as mdc_mdio_access and selects the
+ * emac_base for the CL45 read/write operations.
+ */
 static u32 bnx2x_get_emac_base(struct bnx2x *bp,
                               u32 mdc_mdio_access, u8 port)
 {
@@ -1385,13 +1350,16 @@ static u32 bnx2x_get_emac_base(struct bnx2x *bp,
 
 }
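
Editor's note: the comment above describes how the MDC/MDIO access path (emac0 vs emac1) is chosen from the configured access mode, the port and the port-swap setting. The function body is not part of this hunk, so the sketch below only illustrates one plausible selection scheme under those inputs; the enum, the swap handling and the base addresses are placeholders, not the driver's actual logic.

#include <stdint.h>
#include <stdio.h>

enum mdio_access { MDIO_VIA_EMAC0, MDIO_VIA_EMAC1, MDIO_VIA_BOTH };

#define EMAC0_BASE 0x1000u	/* placeholder register base */
#define EMAC1_BASE 0x2000u	/* placeholder register base */

static uint32_t pick_emac_base(enum mdio_access mode, uint8_t port,
			       int ports_swapped)
{
	/* "both" means: follow the (possibly swapped) port number */
	if (mode == MDIO_VIA_BOTH)
		mode = ((port ^ (ports_swapped ? 1 : 0)) & 1) ?
			MDIO_VIA_EMAC1 : MDIO_VIA_EMAC0;

	return (mode == MDIO_VIA_EMAC1) ? EMAC1_BASE : EMAC0_BASE;
}

int main(void)
{
	printf("port 1, mode BOTH, swapped  -> 0x%x\n",
	       (unsigned int)pick_emac_base(MDIO_VIA_BOTH, 1, 1));
	printf("port 0, mode EMAC1, default -> 0x%x\n",
	       (unsigned int)pick_emac_base(MDIO_VIA_EMAC1, 0, 0));
	return 0;
}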
 
-u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
-                   u8 devad, u16 reg, u16 val)
+/******************************************************************/
+/*                     CL45 access functions                     */
+/******************************************************************/
+static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+                          u8 devad, u16 reg, u16 val)
 {
        u32 tmp, saved_mode;
        u8 i, rc = 0;
-
-       /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
+       /*
+        * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
         * (a value of 49==0x31) and make sure that the AUTO poll is off
         */
 
@@ -1414,8 +1382,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
        for (i = 0; i < 50; i++) {
                udelay(10);
 
-               tmp = REG_RD(bp, phy->mdio_ctrl +
-                                  EMAC_REG_EMAC_MDIO_COMM);
+               tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
@@ -1423,6 +1390,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
        }
        if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                DP(NETIF_MSG_LINK, "write phy register failed\n");
+               netdev_err(bp->dev, "MDC/MDIO access timeout\n");
                rc = -EFAULT;
        } else {
                /* data */
@@ -1435,7 +1403,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
                        udelay(10);
 
                        tmp = REG_RD(bp, phy->mdio_ctrl +
-                                        EMAC_REG_EMAC_MDIO_COMM);
+                                    EMAC_REG_EMAC_MDIO_COMM);
                        if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                                udelay(5);
                                break;
@@ -1443,6 +1411,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
                }
                if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                        DP(NETIF_MSG_LINK, "write phy register failed\n");
+                       netdev_err(bp->dev, "MDC/MDIO access timeout\n");
                        rc = -EFAULT;
                }
        }
@@ -1453,20 +1422,20 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
        return rc;
 }
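
Editor's note: bnx2x_cl45_write() above issues the MDIO address and data phases and, after each one, polls EMAC_REG_EMAC_MDIO_COMM up to 50 times for the START_BUSY bit to clear, reporting an MDC/MDIO timeout otherwise. Below is a self-contained model of that bounded busy-poll pattern with a simulated register; the poll count mirrors the visible code, the bit position and everything else are stand-ins.

#include <stdint.h>
#include <stdio.h>

#define MDIO_COMM_START_BUSY (1u << 29)	/* placeholder bit position */

/* Simulated hardware: the BUSY bit clears after a few polls. */
static uint32_t fake_comm_reg = MDIO_COMM_START_BUSY;
static int polls;

static uint32_t reg_read(void)
{
	if (++polls >= 3)
		fake_comm_reg &= ~MDIO_COMM_START_BUSY;
	return fake_comm_reg;
}

static int wait_mdio_idle(void)
{
	int i;

	for (i = 0; i < 50; i++) {
		/* a real driver would udelay(10) between polls */
		if (!(reg_read() & MDIO_COMM_START_BUSY))
			return 0;
	}
	return -1;	/* caller reports "MDC/MDIO access timeout" */
}

int main(void)
{
	printf("mdio idle: %s\n", wait_mdio_idle() == 0 ? "ok" : "timeout");
	return 0;
}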
 
-u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
-                  u8 devad, u16 reg, u16 *ret_val)
+static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+                         u8 devad, u16 reg, u16 *ret_val)
 {
        u32 val, saved_mode;
        u16 i;
        u8 rc = 0;
-
-       /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
+       /*
+        * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
         * (a value of 49==0x31) and make sure that the AUTO poll is off
         */
 
        saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL |
-                            EMAC_MDIO_MODE_CLOCK_CNT));
+                             EMAC_MDIO_MODE_CLOCK_CNT));
        val |= (EMAC_MDIO_MODE_CLAUSE_45 |
                (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
        REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
@@ -1490,7 +1459,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
        }
        if (val & EMAC_MDIO_COMM_START_BUSY) {
                DP(NETIF_MSG_LINK, "read phy register failed\n");
-
+               netdev_err(bp->dev, "MDC/MDIO access timeout\n");
                *ret_val = 0;
                rc = -EFAULT;
 
@@ -1505,7 +1474,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
                        udelay(10);
 
                        val = REG_RD(bp, phy->mdio_ctrl +
-                                         EMAC_REG_EMAC_MDIO_COMM);
+                                    EMAC_REG_EMAC_MDIO_COMM);
                        if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                                *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
                                break;
@@ -1513,7 +1482,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
                }
                if (val & EMAC_MDIO_COMM_START_BUSY) {
                        DP(NETIF_MSG_LINK, "read phy register failed\n");
-
+                       netdev_err(bp->dev, "MDC/MDIO access timeout\n");
                        *ret_val = 0;
                        rc = -EFAULT;
                }
@@ -1529,7 +1498,7 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
                  u8 devad, u16 reg, u16 *ret_val)
 {
        u8 phy_index;
-       /**
+       /*
         * Probe for the phy according to the given phy_addr, and execute
         * the read request on it
         */
@@ -1547,7 +1516,7 @@ u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
                   u8 devad, u16 reg, u16 val)
 {
        u8 phy_index;
-       /**
+       /*
         * Probe for the phy according to the given phy_addr, and execute
         * the write request on it
         */
@@ -1576,16 +1545,15 @@ static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
                aer_val = 0x3800 + offset - 1;
        else
                aer_val = 0x3800 + offset;
-       CL45_WR_OVER_CL22(bp, phy,
-                               MDIO_REG_BANK_AER_BLOCK,
-                               MDIO_AER_BLOCK_AER_REG, aer_val);
+       CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+                         MDIO_AER_BLOCK_AER_REG, aer_val);
 }
 static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp,
                                     struct bnx2x_phy *phy)
 {
-       CL45_WR_OVER_CL22(bp, phy,
-                               MDIO_REG_BANK_AER_BLOCK,
-                               MDIO_AER_BLOCK_AER_REG, 0x3800);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_AER_BLOCK,
+                         MDIO_AER_BLOCK_AER_REG, 0x3800);
 }
 
 /******************************************************************/
@@ -1621,9 +1589,8 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
 
        bnx2x_set_serdes_access(bp, port);
 
-       REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD +
-                    port*0x10,
-                    DEFAULT_PHY_DEV_ADDR);
+       REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10,
+              DEFAULT_PHY_DEV_ADDR);
 }
 
 static void bnx2x_xgxs_deassert(struct link_params *params)
@@ -1641,23 +1608,22 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
        udelay(500);
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
 
-       REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST +
-                    port*0x18, 0);
+       REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0);
        REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
-                    params->phy[INT_PHY].def_md_devad);
+              params->phy[INT_PHY].def_md_devad);
 }
 
 
 void bnx2x_link_status_update(struct link_params *params,
-                           struct link_vars   *vars)
+                             struct link_vars *vars)
 {
        struct bnx2x *bp = params->bp;
        u8 link_10g;
        u8 port = params->port;
 
        vars->link_status = REG_RD(bp, params->shmem_base +
-                                         offsetof(struct shmem_region,
-                                          port_mb[port].link_status));
+                                  offsetof(struct shmem_region,
+                                           port_mb[port].link_status));
 
        vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
 
@@ -1667,7 +1633,7 @@ void bnx2x_link_status_update(struct link_params *params,
                vars->phy_link_up = 1;
                vars->duplex = DUPLEX_FULL;
                switch (vars->link_status &
-                                       LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
+                       LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
                        case LINK_10THD:
                                vars->duplex = DUPLEX_HALF;
                                /* fall thru */
@@ -1779,20 +1745,20 @@ static void bnx2x_set_master_ln(struct link_params *params,
 {
        struct bnx2x *bp = params->bp;
        u16 new_master_ln, ser_lane;
-       ser_lane =  ((params->lane_config &
+       ser_lane = ((params->lane_config &
                     PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
-                    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+                   PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
 
        /* set the master_ln for AN */
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_XGXS_BLOCK2,
-                             MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
-                             &new_master_ln);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_XGXS_BLOCK2,
+                         MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+                         &new_master_ln);
 
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_XGXS_BLOCK2 ,
-                             MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
-                             (new_master_ln | ser_lane));
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_XGXS_BLOCK2,
+                         MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+                         (new_master_ln | ser_lane));
 }
 
 static u8 bnx2x_reset_unicore(struct link_params *params,
@@ -1802,17 +1768,16 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
        struct bnx2x *bp = params->bp;
        u16 mii_control;
        u16 i;
-
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_COMBO_IEEE0,
-                             MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_COMBO_IEEE0,
+                         MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
 
        /* reset the unicore */
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_COMBO_IEEE0,
-                             MDIO_COMBO_IEEE0_MII_CONTROL,
-                             (mii_control |
-                              MDIO_COMBO_IEEO_MII_CONTROL_RESET));
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_COMBO_IEEE0,
+                         MDIO_COMBO_IEEE0_MII_CONTROL,
+                         (mii_control |
+                          MDIO_COMBO_IEEO_MII_CONTROL_RESET));
        if (set_serdes)
                bnx2x_set_serdes_access(bp, params->port);
 
@@ -1821,10 +1786,10 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
                udelay(5);
 
                /* the reset erased the previous bank value */
-               CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_COMBO_IEEE0,
-                             MDIO_COMBO_IEEE0_MII_CONTROL,
-                             &mii_control);
+               CL22_RD_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_COMBO_IEEE0,
+                                 MDIO_COMBO_IEEE0_MII_CONTROL,
+                                 &mii_control);
 
                if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
                        udelay(5);
@@ -1832,6 +1797,9 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
                }
        }
 
+       netdev_err(bp->dev, "Warning: PHY was not initialized,"
+                  " Port %d\n", params->port);
        DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
        return -EINVAL;
 
@@ -1841,43 +1809,45 @@ static void bnx2x_set_swap_lanes(struct link_params *params,
                                 struct bnx2x_phy *phy)
 {
        struct bnx2x *bp = params->bp;
-       /* Each two bits represents a lane number:
-          No swap is 0123 => 0x1b no need to enable the swap */
+       /*
+        * Every two bits represent a lane number: no swap is 0123 => 0x1b,
+        * in which case there is no need to enable the swap.
+        */
        u16 ser_lane, rx_lane_swap, tx_lane_swap;
 
        ser_lane = ((params->lane_config &
-                        PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
-                       PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+                    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+                   PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
        rx_lane_swap = ((params->lane_config &
-                            PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
-                           PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
+                        PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
+                       PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
        tx_lane_swap = ((params->lane_config &
-                            PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
-                           PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
+                        PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
+                       PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
 
        if (rx_lane_swap != 0x1b) {
-               CL45_WR_OVER_CL22(bp, phy,
-                                   MDIO_REG_BANK_XGXS_BLOCK2,
-                                   MDIO_XGXS_BLOCK2_RX_LN_SWAP,
-                                   (rx_lane_swap |
-                                   MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
-                                   MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_XGXS_BLOCK2,
+                                 MDIO_XGXS_BLOCK2_RX_LN_SWAP,
+                                 (rx_lane_swap |
+                                  MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
+                                  MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
        } else {
-               CL45_WR_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_XGXS_BLOCK2,
-                                     MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_XGXS_BLOCK2,
+                                 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
        }
 
        if (tx_lane_swap != 0x1b) {
-               CL45_WR_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_XGXS_BLOCK2,
-                                     MDIO_XGXS_BLOCK2_TX_LN_SWAP,
-                                     (tx_lane_swap |
-                                      MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_XGXS_BLOCK2,
+                                 MDIO_XGXS_BLOCK2_TX_LN_SWAP,
+                                 (tx_lane_swap |
+                                  MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
        } else {
-               CL45_WR_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_XGXS_BLOCK2,
-                                     MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_XGXS_BLOCK2,
+                                 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
        }
 }
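
Editor's note: the lane-swap comment above says that every two bits of the swap field carry one lane number and that the identity arrangement 0123 encodes to 0x1b, in which case no swap needs to be enabled. The standalone snippet below just unpacks the four 2-bit fields and applies that 0x1b check; it makes no claim about which direction (rx/tx, logical/physical) each field maps.

#include <stdint.h>
#include <stdio.h>

static void show_lane_swap(uint16_t swap)
{
	int pair;

	/* print the four 2-bit lane fields, most significant pair first */
	for (pair = 3; pair >= 0; pair--)
		printf("%u", (swap >> (2 * pair)) & 0x3);

	printf(" -> %s\n", swap == 0x1b ?
	       "identity (0123), no swap needed" : "swap must be enabled");
}

int main(void)
{
	show_lane_swap(0x1b);	/* 0b00011011 */
	show_lane_swap(0xe4);	/* 0b11100100: lanes reversed */
	return 0;
}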
 
@@ -1886,66 +1856,66 @@ static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
 {
        struct bnx2x *bp = params->bp;
        u16 control2;
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_SERDES_DIGITAL,
-                             MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
-                             &control2);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_SERDES_DIGITAL,
+                         MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+                         &control2);
        if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
                control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
        else
                control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
        DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
                phy->speed_cap_mask, control2);
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_SERDES_DIGITAL,
-                             MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
-                             control2);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_SERDES_DIGITAL,
+                         MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+                         control2);
 
        if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
             (phy->speed_cap_mask &
                    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
                DP(NETIF_MSG_LINK, "XGXS\n");
 
-               CL45_WR_OVER_CL22(bp, phy,
-                               MDIO_REG_BANK_10G_PARALLEL_DETECT,
-                               MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
-                               MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
+               CL22_WR_OVER_CL45(bp, phy,
+                                MDIO_REG_BANK_10G_PARALLEL_DETECT,
+                                MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
+                                MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
 
-               CL45_RD_OVER_CL22(bp, phy,
-                               MDIO_REG_BANK_10G_PARALLEL_DETECT,
-                               MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
-                               &control2);
+               CL22_RD_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_10G_PARALLEL_DETECT,
+                                 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+                                 &control2);
 
 
                control2 |=
                    MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
 
-               CL45_WR_OVER_CL22(bp, phy,
-                               MDIO_REG_BANK_10G_PARALLEL_DETECT,
-                               MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
-                               control2);
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_10G_PARALLEL_DETECT,
+                                 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+                                 control2);
 
                /* Disable parallel detection of HiG */
-               CL45_WR_OVER_CL22(bp, phy,
-                               MDIO_REG_BANK_XGXS_BLOCK2,
-                               MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
-                               MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
-                               MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_XGXS_BLOCK2,
+                                 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
+                                 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
+                                 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
        }
 }
 
 static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
                              struct link_params *params,
-                           struct link_vars *vars,
-                           u8 enable_cl73)
+                             struct link_vars *vars,
+                             u8 enable_cl73)
 {
        struct bnx2x *bp = params->bp;
        u16 reg_val;
 
        /* CL37 Autoneg */
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_COMBO_IEEE0,
-                             MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_COMBO_IEEE0,
+                         MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
 
        /* CL37 Autoneg Enabled */
        if (vars->line_speed == SPEED_AUTO_NEG)
@@ -1954,15 +1924,15 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
                reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
                             MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
 
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_COMBO_IEEE0,
-                             MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_COMBO_IEEE0,
+                         MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
 
        /* Enable/Disable Autodetection */
 
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_SERDES_DIGITAL,
-                             MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_SERDES_DIGITAL,
+                         MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
        reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
                    MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
        reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
@@ -1971,14 +1941,14 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
        else
                reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
 
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_SERDES_DIGITAL,
-                             MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_SERDES_DIGITAL,
+                         MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
 
        /* Enable TetonII and BAM autoneg */
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_BAM_NEXT_PAGE,
-                             MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_BAM_NEXT_PAGE,
+                         MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
                          &reg_val);
        if (vars->line_speed == SPEED_AUTO_NEG) {
                /* Enable BAM aneg Mode and TetonII aneg Mode */
@@ -1989,20 +1959,20 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
                reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
                             MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
        }
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_BAM_NEXT_PAGE,
-                             MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
-                             reg_val);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_BAM_NEXT_PAGE,
+                         MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+                         reg_val);
 
        if (enable_cl73) {
                /* Enable Cl73 FSM status bits */
-               CL45_WR_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_CL73_USERB0,
-                                   MDIO_CL73_USERB0_CL73_UCTRL,
-                                     0xe);
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_CL73_USERB0,
+                                 MDIO_CL73_USERB0_CL73_UCTRL,
+                                 0xe);
 
                /* Enable BAM Station Manager*/
-               CL45_WR_OVER_CL22(bp, phy,
+               CL22_WR_OVER_CL45(bp, phy,
                        MDIO_REG_BANK_CL73_USERB0,
                        MDIO_CL73_USERB0_CL73_BAM_CTRL1,
                        MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
@@ -2010,10 +1980,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
                        MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
 
                /* Advertise CL73 link speeds */
-               CL45_RD_OVER_CL22(bp, phy,
-                                             MDIO_REG_BANK_CL73_IEEEB1,
-                                             MDIO_CL73_IEEEB1_AN_ADV2,
-                                             &reg_val);
+               CL22_RD_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_CL73_IEEEB1,
+                                 MDIO_CL73_IEEEB1_AN_ADV2,
+                                 &reg_val);
                if (phy->speed_cap_mask &
                    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
                        reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
@@ -2021,10 +1991,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
                    PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
                        reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
 
-               CL45_WR_OVER_CL22(bp, phy,
-                                             MDIO_REG_BANK_CL73_IEEEB1,
-                                             MDIO_CL73_IEEEB1_AN_ADV2,
-                                     reg_val);
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_CL73_IEEEB1,
+                                 MDIO_CL73_IEEEB1_AN_ADV2,
+                                 reg_val);
 
                /* CL73 Autoneg Enabled */
                reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
@@ -2032,37 +2002,39 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
        } else /* CL73 Autoneg Disabled */
                reg_val = 0;
 
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_CL73_IEEEB0,
-                             MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_CL73_IEEEB0,
+                         MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
 }
 
 /* program SerDes, forced speed */
 static void bnx2x_program_serdes(struct bnx2x_phy *phy,
                                 struct link_params *params,
-                              struct link_vars *vars)
+                                struct link_vars *vars)
 {
        struct bnx2x *bp = params->bp;
        u16 reg_val;
 
        /* program duplex, disable autoneg and sgmii*/
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_COMBO_IEEE0,
-                             MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_COMBO_IEEE0,
+                         MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
        reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
                     MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
                     MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
        if (phy->req_duplex == DUPLEX_FULL)
                reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_COMBO_IEEE0,
-                             MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
-
-       /* program speed
-          - needed only if the speed is greater than 1G (2.5G or 10G) */
-       CL45_RD_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_SERDES_DIGITAL,
-                                     MDIO_SERDES_DIGITAL_MISC1, &reg_val);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_COMBO_IEEE0,
+                         MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+
+       /*
+        * program speed
+        *  - needed only if the speed is greater than 1G (2.5G or 10G)
+        */
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_SERDES_DIGITAL,
+                         MDIO_SERDES_DIGITAL_MISC1, &reg_val);
        /* clearing the speed value before setting the right speed */
        DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
 
@@ -2083,9 +2055,9 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
                                MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
        }
 
-       CL45_WR_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_SERDES_DIGITAL,
-                                     MDIO_SERDES_DIGITAL_MISC1, reg_val);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_SERDES_DIGITAL,
+                         MDIO_SERDES_DIGITAL_MISC1, reg_val);
 
 }
 
@@ -2102,13 +2074,13 @@ static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy,
                val |= MDIO_OVER_1G_UP1_2_5G;
        if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
                val |= MDIO_OVER_1G_UP1_10G;
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_OVER_1G,
-                             MDIO_OVER_1G_UP1, val);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_OVER_1G,
+                         MDIO_OVER_1G_UP1, val);
 
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_OVER_1G,
-                             MDIO_OVER_1G_UP3, 0x400);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_OVER_1G,
+                         MDIO_OVER_1G_UP3, 0x400);
 }
 
 static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
@@ -2116,22 +2088,21 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
 {
        struct bnx2x *bp = params->bp;
        *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
-       /* resolve pause mode and advertisement
-        * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
+       /*
+        * Resolve pause mode and advertisement.
+        * Please refer to Table 28B-3 of the 802.3ab-1999 spec
+        */
 
        switch (phy->req_flow_ctrl) {
        case BNX2X_FLOW_CTRL_AUTO:
-               if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) {
-                       *ieee_fc |=
-                            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
-               } else {
+               if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
+                       *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+               else
                        *ieee_fc |=
-                      MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
-               }
+                       MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
                break;
        case BNX2X_FLOW_CTRL_TX:
-               *ieee_fc |=
-                      MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+               *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
                break;
 
        case BNX2X_FLOW_CTRL_RX:
@@ -2149,23 +2120,23 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
 
 static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy,
                                             struct link_params *params,
-                                          u16 ieee_fc)
+                                            u16 ieee_fc)
 {
        struct bnx2x *bp = params->bp;
        u16 val;
        /* for AN, we are always publishing full duplex */
 
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_COMBO_IEEE0,
-                             MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_CL73_IEEEB1,
-                             MDIO_CL73_IEEEB1_AN_ADV1, &val);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_COMBO_IEEE0,
+                         MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_CL73_IEEEB1,
+                         MDIO_CL73_IEEEB1_AN_ADV1, &val);
        val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
        val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_CL73_IEEEB1,
-                             MDIO_CL73_IEEEB1_AN_ADV1, val);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_CL73_IEEEB1,
+                         MDIO_CL73_IEEEB1_AN_ADV1, val);
 }
 
 static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
@@ -2179,67 +2150,67 @@ static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
        /* Enable and restart BAM/CL37 aneg */
 
        if (enable_cl73) {
-               CL45_RD_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_CL73_IEEEB0,
-                                     MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
-                                     &mii_control);
-
-               CL45_WR_OVER_CL22(bp, phy,
-                               MDIO_REG_BANK_CL73_IEEEB0,
-                               MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
-                               (mii_control |
-                               MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
-                               MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
+               CL22_RD_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_CL73_IEEEB0,
+                                 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+                                 &mii_control);
+
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_CL73_IEEEB0,
+                                 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+                                 (mii_control |
+                                 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
+                                 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
        } else {
 
-               CL45_RD_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_COMBO_IEEE0,
-                                     MDIO_COMBO_IEEE0_MII_CONTROL,
-                                     &mii_control);
+               CL22_RD_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_COMBO_IEEE0,
+                                 MDIO_COMBO_IEEE0_MII_CONTROL,
+                                 &mii_control);
                DP(NETIF_MSG_LINK,
                         "bnx2x_restart_autoneg mii_control before = 0x%x\n",
                         mii_control);
-               CL45_WR_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_COMBO_IEEE0,
-                                     MDIO_COMBO_IEEE0_MII_CONTROL,
-                                     (mii_control |
-                                      MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
-                                      MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_COMBO_IEEE0,
+                                 MDIO_COMBO_IEEE0_MII_CONTROL,
+                                 (mii_control |
+                                  MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+                                  MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
        }
 }
 
 static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
                                           struct link_params *params,
-                                        struct link_vars *vars)
+                                          struct link_vars *vars)
 {
        struct bnx2x *bp = params->bp;
        u16 control1;
 
        /* in SGMII mode, the unicore is always slave */
 
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_SERDES_DIGITAL,
-                             MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
-                     &control1);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_SERDES_DIGITAL,
+                         MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+                         &control1);
        control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
        /* set sgmii mode (and not fiber) */
        control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
                      MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
                      MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_SERDES_DIGITAL,
-                             MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
-                             control1);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_SERDES_DIGITAL,
+                         MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+                         control1);
 
        /* if forced speed */
        if (!(vars->line_speed == SPEED_AUTO_NEG)) {
                /* set speed, disable autoneg */
                u16 mii_control;
 
-               CL45_RD_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_COMBO_IEEE0,
-                                     MDIO_COMBO_IEEE0_MII_CONTROL,
-                                     &mii_control);
+               CL22_RD_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_COMBO_IEEE0,
+                                 MDIO_COMBO_IEEE0_MII_CONTROL,
+                                 &mii_control);
                mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
                                 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
                                 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
@@ -2267,10 +2238,10 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
                if (phy->req_duplex == DUPLEX_FULL)
                        mii_control |=
                                MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
-               CL45_WR_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_COMBO_IEEE0,
-                                     MDIO_COMBO_IEEE0_MII_CONTROL,
-                                     mii_control);
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_COMBO_IEEE0,
+                                 MDIO_COMBO_IEEE0_MII_CONTROL,
+                                 mii_control);
 
        } else { /* AN mode */
                /* enable and restart AN */
@@ -2285,19 +2256,19 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
 
 static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
 {                                              /*  LD      LP   */
-       switch (pause_result) {                 /* ASYM P ASYM P */
-       case 0xb:                               /*   1  0   1  1 */
+       switch (pause_result) {                 /* ASYM P ASYM P */
+       case 0xb:                               /*   1  0   1  1 */
                vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
                break;
 
-       case 0xe:                               /*   1  1   1  0 */
+       case 0xe:                               /*   1  1   1  0 */
                vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
                break;
 
-       case 0x5:                               /*   0  1   0  1 */
-       case 0x7:                               /*   0  1   1  1 */
-       case 0xd:                               /*   1  1   0  1 */
-       case 0xf:                               /*   1  1   1  1 */
+       case 0x5:                               /*   0  1   0  1 */
+       case 0x7:                               /*   0  1   1  1 */
+       case 0xd:                               /*   1  1   0  1 */
+       case 0xf:                               /*   1  1   1  1 */
                vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
                break;
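
Editor's note: the switch above resolves flow control from a 4-bit pause_result whose upper two bits hold the local device's ASYM/PAUSE advertisement and whose lower two bits hold the link partner's, as the LD/LP column comment indicates. The snippet below reproduces that mapping as a standalone table so the cases can be checked by hand; only the nibble encoding visible in the hunk is assumed.

#include <stdio.h>

enum flow_ctrl { FC_NONE, FC_TX, FC_RX, FC_BOTH };

static enum flow_ctrl resolve_pause(unsigned int pause_result)
{
	switch (pause_result & 0xf) {	/* LD: ASYM,PAUSE  LP: ASYM,PAUSE */
	case 0xb:			/*   1    0         1    1  */
		return FC_TX;
	case 0xe:			/*   1    1         1    0  */
		return FC_RX;
	case 0x5:
	case 0x7:
	case 0xd:
	case 0xf:			/* both sides advertise PAUSE */
		return FC_BOTH;
	default:
		return FC_NONE;
	}
}

int main(void)
{
	printf("0xb -> %d (expect FC_TX=%d)\n", resolve_pause(0xb), FC_TX);
	printf("0xf -> %d (expect FC_BOTH=%d)\n", resolve_pause(0xf), FC_BOTH);
	return 0;
}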
 
@@ -2317,24 +2288,24 @@ static u8 bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
        u16 pd_10g, status2_1000x;
        if (phy->req_line_speed != SPEED_AUTO_NEG)
                return 0;
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_SERDES_DIGITAL,
-                             MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
-                             &status2_1000x);
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_SERDES_DIGITAL,
-                             MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
-                             &status2_1000x);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_SERDES_DIGITAL,
+                         MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+                         &status2_1000x);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_SERDES_DIGITAL,
+                         MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+                         &status2_1000x);
        if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
                DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
                         params->port);
                return 1;
        }
 
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_10G_PARALLEL_DETECT,
-                             MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
-                             &pd_10g);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_10G_PARALLEL_DETECT,
+                         MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
+                         &pd_10g);
 
        if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
                DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
@@ -2373,14 +2344,14 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
                    (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
                     MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
 
-                       CL45_RD_OVER_CL22(bp, phy,
-                                             MDIO_REG_BANK_CL73_IEEEB1,
-                                             MDIO_CL73_IEEEB1_AN_ADV1,
-                                             &ld_pause);
-                       CL45_RD_OVER_CL22(bp, phy,
-                                            MDIO_REG_BANK_CL73_IEEEB1,
-                                            MDIO_CL73_IEEEB1_AN_LP_ADV1,
-                                            &lp_pause);
+                       CL22_RD_OVER_CL45(bp, phy,
+                                         MDIO_REG_BANK_CL73_IEEEB1,
+                                         MDIO_CL73_IEEEB1_AN_ADV1,
+                                         &ld_pause);
+                       CL22_RD_OVER_CL45(bp, phy,
+                                         MDIO_REG_BANK_CL73_IEEEB1,
+                                         MDIO_CL73_IEEEB1_AN_LP_ADV1,
+                                         &lp_pause);
                        pause_result = (ld_pause &
                                        MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
                                        >> 8;
@@ -2390,18 +2361,18 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
                        DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
                                 pause_result);
                } else {
-                       CL45_RD_OVER_CL22(bp, phy,
-                                             MDIO_REG_BANK_COMBO_IEEE0,
-                                             MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
-                                             &ld_pause);
-                       CL45_RD_OVER_CL22(bp, phy,
-                              MDIO_REG_BANK_COMBO_IEEE0,
-                              MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
-                              &lp_pause);
+                       CL22_RD_OVER_CL45(bp, phy,
+                                         MDIO_REG_BANK_COMBO_IEEE0,
+                                         MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
+                                         &ld_pause);
+                       CL22_RD_OVER_CL45(bp, phy,
+                               MDIO_REG_BANK_COMBO_IEEE0,
+                               MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
+                               &lp_pause);
                        pause_result = (ld_pause &
                                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
                        pause_result |= (lp_pause &
-                                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
+                               MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
                        DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
                                 pause_result);
                }
@@ -2417,25 +2388,25 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
        u16 rx_status, ustat_val, cl37_fsm_recieved;
        DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
        /* Step 1: Make sure signal is detected */
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_RX0,
-                             MDIO_RX0_RX_STATUS,
-                             &rx_status);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_RX0,
+                         MDIO_RX0_RX_STATUS,
+                         &rx_status);
        if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
            (MDIO_RX0_RX_STATUS_SIGDET)) {
                DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73."
                             "rx_status(0x80b0) = 0x%x\n", rx_status);
-               CL45_WR_OVER_CL22(bp, phy,
-                                     MDIO_REG_BANK_CL73_IEEEB0,
-                                     MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
-                                     MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
+               CL22_WR_OVER_CL45(bp, phy,
+                                 MDIO_REG_BANK_CL73_IEEEB0,
+                                 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+                                 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
                return;
        }
        /* Step 2: Check CL73 state machine */
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_CL73_USERB0,
-                             MDIO_CL73_USERB0_CL73_USTAT1,
-                             &ustat_val);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_CL73_USERB0,
+                         MDIO_CL73_USERB0_CL73_USTAT1,
+                         &ustat_val);
        if ((ustat_val &
             (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
              MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
@@ -2445,12 +2416,14 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
                             "ustat_val(0x8371) = 0x%x\n", ustat_val);
                return;
        }
-       /* Step 3: Check CL37 Message Pages received to indicate LP
-       supports only CL37 */
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_REMOTE_PHY,
-                             MDIO_REMOTE_PHY_MISC_RX_STATUS,
-                             &cl37_fsm_recieved);
+       /*
+        * Step 3: Check CL37 Message Pages received to indicate LP
+        * supports only CL37
+        */
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_REMOTE_PHY,
+                         MDIO_REMOTE_PHY_MISC_RX_STATUS,
+                         &cl37_fsm_recieved);
        if ((cl37_fsm_recieved &
             (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
             MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
@@ -2461,14 +2434,18 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
                         cl37_fsm_recieved);
                return;
        }
-       /* The combined cl37/cl73 fsm state information indicating that we are
-       connected to a device which does not support cl73, but does support
-       cl37 BAM. In this case we disable cl73 and restart cl37 auto-neg */
+       /*
+        * The combined cl37/cl73 fsm state information indicates that we
+        * are connected to a device which does not support cl73, but does
+        * support cl37 BAM. In this case we disable cl73 and restart
+        * cl37 auto-neg.
+        */
+
        /* Disable CL73 */
-       CL45_WR_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_CL73_IEEEB0,
-                             MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
-                             0);
+       CL22_WR_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_CL73_IEEEB0,
+                         MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+                         0);
        /* Restart CL37 autoneg */
        bnx2x_restart_autoneg(phy, params, 0);
        DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
@@ -2493,14 +2470,14 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
                                     struct link_vars *vars)
 {
        struct bnx2x *bp = params->bp;
-       u16 new_line_speed , gp_status;
+       u16 new_line_speed, gp_status;
        u8 rc = 0;
 
        /* Read gp_status */
-       CL45_RD_OVER_CL22(bp, phy,
-                               MDIO_REG_BANK_GP_STATUS,
-                               MDIO_GP_STATUS_TOP_AN_STATUS1,
-                               &gp_status);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_GP_STATUS,
+                         MDIO_GP_STATUS_TOP_AN_STATUS1,
+                         &gp_status);
 
        if (phy->req_line_speed == SPEED_AUTO_NEG)
                vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
@@ -2637,9 +2614,9 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
        u16 bank;
 
        /* read precomp */
-       CL45_RD_OVER_CL22(bp, phy,
-                             MDIO_REG_BANK_OVER_1G,
-                             MDIO_OVER_1G_LP_UP2, &lp_up2);
+       CL22_RD_OVER_CL45(bp, phy,
+                         MDIO_REG_BANK_OVER_1G,
+                         MDIO_OVER_1G_LP_UP2, &lp_up2);
 
        /* bits [10:7] at lp_up2, positioned at [15:12] */
        lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
@@ -2651,18 +2628,18 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
 
        for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
              bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
-               CL45_RD_OVER_CL22(bp, phy,
-                                     bank,
-                                     MDIO_TX0_TX_DRIVER, &tx_driver);
+               CL22_RD_OVER_CL45(bp, phy,
+                                 bank,
+                                 MDIO_TX0_TX_DRIVER, &tx_driver);
 
                /* replace tx_driver bits [15:12] */
                if (lp_up2 !=
                    (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
                        tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
                        tx_driver |= lp_up2;
-                       CL45_WR_OVER_CL22(bp, phy,
-                                             bank,
-                                             MDIO_TX0_TX_DRIVER, tx_driver);
+                       CL22_WR_OVER_CL45(bp, phy,
+                                         bank,
+                                         MDIO_TX0_TX_DRIVER, tx_driver);
                }
        }
 }
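
Editor's note: bnx2x_set_gmii_tx_driver() above takes the link partner's preemphasis from bits [10:7] of LP_UP2, repositions it into bits [15:12], and writes it into each TX driver bank only when the field differs. The standalone sketch below shows that mask/shift/merge step; the mask and shift constants are derived from the bit ranges stated in the comment, not copied from the driver's headers.

#include <stdint.h>
#include <stdio.h>

#define LP_UP2_PREEMPHASIS_MASK    0x0780u	/* bits [10:7], per the comment */
#define LP_UP2_PREEMPHASIS_SHIFT   7
#define TX_DRIVER_PREEMPHASIS_MASK 0xf000u	/* bits [15:12] */

static uint16_t merge_preemphasis(uint16_t lp_up2, uint16_t tx_driver)
{
	uint16_t lp = ((lp_up2 & LP_UP2_PREEMPHASIS_MASK)
		       >> LP_UP2_PREEMPHASIS_SHIFT) << 12;

	/* replace tx_driver bits [15:12] only if they differ */
	if (lp != (tx_driver & TX_DRIVER_PREEMPHASIS_MASK)) {
		tx_driver &= ~TX_DRIVER_PREEMPHASIS_MASK;
		tx_driver |= lp;
	}
	return tx_driver;
}

int main(void)
{
	/* lp_up2 bits [10:7] = 0x9 -> expect tx_driver top nibble = 0x9 */
	printf("0x%04x\n", merge_preemphasis(0x9 << 7, 0x1234));
	return 0;
}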
@@ -2676,10 +2653,10 @@ static u8 bnx2x_emac_program(struct link_params *params,
 
        DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
        bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
-                    EMAC_REG_EMAC_MODE,
-                    (EMAC_MODE_25G_MODE |
-                    EMAC_MODE_PORT_MII_10M |
-                    EMAC_MODE_HALF_DUPLEX));
+                      EMAC_REG_EMAC_MODE,
+                      (EMAC_MODE_25G_MODE |
+                       EMAC_MODE_PORT_MII_10M |
+                       EMAC_MODE_HALF_DUPLEX));
        switch (vars->line_speed) {
        case SPEED_10:
                mode |= EMAC_MODE_PORT_MII_10M;
@@ -2707,8 +2684,8 @@ static u8 bnx2x_emac_program(struct link_params *params,
        if (vars->duplex == DUPLEX_HALF)
                mode |= EMAC_MODE_HALF_DUPLEX;
        bnx2x_bits_en(bp,
-                   GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
-                   mode);
+                     GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
+                     mode);
 
        bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
        return 0;
@@ -2723,7 +2700,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
 
        for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
              bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
-                       CL45_WR_OVER_CL22(bp, phy,
+                       CL22_WR_OVER_CL45(bp, phy,
                                          bank,
                                          MDIO_RX0_RX_EQ_BOOST,
                                          phy->rx_preemphasis[i]);
@@ -2731,7 +2708,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
 
        for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
                      bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
-                       CL45_WR_OVER_CL22(bp, phy,
+                       CL22_WR_OVER_CL45(bp, phy,
                                          bank,
                                          MDIO_TX0_TX_DRIVER,
                                          phy->tx_preemphasis[i]);
@@ -2754,7 +2731,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
                /* forced speed requested? */
                if (vars->line_speed != SPEED_AUTO_NEG ||
                    (SINGLE_MEDIA_DIRECT(params) &&
-                         params->loopback_mode == LOOPBACK_EXT)) {
+                    params->loopback_mode == LOOPBACK_EXT)) {
                        DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
 
                        /* disable autoneg */
@@ -2771,7 +2748,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
 
                        /* program duplex & pause advertisement (for aneg) */
                        bnx2x_set_ieee_aneg_advertisment(phy, params,
-                                                      vars->ieee_fc);
+                                                        vars->ieee_fc);
 
                        /* enable autoneg */
                        bnx2x_set_autoneg(phy, params, vars, enable_cl73);
@@ -2842,7 +2819,8 @@ static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
 }
 
 static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
-                                    struct bnx2x_phy *phy)
+                                    struct bnx2x_phy *phy,
+                                    struct link_params *params)
 {
        u16 cnt, ctrl;
        /* Wait for soft reset to get cleared upto 1 sec */
@@ -2853,6 +2831,11 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
                        break;
                msleep(1);
        }
+
+       if (cnt == 1000)
+               netdev_err(bp->dev,  "Warning: PHY was not initialized,"
+                                     " Port %d\n",
+                        params->port);
        DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
        return cnt;
 }
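
Editor's note: bnx2x_wait_reset_complete() above polls the PHY's MII control register once per millisecond for up to one second and, with this patch, warns via netdev_err() when the soft-reset bit never clears. The snippet below is a self-contained model of that bounded wait with simulated hardware; msleep() and the real register access are intentionally left out.

#include <stdio.h>

#define MII_CONTROL_RESET (1u << 15)	/* self-clearing reset bit */

static unsigned int fake_ctrl = MII_CONTROL_RESET;
static unsigned int elapsed_ms;

static unsigned int read_ctrl(void)
{
	if (++elapsed_ms >= 20)		/* reset clears after ~20 "ms" */
		fake_ctrl &= ~MII_CONTROL_RESET;
	return fake_ctrl;
}

static unsigned int wait_reset_complete(void)
{
	unsigned int cnt, ctrl = 0;

	for (cnt = 0; cnt < 1000; cnt++) {
		ctrl = read_ctrl();
		if (!(ctrl & MII_CONTROL_RESET))
			break;
		/* msleep(1) in the real code */
	}
	if (cnt == 1000)
		fprintf(stderr, "Warning: PHY was not initialized\n");
	return cnt;
}

int main(void)
{
	printf("reset cleared after %u polls\n", wait_reset_complete());
	return 0;
}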
@@ -2863,9 +2846,7 @@ static void bnx2x_link_int_enable(struct link_params *params)
        u32 mask;
        struct bnx2x *bp = params->bp;
 
-       /* setting the status to report on link up
-          for either XGXS or SerDes */
-
+       /* Setting the status to report on link up for either XGXS or SerDes */
        if (params->switch_cfg == SWITCH_CFG_10G) {
                mask = (NIG_MASK_XGXS0_LINK10G |
                        NIG_MASK_XGXS0_LINK_STATUS);
@@ -2908,7 +2889,7 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
 {
        u32 latch_status = 0;
 
-       /**
+       /*
         * Disable the MI INT ( external phy int ) by writing 1 to the
         * status register. Link down indication is high-active-signal,
         * so in this case we need to write the status to clear the XOR
@@ -2933,27 +2914,30 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
 
                /* For all latched-signal=up : Re-Arm Latch signals */
                REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
-                            (latch_status & 0xfffe) | (latch_status & 1));
+                      (latch_status & 0xfffe) | (latch_status & 1));
        }
        /* For all latched-signal=up,Write original_signal to status */
 }
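
As an aside, the value written back above, (latch_status & 0xfffe) | (latch_status & 1), is arithmetically just latch_status. A hypothetical helper (not part of the driver) showing the write-back re-arm idiom, assuming the latch register re-arms on writing the latched bits back, which is how the code above reads:

static void sketch_rearm_latched_signals(struct bnx2x *bp, u8 port,
                                         u32 latch_status)
{
        /* Writing the latched value back to the latch-status register
         * clears and re-arms the latched bits; zero bits are untouched.
         */
        if (latch_status & 1)   /* bit 0: the MI interrupt latch, per the code above */
                REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8, latch_status);
}
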
 
 static void bnx2x_link_int_ack(struct link_params *params,
-                            struct link_vars *vars, u8 is_10g)
+                              struct link_vars *vars, u8 is_10g)
 {
        struct bnx2x *bp = params->bp;
        u8 port = params->port;
 
-       /* first reset all status
-        * we assume only one line will be change at a time */
+       /*
+        * First reset all status; we assume only one line will change
+        * at a time
+        */
        bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
-                    (NIG_STATUS_XGXS0_LINK10G |
-                     NIG_STATUS_XGXS0_LINK_STATUS |
-                     NIG_STATUS_SERDES0_LINK_STATUS));
+                      (NIG_STATUS_XGXS0_LINK10G |
+                       NIG_STATUS_XGXS0_LINK_STATUS |
+                       NIG_STATUS_SERDES0_LINK_STATUS));
        if (vars->phy_link_up) {
                if (is_10g) {
-                       /* Disable the 10G link interrupt
-                        * by writing 1 to the status register
+                       /*
+                        * Disable the 10G link interrupt by writing 1 to the
+                        * status register
                         */
                        DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
                        bnx2x_bits_en(bp,
@@ -2961,9 +2945,9 @@ static void bnx2x_link_int_ack(struct link_params *params,
                                      NIG_STATUS_XGXS0_LINK10G);
 
                } else if (params->switch_cfg == SWITCH_CFG_10G) {
-                       /* Disable the link interrupt
-                        * by writing 1 to the relevant lane
-                        * in the status register
+                       /*
+                        * Disable the link interrupt by writing 1 to the
+                        * relevant lane in the status register
                         */
                        u32 ser_lane = ((params->lane_config &
                                    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
@@ -2978,8 +2962,9 @@ static void bnx2x_link_int_ack(struct link_params *params,
 
                } else { /* SerDes */
                        DP(NETIF_MSG_LINK, "SerDes phy link up\n");
-                       /* Disable the link interrupt
-                        * by writing 1 to the status register
+                       /*
+                        * Disable the link interrupt by writing 1 to the status
+                        * register
                         */
                        bnx2x_bits_en(bp,
                                      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
@@ -3059,8 +3044,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
        }
        if ((params->num_phys == MAX_PHYS) &&
            (params->phy[EXT_PHY2].ver_addr != 0)) {
-               spirom_ver = REG_RD(bp,
-                                         params->phy[EXT_PHY2].ver_addr);
+               spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr);
                if (params->phy[EXT_PHY2].format_fw_ver) {
                        *ver_p = '/';
                        ver_p++;
@@ -3089,29 +3073,27 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
 
                /* change the uni_phy_addr in the nig */
                md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
-                                         port*0x18));
+                                      port*0x18));
 
                REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
 
                bnx2x_cl45_write(bp, phy,
-                              5,
-                              (MDIO_REG_BANK_AER_BLOCK +
-                               (MDIO_AER_BLOCK_AER_REG & 0xf)),
-                              0x2800);
+                                5,
+                                (MDIO_REG_BANK_AER_BLOCK +
+                                 (MDIO_AER_BLOCK_AER_REG & 0xf)),
+                                0x2800);
 
                bnx2x_cl45_write(bp, phy,
-                              5,
-                              (MDIO_REG_BANK_CL73_IEEEB0 +
-                               (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
-                              0x6041);
+                                5,
+                                (MDIO_REG_BANK_CL73_IEEEB0 +
+                                 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
+                                0x6041);
                msleep(200);
                /* set aer mmd back */
                bnx2x_set_aer_mmd_xgxs(params, phy);
 
                /* and md_devad */
-               REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
-                           md_devad);
-
+               REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
        } else {
                u16 mii_ctrl;
                DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
@@ -3152,26 +3134,26 @@ u8 bnx2x_set_led(struct link_params *params,
        case LED_MODE_OFF:
                REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
                REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
-                          SHARED_HW_CFG_LED_MAC1);
+                      SHARED_HW_CFG_LED_MAC1);
 
                tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
                EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
                break;
 
        case LED_MODE_OPER:
-               /**
+               /*
                 * For all other phys, OPER mode is the same as ON, so in case
                 * link is down, do nothing
-                **/
+                */
                if (!vars->link_up)
                        break;
        case LED_MODE_ON:
                if (params->phy[EXT_PHY1].type ==
                    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 &&
                    CHIP_IS_E2(bp) && params->num_phys == 2) {
-                       /**
-                       * This is a work-around for E2+8727 Configurations
-                       */
+                       /*
+                        * This is a work-around for E2+8727 Configurations
+                        */
                        if (mode == LED_MODE_ON ||
                                speed == SPEED_10000){
                                REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
@@ -3183,41 +3165,40 @@ u8 bnx2x_set_led(struct link_params *params,
                                return rc;
                        }
                } else if (SINGLE_MEDIA_DIRECT(params)) {
-                       /**
-                       * This is a work-around for HW issue found when link
-                       * is up in CL73
-                       */
+                       /*
+                        * This is a work-around for HW issue found when link
+                        * is up in CL73
+                        */
                        REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
                        REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
                } else {
-                       REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
-                                  hw_led_mode);
+                       REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
                }
 
-               REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
-                          port*4, 0);
+               REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
                /* Set blinking rate to ~15.9Hz */
                REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
-                          LED_BLINK_RATE_VAL);
+                      LED_BLINK_RATE_VAL);
                REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
-                          port*4, 1);
+                      port*4, 1);
                tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
-               EMAC_WR(bp, EMAC_REG_EMAC_LED,
-                           (tmp & (~EMAC_LED_OVERRIDE)));
+               EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE)));
 
                if (CHIP_IS_E1(bp) &&
                    ((speed == SPEED_2500) ||
                     (speed == SPEED_1000) ||
                     (speed == SPEED_100) ||
                     (speed == SPEED_10))) {
-                       /* On Everest 1 Ax chip versions for speeds less than
-                       10G LED scheme is different */
+                       /*
+                        * On Everest 1 Ax chip versions, for speeds less than
+                        * 10G the LED scheme is different
+                        */
                        REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
-                                  + port*4, 1);
+                              + port*4, 1);
                        REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
-                                  port*4, 0);
+                              port*4, 0);
                        REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
-                                  port*4, 1);
+                              port*4, 1);
                }
                break;
 
@@ -3231,7 +3212,7 @@ u8 bnx2x_set_led(struct link_params *params,
 
 }
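
A stripped-down sketch of the control flow in the LED_MODE_OPER case above: OPER with link down is a no-op, otherwise it deliberately falls through into LED_MODE_ON (the actual register programming is elided here).

static void sketch_led_mode_oper(struct link_vars *vars, u8 mode)
{
        switch (mode) {
        case LED_MODE_OPER:
                if (!vars->link_up)
                        break;  /* link down: leave the LEDs as they are */
                /* fall through: with link up, OPER behaves exactly like ON */
        case LED_MODE_ON:
                /* ... program NIG/EMAC LED registers as bnx2x_set_led() does ... */
                break;
        default:
                break;
        }
}
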
 
-/**
+/*
  * This function reflects the actual link state, read DIRECTLY from the
  * HW
  */
@@ -3243,10 +3224,10 @@ u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars,
        u8 ext_phy_link_up = 0, serdes_phy_type;
        struct link_vars temp_vars;
 
-       CL45_RD_OVER_CL22(bp, &params->phy[INT_PHY],
-                             MDIO_REG_BANK_GP_STATUS,
-                             MDIO_GP_STATUS_TOP_AN_STATUS1,
-                             &gp_status);
+       CL22_RD_OVER_CL45(bp, &params->phy[INT_PHY],
+                         MDIO_REG_BANK_GP_STATUS,
+                         MDIO_GP_STATUS_TOP_AN_STATUS1,
+                         &gp_status);
        /* link is up only if both local phy and external phy are up */
        if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
                return -ESRCH;
@@ -3290,15 +3271,15 @@ static u8 bnx2x_link_initialize(struct link_params *params,
        u8 rc = 0;
        u8 phy_index, non_ext_phy;
        struct bnx2x *bp = params->bp;
-       /**
-       * In case of external phy existence, the line speed would be the
-       * line speed linked up by the external phy. In case it is direct
-       * only, then the line_speed during initialization will be
-       * equal to the req_line_speed
-       */
+       /*
+        * In case of external phy existence, the line speed would be the
+        * line speed linked up by the external phy. In case it is direct
+        * only, then the line_speed during initialization will be
+        * equal to the req_line_speed
+        */
        vars->line_speed = params->phy[INT_PHY].req_line_speed;
 
-       /**
+       /*
         * Initialize the internal phy in case this is a direct board
         * (no external phys), or this board has an external phy which
         * requires the internal phy to be initialized first.
@@ -3326,17 +3307,16 @@ static u8 bnx2x_link_initialize(struct link_params *params,
        if (!non_ext_phy)
                for (phy_index = EXT_PHY1; phy_index < params->num_phys;
                      phy_index++) {
-                       /**
+                       /*
                         * No need to initialize second phy in case of first
                         * phy only selection. In case of second phy, we do
                         * need to initialize the first phy, since they are
                         * connected.
-                        **/
+                        */
                        if (phy_index == EXT_PHY2 &&
                            (bnx2x_phy_selection(params) ==
                             PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
-                               DP(NETIF_MSG_LINK, "Not initializing"
-                                                  "second phy\n");
+                               DP(NETIF_MSG_LINK, "Ignoring second phy\n");
                                continue;
                        }
                        params->phy[phy_index].config_init(
@@ -3358,9 +3338,8 @@ static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
                                 struct link_params *params)
 {
        /* reset the SerDes/XGXS */
-       REG_WR(params->bp, GRCBASE_MISC +
-                    MISC_REGISTERS_RESET_REG_3_CLEAR,
-                    (0x1ff << (params->port*16)));
+       REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
+              (0x1ff << (params->port*16)));
 }
 
 static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
@@ -3374,11 +3353,11 @@ static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
        else
                gpio_port = params->port;
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-                           MISC_REGISTERS_GPIO_OUTPUT_LOW,
-                           gpio_port);
+                      MISC_REGISTERS_GPIO_OUTPUT_LOW,
+                      gpio_port);
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-                           MISC_REGISTERS_GPIO_OUTPUT_LOW,
-                           gpio_port);
+                      MISC_REGISTERS_GPIO_OUTPUT_LOW,
+                      gpio_port);
        DP(NETIF_MSG_LINK, "reset external PHY\n");
 }
 
@@ -3409,9 +3388,8 @@ static u8 bnx2x_update_link_down(struct link_params *params,
 
        /* reset BigMac */
        bnx2x_bmac_rx_disable(bp, params->port);
-       REG_WR(bp, GRCBASE_MISC +
-                  MISC_REGISTERS_RESET_REG_2_CLEAR,
-                  (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+              (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
        return 0;
 }
 
@@ -3462,7 +3440,7 @@ static u8 bnx2x_update_link_up(struct link_params *params,
        msleep(20);
        return rc;
 }
-/**
+/*
  * The bnx2x_link_update function should be called upon link
  * interrupt.
  * Link is considered up as follows:
@@ -3501,12 +3479,11 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
                 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
 
        is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
-                                   port*0x18) > 0);
+                               port*0x18) > 0);
        DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
                 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
                 is_mi_int,
-                REG_RD(bp,
-                           NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
+                REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
 
        DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
          REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
@@ -3515,14 +3492,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
        /* disable emac */
        REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
 
-       /**
-       * Step 1:
-       * Check external link change only for external phys, and apply
-       * priority selection between them in case the link on both phys
-       * is up. Note that the instead of the common vars, a temporary
-       * vars argument is used since each phy may have different link/
-       * speed/duplex result
-       */
+       /*
+        * Step 1:
+        * Check external link change only for external phys, and apply
+        * priority selection between them in case the link on both phys
+        * is up. Note that instead of the common vars, a temporary
+        * vars argument is used since each phy may have different link/
+        * speed/duplex result
+        */
        for (phy_index = EXT_PHY1; phy_index < params->num_phys;
              phy_index++) {
                struct bnx2x_phy *phy = &params->phy[phy_index];
@@ -3547,22 +3524,22 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
                        switch (bnx2x_phy_selection(params)) {
                        case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
                        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
-                       /**
+                       /*
                         * In this option, the first PHY makes sure to pass the
                         * traffic through itself only.
                         * It's not clear how to reset the link on the second phy
-                        **/
+                        */
                                active_external_phy = EXT_PHY1;
                                break;
                        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
-                       /**
+                       /*
                         * In this option, the first PHY makes sure to pass the
                         * traffic through the second PHY.
-                        **/
+                        */
                                active_external_phy = EXT_PHY2;
                                break;
                        default:
-                       /**
+                       /*
                         * Link indication on both PHYs with the following cases
                         * is invalid:
                         * - FIRST_PHY means that second phy wasn't initialized,
@@ -3570,7 +3547,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
                         * - SECOND_PHY means that first phy should not be able
                         * to link up by itself (using configuration)
                         * - DEFAULT should be overridden during initialization
-                        **/
+                        */
                                DP(NETIF_MSG_LINK, "Invalid link indication"
                                           "mpc=0x%x. DISABLING LINK !!!\n",
                                           params->multi_phy_config);
@@ -3580,18 +3557,18 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
                }
        }
        prev_line_speed = vars->line_speed;
-       /**
-       * Step 2:
-       * Read the status of the internal phy. In case of
-       * DIRECT_SINGLE_MEDIA board, this link is the external link,
-       * otherwise this is the link between the 577xx and the first
-       * external phy
-       */
+       /*
+        * Step 2:
+        * Read the status of the internal phy. In case of
+        * DIRECT_SINGLE_MEDIA board, this link is the external link,
+        * otherwise this is the link between the 577xx and the first
+        * external phy
+        */
        if (params->phy[INT_PHY].read_status)
                params->phy[INT_PHY].read_status(
                        &params->phy[INT_PHY],
                        params, vars);
-       /**
+       /*
         * The INT_PHY flow control resides in the vars. This includes the
         * case where the speed or flow control are not set to AUTO.
         * Otherwise, the active external phy flow control result is set
@@ -3601,13 +3578,13 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
         */
        if (active_external_phy > INT_PHY) {
                vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
-               /**
+               /*
                 * Link speed is taken from the XGXS. AN and FC result from
                 * the external phy.
                 */
                vars->link_status |= phy_vars[active_external_phy].link_status;
 
-               /**
+               /*
                 * if active_external_phy is first PHY and link is up - disable
                 * TX on second external PHY
                 */
@@ -3643,7 +3620,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
        DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
                   " ext_phy_line_speed = %d\n", vars->flow_ctrl,
                   vars->link_status, ext_phy_line_speed);
-       /**
+       /*
         * Upon link speed change set the NIG into drain mode. Comes to
         * deals with possible FIFO glitch due to clk change when speed
         * is decreased without link down indicator
@@ -3658,8 +3635,8 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
                                   ext_phy_line_speed);
                        vars->phy_link_up = 0;
                } else if (prev_line_speed != vars->line_speed) {
-                       REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
-                                    + params->port*4, 0);
+                       REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
+                              0);
                        msleep(1);
                }
        }
@@ -3674,14 +3651,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 
        bnx2x_link_int_ack(params, vars, link_10g);
 
-       /**
-       * In case external phy link is up, and internal link is down
-       * (not initialized yet probably after link initialization, it
-       * needs to be initialized.
-       * Note that after link down-up as result of cable plug, the xgxs
-       * link would probably become up again without the need
-       * initialize it
-       */
+       /*
+        * In case the external phy link is up, and the internal link is down
+        * (not initialized yet, probably right after link initialization), it
+        * needs to be initialized.
+        * Note that after a link down-up as a result of cable plug, the xgxs
+        * link would probably become up again without the need to
+        * initialize it
+        */
        if (!(SINGLE_MEDIA_DIRECT(params))) {
                DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
                           " init_preceding = %d\n", ext_phy_link_up,
@@ -3701,9 +3678,9 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
                                                vars);
                }
        }
-       /**
-        *  Link is up only if both local phy and external phy (in case of
-        *  non-direct board) are up
+       /*
+        * Link is up only if both local phy and external phy (in case of
+        * non-direct board) are up
         */
        vars->link_up = (vars->phy_link_up &&
                         (ext_phy_link_up ||
@@ -3724,10 +3701,10 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
 {
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-                           MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+                      MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
        msleep(1);
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-                           MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+                      MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
 }
 
 static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
@@ -3747,9 +3724,9 @@ static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
        u16 fw_ver1, fw_ver2;
 
        bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+                       MDIO_PMA_REG_ROM_VER1, &fw_ver1);
        bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_ROM_VER2, &fw_ver2);
+                       MDIO_PMA_REG_ROM_VER2, &fw_ver2);
        bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
                                  phy->ver_addr);
 }
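
The version word saved above is simply the two 16-bit ROM-version registers packed into one u32; as a standalone expression:

/* ROM_VER1 goes in the upper 16 bits, ROM_VER2 in the lower 16 bits */
static u32 sketch_pack_spirom_ver(u16 fw_ver1, u16 fw_ver2)
{
        return ((u32)fw_ver1 << 16) | fw_ver2;
}
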
@@ -3770,7 +3747,7 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
        if ((vars->ieee_fc &
            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
-               val |=  MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+               val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
        }
        if ((vars->ieee_fc &
            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
@@ -3801,11 +3778,11 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
        else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
                ret = 1;
                bnx2x_cl45_read(bp, phy,
-                             MDIO_AN_DEVAD,
-                             MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+                               MDIO_AN_DEVAD,
+                               MDIO_AN_REG_ADV_PAUSE, &ld_pause);
                bnx2x_cl45_read(bp, phy,
-                             MDIO_AN_DEVAD,
-                             MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+                               MDIO_AN_DEVAD,
+                               MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
                pause_result = (ld_pause &
                                MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
                pause_result |= (lp_pause &
@@ -3881,31 +3858,31 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
        /* Boot port from external ROM  */
        /* EDC grst */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_GEN_CTRL,
-                      0x0001);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_GEN_CTRL,
+                        0x0001);
 
        /* ucode reboot and rst */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_GEN_CTRL,
-                      0x008c);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_GEN_CTRL,
+                        0x008c);
 
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_MISC_CTRL1, 0x0001);
 
        /* Reset internal microprocessor */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_GEN_CTRL,
-                      MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_GEN_CTRL,
+                        MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
 
        /* Release srst bit */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_GEN_CTRL,
-                      MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_GEN_CTRL,
+                        MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
 
        /* Delay 100ms per the PHY specifications */
        msleep(100);
@@ -3936,8 +3913,8 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
 
        /* Clear ser_boot_ctl bit */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_MISC_CTRL1, 0x0000);
        bnx2x_save_bcm_spirom_ver(bp, phy, port);
 
        DP(NETIF_MSG_LINK,
@@ -3958,8 +3935,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
 
        /* Read 8073 HW revision*/
        bnx2x_cl45_read(bp, phy,
-                     MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_8073_CHIP_REV, &val);
+                       MDIO_PMA_DEVAD,
+                       MDIO_PMA_REG_8073_CHIP_REV, &val);
 
        if (val != 1) {
                /* No need to workaround in 8073 A1 */
@@ -3967,8 +3944,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
        }
 
        bnx2x_cl45_read(bp, phy,
-                     MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_ROM_VER2, &val);
+                       MDIO_PMA_DEVAD,
+                       MDIO_PMA_REG_ROM_VER2, &val);
 
        /* SNR should be applied only for version 0x102 */
        if (val != 0x102)
@@ -3982,8 +3959,8 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
        u16 val, cnt, cnt1;
 
        bnx2x_cl45_read(bp, phy,
-                     MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_8073_CHIP_REV, &val);
+                       MDIO_PMA_DEVAD,
+                       MDIO_PMA_REG_8073_CHIP_REV, &val);
 
        if (val > 0) {
                /* No need to workaround in 8073 A1 */
@@ -3991,26 +3968,32 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
        }
        /* XAUI workaround in 8073 A0: */
 
-       /* After loading the boot ROM and restarting Autoneg,
-       poll Dev1, Reg $C820: */
+       /*
+        * After loading the boot ROM and restarting Autoneg, poll
+        * Dev1, Reg $C820:
+        */
 
        for (cnt = 0; cnt < 1000; cnt++) {
                bnx2x_cl45_read(bp, phy,
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
-                             &val);
-                 /* If bit [14] = 0 or bit [13] = 0, continue on with
-                  system initialization (XAUI work-around not required,
-                   as these bits indicate 2.5G or 1G link up). */
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+                               &val);
+                 /*
+                  * If bit [14] = 0 or bit [13] = 0, continue on with
+                  * system initialization (XAUI work-around not required, as
+                  * these bits indicate 2.5G or 1G link up).
+                  */
                if (!(val & (1<<14)) || !(val & (1<<13))) {
                        DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
                        return 0;
                } else if (!(val & (1<<15))) {
-                       DP(NETIF_MSG_LINK, "clc bit 15 went off\n");
-                        /* If bit 15 is 0, then poll Dev1, Reg $C841 until
-                         it's MSB (bit 15) goes to 1 (indicating that the
-                         XAUI workaround has completed),
-                         then continue on with system initialization.*/
+                       DP(NETIF_MSG_LINK, "bit 15 went off\n");
+                       /*
+                        * If bit 15 is 0, then poll Dev1, Reg $C841 until its
+                        * MSB (bit 15) goes to 1 (indicating that the XAUI
+                        * workaround has completed), then continue on with
+                        * system initialization.
+                        */
                        for (cnt1 = 0; cnt1 < 1000; cnt1++) {
                                bnx2x_cl45_read(bp, phy,
                                        MDIO_PMA_DEVAD,
@@ -4093,10 +4076,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
                gpio_port = params->port;
        /* Restore normal power mode*/
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-                           MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+                      MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
 
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-                           MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+                      MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
 
        /* enable LASI */
        bnx2x_cl45_write(bp, phy,
@@ -4114,10 +4097,6 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
 
        DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
 
-       /**
-        * If this is forced speed, set to KR or KX (all other are not
-        * supported)
-        */
        /* Swap polarity if required - Must be done only in non-1G mode */
        if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
                /* Configure the 8073 to swap _P and _N of the KR lines */
@@ -4160,8 +4139,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
                        val = (1<<7);
                } else if (phy->req_line_speed ==  SPEED_2500) {
                        val = (1<<5);
-                       /* Note that 2.5G works only
-                       when used with 1G advertisment */
+                       /*
+                        * Note that 2.5G works only when used with 1G
+                        * advertisement
+                        */
                } else
                        val = (1<<5);
        } else {
@@ -4170,8 +4151,7 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
                        PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
                        val |= (1<<7);
 
-               /* Note that 2.5G works only when
-               used with 1G advertisment */
+               /* Note that 2.5G works only when used with 1G advertisement */
                if (phy->speed_cap_mask &
                        (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
@@ -4211,9 +4191,11 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
        /* Add support for CL37 (passive mode) III */
        bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
 
-       /* The SNR will improve about 2db by changing
-       BW and FEE main tap. Rest commands are executed
-       after link is up*/
+       /*
+        * The SNR will improve about 2dB by changing the BW and FFE main
+        * tap. The rest of the commands are executed after link is up.
+        * Change FFE main cursor to 5 in EDC register
+        */
        if (bnx2x_8073_is_snr_needed(bp, phy))
                bnx2x_cl45_write(bp, phy,
                                 MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
@@ -4297,12 +4279,11 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
 
        link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
        if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
-               /* The SNR will improve about 2dbby
-               changing the BW and FEE main tap.*/
-               /* The 1st write to change FFE main
-               tap is set before restart AN */
-               /* Change PLL Bandwidth in EDC
-               register */
+               /*
+                * The SNR will improve about 2dB by changing the BW and FFE
+                * main tap. The 1st write to change FFE main tap is set before
+                * restart AN. Change PLL Bandwidth in EDC register
+                */
                bnx2x_cl45_write(bp, phy,
                                 MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
                                 0x26BC);
@@ -4346,10 +4327,10 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
                        bnx2x_cl45_read(bp, phy,
                                        MDIO_XS_DEVAD,
                                        MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
-                       /**
-                       * Set bit 3 to invert Rx in 1G mode and clear this bit
-                       * when it`s in 10G mode.
-                       */
+                       /*
+                        * Set bit 3 to invert Rx in 1G mode and clear this bit
+                        * when it's in 10G mode.
+                        */
                        if (vars->line_speed == SPEED_1000) {
                                DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
                                              "the 8073\n");
@@ -4381,8 +4362,8 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
           gpio_port);
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-                           MISC_REGISTERS_GPIO_OUTPUT_LOW,
-                           gpio_port);
+                      MISC_REGISTERS_GPIO_OUTPUT_LOW,
+                      gpio_port);
 }
 
 /******************************************************************/
@@ -4396,11 +4377,11 @@ static u8 bnx2x_8705_config_init(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "init 8705\n");
        /* Restore normal power mode*/
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-                           MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+                      MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
        /* HW reset */
        bnx2x_ext_phy_hw_reset(bp, params->port);
        bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
-       bnx2x_wait_reset_complete(bp, phy);
+       bnx2x_wait_reset_complete(bp, phy, params);
 
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
@@ -4451,35 +4432,79 @@ static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
 /******************************************************************/
 /*                     SFP+ module Section                       */
 /******************************************************************/
-static void bnx2x_sfp_set_transmitter(struct bnx2x *bp,
+static u8 bnx2x_get_gpio_port(struct link_params *params)
+{
+       u8 gpio_port;
+       u32 swap_val, swap_override;
+       struct bnx2x *bp = params->bp;
+       if (CHIP_IS_E2(bp))
+               gpio_port = BP_PATH(bp);
+       else
+               gpio_port = params->port;
+       swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+       swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+       return gpio_port ^ (swap_val && swap_override);
+}
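
Note the deliberate logical && above: (swap_val && swap_override) evaluates to exactly 0 or 1, so the XOR flips the GPIO port only when both the port-swap strap and its override are set. A hypothetical standalone illustration:

static u8 sketch_apply_port_swap(u8 gpio_port, u32 swap_val, u32 swap_override)
{
        /* && (not &) keeps the right operand of ^ in {0, 1} */
        return gpio_port ^ (swap_val && swap_override);
}
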
+static void bnx2x_sfp_set_transmitter(struct link_params *params,
                                      struct bnx2x_phy *phy,
-                                     u8 port,
                                      u8 tx_en)
 {
        u16 val;
+       u8 port = params->port;
+       struct bnx2x *bp = params->bp;
+       u32 tx_en_mode;
 
-       DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x\n",
-                tx_en, port);
        /* Disable/Enable transmitter (TX laser of the SFP+ module) */
-       bnx2x_cl45_read(bp, phy,
-                     MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_PHY_IDENTIFIER,
-                     &val);
+       tx_en_mode = REG_RD(bp, params->shmem_base +
+                           offsetof(struct shmem_region,
+                                    dev_info.port_hw_config[port].sfp_ctrl)) &
+               PORT_HW_CFG_TX_LASER_MASK;
+       DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x "
+                          "mode = %x\n", tx_en, port, tx_en_mode);
+       switch (tx_en_mode) {
+       case PORT_HW_CFG_TX_LASER_MDIO:
 
-       if (tx_en)
-               val &= ~(1<<15);
-       else
-               val |= (1<<15);
+               bnx2x_cl45_read(bp, phy,
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_PHY_IDENTIFIER,
+                               &val);
 
-       bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_PHY_IDENTIFIER,
-                      val);
+               if (tx_en)
+                       val &= ~(1<<15);
+               else
+                       val |= (1<<15);
+
+               bnx2x_cl45_write(bp, phy,
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_PHY_IDENTIFIER,
+                                val);
+               break;
+       case PORT_HW_CFG_TX_LASER_GPIO0:
+       case PORT_HW_CFG_TX_LASER_GPIO1:
+       case PORT_HW_CFG_TX_LASER_GPIO2:
+       case PORT_HW_CFG_TX_LASER_GPIO3:
+       {
+               u16 gpio_pin;
+               u8 gpio_port, gpio_mode;
+               if (tx_en)
+                       gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
+               else
+                       gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
+
+               gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
+               gpio_port = bnx2x_get_gpio_port(params);
+               bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+               break;
+       }
+       default:
+               DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode);
+               break;
+       }
 }
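
Callers of bnx2x_sfp_set_transmitter() now pass link_params instead of (bp, port); a hypothetical caller showing the new three-argument form:

static void sketch_sfp_laser_off(struct bnx2x_phy *phy,
                                 struct link_params *params)
{
        /* bp and the port are taken from params inside the helper */
        bnx2x_sfp_set_transmitter(params, phy, 0);      /* 0 = TX laser off */
}
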
 
 static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
                                            struct link_params *params,
-                                         u16 addr, u8 byte_cnt, u8 *o_buf)
+                                           u16 addr, u8 byte_cnt, u8 *o_buf)
 {
        struct bnx2x *bp = params->bp;
        u16 val = 0;
@@ -4492,23 +4517,23 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
        /* Set the read command byte count */
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
-                      (byte_cnt | 0xa000));
+                        (byte_cnt | 0xa000));
 
        /* Set the read command address */
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
-                      addr);
+                        addr);
 
        /* Activate read command */
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
-                      0x2c0f);
+                        0x2c0f);
 
        /* Wait up to 500us for command complete status */
        for (i = 0; i < 100; i++) {
                bnx2x_cl45_read(bp, phy,
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
                if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
                    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
                        break;
@@ -4526,15 +4551,15 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
        /* Read the buffer */
        for (i = 0; i < byte_cnt; i++) {
                bnx2x_cl45_read(bp, phy,
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
                o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
        }
 
        for (i = 0; i < 100; i++) {
                bnx2x_cl45_read(bp, phy,
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
                if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
                    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
                        return 0;
@@ -4545,7 +4570,7 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 
 static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
                                            struct link_params *params,
-                                         u16 addr, u8 byte_cnt, u8 *o_buf)
+                                           u16 addr, u8 byte_cnt, u8 *o_buf)
 {
        struct bnx2x *bp = params->bp;
        u16 val, i;
@@ -4558,41 +4583,43 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 
        /* Need to read from 1.8000 to clear it */
        bnx2x_cl45_read(bp, phy,
-                     MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
-                     &val);
+                       MDIO_PMA_DEVAD,
+                       MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+                       &val);
 
        /* Set the read command byte count */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
-                      ((byte_cnt < 2) ? 2 : byte_cnt));
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+                        ((byte_cnt < 2) ? 2 : byte_cnt));
 
        /* Set the read command address */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
-                      addr);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+                        addr);
        /* Set the destination address */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      0x8004,
-                      MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
+                        MDIO_PMA_DEVAD,
+                        0x8004,
+                        MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
 
        /* Activate read command */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
-                      0x8002);
-       /* Wait appropriate time for two-wire command to finish before
-       polling the status register */
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+                        0x8002);
+       /*
+        * Wait appropriate time for two-wire command to finish before
+        * polling the status register
+        */
        msleep(1);
 
        /* Wait up to 500us for command complete status */
        for (i = 0; i < 100; i++) {
                bnx2x_cl45_read(bp, phy,
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
                if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
                    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
                        break;
@@ -4604,21 +4631,21 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
                DP(NETIF_MSG_LINK,
                         "Got bad status 0x%x when reading from SFP+ EEPROM\n",
                         (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
-               return -EINVAL;
+               return -EFAULT;
        }
 
        /* Read the buffer */
        for (i = 0; i < byte_cnt; i++) {
                bnx2x_cl45_read(bp, phy,
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
                o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
        }
 
        for (i = 0; i < 100; i++) {
                bnx2x_cl45_read(bp, phy,
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
                if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
                    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
                        return 0;
@@ -4628,22 +4655,22 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
        return -EINVAL;
 }
 
-static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
-                                      struct link_params *params, u16 addr,
-                                      u8 byte_cnt, u8 *o_buf)
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+                               struct link_params *params, u16 addr,
+                               u8 byte_cnt, u8 *o_buf)
 {
        if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
                return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
-                                                      byte_cnt, o_buf);
+                                                        byte_cnt, o_buf);
        else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
                return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
-                                                      byte_cnt, o_buf);
+                                                        byte_cnt, o_buf);
        return -EINVAL;
 }
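
With the static qualifier dropped above, bnx2x_read_sfp_module_eeprom() becomes callable from outside this file. A hedged usage sketch (the helper itself is hypothetical; the SFP_EEPROM_VENDOR_NAME_* constants are the ones used later in this patch, and the call returns 0 on success as the code below assumes):

static void sketch_print_sfp_vendor(struct bnx2x_phy *phy,
                                    struct link_params *params)
{
        struct bnx2x *bp = params->bp;
        u8 vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE + 1];

        /* Returns 0 on success, non-zero if the two-wire read failed */
        if (bnx2x_read_sfp_module_eeprom(phy, params,
                                         SFP_EEPROM_VENDOR_NAME_ADDR,
                                         SFP_EEPROM_VENDOR_NAME_SIZE,
                                         vendor_name) == 0) {
                vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
                DP(NETIF_MSG_LINK, "SFP+ module vendor: %s\n", vendor_name);
        }
}
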
 
 static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
                             struct link_params *params,
-                                 u16 *edc_mode)
+                            u16 *edc_mode)
 {
        struct bnx2x *bp = params->bp;
        u8 val, check_limiting_mode = 0;
@@ -4664,8 +4691,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
        {
                u8 copper_module_type;
 
-               /* Check if its active cable( includes SFP+ module)
-               of passive cable*/
+               /*
+                * Check if it is an active cable (includes SFP+ module)
+                * or a passive cable
+                */
                if (bnx2x_read_sfp_module_eeprom(phy,
                                               params,
                                               SFP_EEPROM_FC_TX_TECH_ADDR,
@@ -4724,8 +4753,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
        return 0;
 }
-/* This function read the relevant field from the module ( SFP+ ),
-       and verify it is compliant with this board */
+/*
+ * This function reads the relevant field from the module (SFP+), and
+ * verifies that it is compliant with this board
+ */
 static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
                                  struct link_params *params)
 {
@@ -4774,24 +4805,24 @@ static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
        /* format the warning message */
        if (bnx2x_read_sfp_module_eeprom(phy,
                                         params,
-                                      SFP_EEPROM_VENDOR_NAME_ADDR,
-                                      SFP_EEPROM_VENDOR_NAME_SIZE,
-                                      (u8 *)vendor_name))
+                                        SFP_EEPROM_VENDOR_NAME_ADDR,
+                                        SFP_EEPROM_VENDOR_NAME_SIZE,
+                                        (u8 *)vendor_name))
                vendor_name[0] = '\0';
        else
                vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
        if (bnx2x_read_sfp_module_eeprom(phy,
                                         params,
-                                      SFP_EEPROM_PART_NO_ADDR,
-                                      SFP_EEPROM_PART_NO_SIZE,
-                                      (u8 *)vendor_pn))
+                                        SFP_EEPROM_PART_NO_ADDR,
+                                        SFP_EEPROM_PART_NO_SIZE,
+                                        (u8 *)vendor_pn))
                vendor_pn[0] = '\0';
        else
                vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
 
-       netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected,"
-                            " Port %d from %s part number %s\n",
-                   params->port, vendor_name, vendor_pn);
+       netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected,"
+                  " Port %d from %s part number %s\n",
+                  params->port, vendor_name, vendor_pn);
        phy->flags |= FLAGS_SFP_NOT_APPROVED;
        return -EINVAL;
 }
@@ -4803,8 +4834,11 @@ static u8 bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
        u8 val;
        struct bnx2x *bp = params->bp;
        u16 timeout;
-       /* Initialization time after hot-plug may take up to 300ms for some
-       phys type ( e.g. JDSU ) */
+       /*
+        * Initialization time after hot-plug may take up to 300ms for
+        * some phy types (e.g. JDSU)
+        */
+
        for (timeout = 0; timeout < 60; timeout++) {
                if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
                    == 0) {
@@ -4823,16 +4857,14 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
        /* Make sure GPIOs are not using for LED mode */
        u16 val;
        /*
-        * In the GPIO register, bit 4 is use to detemine if the GPIOs are
+        * In the GPIO register, bit 4 is used to determine if the GPIOs are
         * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
         * output
         * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0
         * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1
         * where the 1st bit is the over-current(only input), and 2nd bit is
         * for power( only output )
-       */
-
-       /*
+        *
         * In case of NOC feature is disabled and power is up, set GPIO control
         *  as input to enable listening of over-current indication
         */
@@ -4861,15 +4893,14 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
        u16 cur_limiting_mode;
 
        bnx2x_cl45_read(bp, phy,
-                     MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_ROM_VER2,
-                     &cur_limiting_mode);
+                       MDIO_PMA_DEVAD,
+                       MDIO_PMA_REG_ROM_VER2,
+                       &cur_limiting_mode);
        DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
                 cur_limiting_mode);
 
        if (edc_mode == EDC_MODE_LIMITING) {
-               DP(NETIF_MSG_LINK,
-                        "Setting LIMITING MODE\n");
+               DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n");
                bnx2x_cl45_write(bp, phy,
                                 MDIO_PMA_DEVAD,
                                 MDIO_PMA_REG_ROM_VER2,
@@ -4878,62 +4909,63 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
 
                DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
 
-               /* Changing to LRM mode takes quite few seconds.
-               So do it only if current mode is limiting
-               ( default is LRM )*/
+               /*
+                * Changing to LRM mode takes quite a few seconds. So do it only
+                * if current mode is limiting (default is LRM)
+                */
                if (cur_limiting_mode != EDC_MODE_LIMITING)
                        return 0;
 
                bnx2x_cl45_write(bp, phy,
-                              MDIO_PMA_DEVAD,
-                              MDIO_PMA_REG_LRM_MODE,
-                              0);
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_LRM_MODE,
+                                0);
                bnx2x_cl45_write(bp, phy,
-                              MDIO_PMA_DEVAD,
-                              MDIO_PMA_REG_ROM_VER2,
-                              0x128);
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_ROM_VER2,
+                                0x128);
                bnx2x_cl45_write(bp, phy,
-                              MDIO_PMA_DEVAD,
-                              MDIO_PMA_REG_MISC_CTRL0,
-                              0x4008);
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_MISC_CTRL0,
+                                0x4008);
                bnx2x_cl45_write(bp, phy,
-                              MDIO_PMA_DEVAD,
-                              MDIO_PMA_REG_LRM_MODE,
-                              0xaaaa);
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_LRM_MODE,
+                                0xaaaa);
        }
        return 0;
 }
 
 static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
                                       struct bnx2x_phy *phy,
-                                       u16 edc_mode)
+                                      u16 edc_mode)
 {
        u16 phy_identifier;
        u16 rom_ver2_val;
        bnx2x_cl45_read(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_PHY_IDENTIFIER,
-                      &phy_identifier);
+                       MDIO_PMA_DEVAD,
+                       MDIO_PMA_REG_PHY_IDENTIFIER,
+                       &phy_identifier);
 
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_PHY_IDENTIFIER,
-                      (phy_identifier & ~(1<<9)));
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_PHY_IDENTIFIER,
+                        (phy_identifier & ~(1<<9)));
 
        bnx2x_cl45_read(bp, phy,
-                     MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_ROM_VER2,
-                     &rom_ver2_val);
+                       MDIO_PMA_DEVAD,
+                       MDIO_PMA_REG_ROM_VER2,
+                       &rom_ver2_val);
        /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_ROM_VER2,
-                      (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_ROM_VER2,
+                        (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
 
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_PHY_IDENTIFIER,
-                      (phy_identifier | (1<<9)));
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_PHY_IDENTIFIER,
+                        (phy_identifier | (1<<9)));
 
        return 0;
 }
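
For reference, the ROM_VER2 update above preserves the upper byte of the register and replaces only the lower byte with the requested EDC mode ((rom_ver2_val & 0xff00) | (edc_mode & 0x00ff)). Below is a minimal standalone sketch of that read-modify-write byte merge; it uses only standard C and does not touch the driver's MDIO helpers.

#include <stdint.h>
#include <stdio.h>

/* Keep the MSB of the current register value and take the LSB from the
 * requested EDC mode -- the same masking applied to MDIO_PMA_REG_ROM_VER2.
 */
static uint16_t merge_edc_mode(uint16_t rom_ver2_val, uint16_t edc_mode)
{
	return (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff);
}

int main(void)
{
	/* Example values only; real values are read from the PHY over CL45. */
	uint16_t cur = 0x1234, mode = 0x0055;

	printf("0x%04x -> 0x%04x\n", (unsigned)cur,
	       (unsigned)merge_edc_mode(cur, mode));
	return 0;
}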
@@ -4946,11 +4978,11 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
 
        switch (action) {
        case DISABLE_TX:
-               bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+               bnx2x_sfp_set_transmitter(params, phy, 0);
                break;
        case ENABLE_TX:
                if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
-                       bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+                       bnx2x_sfp_set_transmitter(params, phy, 1);
                break;
        default:
                DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
@@ -4959,6 +4991,38 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
        }
 }
 
+static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
+                                          u8 gpio_mode)
+{
+       struct bnx2x *bp = params->bp;
+
+       u32 fault_led_gpio = REG_RD(bp, params->shmem_base +
+                           offsetof(struct shmem_region,
+                       dev_info.port_hw_config[params->port].sfp_ctrl)) &
+               PORT_HW_CFG_FAULT_MODULE_LED_MASK;
+       switch (fault_led_gpio) {
+       case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
+               return;
+       case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0:
+       case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
+       case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
+       case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3:
+       {
+               u8 gpio_port = bnx2x_get_gpio_port(params);
+               u16 gpio_pin = fault_led_gpio -
+                       PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
+               DP(NETIF_MSG_LINK, "Set fault module-detected led "
+                                  "pin %x port %x mode %x\n",
+                              gpio_pin, gpio_port, gpio_mode);
+               bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+       }
+       break;
+       default:
+               DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n",
+                              fault_led_gpio);
+       }
+}
+
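
The new bnx2x_set_sfp_module_fault_led() above derives the GPIO pin by subtracting the GPIO0 option value from the configured option, so the four GPIOn choices map to pins 0..3 and anything else is treated as disabled or invalid. A standalone sketch of that selection follows; the enum values are stand-ins for illustration and do not reproduce the real PORT_HW_CFG_FAULT_MODULE_LED_* definitions.

#include <stdint.h>
#include <stdio.h>

/* Stand-in configuration values; the driver uses the
 * PORT_HW_CFG_FAULT_MODULE_LED_* constants from its headers.
 */
enum fault_led_cfg {
	FAULT_LED_DISABLED = 0,
	FAULT_LED_GPIO0,
	FAULT_LED_GPIO1,
	FAULT_LED_GPIO2,
	FAULT_LED_GPIO3,
};

/* Return the GPIO pin to drive, or -1 when the fault LED is disabled or
 * the configuration value is unrecognized.
 */
static int fault_led_gpio_pin(uint32_t cfg)
{
	switch (cfg) {
	case FAULT_LED_GPIO0:
	case FAULT_LED_GPIO1:
	case FAULT_LED_GPIO2:
	case FAULT_LED_GPIO3:
		return (int)(cfg - FAULT_LED_GPIO0);
	default:
		return -1;
	}
}

int main(void)
{
	printf("GPIO2 option -> pin %d\n", fault_led_gpio_pin(FAULT_LED_GPIO2));
	printf("disabled     -> pin %d\n", fault_led_gpio_pin(FAULT_LED_DISABLED));
	return 0;
}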
 static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
                                     struct link_params *params)
 {
@@ -4976,15 +5040,14 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
        if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
                DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
                return -EINVAL;
-       } else if (bnx2x_verify_sfp_module(phy, params) !=
-                  0) {
+       } else if (bnx2x_verify_sfp_module(phy, params) != 0) {
                /* check SFP+ module compatibility */
                DP(NETIF_MSG_LINK, "Module verification failed!!\n");
                rc = -EINVAL;
                /* Turn on fault module-detected led */
-               bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-                                 MISC_REGISTERS_GPIO_HIGH,
-                                 params->port);
+               bnx2x_set_sfp_module_fault_led(params,
+                                              MISC_REGISTERS_GPIO_HIGH);
+
                if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
                    ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
                     PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) {
@@ -4995,18 +5058,17 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
                }
        } else {
                /* Turn off fault module-detected led */
-               DP(NETIF_MSG_LINK, "Turn off fault module-detected led\n");
-               bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-                                         MISC_REGISTERS_GPIO_LOW,
-                                         params->port);
+               bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
        }
 
        /* power up the SFP module */
        if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
                bnx2x_8727_power_module(bp, phy, 1);
 
-       /* Check and set limiting mode / LRM mode on 8726.
-       On 8727 it is done automatically */
+       /*
+        * Check and set limiting mode / LRM mode on 8726. On 8727 it
+        * is done automatically
+        */
        if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
                bnx2x_8726_set_limiting_mode(bp, phy, edc_mode);
        else
@@ -5018,9 +5080,9 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
        if (rc == 0 ||
            (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
            PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-               bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+               bnx2x_sfp_set_transmitter(params, phy, 1);
        else
-               bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+               bnx2x_sfp_set_transmitter(params, phy, 0);
 
        return rc;
 }
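
At the end of bnx2x_sfp_module_detection() the transmitter is re-enabled unless verification failed and the board configuration explicitly requests the TX laser to be shut off on a module mismatch. A standalone sketch of that decision, with a local enum standing in for the PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_* options:

#include <stdbool.h>
#include <stdio.h>

/* Local stand-ins for the enforcement policy; the driver reads the real
 * value from port_feature_config[].config in shmem.
 */
enum mdl_enforcement {
	ENFRCMNT_NONE = 0,
	ENFRCMNT_DISABLE_TX_LASER,
	ENFRCMNT_POWER_DOWN,
};

/* The transmitter stays on when the module verified cleanly (rc == 0) or
 * when the policy is anything other than "disable TX laser".
 */
static bool keep_transmitter_on(int rc, enum mdl_enforcement policy)
{
	return rc == 0 || policy != ENFRCMNT_DISABLE_TX_LASER;
}

int main(void)
{
	printf("%d\n", keep_transmitter_on(0, ENFRCMNT_DISABLE_TX_LASER));   /* 1 */
	printf("%d\n", keep_transmitter_on(-22, ENFRCMNT_DISABLE_TX_LASER)); /* 0 */
	printf("%d\n", keep_transmitter_on(-22, ENFRCMNT_POWER_DOWN));       /* 1 */
	return 0;
}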
@@ -5033,11 +5095,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
        u8 port = params->port;
 
        /* Set valid module led off */
-       bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-                         MISC_REGISTERS_GPIO_HIGH,
-                         params->port);
+       bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
 
-       /* Get current gpio val refelecting module plugged in / out*/
+       /* Get current gpio val reflecting module plugged in/out */
        gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
 
        /* Call the handling function in case module is detected */
@@ -5053,18 +5113,20 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
                        DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
        } else {
                u32 val = REG_RD(bp, params->shmem_base +
-                                    offsetof(struct shmem_region, dev_info.
-                                             port_feature_config[params->port].
-                                             config));
+                                offsetof(struct shmem_region, dev_info.
+                                         port_feature_config[params->port].
+                                         config));
 
                bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
                                   MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
                                   port);
-               /* Module was plugged out. */
-               /* Disable transmit for this module */
+               /*
+                * Module was plugged out.
+                * Disable transmit for this module
+                */
                if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
                    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-                       bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+                       bnx2x_sfp_set_transmitter(params, phy, 0);
        }
 }
 
@@ -5100,9 +5162,9 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
 
        DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
                        " link_status 0x%x\n", rx_sd, pcs_status, val2);
-       /* link is up if both bit 0 of pmd_rx_sd and
-        * bit 0 of pcs_status are set, or if the autoneg bit
-        * 1 is set
+       /*
+        * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
+        * are set, or if the autoneg bit 1 is set
         */
        link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
        if (link_up) {
@@ -5123,14 +5185,15 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
                                 struct link_params *params,
                                 struct link_vars *vars)
 {
-       u16 cnt, val;
+       u32 tx_en_mode;
+       u16 cnt, val, tmp1;
        struct bnx2x *bp = params->bp;
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-                           MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+                      MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
        /* HW reset */
        bnx2x_ext_phy_hw_reset(bp, params->port);
        bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
-       bnx2x_wait_reset_complete(bp, phy);
+       bnx2x_wait_reset_complete(bp, phy, params);
 
        /* Wait until fw is loaded */
        for (cnt = 0; cnt < 100; cnt++) {
@@ -5197,6 +5260,26 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
                                 0x0004);
        }
        bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+
+       /*
+        * If TX laser is controlled by GPIO_0, do not let the PHY go into
+        * low power mode when the TX laser is disabled
+        */
+
+       tx_en_mode = REG_RD(bp, params->shmem_base +
+                           offsetof(struct shmem_region,
+                               dev_info.port_hw_config[params->port].sfp_ctrl))
+                       & PORT_HW_CFG_TX_LASER_MASK;
+
+       if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+               DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+               bnx2x_cl45_read(bp, phy,
+                       MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1);
+               tmp1 |= 0x1;
+               bnx2x_cl45_write(bp, phy,
+                       MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
+       }
+
        return 0;
 }
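
The TX-laser block added above reads dev_info.port_hw_config[port].sfp_ctrl by adding an offsetof() expression (indexed with the runtime port number) to shmem_base and masking with PORT_HW_CFG_TX_LASER_MASK. The sketch below computes an equivalent offset with strictly portable arithmetic; the structures are toys that merely stand in for the real shmem layout.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative layout only; field names and sizes do not match bnx2x. */
struct toy_port_hw_config {
	uint32_t speed_capability_mask;
	uint32_t sfp_ctrl;
};

struct toy_shmem_region {
	uint32_t validity;
	struct toy_port_hw_config port_hw_config[2];
};

int main(void)
{
	int port = 1;
	/* Same addressing idiom as REG_RD(bp, shmem_base + offsetof(...)):
	 * a base address plus an offset into a nested per-port array.
	 */
	size_t off = offsetof(struct toy_shmem_region, port_hw_config)
		     + (size_t)port * sizeof(struct toy_port_hw_config)
		     + offsetof(struct toy_port_hw_config, sfp_ctrl);

	printf("sfp_ctrl offset for port %d: %zu bytes\n", port, off);
	return 0;
}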
 
@@ -5231,26 +5314,26 @@ static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
 
        /* Set soft reset */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_GEN_CTRL,
-                      MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_GEN_CTRL,
+                        MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
 
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_MISC_CTRL1, 0x0001);
 
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_GEN_CTRL,
-                      MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_GEN_CTRL,
+                        MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
 
        /* wait for 150ms for microcode load */
        msleep(150);
 
        /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
        bnx2x_cl45_write(bp, phy,
-                      MDIO_PMA_DEVAD,
-                      MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+                        MDIO_PMA_DEVAD,
+                        MDIO_PMA_REG_MISC_CTRL1, 0x0000);
 
        msleep(200);
        bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
@@ -5285,23 +5368,18 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
        u32 val;
        u32 swap_val, swap_override, aeu_gpio_mask, offset;
        DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
-       /* Restore normal power mode*/
-       bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-                           MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
-
-       bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-                           MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 
        bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
-       bnx2x_wait_reset_complete(bp, phy);
+       bnx2x_wait_reset_complete(bp, phy, params);
 
        bnx2x_8726_external_rom_boot(phy, params);
 
-       /* Need to call module detected on initialization since
-       the module detection triggered by actual module
-       insertion might occur before driver is loaded, and when
-       driver is loaded, it reset all registers, including the
-       transmitter */
+       /*
+        * Need to call module detected on initialization since the module
+        * detection triggered by actual module insertion might occur before
+        * driver is loaded, and when driver is loaded, it reset all
+        * registers, including the transmitter
+        */
        bnx2x_sfp_module_detection(phy, params);
 
        if (phy->req_line_speed == SPEED_1000) {
@@ -5334,8 +5412,10 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
                                 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
                bnx2x_cl45_write(bp, phy,
                                MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
-               /* Enable RX-ALARM control to receive
-               interrupt for 1G speed change */
+               /*
+                * Enable RX-ALARM control to receive interrupt for 1G speed
+                * change
+                */
                bnx2x_cl45_write(bp, phy,
                                 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4);
                bnx2x_cl45_write(bp, phy,
@@ -5367,7 +5447,7 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
 
        /* Set GPIO3 to trigger SFP+ module insertion/removal */
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
-                           MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
+                      MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
 
        /* The GPIO should be swapped if the swap register is set and active */
        swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
@@ -5458,7 +5538,7 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
                                struct link_params *params) {
        u32 swap_val, swap_override;
        u8 port;
-       /**
+       /*
         * The PHY reset is controlled by GPIO 1. Fake the port number
         * to cancel the swap done in set_gpio()
         */
@@ -5467,20 +5547,21 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
        swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
        port = (swap_val && swap_override) ^ 1;
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-                           MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+                      MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 }
 
 static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
                                 struct link_params *params,
                                 struct link_vars *vars)
 {
-       u16 tmp1, val, mod_abs;
+       u32 tx_en_mode;
+       u16 tmp1, val, mod_abs, tmp2;
        u16 rx_alarm_ctrl_val;
        u16 lasi_ctrl_val;
        struct bnx2x *bp = params->bp;
        /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
 
-       bnx2x_wait_reset_complete(bp, phy);
+       bnx2x_wait_reset_complete(bp, phy, params);
        rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
        lasi_ctrl_val = 0x0004;
 
@@ -5493,14 +5574,17 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val);
 
-       /* Initially configure  MOD_ABS to interrupt when
-       module is presence( bit 8) */
+       /*
+        * Initially configure MOD_ABS to interrupt when module is
+        * present (bit 8)
+        */
        bnx2x_cl45_read(bp, phy,
                        MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
-       /* Set EDC off by setting OPTXLOS signal input to low
-       (bit 9).
-       When the EDC is off it locks onto a reference clock and
-       avoids becoming 'lost'.*/
+       /*
+        * Set EDC off by setting OPTXLOS signal input to low (bit 9).
+        * When the EDC is off it locks onto a reference clock and avoids
+        * becoming 'lost'
+        */
        mod_abs &= ~(1<<8);
        if (!(phy->flags & FLAGS_NOC))
                mod_abs &= ~(1<<9);
@@ -5515,7 +5599,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
        if (phy->flags & FLAGS_NOC)
                val |= (3<<5);
 
-       /**
+       /*
         * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
         * status which reflect SFP+ module over-current
         */
@@ -5542,7 +5626,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
                bnx2x_cl45_read(bp, phy,
                                MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
                DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
-               /**
+               /*
                 * Power down the XAUI until link is up in case of dual-media
                 * and 1G
                 */
@@ -5568,7 +5652,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
                bnx2x_cl45_write(bp, phy,
                                 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
        } else {
-               /**
+               /*
                 * Since the 8727 has only single reset pin, need to set the 10G
                 * registers although it is default
                 */
@@ -5584,7 +5668,8 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
                                 0x0008);
        }
 
-       /* Set 2-wire transfer rate of SFP+ module EEPROM
+       /*
+        * Set 2-wire transfer rate of SFP+ module EEPROM
         * to 100Khz since some DACs(direct attached cables) do
         * not work at 400Khz.
         */
@@ -5607,6 +5692,26 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
                                 phy->tx_preemphasis[1]);
        }
 
+       /*
+        * If TX laser is controlled by GPIO_0, do not let the PHY go into
+        * low power mode when the TX laser is disabled
+        */
+       tx_en_mode = REG_RD(bp, params->shmem_base +
+                           offsetof(struct shmem_region,
+                               dev_info.port_hw_config[params->port].sfp_ctrl))
+                       & PORT_HW_CFG_TX_LASER_MASK;
+
+       if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+
+               DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+               bnx2x_cl45_read(bp, phy,
+                       MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2);
+               tmp2 |= 0x1000;
+               tmp2 &= 0xFFEF;
+               bnx2x_cl45_write(bp, phy,
+                       MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
+       }
+
        return 0;
 }
 
@@ -5620,46 +5725,49 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
                                      port_feature_config[params->port].
                                      config));
        bnx2x_cl45_read(bp, phy,
-                     MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
+                       MDIO_PMA_DEVAD,
+                       MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
        if (mod_abs & (1<<8)) {
 
                /* Module is absent */
                DP(NETIF_MSG_LINK, "MOD_ABS indication "
                            "show module is absent\n");
 
-               /* 1. Set mod_abs to detect next module
-               presence event
-                  2. Set EDC off by setting OPTXLOS signal input to low
-                       (bit 9).
-                       When the EDC is off it locks onto a reference clock and
-                       avoids becoming 'lost'.*/
+               /*
+                * 1. Set mod_abs to detect next module
+                *    presence event
+                * 2. Set EDC off by setting OPTXLOS signal input to low
+                *    (bit 9).
+                *    When the EDC is off it locks onto a reference clock and
+                *    avoids becoming 'lost'.
+                */
                mod_abs &= ~(1<<8);
                if (!(phy->flags & FLAGS_NOC))
                        mod_abs &= ~(1<<9);
                bnx2x_cl45_write(bp, phy,
-                              MDIO_PMA_DEVAD,
-                              MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 
-               /* Clear RX alarm since it stays up as long as
-               the mod_abs wasn't changed */
+               /*
+                * Clear RX alarm since it stays up as long as
+                * the mod_abs wasn't changed
+                */
                bnx2x_cl45_read(bp, phy,
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
 
        } else {
                /* Module is present */
                DP(NETIF_MSG_LINK, "MOD_ABS indication "
                            "show module is present\n");
-               /* First thing, disable transmitter,
-               and if the module is ok, the
-               module_detection will enable it*/
-
-               /* 1. Set mod_abs to detect next module
-               absent event ( bit 8)
-                  2. Restore the default polarity of the OPRXLOS signal and
-               this signal will then correctly indicate the presence or
-               absence of the Rx signal. (bit 9) */
+               /*
+                * First disable transmitter, and if the module is ok, the
+                * module_detection will enable it
+                * 1. Set mod_abs to detect next module absent event ( bit 8)
+                * 2. Restore the default polarity of the OPRXLOS signal and
+                * this signal will then correctly indicate the presence or
+                * absence of the Rx signal. (bit 9)
+                */
                mod_abs |= (1<<8);
                if (!(phy->flags & FLAGS_NOC))
                        mod_abs |= (1<<9);
@@ -5667,10 +5775,12 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
                                 MDIO_PMA_DEVAD,
                                 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 
-               /* Clear RX alarm since it stays up as long as
-               the mod_abs wasn't changed. This is need to be done
-               before calling the module detection, otherwise it will clear
-               the link update alarm */
+               /*
+                * Clear RX alarm since it stays up as long as the mod_abs
+                * wasn't changed. This needs to be done before calling the
+                * module detection, otherwise it will clear the link update
+                * alarm
+                */
                bnx2x_cl45_read(bp, phy,
                                MDIO_PMA_DEVAD,
                                MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
@@ -5678,7 +5788,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
 
                if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
                    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-                       bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+                       bnx2x_sfp_set_transmitter(params, phy, 0);
 
                if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
                        bnx2x_sfp_module_detection(phy, params);
@@ -5687,9 +5797,8 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
        }
 
        DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
-                rx_alarm_status);
-       /* No need to check link status in case of
-       module plugged in/out */
+                  rx_alarm_status);
+       /* No need to check link status in case of module plugged in/out */
 }
 
 static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
@@ -5725,7 +5834,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
        bnx2x_cl45_read(bp, phy,
                        MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
 
-       /**
+       /*
         * If a module is present and there is need to check
         * for over current
         */
@@ -5745,12 +5854,8 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
                                            " Please remove the SFP+ module and"
                                            " restart the system to clear this"
                                            " error.\n",
-                                  params->port);
-
-                       /*
-                        * Disable all RX_ALARMs except for
-                        * mod_abs
-                        */
+                        params->port);
+                       /* Disable all RX_ALARMs except for mod_abs */
                        bnx2x_cl45_write(bp, phy,
                                         MDIO_PMA_DEVAD,
                                         MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5));
@@ -5793,11 +5898,15 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
                        MDIO_PMA_DEVAD,
                        MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
 
-       /* Bits 0..2 --> speed detected,
-          bits 13..15--> link is down */
+       /*
+        * Bits 0..2 --> speed detected,
+        * Bits 13..15 --> link is down
+        */
        if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
                link_up = 1;
                vars->line_speed = SPEED_10000;
+               DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+                          params->port);
        } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
                link_up = 1;
                vars->line_speed = SPEED_1000;
@@ -5819,7 +5928,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
                bnx2x_cl45_read(bp, phy,
                                MDIO_PMA_DEVAD,
                                MDIO_PMA_REG_8727_PCS_GP, &val1);
-               /**
+               /*
                 * In case of dual-media board and 1G, power up the XAUI side,
                 * otherwise power it down. For 10G it is done automatically
                 */
@@ -5839,7 +5948,7 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
 {
        struct bnx2x *bp = params->bp;
        /* Disable Transmitter */
-       bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+       bnx2x_sfp_set_transmitter(params, phy, 0);
        /* Clear LASI */
        bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0);
 
@@ -5851,19 +5960,23 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
 static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
                                           struct link_params *params)
 {
-       u16 val, fw_ver1, fw_ver2, cnt;
+       u16 val, fw_ver1, fw_ver2, cnt, adj;
        struct bnx2x *bp = params->bp;
 
+       adj = 0;
+       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+               adj = -1;
+
        /* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/
        /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0014);
+       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
+       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, 0x0000);
+       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, 0x0300);
+       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x0009);
 
        for (cnt = 0; cnt < 100; cnt++) {
-               bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+               bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
                if (val & 1)
                        break;
                udelay(5);
@@ -5877,11 +5990,11 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 
 
        /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0000);
+       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
+       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x000A);
        for (cnt = 0; cnt < 100; cnt++) {
-               bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+               bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
                if (val & 1)
                        break;
                udelay(5);
@@ -5894,9 +6007,9 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
        }
 
        /* lower 16 bits of the register SPI_FW_STATUS */
-       bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+       bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, &fw_ver1);
        /* upper 16 bits of register SPI_FW_STATUS */
-       bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
+       bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, &fw_ver2);
 
        bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1,
                                  phy->ver_addr);
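
The adj offset and the cnt < 100 loops in the hunk above combine a per-type register adjustment (the BCM84833 uses the MDIO2ARM mailbox registers one address below the BCM84823's 0xA817..0xA81C, hence adj = -1) with a bounded poll on bit 0 of the status register. A standalone sketch of that pattern, with a stubbed register read standing in for bnx2x_cl45_read():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stub for an MDIO clause-45 read; the "done" bit appears after a few
 * polls so the loop below terminates.
 */
static uint16_t stub_read(uint16_t reg)
{
	static int calls;
	(void)reg;
	return (++calls >= 3) ? 0x0001 : 0x0000;
}

/* Bounded poll: wait for bit 0 of the status register, giving up after
 * max_tries attempts, mirroring the driver's cnt < 100 loops.
 */
static bool wait_cmd_done(uint16_t status_reg, int max_tries)
{
	for (int cnt = 0; cnt < max_tries; cnt++) {
		if (stub_read(status_reg) & 1)
			return true;
		/* the driver udelay(5)s between polls; omitted here */
	}
	return false;
}

int main(void)
{
	int adj = -1;  /* pretend the probed PHY is a BCM84833 */
	uint16_t status_reg = (uint16_t)(0xA818 + adj);

	printf("polling 0x%04X: %s\n", (unsigned)status_reg,
	       wait_cmd_done(status_reg, 100) ? "done" : "timeout");
	return 0;
}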
@@ -5905,49 +6018,53 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 static void bnx2x_848xx_set_led(struct bnx2x *bp,
                                struct bnx2x_phy *phy)
 {
-       u16 val;
+       u16 val, adj;
+
+       adj = 0;
+       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+               adj = -1;
 
        /* PHYC_CTL_LED_CTL */
        bnx2x_cl45_read(bp, phy,
                        MDIO_PMA_DEVAD,
-                       MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+                       MDIO_PMA_REG_8481_LINK_SIGNAL + adj, &val);
        val &= 0xFE00;
        val |= 0x0092;
 
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD,
-                        MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+                        MDIO_PMA_REG_8481_LINK_SIGNAL + adj, val);
 
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD,
-                        MDIO_PMA_REG_8481_LED1_MASK,
+                        MDIO_PMA_REG_8481_LED1_MASK + adj,
                         0x80);
 
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD,
-                        MDIO_PMA_REG_8481_LED2_MASK,
+                        MDIO_PMA_REG_8481_LED2_MASK + adj,
                         0x18);
 
        /* Select activity source by Tx and Rx, as suggested by PHY AE */
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD,
-                        MDIO_PMA_REG_8481_LED3_MASK,
+                        MDIO_PMA_REG_8481_LED3_MASK + adj,
                         0x0006);
 
        /* Select the closest activity blink rate to that in 10/100/1000 */
        bnx2x_cl45_write(bp, phy,
                        MDIO_PMA_DEVAD,
-                       MDIO_PMA_REG_8481_LED3_BLINK,
+                       MDIO_PMA_REG_8481_LED3_BLINK + adj,
                        0);
 
        bnx2x_cl45_read(bp, phy,
                        MDIO_PMA_DEVAD,
-                       MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
+                       MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, &val);
        val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
 
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD,
-                        MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
+                        MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, val);
 
        /* 'Interrupt Mask' */
        bnx2x_cl45_write(bp, phy,
@@ -5961,7 +6078,11 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
 {
        struct bnx2x *bp = params->bp;
        u16 autoneg_val, an_1000_val, an_10_100_val;
-
+       /*
+        * This phy uses the NIG latch mechanism since link indication
+        * arrives through its LED4 and not via its LASI signal, so we
+        * get a steady signal instead of clear on read
+        */
        bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
                      1 << NIG_LATCH_BC_ENABLE_MI_INT);
 
@@ -6086,11 +6207,11 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
        struct bnx2x *bp = params->bp;
        /* Restore normal power mode*/
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-                           MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+                      MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 
        /* HW reset */
        bnx2x_ext_phy_hw_reset(bp, params->port);
-       bnx2x_wait_reset_complete(bp, phy);
+       bnx2x_wait_reset_complete(bp, phy, params);
 
        bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
        return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -6102,12 +6223,15 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 {
        struct bnx2x *bp = params->bp;
        u8 port, initialize = 1;
-       u16 val;
+       u16 val, adj;
        u16 temp;
-       u32 actual_phy_selection;
+       u32 actual_phy_selection, cms_enable;
        u8 rc = 0;
 
        /* This is just for MDIO_CTL_REG_84823_MEDIA register. */
+       adj = 0;
+       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+               adj = 3;
 
        msleep(1);
        if (CHIP_IS_E2(bp))
@@ -6117,11 +6241,12 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
                       MISC_REGISTERS_GPIO_OUTPUT_HIGH,
                       port);
-       bnx2x_wait_reset_complete(bp, phy);
+       bnx2x_wait_reset_complete(bp, phy, params);
        /* Wait for GPHY to come out of reset */
        msleep(50);
-       /* BCM84823 requires that XGXS links up first @ 10G for normal
-       behavior */
+       /*
+        * BCM84823 requires that XGXS links up first @ 10G for normal behavior
+        */
        temp = vars->line_speed;
        vars->line_speed = SPEED_10000;
        bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
@@ -6131,7 +6256,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
        /* Set dual-media configuration according to configuration */
 
        bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-                       MDIO_CTL_REG_84823_MEDIA, &val);
+                       MDIO_CTL_REG_84823_MEDIA + adj, &val);
        val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
                 MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
                 MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
@@ -6164,7 +6289,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
 
        bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                        MDIO_CTL_REG_84823_MEDIA, val);
+                        MDIO_CTL_REG_84823_MEDIA + adj, val);
        DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
                   params->multi_phy_config, val);
 
@@ -6172,23 +6297,43 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
        else
                bnx2x_save_848xx_spirom_version(phy, params);
+       cms_enable = REG_RD(bp, params->shmem_base +
+                       offsetof(struct shmem_region,
+                       dev_info.port_hw_config[params->port].default_cfg)) &
+                       PORT_HW_CFG_ENABLE_CMS_MASK;
+
+       bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+               MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
+       if (cms_enable)
+               val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
+       else
+               val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
+       bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+               MDIO_CTL_REG_84823_USER_CTRL_REG, val);
+
+
        return rc;
 }
 
 static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
-                                      struct link_params *params,
-                                      struct link_vars *vars)
+                                 struct link_params *params,
+                                 struct link_vars *vars)
 {
        struct bnx2x *bp = params->bp;
-       u16 val, val1, val2;
+       u16 val, val1, val2, adj;
        u8 link_up = 0;
 
+       /* Reg offset adjustment for 84833 */
+       adj = 0;
+       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+               adj = -1;
+
        /* Check 10G-BaseT link status */
        /* Check PMD signal ok */
        bnx2x_cl45_read(bp, phy,
                        MDIO_AN_DEVAD, 0xFFFA, &val1);
        bnx2x_cl45_read(bp, phy,
-                       MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
+                       MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL + adj,
                        &val2);
        DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
 
@@ -6273,9 +6418,9 @@ static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
                                struct link_params *params)
 {
        bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
-                           MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
+                      MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
        bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
-                           MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
+                      MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
 }
 
 static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
@@ -6297,8 +6442,8 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
        else
                port = params->port;
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
-                           MISC_REGISTERS_GPIO_OUTPUT_LOW,
-                           port);
+                      MISC_REGISTERS_GPIO_OUTPUT_LOW,
+                      port);
 }
 
 static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
@@ -6353,24 +6498,24 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
 
                        /* Set LED masks */
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED1_MASK,
-                                       0x0);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED1_MASK,
+                                        0x0);
 
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED2_MASK,
-                                       0x0);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED2_MASK,
+                                        0x0);
 
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED3_MASK,
-                                       0x0);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED3_MASK,
+                                        0x0);
 
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED5_MASK,
-                                       0x20);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED5_MASK,
+                                        0x20);
 
                } else {
                        bnx2x_cl45_write(bp, phy,
@@ -6394,35 +6539,35 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
                        val |= 0x2492;
 
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LINK_SIGNAL,
-                                       val);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LINK_SIGNAL,
+                                        val);
 
                        /* Set LED masks */
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED1_MASK,
-                                       0x0);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED1_MASK,
+                                        0x0);
 
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED2_MASK,
-                                       0x20);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED2_MASK,
+                                        0x20);
 
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED3_MASK,
-                                       0x20);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED3_MASK,
+                                        0x20);
 
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED5_MASK,
-                                       0x0);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED5_MASK,
+                                        0x0);
                } else {
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED1_MASK,
-                                       0x20);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED1_MASK,
+                                        0x20);
                }
                break;
 
@@ -6440,9 +6585,9 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
                                        &val);
 
                        if (!((val &
-                             MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
-                          >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)){
-                               DP(NETIF_MSG_LINK, "Seting LINK_SIGNAL\n");
+                              MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
+                         >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) {
+                               DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n");
                                bnx2x_cl45_write(bp, phy,
                                                 MDIO_PMA_DEVAD,
                                                 MDIO_PMA_REG_8481_LINK_SIGNAL,
@@ -6451,24 +6596,24 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
 
                        /* Set LED masks */
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED1_MASK,
-                                       0x10);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED1_MASK,
+                                        0x10);
 
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED2_MASK,
-                                       0x80);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED2_MASK,
+                                        0x80);
 
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED3_MASK,
-                                       0x98);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED3_MASK,
+                                        0x98);
 
                        bnx2x_cl45_write(bp, phy,
-                                       MDIO_PMA_DEVAD,
-                                       MDIO_PMA_REG_8481_LED5_MASK,
-                                       0x40);
+                                        MDIO_PMA_DEVAD,
+                                        MDIO_PMA_REG_8481_LED5_MASK,
+                                        0x40);
 
                } else {
                        bnx2x_cl45_write(bp, phy,
@@ -6513,10 +6658,10 @@ static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy,
 
        /* Restore normal power mode*/
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-                           MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+                      MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
        /* HW reset */
        bnx2x_ext_phy_hw_reset(bp, params->port);
-       bnx2x_wait_reset_complete(bp, phy);
+       bnx2x_wait_reset_complete(bp, phy, params);
 
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1);
@@ -6563,9 +6708,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
                   val2, val1);
        link_up = ((val1 & 4) == 4);
-       /* if link is up
-        * print the AN outcome of the SFX7101 PHY
-        */
+       /* if link is up print the AN outcome of the SFX7101 PHY */
        if (link_up) {
                bnx2x_cl45_read(bp, phy,
                                MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
@@ -6599,20 +6742,20 @@ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
        u16 val, cnt;
 
        bnx2x_cl45_read(bp, phy,
-                     MDIO_PMA_DEVAD,
-                     MDIO_PMA_REG_7101_RESET, &val);
+                       MDIO_PMA_DEVAD,
+                       MDIO_PMA_REG_7101_RESET, &val);
 
        for (cnt = 0; cnt < 10; cnt++) {
                msleep(50);
                /* Writes a self-clearing reset */
                bnx2x_cl45_write(bp, phy,
-                              MDIO_PMA_DEVAD,
-                              MDIO_PMA_REG_7101_RESET,
-                              (val | (1<<15)));
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_7101_RESET,
+                                (val | (1<<15)));
                /* Wait for clear */
                bnx2x_cl45_read(bp, phy,
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_7101_RESET, &val);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_7101_RESET, &val);
 
                if ((val & (1<<15)) == 0)
                        break;
@@ -6623,10 +6766,10 @@ static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
                                struct link_params *params) {
        /* Low power mode is controlled by GPIO 2 */
        bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
-                           MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+                      MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
        /* The PHY reset is controlled by GPIO 1 */
        bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
-                           MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+                      MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
 }
 
 static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
@@ -6668,9 +6811,9 @@ static struct bnx2x_phy phy_null = {
        .supported      = 0,
        .media_type     = ETH_PHY_NOT_PRESENT,
        .ver_addr       = 0,
-       .req_flow_ctrl  = 0,
-       .req_line_speed = 0,
-       .speed_cap_mask = 0,
+       .req_flow_ctrl  = 0,
+       .req_line_speed = 0,
+       .speed_cap_mask = 0,
        .req_duplex     = 0,
        .rsrv           = 0,
        .config_init    = (config_init_t)NULL,
@@ -6705,8 +6848,8 @@ static struct bnx2x_phy phy_serdes = {
        .media_type     = ETH_PHY_UNSPECIFIED,
        .ver_addr       = 0,
        .req_flow_ctrl  = 0,
-       .req_line_speed = 0,
-       .speed_cap_mask = 0,
+       .req_line_speed = 0,
+       .speed_cap_mask = 0,
        .req_duplex     = 0,
        .rsrv           = 0,
        .config_init    = (config_init_t)bnx2x_init_serdes,
@@ -6742,8 +6885,8 @@ static struct bnx2x_phy phy_xgxs = {
        .media_type     = ETH_PHY_UNSPECIFIED,
        .ver_addr       = 0,
        .req_flow_ctrl  = 0,
-       .req_line_speed = 0,
-       .speed_cap_mask = 0,
+       .req_line_speed = 0,
+       .speed_cap_mask = 0,
        .req_duplex     = 0,
        .rsrv           = 0,
        .config_init    = (config_init_t)bnx2x_init_xgxs,
@@ -6773,8 +6916,8 @@ static struct bnx2x_phy phy_7101 = {
        .media_type     = ETH_PHY_BASE_T,
        .ver_addr       = 0,
        .req_flow_ctrl  = 0,
-       .req_line_speed = 0,
-       .speed_cap_mask = 0,
+       .req_line_speed = 0,
+       .speed_cap_mask = 0,
        .req_duplex     = 0,
        .rsrv           = 0,
        .config_init    = (config_init_t)bnx2x_7101_config_init,
@@ -6804,9 +6947,9 @@ static struct bnx2x_phy phy_8073 = {
                           SUPPORTED_Asym_Pause),
        .media_type     = ETH_PHY_UNSPECIFIED,
        .ver_addr       = 0,
-       .req_flow_ctrl  = 0,
-       .req_line_speed = 0,
-       .speed_cap_mask = 0,
+       .req_flow_ctrl  = 0,
+       .req_line_speed = 0,
+       .speed_cap_mask = 0,
        .req_duplex     = 0,
        .rsrv           = 0,
        .config_init    = (config_init_t)bnx2x_8073_config_init,
@@ -7015,6 +7158,43 @@ static struct bnx2x_phy phy_84823 = {
        .phy_specific_func = (phy_specific_func_t)NULL
 };
 
+static struct bnx2x_phy phy_84833 = {
+       .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
+       .addr           = 0xff,
+       .flags          = FLAGS_FAN_FAILURE_DET_REQ |
+                           FLAGS_REARM_LATCH_SIGNAL,
+       .def_md_devad   = 0,
+       .reserved       = 0,
+       .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+       .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+       .mdio_ctrl      = 0,
+       .supported      = (SUPPORTED_10baseT_Half |
+                          SUPPORTED_10baseT_Full |
+                          SUPPORTED_100baseT_Half |
+                          SUPPORTED_100baseT_Full |
+                          SUPPORTED_1000baseT_Full |
+                          SUPPORTED_10000baseT_Full |
+                          SUPPORTED_TP |
+                          SUPPORTED_Autoneg |
+                          SUPPORTED_Pause |
+                          SUPPORTED_Asym_Pause),
+       .media_type     = ETH_PHY_BASE_T,
+       .ver_addr       = 0,
+       .req_flow_ctrl  = 0,
+       .req_line_speed = 0,
+       .speed_cap_mask = 0,
+       .req_duplex     = 0,
+       .rsrv           = 0,
+       .config_init    = (config_init_t)bnx2x_848x3_config_init,
+       .read_status    = (read_status_t)bnx2x_848xx_read_status,
+       .link_reset     = (link_reset_t)bnx2x_848x3_link_reset,
+       .config_loopback = (config_loopback_t)NULL,
+       .format_fw_ver  = (format_fw_ver_t)bnx2x_848xx_format_ver,
+       .hw_reset       = (hw_reset_t)NULL,
+       .set_link_led   = (set_link_led_t)bnx2x_848xx_set_link_led,
+       .phy_specific_func = (phy_specific_func_t)NULL
+};
+
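
The phy_84833 descriptor added above follows the file's existing pattern: a static table entry whose function pointers (config_init, read_status, link_reset, ...) get copied into the working phy structure when bnx2x_populate_ext_phy() matches the PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 value from shmem (see the switch-case hunk further down). A minimal standalone sketch of that descriptor-table-plus-switch idiom, with made-up type codes and a single toy op:

#include <stdio.h>

/* Trimmed-down "phy descriptor" for illustration only. */
struct toy_phy {
	unsigned int type;
	const char *name;
	int (*config_init)(const struct toy_phy *phy);
};

static int toy_init(const struct toy_phy *phy)
{
	printf("init %s\n", phy->name);
	return 0;
}

/* Made-up type codes; the real values are PORT_HW_CFG_XGXS_EXT_PHY_TYPE_*. */
static const struct toy_phy toy_84823 = {
	.type = 0x0b, .name = "84823", .config_init = toy_init,
};
static const struct toy_phy toy_84833 = {
	.type = 0x0d, .name = "84833", .config_init = toy_init,
};

/* Copy the matching static descriptor into the caller's slot, the way
 * bnx2x_populate_ext_phy() copies phy_84833 on a type match.
 */
static int populate(unsigned int type, struct toy_phy *out)
{
	switch (type) {
	case 0x0b: *out = toy_84823; break;
	case 0x0d: *out = toy_84833; break;
	default:   return -1;
	}
	return 0;
}

int main(void)
{
	struct toy_phy phy;

	if (populate(0x0d, &phy) == 0)
		phy.config_init(&phy);
	return 0;
}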
 /*****************************************************************/
 /*                                                               */
 /* Populate the phy according. Main function: bnx2x_populate_phy   */
@@ -7028,7 +7208,7 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
        /* Get the 4 lanes xgxs config rx and tx */
        u32 rx = 0, tx = 0, i;
        for (i = 0; i < 2; i++) {
-               /**
+               /*
                 * INT_PHY and EXT_PHY1 share the same value location in the
                 * shmem. When num_phys is greater than 1, than this value
                 * applies only to EXT_PHY1
@@ -7036,19 +7216,19 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
                if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
                        rx = REG_RD(bp, shmem_base +
                                    offsetof(struct shmem_region,
-                          dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
+                         dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
 
                        tx = REG_RD(bp, shmem_base +
                                    offsetof(struct shmem_region,
-                          dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
+                         dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
                } else {
                        rx = REG_RD(bp, shmem_base +
                                    offsetof(struct shmem_region,
-                         dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+                        dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
 
                        tx = REG_RD(bp, shmem_base +
                                    offsetof(struct shmem_region,
-                         dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+                        dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
                }
 
                phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
@@ -7168,6 +7348,9 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
                *phy = phy_84823;
                break;
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+               *phy = phy_84833;
+               break;
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
                *phy = phy_7101;
                break;
@@ -7182,21 +7365,21 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
        phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
        bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
 
-       /**
-       * The shmem address of the phy version is located on different
-       * structures. In case this structure is too old, do not set
-       * the address
-       */
+       /*
+        * The shmem address of the phy version is located on different
+        * structures. In case this structure is too old, do not set
+        * the address
+        */
        config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
                                        dev_info.shared_hw_config.config2));
        if (phy_index == EXT_PHY1) {
                phy->ver_addr = shmem_base + offsetof(struct shmem_region,
                                port_mb[port].ext_phy_fw_version);
 
-       /* Check specific mdc mdio settings */
-       if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
-               mdc_mdio_access = config2 &
-               SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
+               /* Check specific mdc mdio settings */
+               if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
+                       mdc_mdio_access = config2 &
+                       SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
        } else {
                u32 size = REG_RD(bp, shmem2_base);
 
@@ -7215,7 +7398,7 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
        }
        phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
 
-       /**
+       /*
         * In case mdc/mdio_access of the external phy is different than the
         * mdc/mdio access of the XGXS, a HW lock must be taken in each access
         * to prevent one port interfere with another port's CL45 operations.
@@ -7250,18 +7433,20 @@ static void bnx2x_phy_def_cfg(struct link_params *params,
        /* Populate the default phy configuration for MF mode */
        if (phy_index == EXT_PHY2) {
                link_config = REG_RD(bp, params->shmem_base +
-                                        offsetof(struct shmem_region, dev_info.
+                                    offsetof(struct shmem_region, dev_info.
                        port_feature_config[params->port].link_config2));
                phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
-                                       offsetof(struct shmem_region, dev_info.
+                                            offsetof(struct shmem_region,
+                                                     dev_info.
                        port_hw_config[params->port].speed_capability_mask2));
        } else {
                link_config = REG_RD(bp, params->shmem_base +
-                               offsetof(struct shmem_region, dev_info.
+                                    offsetof(struct shmem_region, dev_info.
                                port_feature_config[params->port].link_config));
                phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
-                               offsetof(struct shmem_region, dev_info.
-                          port_hw_config[params->port].speed_capability_mask));
+                                            offsetof(struct shmem_region,
+                                                     dev_info.
+                       port_hw_config[params->port].speed_capability_mask));
        }
        DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask"
                       " 0x%x\n", phy_index, link_config, phy->speed_cap_mask);
@@ -7408,7 +7593,7 @@ static void set_phy_vars(struct link_params *params)
                        else if (phy_index == EXT_PHY2)
                                actual_phy_idx = EXT_PHY1;
                }
-               params->phy[actual_phy_idx].req_flow_ctrl  =
+               params->phy[actual_phy_idx].req_flow_ctrl =
                        params->req_flow_ctrl[link_cfg_idx];
 
                params->phy[actual_phy_idx].req_line_speed =
@@ -7461,57 +7646,6 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
        set_phy_vars(params);
 
        DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
-       if (CHIP_REV_IS_FPGA(bp)) {
-
-               vars->link_up = 1;
-               vars->line_speed = SPEED_10000;
-               vars->duplex = DUPLEX_FULL;
-               vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
-               vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
-               /* enable on E1.5 FPGA */
-               if (CHIP_IS_E1H(bp)) {
-                       vars->flow_ctrl |=
-                                       (BNX2X_FLOW_CTRL_TX |
-                                        BNX2X_FLOW_CTRL_RX);
-                       vars->link_status |=
-                                       (LINK_STATUS_TX_FLOW_CONTROL_ENABLED |
-                                        LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
-               }
-
-               bnx2x_emac_enable(params, vars, 0);
-               if (!(CHIP_IS_E2(bp)))
-                       bnx2x_pbf_update(params, vars->flow_ctrl,
-                                        vars->line_speed);
-               /* disable drain */
-               REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
-
-               /* update shared memory */
-               bnx2x_update_mng(params, vars->link_status);
-
-               return 0;
-
-       } else
-       if (CHIP_REV_IS_EMUL(bp)) {
-
-               vars->link_up = 1;
-               vars->line_speed = SPEED_10000;
-               vars->duplex = DUPLEX_FULL;
-               vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
-               vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
-
-               bnx2x_bmac_enable(params, vars, 0);
-
-               bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
-               /* Disable drain */
-               REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
-                                   + params->port*4, 0);
-
-               /* update shared memory */
-               bnx2x_update_mng(params, vars->link_status);
-
-               return 0;
-
-       } else
        if (params->loopback_mode == LOOPBACK_BMAC) {
 
                vars->link_up = 1;
@@ -7527,8 +7661,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
                /* set bmac loopback */
                bnx2x_bmac_enable(params, vars, 1);
 
-               REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
-                   params->port*4, 0);
+               REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 
        } else if (params->loopback_mode == LOOPBACK_EMAC) {
 
@@ -7544,8 +7677,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
                /* set bmac loopback */
                bnx2x_emac_enable(params, vars, 1);
                bnx2x_emac_program(params, vars);
-               REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
-                   params->port*4, 0);
+               REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 
        } else if ((params->loopback_mode == LOOPBACK_XGXS) ||
                   (params->loopback_mode == LOOPBACK_EXT_PHY)) {
@@ -7568,8 +7700,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
                        bnx2x_emac_program(params, vars);
                        bnx2x_emac_enable(params, vars, 0);
                } else
-               bnx2x_bmac_enable(params, vars, 0);
-
+                       bnx2x_bmac_enable(params, vars, 0);
                if (params->loopback_mode == LOOPBACK_XGXS) {
                        /* set 10G XGXS loopback */
                        params->phy[INT_PHY].config_loopback(
@@ -7587,9 +7718,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
                                                params);
                        }
                }
-
-               REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
-                           params->port*4, 0);
+               REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 
                bnx2x_set_led(params, vars,
                              LED_MODE_OPER, vars->line_speed);
@@ -7608,7 +7737,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
        return 0;
 }
 u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
-                 u8 reset_ext_phy)
+                   u8 reset_ext_phy)
 {
        struct bnx2x *bp = params->bp;
        u8 phy_index, port = params->port, clear_latch_ind = 0;
@@ -7617,10 +7746,10 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
        vars->link_status = 0;
        bnx2x_update_mng(params, vars->link_status);
        bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
-                    (NIG_MASK_XGXS0_LINK_STATUS |
-                     NIG_MASK_XGXS0_LINK10G |
-                     NIG_MASK_SERDES0_LINK_STATUS |
-                     NIG_MASK_MI_INT));
+                      (NIG_MASK_XGXS0_LINK_STATUS |
+                       NIG_MASK_XGXS0_LINK10G |
+                       NIG_MASK_SERDES0_LINK_STATUS |
+                       NIG_MASK_MI_INT));
 
        /* activate nig drain */
        REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
@@ -7719,21 +7848,22 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
                /* disable attentions */
                bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
                               port_of_path*4,
-                            (NIG_MASK_XGXS0_LINK_STATUS |
-                             NIG_MASK_XGXS0_LINK10G |
-                             NIG_MASK_SERDES0_LINK_STATUS |
-                             NIG_MASK_MI_INT));
+                              (NIG_MASK_XGXS0_LINK_STATUS |
+                               NIG_MASK_XGXS0_LINK10G |
+                               NIG_MASK_SERDES0_LINK_STATUS |
+                               NIG_MASK_MI_INT));
 
                /* Need to take the phy out of low power mode in order
                        to write to its registers */
                bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-                                 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+                              MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+                              port);
 
                /* Reset the phy */
                bnx2x_cl45_write(bp, &phy[port],
-                              MDIO_PMA_DEVAD,
-                              MDIO_PMA_REG_CTRL,
-                              1<<15);
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_CTRL,
+                                1<<15);
        }
 
        /* Add delay of 150ms after reset */
@@ -7762,18 +7892,20 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
 
                /* Only set bit 10 = 1 (Tx power down) */
                bnx2x_cl45_read(bp, phy_blk[port],
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_TX_POWER_DOWN, &val);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_TX_POWER_DOWN, &val);
 
                /* Phase1 of TX_POWER_DOWN reset */
                bnx2x_cl45_write(bp, phy_blk[port],
-                              MDIO_PMA_DEVAD,
-                              MDIO_PMA_REG_TX_POWER_DOWN,
-                              (val | 1<<10));
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_TX_POWER_DOWN,
+                                (val | 1<<10));
        }
 
-       /* Toggle Transmitter: Power down and then up with 600ms
-          delay between */
+       /*
+        * Toggle Transmitter: Power down and then up with 600ms delay
+        * between
+        */
        msleep(600);
 
        /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
@@ -7781,25 +7913,25 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
                /* Phase2 of POWER_DOWN_RESET */
                /* Release bit 10 (Release Tx power down) */
                bnx2x_cl45_read(bp, phy_blk[port],
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_TX_POWER_DOWN, &val);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_TX_POWER_DOWN, &val);
 
                bnx2x_cl45_write(bp, phy_blk[port],
-                              MDIO_PMA_DEVAD,
-                              MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
                msleep(15);
 
                /* Read modify write the SPI-ROM version select register */
                bnx2x_cl45_read(bp, phy_blk[port],
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_EDC_FFE_MAIN, &val);
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_EDC_FFE_MAIN, &val);
                bnx2x_cl45_write(bp, phy_blk[port],
-                             MDIO_PMA_DEVAD,
-                             MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
 
                /* set GPIO2 back to LOW */
                bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-                                 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+                              MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
        }
        return 0;
 }
@@ -7846,32 +7978,90 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
 
                /* Set fault module detected LED on */
                bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-                                 MISC_REGISTERS_GPIO_HIGH,
-                                 port);
+                              MISC_REGISTERS_GPIO_HIGH,
+                              port);
        }
 
        return 0;
 }
+static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
+                                        u8 *io_gpio, u8 *io_port)
+{
+
+       u32 phy_gpio_reset = REG_RD(bp, shmem_base +
+                                         offsetof(struct shmem_region,
+                               dev_info.port_hw_config[PORT_0].default_cfg));
+       switch (phy_gpio_reset) {
+       case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
+               *io_gpio = 0;
+               *io_port = 0;
+               break;
+       case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
+               *io_gpio = 1;
+               *io_port = 0;
+               break;
+       case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
+               *io_gpio = 2;
+               *io_port = 0;
+               break;
+       case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
+               *io_gpio = 3;
+               *io_port = 0;
+               break;
+       case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
+               *io_gpio = 0;
+               *io_port = 1;
+               break;
+       case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
+               *io_gpio = 1;
+               *io_port = 1;
+               break;
+       case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
+               *io_gpio = 2;
+               *io_port = 1;
+               break;
+       case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
+               *io_gpio = 3;
+               *io_port = 1;
+               break;
+       default:
+               /* Don't override the io_gpio and io_port */
+               break;
+       }
+}
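+
The new bnx2x_get_ext_phy_reset_gpio() helper above maps a shmem default_cfg value onto a (GPIO, port) pair and leaves the caller's defaults untouched for unrecognized values. A small sketch of the same pattern, using made-up contiguous constants in place of the real PORT_HW_CFG_EXT_PHY_GPIO_RST_* values from bnx2x_hsi.h; the driver keeps an explicit switch because the real constants need not be contiguous:

#include <stdio.h>
#include <stdint.h>

/* Placeholder values; the real PORT_HW_CFG_EXT_PHY_GPIO_RST_* constants
 * come from bnx2x_hsi.h and need not be contiguous. */
enum {
        RST_GPIO0_P0 = 1, RST_GPIO1_P0, RST_GPIO2_P0, RST_GPIO3_P0,
        RST_GPIO0_P1, RST_GPIO1_P1, RST_GPIO2_P1, RST_GPIO3_P1,
};

static void get_reset_gpio(uint32_t cfg, uint8_t *io_gpio, uint8_t *io_port)
{
        if (cfg >= RST_GPIO0_P0 && cfg <= RST_GPIO3_P1) {
                *io_gpio = (uint8_t)((cfg - RST_GPIO0_P0) % 4); /* GPIO 0..3 */
                *io_port = (uint8_t)((cfg - RST_GPIO0_P0) / 4); /* port 0/1  */
        }
        /* Unknown value: keep the caller's defaults (GPIO1, port 1). */
}

int main(void)
{
        uint8_t gpio = 1, port = 1;     /* defaults used by the driver */

        get_reset_gpio(RST_GPIO2_P1, &gpio, &port);
        printf("reset via GPIO%u on port %u\n", (unsigned)gpio, (unsigned)port);
        return 0;
}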
 static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
                                     u32 shmem_base_path[],
                                     u32 shmem2_base_path[], u8 phy_index,
                                     u32 chip_id)
 {
-       s8 port;
+       s8 port, reset_gpio;
        u32 swap_val, swap_override;
        struct bnx2x_phy phy[PORT_MAX];
        struct bnx2x_phy *phy_blk[PORT_MAX];
        s8 port_of_path;
-       swap_val = REG_RD(bp,  NIG_REG_PORT_SWAP);
-       swap_override = REG_RD(bp,  NIG_REG_STRAP_OVERRIDE);
+       swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+       swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
 
+       reset_gpio = MISC_REGISTERS_GPIO_1;
        port = 1;
 
-       bnx2x_ext_phy_hw_reset(bp, port ^ (swap_val && swap_override));
+       /*
+        * Retrieve the gpio/port which controls the reset.
+        * Default is GPIO1, PORT1
+        */
+       bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
+                                    (u8 *)&reset_gpio, (u8 *)&port);
 
        /* Calculate the port based on port swap */
        port ^= (swap_val && swap_override);
 
+       /* Initiate PHY reset */
+       bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
+                      port);
+       msleep(1);
+       bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+                      port);
+
        msleep(5);
 
        /* PART1 - Reset both phys */
@@ -7907,9 +8097,7 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
 
                /* Reset the phy */
                bnx2x_cl45_write(bp, &phy[port],
-                              MDIO_PMA_DEVAD,
-                              MDIO_PMA_REG_CTRL,
-                              1<<15);
+                                MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
        }
 
        /* Add delay of 150ms after reset */
@@ -7923,7 +8111,7 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
        }
        /* PART2 - Download firmware to both phys */
        for (port = PORT_MAX - 1; port >= PORT_0; port--) {
-                if (CHIP_IS_E2(bp))
+               if (CHIP_IS_E2(bp))
                        port_of_path = 0;
                else
                        port_of_path = port;
@@ -7958,8 +8146,10 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
                break;
 
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-               /* GPIO1 affects both ports, so there's need to pull
-               it for single port alone */
+               /*
+                * GPIO1 affects both ports, so it needs to be pulled
+                * even when only a single port is initialized
+                */
                rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
                                                shmem2_base_path,
                                                phy_index, chip_id);
@@ -7969,11 +8159,15 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
                break;
        default:
                DP(NETIF_MSG_LINK,
-                        "bnx2x_common_init_phy: ext_phy 0x%x not required\n",
-                        ext_phy_type);
+                          "ext_phy 0x%x common init not required\n",
+                          ext_phy_type);
                break;
        }
 
+       if (rc != 0)
+               netdev_err(bp->dev,  "Warning: PHY was not initialized,"
+                                     " Port %d\n",
+                        0);
        return rc;
 }
 
@@ -7986,9 +8180,6 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
        u32 ext_phy_type, ext_phy_config;
        DP(NETIF_MSG_LINK, "Begin common phy init\n");
 
-       if (CHIP_REV_IS_EMUL(bp))
-               return 0;
-
        /* Check if common init was already done */
        phy_ver = REG_RD(bp, shmem_base_path[0] +
                         offsetof(struct shmem_region,
index bedab1a942c419ff81640d93f843a263288f4b10..92f36b6950dc22246b94b0dde53d79c77e8713d0 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2008-2010 Broadcom Corporation
+/* Copyright 2008-2011 Broadcom Corporation
  *
  * Unless you and Broadcom execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
@@ -33,7 +33,7 @@
 #define BNX2X_FLOW_CTRL_BOTH           PORT_FEATURE_FLOW_CONTROL_BOTH
 #define BNX2X_FLOW_CTRL_NONE           PORT_FEATURE_FLOW_CONTROL_NONE
 
-#define SPEED_AUTO_NEG     0
+#define SPEED_AUTO_NEG         0
 #define SPEED_12000            12000
 #define SPEED_12500            12500
 #define SPEED_13000            13000
@@ -44,8 +44,8 @@
 #define SFP_EEPROM_VENDOR_NAME_SIZE            16
 #define SFP_EEPROM_VENDOR_OUI_ADDR             0x25
 #define SFP_EEPROM_VENDOR_OUI_SIZE             3
-#define SFP_EEPROM_PART_NO_ADDR                0x28
-#define SFP_EEPROM_PART_NO_SIZE                16
+#define SFP_EEPROM_PART_NO_ADDR                        0x28
+#define SFP_EEPROM_PART_NO_SIZE                        16
 #define PWR_FLT_ERR_MSG_LEN                    250
 
 #define XGXS_EXT_PHY_TYPE(ext_phy_config) \
@@ -62,7 +62,7 @@
 #define SINGLE_MEDIA(params)           (params->num_phys == 2)
 /* Dual Media board contains two external phy with different media */
 #define DUAL_MEDIA(params)             (params->num_phys == 3)
-#define FW_PARAM_MDIO_CTRL_OFFSET 16
+#define FW_PARAM_MDIO_CTRL_OFFSET              16
 #define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
        (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
 
@@ -201,12 +201,14 @@ struct link_params {
 
        /* Default / User Configuration */
        u8 loopback_mode;
-#define LOOPBACK_NONE  0
-#define LOOPBACK_EMAC  1
-#define LOOPBACK_BMAC  2
+#define LOOPBACK_NONE          0
+#define LOOPBACK_EMAC          1
+#define LOOPBACK_BMAC          2
 #define LOOPBACK_XGXS          3
 #define LOOPBACK_EXT_PHY       4
-#define LOOPBACK_EXT   5
+#define LOOPBACK_EXT           5
+#define LOOPBACK_UMAC          6
+#define LOOPBACK_XMAC          7
 
        /* Device parameters */
        u8 mac_addr[6];
@@ -230,10 +232,11 @@ struct link_params {
        /* Phy register parameter */
        u32 chip_id;
 
+       /* features */
        u32 feature_config_flags;
-#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
-#define FEATURE_CONFIG_PFC_ENABLED             (1<<1)
-#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY        (1<<2)
+#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED    (1<<0)
+#define FEATURE_CONFIG_PFC_ENABLED                     (1<<1)
+#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY                (1<<2)
 #define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY       (1<<3)
        /* Will be populated during common init */
        struct bnx2x_phy phy[MAX_PHYS];
@@ -334,6 +337,11 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
 /* Reset the external of SFX7101 */
 void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
 
+/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+                               struct link_params *params, u16 addr,
+                               u8 byte_cnt, u8 *o_buf);
+
 void bnx2x_hw_reset_phy(struct link_params *params);
 
 /* Checks if HW lock is required for this phy/board type */
@@ -379,7 +387,7 @@ void bnx2x_ets_disabled(struct link_params *params);
 
 /* Used to configure the ETS to BW limited */
 void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
-                                               const u32 cos1_bw);
+                       const u32 cos1_bw);
 
 /* Used to configure the ETS to strict */
 u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
index aa032339e321b48889ae521fb198c6a590b92c58..bba21d5f708b6a3f14c726539605ee11c502d73c 100644 (file)
@@ -586,7 +586,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
 
        /* lock the dmae channel */
-       mutex_lock(&bp->dmae_mutex);
+       spin_lock_bh(&bp->dmae_lock);
 
        /* reset completion */
        *wb_comp = 0;
@@ -617,7 +617,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
 
 unlock:
-       mutex_unlock(&bp->dmae_mutex);
+       spin_unlock_bh(&bp->dmae_lock);
        return rc;
 }
 
@@ -1397,7 +1397,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp,
        }
 
        smp_mb__before_atomic_inc();
-       atomic_inc(&bp->spq_left);
+       atomic_inc(&bp->cq_spq_left);
        /* push the change in fp->state and towards the memory */
        smp_wmb();
 
@@ -2484,8 +2484,14 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
        rxq_init->sge_map = fp->rx_sge_mapping;
        rxq_init->rcq_map = fp->rx_comp_mapping;
        rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
-       rxq_init->mtu = bp->dev->mtu;
-       rxq_init->buf_sz = bp->rx_buf_size;
+
+       /* Always use mini-jumbo MTU for FCoE L2 ring */
+       if (IS_FCOE_FP(fp))
+               rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
+       else
+               rxq_init->mtu = bp->dev->mtu;
+
+       rxq_init->buf_sz = fp->rx_buf_size;
        rxq_init->cl_qzone_id = fp->cl_qzone_id;
        rxq_init->cl_id = fp->cl_id;
        rxq_init->spcl_id = fp->cl_id;
@@ -2737,11 +2743,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 
        spin_lock_bh(&bp->spq_lock);
 
-       if (!atomic_read(&bp->spq_left)) {
-               BNX2X_ERR("BUG! SPQ ring full!\n");
-               spin_unlock_bh(&bp->spq_lock);
-               bnx2x_panic();
-               return -EBUSY;
+       if (common) {
+               if (!atomic_read(&bp->eq_spq_left)) {
+                       BNX2X_ERR("BUG! EQ ring full!\n");
+                       spin_unlock_bh(&bp->spq_lock);
+                       bnx2x_panic();
+                       return -EBUSY;
+               }
+       } else if (!atomic_read(&bp->cq_spq_left)) {
+                       BNX2X_ERR("BUG! SPQ ring full!\n");
+                       spin_unlock_bh(&bp->spq_lock);
+                       bnx2x_panic();
+                       return -EBUSY;
        }
 
        spe = bnx2x_sp_get_next(bp);
@@ -2772,20 +2785,26 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
        spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
 
        /* stats ramrod has it's own slot on the spq */
-       if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
+       if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
                /* It's ok if the actual decrement is issued towards the memory
                 * somewhere between the spin_lock and spin_unlock. Thus no
                 * more explict memory barrier is needed.
                 */
-               atomic_dec(&bp->spq_left);
+               if (common)
+                       atomic_dec(&bp->eq_spq_left);
+               else
+                       atomic_dec(&bp->cq_spq_left);
+       }
+
 
        DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
           "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x) "
-          "type(0x%x) left %x\n",
+          "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
           bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
           (u32)(U64_LO(bp->spq_mapping) +
           (void *)bp->spq_prod_bd - (void *)bp->spq), command,
-          HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
+          HW_CID(bp, cid), data_hi, data_lo, type,
+          atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
 
        bnx2x_sp_prod_update(bp);
        spin_unlock_bh(&bp->spq_lock);
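
The hunk above splits the old spq_left credit counter into cq_spq_left for ETH ramrods and eq_spq_left for COMMON ramrods, each checked and decremented before posting and returned on completion. A minimal userspace sketch of that credit scheme using C11 atomics; the names and initial values are illustrative, and the driver additionally serializes the check-then-decrement under bp->spq_lock:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int cq_spq_left = 8;      /* credits for ETH (client) ramrods   */
static atomic_int eq_spq_left = 64;     /* credits for COMMON (event) ramrods */

/* Reserve one slot of the right kind.  In the driver the check and the
 * decrement happen under bp->spq_lock, so they cannot race. */
static bool sp_post(bool common)
{
        atomic_int *left = common ? &eq_spq_left : &cq_spq_left;

        if (atomic_load(left) == 0)
                return false;           /* ring full: caller gives up */
        atomic_fetch_sub(left, 1);
        return true;
}

/* Completion path gives the credit back, as the EQ/CQE handlers do. */
static void sp_complete(bool common)
{
        atomic_fetch_add(common ? &eq_spq_left : &cq_spq_left, 1);
}

int main(void)
{
        if (sp_post(false))
                sp_complete(false);
        printf("ETH credits %d, COMMON credits %d\n",
               atomic_load(&cq_spq_left), atomic_load(&eq_spq_left));
        return 0;
}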
@@ -3697,8 +3716,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
        sw_cons = bp->eq_cons;
        sw_prod = bp->eq_prod;
 
-       DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->spq_left %u\n",
-                       hw_cons, sw_cons, atomic_read(&bp->spq_left));
+       DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->cq_spq_left %u\n",
+                       hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
 
        for (; sw_cons != hw_cons;
              sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
@@ -3763,13 +3782,15 @@ static void bnx2x_eq_int(struct bnx2x *bp)
                case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
                case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
                        DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
-                       bp->set_mac_pending = 0;
+                       if (elem->message.data.set_mac_event.echo)
+                               bp->set_mac_pending = 0;
                        break;
 
                case (EVENT_RING_OPCODE_SET_MAC |
                      BNX2X_STATE_CLOSING_WAIT4_HALT):
                        DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
-                       bp->set_mac_pending = 0;
+                       if (elem->message.data.set_mac_event.echo)
+                               bp->set_mac_pending = 0;
                        break;
                default:
                        /* unknown event log error and continue */
@@ -3781,7 +3802,7 @@ next_spqe:
        } /* for */
 
        smp_mb__before_atomic_inc();
-       atomic_add(spqe_cnt, &bp->spq_left);
+       atomic_add(spqe_cnt, &bp->eq_spq_left);
 
        bp->eq_cons = sw_cons;
        bp->eq_prod = sw_prod;
@@ -4214,7 +4235,7 @@ void bnx2x_update_coalesce(struct bnx2x *bp)
 static void bnx2x_init_sp_ring(struct bnx2x *bp)
 {
        spin_lock_init(&bp->spq_lock);
-       atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
+       atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
 
        bp->spq_prod_idx = 0;
        bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
@@ -4239,9 +4260,12 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
        bp->eq_cons = 0;
        bp->eq_prod = NUM_EQ_DESC;
        bp->eq_cons_sb = BNX2X_EQ_INDEX;
+       /* we want a warning message before it gets rough... */
+       atomic_set(&bp->eq_spq_left,
+               min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
 }
 
-static void bnx2x_init_ind_table(struct bnx2x *bp)
+void bnx2x_push_indir_table(struct bnx2x *bp)
 {
        int func = BP_FUNC(bp);
        int i;
@@ -4249,13 +4273,20 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
        if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
                return;
 
-       DP(NETIF_MSG_IFUP,
-          "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
        for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
                REG_WR8(bp, BAR_TSTRORM_INTMEM +
                        TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
-                       bp->fp->cl_id + (i % (bp->num_queues -
-                               NONE_ETH_CONTEXT_USE)));
+                       bp->fp->cl_id + bp->rx_indir_table[i]);
+}
+
+static void bnx2x_init_ind_table(struct bnx2x *bp)
+{
+       int i;
+
+       for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
+               bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
+
+       bnx2x_push_indir_table(bp);
 }
 
 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
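
bnx2x_init_ind_table() now fills rx_indir_table with a round-robin i % num_queues spread and bnx2x_push_indir_table() writes it to the chip. A userspace sketch of the fill, assuming a table size of 128 (TSTORM_INDIRECTION_TABLE_SIZE in the driver) and four ETH queues:

#include <stdio.h>

#define INDIR_TABLE_SIZE 128    /* assumed TSTORM_INDIRECTION_TABLE_SIZE */

int main(void)
{
        unsigned char indir[INDIR_TABLE_SIZE];
        int num_eth_queues = 4; /* stand-in for BNX2X_NUM_ETH_QUEUES(bp) */
        int i;

        /* Default round-robin spread, exactly i % num_queues */
        for (i = 0; i < INDIR_TABLE_SIZE; i++)
                indir[i] = (unsigned char)(i % num_eth_queues);

        /* The driver's push step adds fp->cl_id to each entry and writes
         * the bytes out with REG_WR8(). */
        for (i = 0; i < 8; i++)
                printf("indir[%d] -> queue %d\n", i, indir[i]);
        return 0;
}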
@@ -5851,7 +5882,7 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
           BP_ABS_FUNC(bp), load_code);
 
        bp->dmae_ready = 0;
-       mutex_init(&bp->dmae_mutex);
+       spin_lock_init(&bp->dmae_lock);
        rc = bnx2x_gunzip_init(bp);
        if (rc)
                return rc;
@@ -6003,6 +6034,8 @@ void bnx2x_free_mem(struct bnx2x *bp)
        BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
                       BCM_PAGE_SIZE * NUM_EQ_PAGES);
 
+       BNX2X_FREE(bp->rx_indir_table);
+
 #undef BNX2X_PCI_FREE
 #undef BNX2X_KFREE
 }
@@ -6133,6 +6166,9 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
        /* EQ */
        BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
                        BCM_PAGE_SIZE * NUM_EQ_PAGES);
+
+       BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
+                   TSTORM_INDIRECTION_TABLE_SIZE);
        return 0;
 
 alloc_mem_err:
@@ -6186,12 +6222,14 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
        int ramrod_flags = WAIT_RAMROD_COMMON;
 
        bp->set_mac_pending = 1;
-       smp_wmb();
 
        config->hdr.length = 1;
        config->hdr.offset = cam_offset;
        config->hdr.client_id = 0xff;
-       config->hdr.reserved1 = 0;
+       /* Mark this as a single MAC configuration ramrod (as opposed to a
+        * UC/MC list configuration).
+        */
+       config->hdr.echo = 1;
 
        /* primary MAC */
        config->config_table[0].msb_mac_addr =
@@ -6223,6 +6261,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
           config->config_table[0].middle_mac_addr,
           config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
 
+       mb();
+
        bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
                      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
                      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
@@ -6287,20 +6327,15 @@ static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
        if (CHIP_IS_E1H(bp))
                return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
        else if (CHIP_MODE_IS_4_PORT(bp))
-               return BP_FUNC(bp) * 32  + rel_offset;
+               return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
        else
-               return BP_VN(bp) * 32  + rel_offset;
+               return E2_FUNC_MAX * rel_offset + BP_VN(bp);
 }
 
 /**
  *  LLH CAM line allocations: currently only iSCSI and ETH macs are
  *  relevant. In addition, current implementation is tuned for a
  *  single ETH MAC.
- *
- *  When multiple unicast ETH MACs PF configuration in switch
- *  independent mode is required (NetQ, multiple netdev MACs,
- *  etc.), consider better utilisation of 16 per function MAC
- *  entries in the LLH memory.
  */
 enum {
        LLH_CAM_ISCSI_ETH_LINE = 0,
@@ -6375,14 +6410,37 @@ void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
                bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
        }
 }
-static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
+
+static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
+{
+       return CHIP_REV_IS_SLOW(bp) ?
+               (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
+               (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
+}
+
+/* set mc list, do not wait as wait implies sleep and
+ * set_rx_mode can be invoked from non-sleepable context.
+ *
+ * Instead we use the same ramrod data buffer each time we need
+ * to configure a list of addresses, and use the fact that the
+ * list of MACs is changed in an incremental way and that the
+ * function is called under the netif_addr_lock. A temporary
+ * inconsistent CAM configuration (possible in case of a very fast
+ * sequence of add/del/add on the host side) will shortly be
+ * restored by the handler of the last ramrod.
+ */
+static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
 {
        int i = 0, old;
        struct net_device *dev = bp->dev;
+       u8 offset = bnx2x_e1_cam_mc_offset(bp);
        struct netdev_hw_addr *ha;
        struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
        dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
 
+       if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
+               return -EINVAL;
+
        netdev_for_each_mc_addr(ha, dev) {
                /* copy mac */
                config_cmd->config_table[i].msb_mac_addr =
@@ -6423,32 +6481,47 @@ static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
                }
        }
 
+       wmb();
+
        config_cmd->hdr.length = i;
        config_cmd->hdr.offset = offset;
        config_cmd->hdr.client_id = 0xff;
-       config_cmd->hdr.reserved1 = 0;
+       /* Mark that this ramrod doesn't use bp->set_mac_pending for
+        * synchronization.
+        */
+       config_cmd->hdr.echo = 0;
 
-       bp->set_mac_pending = 1;
-       smp_wmb();
+       mb();
 
-       bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+       return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
                   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
 }
-static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
+
+void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
 {
        int i;
        struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
        dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
        int ramrod_flags = WAIT_RAMROD_COMMON;
+       u8 offset = bnx2x_e1_cam_mc_offset(bp);
 
-       bp->set_mac_pending = 1;
-       smp_wmb();
-
-       for (i = 0; i < config_cmd->hdr.length; i++)
+       for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
                SET_FLAG(config_cmd->config_table[i].flags,
                        MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                        T_ETH_MAC_COMMAND_INVALIDATE);
 
+       wmb();
+
+       config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
+       config_cmd->hdr.offset = offset;
+       config_cmd->hdr.client_id = 0xff;
+       /* We'll wait for a completion this time... */
+       config_cmd->hdr.echo = 1;
+
+       bp->set_mac_pending = 1;
+
+       mb();
+
        bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
                      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
 
@@ -6458,6 +6531,44 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
 
 }
 
+/* Accept one or more multicasts */
+static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
+{
+       struct net_device *dev = bp->dev;
+       struct netdev_hw_addr *ha;
+       u32 mc_filter[MC_HASH_SIZE];
+       u32 crc, bit, regidx;
+       int i;
+
+       memset(mc_filter, 0, 4 * MC_HASH_SIZE);
+
+       netdev_for_each_mc_addr(ha, dev) {
+               DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
+                  bnx2x_mc_addr(ha));
+
+               crc = crc32c_le(0, bnx2x_mc_addr(ha),
+                               ETH_ALEN);
+               bit = (crc >> 24) & 0xff;
+               regidx = bit >> 5;
+               bit &= 0x1f;
+               mc_filter[regidx] |= (1 << bit);
+       }
+
+       for (i = 0; i < MC_HASH_SIZE; i++)
+               REG_WR(bp, MC_HASH_OFFSET(bp, i),
+                      mc_filter[i]);
+
+       return 0;
+}
+
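+
bnx2x_set_e1h_mc_list() above hashes each multicast MAC, takes bits 31:24 of the CRC as an index into a 256-bit filter spread over eight 32-bit registers, and sets the corresponding bit. A userspace sketch of the bit bookkeeping; a toy hash stands in for the kernel's crc32c_le(), so only the regidx/bit arithmetic matches the driver:

#include <stdio.h>
#include <stdint.h>

#define MC_HASH_SIZE 8                  /* 8 x 32-bit registers = 256 bits */

/* Toy hash standing in for the kernel's crc32c_le(); only the regidx/bit
 * arithmetic below mirrors the driver. */
static uint32_t toy_hash(const uint8_t *mac)
{
        uint32_t h = 0;
        int i;

        for (i = 0; i < 6; i++)
                h = h * 31 + mac[i];
        return h;
}

int main(void)
{
        uint32_t mc_filter[MC_HASH_SIZE] = { 0 };
        const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
        uint32_t crc = toy_hash(mac);
        uint32_t bit = (crc >> 24) & 0xff;      /* pick 1 of 256 bits  */
        uint32_t regidx = bit >> 5;             /* which 32-bit word   */

        bit &= 0x1f;                            /* which bit inside it */
        mc_filter[regidx] |= 1u << bit;

        printf("filter word %u gets bit %u (word value 0x%08x)\n",
               (unsigned)regidx, (unsigned)bit, (unsigned)mc_filter[regidx]);
        return 0;
}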
+void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
+{
+       int i;
+
+       for (i = 0; i < MC_HASH_SIZE; i++)
+               REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+}
+
 #ifdef BCM_CNIC
 /**
  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
@@ -6476,12 +6587,13 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
        u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
                BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
        u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
+       u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
 
        /* Send a SET_MAC ramrod */
-       bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
+       bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
                               cam_offset, 0);
 
-       bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
+       bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
 
        return 0;
 }
@@ -7123,20 +7235,15 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
        /* Give HW time to discard old tx messages */
        msleep(1);
 
-       if (CHIP_IS_E1(bp)) {
-               /* invalidate mc list,
-                * wait and poll (interrupts are off)
-                */
-               bnx2x_invlidate_e1_mc_list(bp);
-               bnx2x_set_eth_mac(bp, 0);
-
-       } else {
-               REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+       bnx2x_set_eth_mac(bp, 0);
 
-               bnx2x_set_eth_mac(bp, 0);
+       bnx2x_invalidate_uc_list(bp);
 
-               for (i = 0; i < MC_HASH_SIZE; i++)
-                       REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+       if (CHIP_IS_E1(bp))
+               bnx2x_invalidate_e1_mc_list(bp);
+       else {
+               bnx2x_invalidate_e1h_mc_list(bp);
+               REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
        }
 
 #ifdef BCM_CNIC
@@ -8405,11 +8512,47 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
                                                        bp->common.shmem2_base);
 }
 
+#ifdef BCM_CNIC
+static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+       u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+                               drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
+       u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+                               drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
+
+       /* Get the number of maximum allowed iSCSI and FCoE connections */
+       bp->cnic_eth_dev.max_iscsi_conn =
+               (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
+               BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
+
+       bp->cnic_eth_dev.max_fcoe_conn =
+               (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
+               BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
+
+       BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
+                      bp->cnic_eth_dev.max_iscsi_conn,
+                      bp->cnic_eth_dev.max_fcoe_conn);
+
+       /* If the maximum allowed number of connections is zero -
+        * disable the feature.
+        */
+       if (!bp->cnic_eth_dev.max_iscsi_conn)
+               bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+       if (!bp->cnic_eth_dev.max_fcoe_conn)
+               bp->flags |= NO_FCOE_FLAG;
+}
+#endif
+
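+
bnx2x_get_cnic_info() above XORs the licence words with FW_ENCODE_32BIT_PATTERN and then extracts the connection counts with a mask and shift, disabling the feature when the result is zero. A sketch of that decode with hypothetical mask, shift and pattern values; the real BNX2X_MAX_*_INIT_CONN_MASK/SHIFT constants live in the driver headers:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field layout; the real FW_ENCODE_32BIT_PATTERN and
 * BNX2X_MAX_ISCSI_INIT_CONN_MASK/SHIFT are defined in the driver headers. */
#define ENCODE_PATTERN  0x1e1e1e1eu
#define MAX_CONN_MASK   0x0000ffffu
#define MAX_CONN_SHIFT  0

int main(void)
{
        uint32_t licence_word = ENCODE_PATTERN ^ 64u;   /* as stored in shmem */
        uint32_t max_conn = ((licence_word ^ ENCODE_PATTERN) & MAX_CONN_MASK)
                            >> MAX_CONN_SHIFT;

        if (!max_conn)
                printf("0 connections licensed: feature would be disabled\n");
        else
                printf("up to %u connections licensed\n", (unsigned)max_conn);
        return 0;
}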
 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
 {
        u32 val, val2;
        int func = BP_ABS_FUNC(bp);
        int port = BP_PORT(bp);
+#ifdef BCM_CNIC
+       u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
+       u8 *fip_mac = bp->fip_mac;
+#endif
 
        if (BP_NOMCP(bp)) {
                BNX2X_ERROR("warning: random MAC workaround active\n");
@@ -8422,7 +8565,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
                        bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
 
 #ifdef BCM_CNIC
-               /* iSCSI NPAR MAC */
+               /* iSCSI and FCoE NPAR MACs: if there is neither an iSCSI nor
+                * an FCoE MAC, the appropriate feature should be disabled.
+                */
                if (IS_MF_SI(bp)) {
                        u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
                        if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
@@ -8430,8 +8575,39 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
                                                     iscsi_mac_addr_upper);
                                val = MF_CFG_RD(bp, func_ext_config[func].
                                                    iscsi_mac_addr_lower);
-                               bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
-                       }
+                               BNX2X_DEV_INFO("Read iSCSI MAC: "
+                                              "0x%x:0x%04x\n", val2, val);
+                               bnx2x_set_mac_buf(iscsi_mac, val, val2);
+
+                               /* Disable iSCSI OOO if MAC configuration is
+                                * invalid.
+                                */
+                               if (!is_valid_ether_addr(iscsi_mac)) {
+                                       bp->flags |= NO_ISCSI_OOO_FLAG |
+                                                    NO_ISCSI_FLAG;
+                                       memset(iscsi_mac, 0, ETH_ALEN);
+                               }
+                       } else
+                               bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+                       if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
+                               val2 = MF_CFG_RD(bp, func_ext_config[func].
+                                                    fcoe_mac_addr_upper);
+                               val = MF_CFG_RD(bp, func_ext_config[func].
+                                                   fcoe_mac_addr_lower);
+                               BNX2X_DEV_INFO("Read FCoE MAC to "
+                                              "0x%x:0x%04x\n", val2, val);
+                               bnx2x_set_mac_buf(fip_mac, val, val2);
+
+                               /* Disable FCoE if MAC configuration is
+                                * invalid.
+                                */
+                               if (!is_valid_ether_addr(fip_mac)) {
+                                       bp->flags |= NO_FCOE_FLAG;
+                                       memset(bp->fip_mac, 0, ETH_ALEN);
+                               }
+                       } else
+                               bp->flags |= NO_FCOE_FLAG;
                }
 #endif
        } else {
@@ -8445,7 +8621,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
                                    iscsi_mac_upper);
                val = SHMEM_RD(bp, dev_info.port_hw_config[port].
                                   iscsi_mac_lower);
-               bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
+               bnx2x_set_mac_buf(iscsi_mac, val, val2);
 #endif
        }
 
@@ -8453,14 +8629,12 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
        memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
 
 #ifdef BCM_CNIC
-       /* Inform the upper layers about FCoE MAC */
+       /* Set the FCoE MAC in modes other then MF_SI */
        if (!CHIP_IS_E1x(bp)) {
                if (IS_MF_SD(bp))
-                       memcpy(bp->fip_mac, bp->dev->dev_addr,
-                              sizeof(bp->fip_mac));
-               else
-                       memcpy(bp->fip_mac, bp->iscsi_mac,
-                              sizeof(bp->fip_mac));
+                       memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
+               else if (!IS_MF(bp))
+                       memcpy(fip_mac, iscsi_mac, ETH_ALEN);
        }
 #endif
 }
@@ -8623,6 +8797,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
        /* Get MAC addresses */
        bnx2x_get_mac_hwinfo(bp);
 
+#ifdef BCM_CNIC
+       bnx2x_get_cnic_info(bp);
+#endif
+
        return rc;
 }
 
@@ -8837,12 +9015,197 @@ static int bnx2x_close(struct net_device *dev)
        return 0;
 }
 
+#define E1_MAX_UC_LIST 29
+#define E1H_MAX_UC_LIST        30
+#define E2_MAX_UC_LIST 14
+static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
+{
+       if (CHIP_IS_E1(bp))
+               return E1_MAX_UC_LIST;
+       else if (CHIP_IS_E1H(bp))
+               return E1H_MAX_UC_LIST;
+       else
+               return E2_MAX_UC_LIST;
+}
+
+
+static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
+{
+       if (CHIP_IS_E1(bp))
+               /* CAM Entries for Port0:
+                *      0 - prim ETH MAC
+                *      1 - BCAST MAC
+                *      2 - iSCSI L2 ring ETH MAC
+                *      3-31 - UC MACs
+                *
+                * Port1 entries are allocated the same way starting from
+                * entry 32.
+                */
+               return 3 + 32 * BP_PORT(bp);
+       else if (CHIP_IS_E1H(bp)) {
+               /* CAM Entries:
+                *      0-7  - prim ETH MAC for each function
+                *      8-15 - iSCSI L2 ring ETH MAC for each function
+                *      16 till 255 UC MAC lists for each function
+                *
+                * Remark: There is no FCoE support for E1H, thus FCoE related
+                *         MACs are not considered.
+                */
+               return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
+                       bnx2x_max_uc_list(bp) * BP_FUNC(bp);
+       } else {
+               /* CAM Entries (there is a separate CAM per engine):
+                *      0-3  - prim ETH MAC for each function
+                *      4-7 - iSCSI L2 ring ETH MAC for each function
+                *      8-11 - FIP ucast L2 MAC for each function
+                *      12-15 - ALL_ENODE_MACS mcast MAC for each function
+                *      16 till 71 UC MAC lists for each function
+                */
+               u8 func_idx =
+                       (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
+
+               return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
+                       bnx2x_max_uc_list(bp) * func_idx;
+       }
+}
+
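+
bnx2x_uc_list_cam_offset() above places the per-function unicast block after the fixed CAM entries. A worked sketch of the E1H arithmetic, assuming CAM_ISCSI_ETH_LINE is 1 so that function 0 starts at entry 16 and function 7 ends at entry 255, matching the "16 till 255" remark in the comment:

#include <stdio.h>

int main(void)
{
        /* E1H layout from the comment above:
         *      0-7   prim ETH MAC per function
         *      8-15  iSCSI L2 ring MAC per function
         *      16+   per-function UC MAC lists
         */
        int e1h_func_max = 8;
        int cam_iscsi_eth_line = 1;     /* assumed value of CAM_ISCSI_ETH_LINE */
        int max_uc_list = 30;           /* E1H_MAX_UC_LIST */
        int func;

        for (func = 0; func < e1h_func_max; func++) {
                int offset = e1h_func_max * (cam_iscsi_eth_line + 1) +
                             max_uc_list * func;
                printf("func %d: UC MACs occupy CAM entries %d-%d\n",
                       func, offset, offset + max_uc_list - 1);
        }
        return 0;
}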
+/* set uc list, do not wait as wait implies sleep and
+ * set_rx_mode can be invoked from non-sleepable context.
+ *
+ * Instead we use the same ramrod data buffer each time we need
+ * to configure a list of addresses, and use the fact that the
+ * list of MACs is changed in an incremental way and that the
+ * function is called under the netif_addr_lock. A temporary
+ * inconsistent CAM configuration (possible in case of very fast
+ * sequence of add/del/add on the host side) will shortly be
+ * restored by the handler of the last ramrod.
+ */
+static int bnx2x_set_uc_list(struct bnx2x *bp)
+{
+       int i = 0, old;
+       struct net_device *dev = bp->dev;
+       u8 offset = bnx2x_uc_list_cam_offset(bp);
+       struct netdev_hw_addr *ha;
+       struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
+       dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
+
+       if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
+               return -EINVAL;
+
+       netdev_for_each_uc_addr(ha, dev) {
+               /* copy mac */
+               config_cmd->config_table[i].msb_mac_addr =
+                       swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
+               config_cmd->config_table[i].middle_mac_addr =
+                       swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
+               config_cmd->config_table[i].lsb_mac_addr =
+                       swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
+
+               config_cmd->config_table[i].vlan_id = 0;
+               config_cmd->config_table[i].pf_id = BP_FUNC(bp);
+               config_cmd->config_table[i].clients_bit_vector =
+                       cpu_to_le32(1 << BP_L_ID(bp));
+
+               SET_FLAG(config_cmd->config_table[i].flags,
+                       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+                       T_ETH_MAC_COMMAND_SET);
+
+               DP(NETIF_MSG_IFUP,
+                  "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
+                  config_cmd->config_table[i].msb_mac_addr,
+                  config_cmd->config_table[i].middle_mac_addr,
+                  config_cmd->config_table[i].lsb_mac_addr);
+
+               i++;
+
+               /* Set uc MAC in NIG */
+               bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
+                                    LLH_CAM_ETH_LINE + i);
+       }
+       old = config_cmd->hdr.length;
+       if (old > i) {
+               for (; i < old; i++) {
+                       if (CAM_IS_INVALID(config_cmd->
+                                          config_table[i])) {
+                               /* already invalidated */
+                               break;
+                       }
+                       /* invalidate */
+                       SET_FLAG(config_cmd->config_table[i].flags,
+                               MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+                               T_ETH_MAC_COMMAND_INVALIDATE);
+               }
+       }
+
+       wmb();
+
+       config_cmd->hdr.length = i;
+       config_cmd->hdr.offset = offset;
+       config_cmd->hdr.client_id = 0xff;
+       /* Mark that this ramrod doesn't use bp->set_mac_pending for
+        * synchronization.
+        */
+       config_cmd->hdr.echo = 0;
+
+       mb();
+
+       return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+                  U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
+
+}
+
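+
bnx2x_set_uc_list() above packs each 6-byte MAC into three 16-bit config-table fields via swab16() over unaligned 16-bit loads. A userspace sketch of the resulting byte order on a little-endian host, without the unaligned cast:

#include <stdio.h>
#include <stdint.h>

/* Matches swab16(*(u16 *)&mac[n]) on a little-endian host: the first byte
 * of each pair ends up in the high byte of the 16-bit word. */
static uint16_t pack_pair(const uint8_t *p)
{
        return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
        uint16_t msb = pack_pair(&mac[0]);
        uint16_t middle = pack_pair(&mac[2]);
        uint16_t lsb = pack_pair(&mac[4]);

        /* Same (%04x:%04x:%04x) split the driver prints in its
         * "setting UCAST" debug message. */
        printf("%04x:%04x:%04x\n",
               (unsigned)msb, (unsigned)middle, (unsigned)lsb);
        return 0;
}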
+void bnx2x_invalidate_uc_list(struct bnx2x *bp)
+{
+       int i;
+       struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
+       dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
+       int ramrod_flags = WAIT_RAMROD_COMMON;
+       u8 offset = bnx2x_uc_list_cam_offset(bp);
+       u8 max_list_size = bnx2x_max_uc_list(bp);
+
+       for (i = 0; i < max_list_size; i++) {
+               SET_FLAG(config_cmd->config_table[i].flags,
+                       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+                       T_ETH_MAC_COMMAND_INVALIDATE);
+               bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
+       }
+
+       wmb();
+
+       config_cmd->hdr.length = max_list_size;
+       config_cmd->hdr.offset = offset;
+       config_cmd->hdr.client_id = 0xff;
+       /* We'll wait for a completion this time... */
+       config_cmd->hdr.echo = 1;
+
+       bp->set_mac_pending = 1;
+
+       mb();
+
+       bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+                     U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
+
+       /* Wait for a completion */
+       bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
+                               ramrod_flags);
+
+}
+
+static inline int bnx2x_set_mc_list(struct bnx2x *bp)
+{
+       /* some multicasts */
+       if (CHIP_IS_E1(bp)) {
+               return bnx2x_set_e1_mc_list(bp);
+       } else { /* E1H and newer */
+               return bnx2x_set_e1h_mc_list(bp);
+       }
+}
+
 /* called with netif_tx_lock from dev_mcast.c */
 void bnx2x_set_rx_mode(struct net_device *dev)
 {
        struct bnx2x *bp = netdev_priv(dev);
        u32 rx_mode = BNX2X_RX_MODE_NORMAL;
-       int port = BP_PORT(bp);
 
        if (bp->state != BNX2X_STATE_OPEN) {
                DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
@@ -8853,47 +9216,16 @@ void bnx2x_set_rx_mode(struct net_device *dev)
 
        if (dev->flags & IFF_PROMISC)
                rx_mode = BNX2X_RX_MODE_PROMISC;
-       else if ((dev->flags & IFF_ALLMULTI) ||
-                ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
-                 CHIP_IS_E1(bp)))
+       else if (dev->flags & IFF_ALLMULTI)
                rx_mode = BNX2X_RX_MODE_ALLMULTI;
-       else { /* some multicasts */
-               if (CHIP_IS_E1(bp)) {
-                       /*
-                        * set mc list, do not wait as wait implies sleep
-                        * and set_rx_mode can be invoked from non-sleepable
-                        * context
-                        */
-                       u8 offset = (CHIP_REV_IS_SLOW(bp) ?
-                                    BNX2X_MAX_EMUL_MULTI*(1 + port) :
-                                    BNX2X_MAX_MULTICAST*(1 + port));
-
-                       bnx2x_set_e1_mc_list(bp, offset);
-               } else { /* E1H */
-                       /* Accept one or more multicasts */
-                       struct netdev_hw_addr *ha;
-                       u32 mc_filter[MC_HASH_SIZE];
-                       u32 crc, bit, regidx;
-                       int i;
-
-                       memset(mc_filter, 0, 4 * MC_HASH_SIZE);
-
-                       netdev_for_each_mc_addr(ha, dev) {
-                               DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
-                                  bnx2x_mc_addr(ha));
-
-                               crc = crc32c_le(0, bnx2x_mc_addr(ha),
-                                               ETH_ALEN);
-                               bit = (crc >> 24) & 0xff;
-                               regidx = bit >> 5;
-                               bit &= 0x1f;
-                               mc_filter[regidx] |= (1 << bit);
-                       }
+       else {
+               /* some multicasts */
+               if (bnx2x_set_mc_list(bp))
+                       rx_mode = BNX2X_RX_MODE_ALLMULTI;
 
-                       for (i = 0; i < MC_HASH_SIZE; i++)
-                               REG_WR(bp, MC_HASH_OFFSET(bp, i),
-                                      mc_filter[i]);
-               }
+               /* some unicasts */
+               if (bnx2x_set_uc_list(bp))
+                       rx_mode = BNX2X_RX_MODE_PROMISC;
        }
 
        bp->rx_mode = rx_mode;
@@ -8974,7 +9306,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_stop               = bnx2x_close,
        .ndo_start_xmit         = bnx2x_start_xmit,
        .ndo_select_queue       = bnx2x_select_queue,
-       .ndo_set_multicast_list = bnx2x_set_rx_mode,
+       .ndo_set_rx_mode        = bnx2x_set_rx_mode,
        .ndo_set_mac_address    = bnx2x_change_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = bnx2x_ioctl,
@@ -9120,7 +9452,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
        dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
        dev->vlan_features |= NETIF_F_TSO6;
 
-#ifdef BCM_DCB
+#ifdef BCM_DCBNL
        dev->dcbnl_ops = &bnx2x_dcbnl_ops;
 #endif
 
@@ -9527,6 +9859,11 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
        }
 #endif
 
+#ifdef BCM_DCBNL
+       /* Delete app tlvs from dcbnl */
+       bnx2x_dcbnl_update_applist(bp, true);
+#endif
+
        unregister_netdev(dev);
 
        /* Delete all NAPI objects */
@@ -9800,15 +10137,21 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
                                        HW_CID(bp, BNX2X_ISCSI_ETH_CID));
                }
 
-               /* There may be not more than 8 L2 and COMMON SPEs and not more
-                * than 8 L5 SPEs in the air.
+               /* There may be no more than 8 L2 and no more than 8 L5 SPEs
+                * in flight.  We also check that the number of outstanding
+                * COMMON ramrods is not more than the EQ and SPQ can
+                * accommodate.
                 */
-               if ((type == NONE_CONNECTION_TYPE) ||
-                   (type == ETH_CONNECTION_TYPE)) {
-                       if (!atomic_read(&bp->spq_left))
+               if (type == ETH_CONNECTION_TYPE) {
+                       if (!atomic_read(&bp->cq_spq_left))
                                break;
                        else
-                               atomic_dec(&bp->spq_left);
+                               atomic_dec(&bp->cq_spq_left);
+               } else if (type == NONE_CONNECTION_TYPE) {
+                       if (!atomic_read(&bp->eq_spq_left))
+                               break;
+                       else
+                               atomic_dec(&bp->eq_spq_left);
                } else if ((type == ISCSI_CONNECTION_TYPE) ||
                           (type == FCOE_CONNECTION_TYPE)) {
                        if (bp->cnic_spq_pending >=
@@ -9886,7 +10229,8 @@ static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
        int rc = 0;
 
        mutex_lock(&bp->cnic_mutex);
-       c_ops = bp->cnic_ops;
+       c_ops = rcu_dereference_protected(bp->cnic_ops,
+                                         lockdep_is_held(&bp->cnic_mutex));
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        mutex_unlock(&bp->cnic_mutex);
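
Not part of the patch: the hunk above swaps a plain pointer read for rcu_dereference_protected(..., lockdep_is_held(&bp->cnic_mutex)), i.e. an update-side access to an RCU-managed pointer that is safe only because the mutex is held. A rough userspace analogue of that pattern using C11 atomics and a pthread mutex; this is a model of the idea, not the kernel API, and all names are invented.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct ops { int (*ctl)(int arg); };

/* RCU-ish shared pointer: updates are serialised by ops_mutex. Lock-free
 * readers would need an acquire load; a reader that already holds the
 * mutex may load it relaxed (the rcu_dereference_protected() case). */
static _Atomic(struct ops *) shared_ops;
static pthread_mutex_t ops_mutex = PTHREAD_MUTEX_INITIALIZER;

static int my_ctl(int arg) { return arg + 1; }

static int call_ctl_locked(int arg)
{
        struct ops *o;
        int rc = 0;

        pthread_mutex_lock(&ops_mutex);
        o = atomic_load_explicit(&shared_ops, memory_order_relaxed);
        if (o)
                rc = o->ctl(arg);
        pthread_mutex_unlock(&ops_mutex);
        return rc;
}

int main(void)
{
        static struct ops ops = { .ctl = my_ctl };

        pthread_mutex_lock(&ops_mutex);
        atomic_store_explicit(&shared_ops, &ops, memory_order_release);
        pthread_mutex_unlock(&ops_mutex);

        printf("%d\n", call_ctl_locked(41)); /* prints 42 */
        return 0;
}
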
@@ -10000,7 +10344,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
                int count = ctl->data.credit.credit_count;
 
                smp_mb__before_atomic_inc();
-               atomic_add(count, &bp->spq_left);
+               atomic_add(count, &bp->cq_spq_left);
                smp_mb__after_atomic_inc();
                break;
        }
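
Not part of the patch: the hunks above split spq_left into separate cq_spq_left/eq_spq_left budgets for ETH and COMMON ramrods, and return freed slots with atomic_add(). A toy, self-contained model of that credit accounting; the driver's read-then-decrement is replaced by a compare-and-swap purely to keep the sketch race-free, and the names are illustrative.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int spq_credits = 8;      /* one such budget per ring */

static int take_credit(atomic_int *left)
{
        int v = atomic_load(left);

        while (v > 0) {
                /* On CAS failure, v is refreshed and the loop re-checks it. */
                if (atomic_compare_exchange_weak(left, &v, v - 1))
                        return 1;       /* got a slot, may post the ramrod */
        }
        return 0;                       /* exhausted, caller must defer */
}

static void return_credit(atomic_int *left, int count)
{
        atomic_fetch_add(left, count);  /* completion path gives slots back */
}

int main(void)
{
        if (take_credit(&spq_credits))
                printf("posted, %d credits left\n", atomic_load(&spq_credits));
        return_credit(&spq_credits, 1);
        return 0;
}
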
@@ -10096,6 +10440,13 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 
+       /* If both iSCSI and FCoE are disabled, return NULL in
+        * order to indicate to CNIC that it should not try to work
+        * with this device.
+        */
+       if (NO_ISCSI(bp) && NO_FCOE(bp))
+               return NULL;
+
        cp->drv_owner = THIS_MODULE;
        cp->chip_id = CHIP_ID(bp);
        cp->pdev = bp->pdev;
@@ -10116,6 +10467,15 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
                BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
        cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
 
+       if (NO_ISCSI_OOO(bp))
+               cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+
+       if (NO_ISCSI(bp))
+               cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
+
+       if (NO_FCOE(bp))
+               cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
+
        DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
                         "starting cid %d\n",
           cp->ctx_blk_size,
index e01330bb36c754966543adde30179fae57e75b54..1c89f19a4425b95786f77a2104c7ef6216ccb2f7 100644 (file)
@@ -6083,6 +6083,7 @@ The other bits are reserved and should be zero*/
 #define MDIO_PMA_REG_8727_PCS_OPT_CTRL         0xc808
 #define MDIO_PMA_REG_8727_GPIO_CTRL            0xc80e
 #define MDIO_PMA_REG_8727_PCS_GP               0xc842
+#define MDIO_PMA_REG_8727_OPT_CFG_REG          0xc8e4
 
 #define MDIO_AN_REG_8727_MISC_CTRL             0x8309
 
index 0e2737eac8b7e3d7360fd392020d8c9b3acefa52..3c5c014e82b228c77d99ec34caaeca8aa80ca9b5 100644 (file)
@@ -6,6 +6,9 @@ obj-$(CONFIG_BONDING) += bonding.o
 
 bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o
 
+proc-$(CONFIG_PROC_FS) += bond_procfs.o
+bonding-objs += $(proc-y)
+
 ipv6-$(subst m,y,$(CONFIG_IPV6)) += bond_ipv6.o
 bonding-objs += $(ipv6-y)
 
index 5c6fba802f2b759561ee6c9c0c44bd3ea761b2c6..9bc5de3e04a8375894e040cd3cb794a0fff5f90c 100644 (file)
@@ -604,7 +604,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
 
        _lock_rx_hashtbl(bond);
 
-       hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_src));
+       hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
        client_info = &(bond_info->rx_hashtbl[hash_index]);
 
        if (client_info->assigned) {
index 163e0b06eaa5d1aba0dda6aef0765016020e2b1a..68a5ce0a649f58f63f7e057eca4d8615133d44ec 100644 (file)
 #include <linux/uaccess.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
-#include <linux/netpoll.h>
 #include <linux/inetdevice.h>
 #include <linux/igmp.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
 #include <net/sock.h>
 #include <linux/rtnetlink.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
 #include <linux/smp.h>
 #include <linux/if_ether.h>
 #include <net/arp.h>
@@ -174,9 +171,6 @@ MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link
 atomic_t netpoll_block_tx = ATOMIC_INIT(0);
 #endif
 
-static const char * const version =
-       DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n";
-
 int bond_net_id __read_mostly;
 
 static __be32 arp_target[BOND_MAX_ARP_TARGETS];
@@ -246,7 +240,7 @@ static void bond_uninit(struct net_device *bond_dev);
 
 /*---------------------------- General routines -----------------------------*/
 
-static const char *bond_mode_name(int mode)
+const char *bond_mode_name(int mode)
 {
        static const char *names[] = {
                [BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
@@ -424,15 +418,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 {
        skb->dev = slave_dev;
        skb->priority = 1;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) {
-               struct netpoll *np = bond->dev->npinfo->netpoll;
-               slave_dev->npinfo = bond->dev->npinfo;
-               slave_dev->priv_flags |= IFF_IN_NETPOLL;
-               netpoll_send_skb_on_dev(np, skb, slave_dev);
-               slave_dev->priv_flags &= ~IFF_IN_NETPOLL;
-       } else
-#endif
+       if (unlikely(netpoll_tx_running(slave_dev)))
+               bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
+       else
                dev_queue_xmit(skb);
 
        return 0;
@@ -1288,63 +1276,105 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave)
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * You must hold read lock on bond->lock before calling this.
- */
-static bool slaves_support_netpoll(struct net_device *bond_dev)
+static inline int slave_enable_netpoll(struct slave *slave)
 {
-       struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *slave;
-       int i = 0;
-       bool ret = true;
+       struct netpoll *np;
+       int err = 0;
 
-       bond_for_each_slave(bond, slave, i) {
-               if ((slave->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
-                   !slave->dev->netdev_ops->ndo_poll_controller)
-                       ret = false;
+       np = kzalloc(sizeof(*np), GFP_KERNEL);
+       err = -ENOMEM;
+       if (!np)
+               goto out;
+
+       np->dev = slave->dev;
+       err = __netpoll_setup(np);
+       if (err) {
+               kfree(np);
+               goto out;
        }
-       return i != 0 && ret;
+       slave->np = np;
+out:
+       return err;
+}
+static inline void slave_disable_netpoll(struct slave *slave)
+{
+       struct netpoll *np = slave->np;
+
+       if (!np)
+               return;
+
+       slave->np = NULL;
+       synchronize_rcu_bh();
+       __netpoll_cleanup(np);
+       kfree(np);
+}
+static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
+{
+       if (slave_dev->priv_flags & IFF_DISABLE_NETPOLL)
+               return false;
+       if (!slave_dev->netdev_ops->ndo_poll_controller)
+               return false;
+       return true;
 }
 
 static void bond_poll_controller(struct net_device *bond_dev)
 {
-       struct bonding *bond = netdev_priv(bond_dev);
+}
+
+static void __bond_netpoll_cleanup(struct bonding *bond)
+{
        struct slave *slave;
        int i;
 
-       bond_for_each_slave(bond, slave, i) {
-               if (slave->dev && IS_UP(slave->dev))
-                       netpoll_poll_dev(slave->dev);
-       }
+       bond_for_each_slave(bond, slave, i)
+               if (IS_UP(slave->dev))
+                       slave_disable_netpoll(slave);
 }
-
 static void bond_netpoll_cleanup(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+
+       read_lock(&bond->lock);
+       __bond_netpoll_cleanup(bond);
+       read_unlock(&bond->lock);
+}
+
+static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
+{
+       struct bonding *bond = netdev_priv(dev);
        struct slave *slave;
-       const struct net_device_ops *ops;
-       int i;
+       int i, err = 0;
 
        read_lock(&bond->lock);
-       bond_dev->npinfo = NULL;
        bond_for_each_slave(bond, slave, i) {
-               if (slave->dev) {
-                       ops = slave->dev->netdev_ops;
-                       if (ops->ndo_netpoll_cleanup)
-                               ops->ndo_netpoll_cleanup(slave->dev);
-                       else
-                               slave->dev->npinfo = NULL;
+               if (!IS_UP(slave->dev))
+                       continue;
+               err = slave_enable_netpoll(slave);
+               if (err) {
+                       __bond_netpoll_cleanup(bond);
+                       break;
                }
        }
        read_unlock(&bond->lock);
+       return err;
 }
 
-#else
+static struct netpoll_info *bond_netpoll_info(struct bonding *bond)
+{
+       return bond->dev->npinfo;
+}
 
+#else
+static inline int slave_enable_netpoll(struct slave *slave)
+{
+       return 0;
+}
+static inline void slave_disable_netpoll(struct slave *slave)
+{
+}
 static void bond_netpoll_cleanup(struct net_device *bond_dev)
 {
 }
-
 #endif
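
Not part of the patch: bond_netpoll_setup() above enables netpoll on every up slave and unwinds when any slave fails. A self-contained sketch of that enable-all-or-roll-back loop; enable()/disable() are made-up stand-ins for slave_enable_netpoll()/slave_disable_netpoll(), and the third slave is made to fail on purpose.

#include <stdio.h>

#define NSLAVES 4

static int enable(int i)   { return (i == 2) ? -1 : 0; }
static void disable(int i) { printf("disable slave %d\n", i); }

static int setup_all(void)
{
        int i, err = 0;

        for (i = 0; i < NSLAVES; i++) {
                err = enable(i);
                if (err) {
                        /* Roll back what was enabled so far.  (The driver's
                         * __bond_netpoll_cleanup() walks all slaves instead;
                         * disabling an untouched slave is a no-op there.) */
                        while (--i >= 0)
                                disable(i);
                        break;
                }
        }
        return err;
}

int main(void)
{
        printf("setup_all() = %d\n", setup_all());
        return 0;
}
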
 
 /*---------------------------------- IOCTL ----------------------------------*/
@@ -1372,8 +1402,8 @@ static int bond_compute_features(struct bonding *bond)
 {
        struct slave *slave;
        struct net_device *bond_dev = bond->dev;
-       unsigned long features = bond_dev->features;
-       unsigned long vlan_features = 0;
+       u32 features = bond_dev->features;
+       u32 vlan_features = 0;
        unsigned short max_hard_header_len = max((u16)ETH_HLEN,
                                                bond_dev->hard_header_len);
        int i;
@@ -1400,8 +1430,8 @@ static int bond_compute_features(struct bonding *bond)
 
 done:
        features |= (bond_dev->features & BOND_VLAN_FEATURES);
-       bond_dev->features = netdev_fix_features(features, NULL);
-       bond_dev->vlan_features = netdev_fix_features(vlan_features, NULL);
+       bond_dev->features = netdev_fix_features(bond_dev, features);
+       bond_dev->vlan_features = netdev_fix_features(bond_dev, vlan_features);
        bond_dev->hard_header_len = max_hard_header_len;
 
        return 0;
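
Not part of the patch: bond_compute_features(), partially shown above and now working on u32 masks with the device-aware netdev_fix_features(), derives the bond's advertised features from its slaves. A toy illustration of the underlying idea of intersecting per-slave capability masks; the flag values are invented, and the real helpers apply more nuanced rules than a plain AND.

#include <stdint.h>
#include <stdio.h>

#define F_SG      (1u << 0)
#define F_TSO     (1u << 1)
#define F_HW_CSUM (1u << 2)

int main(void)
{
        uint32_t slaves[] = { F_SG | F_TSO | F_HW_CSUM, F_SG | F_HW_CSUM };
        uint32_t features = ~0u;
        size_t i;

        for (i = 0; i < sizeof(slaves) / sizeof(slaves[0]); i++)
                features &= slaves[i];

        /* F_SG | F_HW_CSUM survives; F_TSO is dropped because one slave
         * lacks it. */
        printf("combined features: %#x\n", (unsigned int)features);
        return 0;
}
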
@@ -1423,6 +1453,71 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
        bond->setup_by_slave = 1;
 }
 
+/* On bonding slaves other than the currently active slave, suppress
+ * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
+ * ARP on active-backup slaves with arp_validate enabled.
+ */
+static bool bond_should_deliver_exact_match(struct sk_buff *skb,
+                                           struct net_device *slave_dev,
+                                           struct net_device *bond_dev)
+{
+       if (slave_dev->priv_flags & IFF_SLAVE_INACTIVE) {
+               if (slave_dev->priv_flags & IFF_SLAVE_NEEDARP &&
+                   skb->protocol == __cpu_to_be16(ETH_P_ARP))
+                       return false;
+
+               if (bond_dev->priv_flags & IFF_MASTER_ALB &&
+                   skb->pkt_type != PACKET_BROADCAST &&
+                   skb->pkt_type != PACKET_MULTICAST)
+                               return false;
+
+               if (bond_dev->priv_flags & IFF_MASTER_8023AD &&
+                   skb->protocol == __cpu_to_be16(ETH_P_SLOW))
+                       return false;
+
+               return true;
+       }
+       return false;
+}
+
+static struct sk_buff *bond_handle_frame(struct sk_buff *skb)
+{
+       struct net_device *slave_dev;
+       struct net_device *bond_dev;
+
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (unlikely(!skb))
+               return NULL;
+       slave_dev = skb->dev;
+       bond_dev = ACCESS_ONCE(slave_dev->master);
+       if (unlikely(!bond_dev))
+               return skb;
+
+       if (bond_dev->priv_flags & IFF_MASTER_ARPMON)
+               slave_dev->last_rx = jiffies;
+
+       if (bond_should_deliver_exact_match(skb, slave_dev, bond_dev)) {
+               skb->deliver_no_wcard = 1;
+               return skb;
+       }
+
+       skb->dev = bond_dev;
+
+       if (bond_dev->priv_flags & IFF_MASTER_ALB &&
+           bond_dev->priv_flags & IFF_BRIDGE_PORT &&
+           skb->pkt_type == PACKET_HOST) {
+
+               if (unlikely(skb_cow_head(skb,
+                                         skb->data - skb_mac_header(skb)))) {
+                       kfree_skb(skb);
+                       return NULL;
+               }
+               memcpy(eth_hdr(skb)->h_dest, bond_dev->dev_addr, ETH_ALEN);
+       }
+
+       return skb;
+}
+
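
Not part of the patch: bond_handle_frame() above follows the rx_handler contract implied by the code in this hunk — return NULL when the buffer was consumed or dropped, or return the (possibly retargeted) skb so the stack keeps processing it. A tiny userspace model of that contract; struct fake_skb and its fields are invented for the example.

#include <stdio.h>
#include <stdlib.h>

struct fake_skb { const char *dev; int drop_hint; };

static struct fake_skb *handle_frame(struct fake_skb *skb)
{
        if (skb->drop_hint) {           /* e.g. an allocation-failure path */
                free(skb);
                return NULL;            /* stack stops processing */
        }
        skb->dev = "bond0";             /* retarget to the master device */
        return skb;                     /* stack keeps processing */
}

int main(void)
{
        struct fake_skb *skb = calloc(1, sizeof(*skb));

        skb->dev = "eth0";
        skb = handle_frame(skb);
        if (skb) {
                printf("delivered on %s\n", skb->dev);
                free(skb);
        }
        return 0;
}
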
 /* enslave device <slave> to bond device <master> */
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 {
@@ -1594,16 +1689,22 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                }
        }
 
-       res = netdev_set_master(slave_dev, bond_dev);
+       res = netdev_set_bond_master(slave_dev, bond_dev);
        if (res) {
-               pr_debug("Error %d calling netdev_set_master\n", res);
+               pr_debug("Error %d calling netdev_set_bond_master\n", res);
                goto err_restore_mac;
        }
+       res = netdev_rx_handler_register(slave_dev, bond_handle_frame, NULL);
+       if (res) {
+               pr_debug("Error %d calling netdev_rx_handler_register\n", res);
+               goto err_unset_master;
+       }
+
        /* open the slave since the application closed it */
        res = dev_open(slave_dev);
        if (res) {
                pr_debug("Opening slave %s failed\n", slave_dev->name);
-               goto err_unset_master;
+               goto err_unreg_rxhandler;
        }
 
        new_slave->dev = slave_dev;
@@ -1782,17 +1883,19 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        bond_set_carrier(bond);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-       if (slaves_support_netpoll(bond_dev)) {
-               bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
-               if (bond_dev->npinfo)
-                       slave_dev->npinfo = bond_dev->npinfo;
-       } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) {
-               bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
-               pr_info("New slave device %s does not support netpoll\n",
-                       slave_dev->name);
-               pr_info("Disabling netpoll support for %s\n", bond_dev->name);
+       slave_dev->npinfo = bond_netpoll_info(bond);
+       if (slave_dev->npinfo) {
+               if (slave_enable_netpoll(new_slave)) {
+                       read_unlock(&bond->lock);
+                       pr_info("Error, %s: master_dev is using netpoll, "
+                                "but new slave device does not support netpoll.\n",
+                                bond_dev->name);
+                       res = -EBUSY;
+                       goto err_close;
+               }
        }
 #endif
+
        read_unlock(&bond->lock);
 
        res = bond_create_slave_symlinks(bond_dev, slave_dev);
@@ -1811,8 +1914,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 err_close:
        dev_close(slave_dev);
 
+err_unreg_rxhandler:
+       netdev_rx_handler_unregister(slave_dev);
+
 err_unset_master:
-       netdev_set_master(slave_dev, NULL);
+       netdev_set_bond_master(slave_dev, NULL);
 
 err_restore_mac:
        if (!bond->params.fail_over_mac) {
@@ -1992,19 +2098,10 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
                netif_addr_unlock_bh(bond_dev);
        }
 
-       netdev_set_master(slave_dev, NULL);
+       netdev_rx_handler_unregister(slave_dev);
+       netdev_set_bond_master(slave_dev, NULL);
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       read_lock_bh(&bond->lock);
-
-       if (slaves_support_netpoll(bond_dev))
-               bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
-       read_unlock_bh(&bond->lock);
-       if (slave_dev->netdev_ops->ndo_netpoll_cleanup)
-               slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev);
-       else
-               slave_dev->npinfo = NULL;
-#endif
+       slave_disable_netpoll(slave);
 
        /* close slave before restoring its mac address */
        dev_close(slave_dev);
@@ -2039,6 +2136,7 @@ static int  bond_release_and_destroy(struct net_device *bond_dev,
 
        ret = bond_release(bond_dev, slave_dev);
        if ((ret == 0) && (bond->slave_cnt == 0)) {
+               bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
                pr_info("%s: destroying bond %s.\n",
                        bond_dev->name, bond_dev->name);
                unregister_netdevice(bond_dev);
@@ -2114,7 +2212,10 @@ static int bond_release_all(struct net_device *bond_dev)
                        netif_addr_unlock_bh(bond_dev);
                }
 
-               netdev_set_master(slave_dev, NULL);
+               netdev_rx_handler_unregister(slave_dev);
+               netdev_set_bond_master(slave_dev, NULL);
+
+               slave_disable_netpoll(slave);
 
                /* close slave before restoring its mac address */
                dev_close(slave_dev);
@@ -2571,7 +2672,7 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
 
 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 {
-       int i, vlan_id, rv;
+       int i, vlan_id;
        __be32 *targets = bond->params.arp_targets;
        struct vlan_entry *vlan;
        struct net_device *vlan_dev;
@@ -2598,8 +2699,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                fl.fl4_dst = targets[i];
                fl.fl4_tos = RTO_ONLINK;
 
-               rv = ip_route_output_key(dev_net(bond->dev), &rt, &fl);
-               if (rv) {
+               rt = ip_route_output_key(dev_net(bond->dev), &fl);
+               if (IS_ERR(rt)) {
                        if (net_ratelimit()) {
                                pr_warning("%s: no route to arp_ip_target %pI4\n",
                                           bond->dev->name, &fl.fl4_dst);
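
Not part of the patch: the hunk above reflects ip_route_output_key() now returning the route itself or an encoded error instead of an int plus out-parameter, hence the IS_ERR() check. A minimal userspace re-creation of the kernel's ERR_PTR/IS_ERR/PTR_ERR idiom; MAX_ERRNO mirrors the kernel's 4095, and lookup_route() is a made-up stand-in.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *lookup_route(int fail)
{
        static int route = 42;          /* stands in for struct rtable */
        return fail ? ERR_PTR(-ENETUNREACH) : &route;
}

int main(void)
{
        void *rt = lookup_route(1);

        if (IS_ERR(rt))
                printf("no route: %ld\n", PTR_ERR(rt));
        else
                printf("route found\n");
        return 0;
}
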
@@ -3182,299 +3283,6 @@ out:
        read_unlock(&bond->lock);
 }
 
-/*------------------------------ proc/seq_file-------------------------------*/
-
-#ifdef CONFIG_PROC_FS
-
-static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(RCU)
-       __acquires(&bond->lock)
-{
-       struct bonding *bond = seq->private;
-       loff_t off = 0;
-       struct slave *slave;
-       int i;
-
-       /* make sure the bond won't be taken away */
-       rcu_read_lock();
-       read_lock(&bond->lock);
-
-       if (*pos == 0)
-               return SEQ_START_TOKEN;
-
-       bond_for_each_slave(bond, slave, i) {
-               if (++off == *pos)
-                       return slave;
-       }
-
-       return NULL;
-}
-
-static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       struct bonding *bond = seq->private;
-       struct slave *slave = v;
-
-       ++*pos;
-       if (v == SEQ_START_TOKEN)
-               return bond->first_slave;
-
-       slave = slave->next;
-
-       return (slave == bond->first_slave) ? NULL : slave;
-}
-
-static void bond_info_seq_stop(struct seq_file *seq, void *v)
-       __releases(&bond->lock)
-       __releases(RCU)
-{
-       struct bonding *bond = seq->private;
-
-       read_unlock(&bond->lock);
-       rcu_read_unlock();
-}
-
-static void bond_info_show_master(struct seq_file *seq)
-{
-       struct bonding *bond = seq->private;
-       struct slave *curr;
-       int i;
-
-       read_lock(&bond->curr_slave_lock);
-       curr = bond->curr_active_slave;
-       read_unlock(&bond->curr_slave_lock);
-
-       seq_printf(seq, "Bonding Mode: %s",
-                  bond_mode_name(bond->params.mode));
-
-       if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
-           bond->params.fail_over_mac)
-               seq_printf(seq, " (fail_over_mac %s)",
-                  fail_over_mac_tbl[bond->params.fail_over_mac].modename);
-
-       seq_printf(seq, "\n");
-
-       if (bond->params.mode == BOND_MODE_XOR ||
-               bond->params.mode == BOND_MODE_8023AD) {
-               seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
-                       xmit_hashtype_tbl[bond->params.xmit_policy].modename,
-                       bond->params.xmit_policy);
-       }
-
-       if (USES_PRIMARY(bond->params.mode)) {
-               seq_printf(seq, "Primary Slave: %s",
-                          (bond->primary_slave) ?
-                          bond->primary_slave->dev->name : "None");
-               if (bond->primary_slave)
-                       seq_printf(seq, " (primary_reselect %s)",
-                  pri_reselect_tbl[bond->params.primary_reselect].modename);
-
-               seq_printf(seq, "\nCurrently Active Slave: %s\n",
-                          (curr) ? curr->dev->name : "None");
-       }
-
-       seq_printf(seq, "MII Status: %s\n", netif_carrier_ok(bond->dev) ?
-                  "up" : "down");
-       seq_printf(seq, "MII Polling Interval (ms): %d\n", bond->params.miimon);
-       seq_printf(seq, "Up Delay (ms): %d\n",
-                  bond->params.updelay * bond->params.miimon);
-       seq_printf(seq, "Down Delay (ms): %d\n",
-                  bond->params.downdelay * bond->params.miimon);
-
-
-       /* ARP information */
-       if (bond->params.arp_interval > 0) {
-               int printed = 0;
-               seq_printf(seq, "ARP Polling Interval (ms): %d\n",
-                               bond->params.arp_interval);
-
-               seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
-
-               for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
-                       if (!bond->params.arp_targets[i])
-                               break;
-                       if (printed)
-                               seq_printf(seq, ",");
-                       seq_printf(seq, " %pI4", &bond->params.arp_targets[i]);
-                       printed = 1;
-               }
-               seq_printf(seq, "\n");
-       }
-
-       if (bond->params.mode == BOND_MODE_8023AD) {
-               struct ad_info ad_info;
-
-               seq_puts(seq, "\n802.3ad info\n");
-               seq_printf(seq, "LACP rate: %s\n",
-                          (bond->params.lacp_fast) ? "fast" : "slow");
-               seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
-                          ad_select_tbl[bond->params.ad_select].modename);
-
-               if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
-                       seq_printf(seq, "bond %s has no active aggregator\n",
-                                  bond->dev->name);
-               } else {
-                       seq_printf(seq, "Active Aggregator Info:\n");
-
-                       seq_printf(seq, "\tAggregator ID: %d\n",
-                                  ad_info.aggregator_id);
-                       seq_printf(seq, "\tNumber of ports: %d\n",
-                                  ad_info.ports);
-                       seq_printf(seq, "\tActor Key: %d\n",
-                                  ad_info.actor_key);
-                       seq_printf(seq, "\tPartner Key: %d\n",
-                                  ad_info.partner_key);
-                       seq_printf(seq, "\tPartner Mac Address: %pM\n",
-                                  ad_info.partner_system);
-               }
-       }
-}
-
-static void bond_info_show_slave(struct seq_file *seq,
-                                const struct slave *slave)
-{
-       struct bonding *bond = seq->private;
-
-       seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
-       seq_printf(seq, "MII Status: %s\n",
-                  (slave->link == BOND_LINK_UP) ?  "up" : "down");
-       seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
-       seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
-       seq_printf(seq, "Link Failure Count: %u\n",
-                  slave->link_failure_count);
-
-       seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
-
-       if (bond->params.mode == BOND_MODE_8023AD) {
-               const struct aggregator *agg
-                       = SLAVE_AD_INFO(slave).port.aggregator;
-
-               if (agg)
-                       seq_printf(seq, "Aggregator ID: %d\n",
-                                  agg->aggregator_identifier);
-               else
-                       seq_puts(seq, "Aggregator ID: N/A\n");
-       }
-       seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
-}
-
-static int bond_info_seq_show(struct seq_file *seq, void *v)
-{
-       if (v == SEQ_START_TOKEN) {
-               seq_printf(seq, "%s\n", version);
-               bond_info_show_master(seq);
-       } else
-               bond_info_show_slave(seq, v);
-
-       return 0;
-}
-
-static const struct seq_operations bond_info_seq_ops = {
-       .start = bond_info_seq_start,
-       .next  = bond_info_seq_next,
-       .stop  = bond_info_seq_stop,
-       .show  = bond_info_seq_show,
-};
-
-static int bond_info_open(struct inode *inode, struct file *file)
-{
-       struct seq_file *seq;
-       struct proc_dir_entry *proc;
-       int res;
-
-       res = seq_open(file, &bond_info_seq_ops);
-       if (!res) {
-               /* recover the pointer buried in proc_dir_entry data */
-               seq = file->private_data;
-               proc = PDE(inode);
-               seq->private = proc->data;
-       }
-
-       return res;
-}
-
-static const struct file_operations bond_info_fops = {
-       .owner   = THIS_MODULE,
-       .open    = bond_info_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-static void bond_create_proc_entry(struct bonding *bond)
-{
-       struct net_device *bond_dev = bond->dev;
-       struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
-
-       if (bn->proc_dir) {
-               bond->proc_entry = proc_create_data(bond_dev->name,
-                                                   S_IRUGO, bn->proc_dir,
-                                                   &bond_info_fops, bond);
-               if (bond->proc_entry == NULL)
-                       pr_warning("Warning: Cannot create /proc/net/%s/%s\n",
-                                  DRV_NAME, bond_dev->name);
-               else
-                       memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
-       }
-}
-
-static void bond_remove_proc_entry(struct bonding *bond)
-{
-       struct net_device *bond_dev = bond->dev;
-       struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
-
-       if (bn->proc_dir && bond->proc_entry) {
-               remove_proc_entry(bond->proc_file_name, bn->proc_dir);
-               memset(bond->proc_file_name, 0, IFNAMSIZ);
-               bond->proc_entry = NULL;
-       }
-}
-
-/* Create the bonding directory under /proc/net, if doesn't exist yet.
- * Caller must hold rtnl_lock.
- */
-static void __net_init bond_create_proc_dir(struct bond_net *bn)
-{
-       if (!bn->proc_dir) {
-               bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
-               if (!bn->proc_dir)
-                       pr_warning("Warning: cannot create /proc/net/%s\n",
-                                  DRV_NAME);
-       }
-}
-
-/* Destroy the bonding directory under /proc/net, if empty.
- * Caller must hold rtnl_lock.
- */
-static void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
-{
-       if (bn->proc_dir) {
-               remove_proc_entry(DRV_NAME, bn->net->proc_net);
-               bn->proc_dir = NULL;
-       }
-}
-
-#else /* !CONFIG_PROC_FS */
-
-static void bond_create_proc_entry(struct bonding *bond)
-{
-}
-
-static void bond_remove_proc_entry(struct bonding *bond)
-{
-}
-
-static inline void bond_create_proc_dir(struct bond_net *bn)
-{
-}
-
-static inline void bond_destroy_proc_dir(struct bond_net *bn)
-{
-}
-
-#endif /* CONFIG_PROC_FS */
-
-
 /*-------------------------- netdev event handling --------------------------*/
 
 /*
@@ -4654,9 +4462,12 @@ static const struct net_device_ops bond_netdev_ops = {
        .ndo_vlan_rx_add_vid    = bond_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = bond_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_netpoll_setup      = bond_netpoll_setup,
        .ndo_netpoll_cleanup    = bond_netpoll_cleanup,
        .ndo_poll_controller    = bond_poll_controller,
 #endif
+       .ndo_add_slave          = bond_enslave,
+       .ndo_del_slave          = bond_release,
 };
 
 static void bond_destructor(struct net_device *bond_dev)
@@ -5275,7 +5086,7 @@ static int __init bonding_init(void)
        int i;
        int res;
 
-       pr_info("%s", version);
+       pr_info("%s", bond_version);
 
        res = bond_check_params(&bonding_defaults);
        if (res)
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
new file mode 100644 (file)
index 0000000..c32ff55
--- /dev/null
@@ -0,0 +1,275 @@
+#include <linux/proc_fs.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include "bonding.h"
+
+
+extern const char *bond_mode_name(int mode);
+
+static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
+       __acquires(RCU)
+       __acquires(&bond->lock)
+{
+       struct bonding *bond = seq->private;
+       loff_t off = 0;
+       struct slave *slave;
+       int i;
+
+       /* make sure the bond won't be taken away */
+       rcu_read_lock();
+       read_lock(&bond->lock);
+
+       if (*pos == 0)
+               return SEQ_START_TOKEN;
+
+       bond_for_each_slave(bond, slave, i) {
+               if (++off == *pos)
+                       return slave;
+       }
+
+       return NULL;
+}
+
+static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       struct bonding *bond = seq->private;
+       struct slave *slave = v;
+
+       ++*pos;
+       if (v == SEQ_START_TOKEN)
+               return bond->first_slave;
+
+       slave = slave->next;
+
+       return (slave == bond->first_slave) ? NULL : slave;
+}
+
+static void bond_info_seq_stop(struct seq_file *seq, void *v)
+       __releases(&bond->lock)
+       __releases(RCU)
+{
+       struct bonding *bond = seq->private;
+
+       read_unlock(&bond->lock);
+       rcu_read_unlock();
+}
+
+static void bond_info_show_master(struct seq_file *seq)
+{
+       struct bonding *bond = seq->private;
+       struct slave *curr;
+       int i;
+
+       read_lock(&bond->curr_slave_lock);
+       curr = bond->curr_active_slave;
+       read_unlock(&bond->curr_slave_lock);
+
+       seq_printf(seq, "Bonding Mode: %s",
+                  bond_mode_name(bond->params.mode));
+
+       if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
+           bond->params.fail_over_mac)
+               seq_printf(seq, " (fail_over_mac %s)",
+                  fail_over_mac_tbl[bond->params.fail_over_mac].modename);
+
+       seq_printf(seq, "\n");
+
+       if (bond->params.mode == BOND_MODE_XOR ||
+               bond->params.mode == BOND_MODE_8023AD) {
+               seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
+                       xmit_hashtype_tbl[bond->params.xmit_policy].modename,
+                       bond->params.xmit_policy);
+       }
+
+       if (USES_PRIMARY(bond->params.mode)) {
+               seq_printf(seq, "Primary Slave: %s",
+                          (bond->primary_slave) ?
+                          bond->primary_slave->dev->name : "None");
+               if (bond->primary_slave)
+                       seq_printf(seq, " (primary_reselect %s)",
+                  pri_reselect_tbl[bond->params.primary_reselect].modename);
+
+               seq_printf(seq, "\nCurrently Active Slave: %s\n",
+                          (curr) ? curr->dev->name : "None");
+       }
+
+       seq_printf(seq, "MII Status: %s\n", netif_carrier_ok(bond->dev) ?
+                  "up" : "down");
+       seq_printf(seq, "MII Polling Interval (ms): %d\n", bond->params.miimon);
+       seq_printf(seq, "Up Delay (ms): %d\n",
+                  bond->params.updelay * bond->params.miimon);
+       seq_printf(seq, "Down Delay (ms): %d\n",
+                  bond->params.downdelay * bond->params.miimon);
+
+
+       /* ARP information */
+       if (bond->params.arp_interval > 0) {
+               int printed = 0;
+               seq_printf(seq, "ARP Polling Interval (ms): %d\n",
+                               bond->params.arp_interval);
+
+               seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
+
+               for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
+                       if (!bond->params.arp_targets[i])
+                               break;
+                       if (printed)
+                               seq_printf(seq, ",");
+                       seq_printf(seq, " %pI4", &bond->params.arp_targets[i]);
+                       printed = 1;
+               }
+               seq_printf(seq, "\n");
+       }
+
+       if (bond->params.mode == BOND_MODE_8023AD) {
+               struct ad_info ad_info;
+
+               seq_puts(seq, "\n802.3ad info\n");
+               seq_printf(seq, "LACP rate: %s\n",
+                          (bond->params.lacp_fast) ? "fast" : "slow");
+               seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
+                          ad_select_tbl[bond->params.ad_select].modename);
+
+               if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
+                       seq_printf(seq, "bond %s has no active aggregator\n",
+                                  bond->dev->name);
+               } else {
+                       seq_printf(seq, "Active Aggregator Info:\n");
+
+                       seq_printf(seq, "\tAggregator ID: %d\n",
+                                  ad_info.aggregator_id);
+                       seq_printf(seq, "\tNumber of ports: %d\n",
+                                  ad_info.ports);
+                       seq_printf(seq, "\tActor Key: %d\n",
+                                  ad_info.actor_key);
+                       seq_printf(seq, "\tPartner Key: %d\n",
+                                  ad_info.partner_key);
+                       seq_printf(seq, "\tPartner Mac Address: %pM\n",
+                                  ad_info.partner_system);
+               }
+       }
+}
+
+static void bond_info_show_slave(struct seq_file *seq,
+                                const struct slave *slave)
+{
+       struct bonding *bond = seq->private;
+
+       seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
+       seq_printf(seq, "MII Status: %s\n",
+                  (slave->link == BOND_LINK_UP) ?  "up" : "down");
+       seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
+       seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
+       seq_printf(seq, "Link Failure Count: %u\n",
+                  slave->link_failure_count);
+
+       seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
+
+       if (bond->params.mode == BOND_MODE_8023AD) {
+               const struct aggregator *agg
+                       = SLAVE_AD_INFO(slave).port.aggregator;
+
+               if (agg)
+                       seq_printf(seq, "Aggregator ID: %d\n",
+                                  agg->aggregator_identifier);
+               else
+                       seq_puts(seq, "Aggregator ID: N/A\n");
+       }
+       seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
+}
+
+static int bond_info_seq_show(struct seq_file *seq, void *v)
+{
+       if (v == SEQ_START_TOKEN) {
+               seq_printf(seq, "%s\n", bond_version);
+               bond_info_show_master(seq);
+       } else
+               bond_info_show_slave(seq, v);
+
+       return 0;
+}
+
+static const struct seq_operations bond_info_seq_ops = {
+       .start = bond_info_seq_start,
+       .next  = bond_info_seq_next,
+       .stop  = bond_info_seq_stop,
+       .show  = bond_info_seq_show,
+};
+
+static int bond_info_open(struct inode *inode, struct file *file)
+{
+       struct seq_file *seq;
+       struct proc_dir_entry *proc;
+       int res;
+
+       res = seq_open(file, &bond_info_seq_ops);
+       if (!res) {
+               /* recover the pointer buried in proc_dir_entry data */
+               seq = file->private_data;
+               proc = PDE(inode);
+               seq->private = proc->data;
+       }
+
+       return res;
+}
+
+static const struct file_operations bond_info_fops = {
+       .owner   = THIS_MODULE,
+       .open    = bond_info_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release,
+};
+
+void bond_create_proc_entry(struct bonding *bond)
+{
+       struct net_device *bond_dev = bond->dev;
+       struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
+
+       if (bn->proc_dir) {
+               bond->proc_entry = proc_create_data(bond_dev->name,
+                                                   S_IRUGO, bn->proc_dir,
+                                                   &bond_info_fops, bond);
+               if (bond->proc_entry == NULL)
+                       pr_warning("Warning: Cannot create /proc/net/%s/%s\n",
+                                  DRV_NAME, bond_dev->name);
+               else
+                       memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
+       }
+}
+
+void bond_remove_proc_entry(struct bonding *bond)
+{
+       struct net_device *bond_dev = bond->dev;
+       struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
+
+       if (bn->proc_dir && bond->proc_entry) {
+               remove_proc_entry(bond->proc_file_name, bn->proc_dir);
+               memset(bond->proc_file_name, 0, IFNAMSIZ);
+               bond->proc_entry = NULL;
+       }
+}
+
+/* Create the bonding directory under /proc/net, if it doesn't exist yet.
+ * Caller must hold rtnl_lock.
+ */
+void __net_init bond_create_proc_dir(struct bond_net *bn)
+{
+       if (!bn->proc_dir) {
+               bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
+               if (!bn->proc_dir)
+                       pr_warning("Warning: cannot create /proc/net/%s\n",
+                                  DRV_NAME);
+       }
+}
+
+/* Destroy the bonding directory under /proc/net, if empty.
+ * Caller must hold rtnl_lock.
+ */
+void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
+{
+       if (bn->proc_dir) {
+               remove_proc_entry(DRV_NAME, bn->net->proc_net);
+               bn->proc_dir = NULL;
+       }
+}
index 8fd0174c5380499f5a87178ca2a6024c4cc124e9..72bb0f6cc9bf39289ec7b9c0185f9d3ded6a4af8 100644 (file)
@@ -1198,7 +1198,7 @@ static ssize_t bonding_store_carrier(struct device *d,
                        bond->dev->name, new_value);
        }
 out:
-       return count;
+       return ret;
 }
 static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
                   bonding_show_carrier, bonding_store_carrier);
@@ -1595,7 +1595,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
                }
        }
 out:
-       return count;
+       return ret;
 }
 static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
                   bonding_show_slaves_active, bonding_store_slaves_active);
index 31fe980e4e28e5c102780aa6d9ae8450cc224763..c4e2343bb0b7a24d30ae25d402358d4c3781b594 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/if_bonding.h>
 #include <linux/cpumask.h>
 #include <linux/in6.h>
+#include <linux/netpoll.h>
 #include "bond_3ad.h"
 #include "bond_alb.h"
 
@@ -28,6 +29,8 @@
 #define DRV_NAME       "bonding"
 #define DRV_DESCRIPTION        "Ethernet Channel Bonding Driver"
 
+#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
+
 #define BOND_MAX_ARP_TARGETS   16
 
 #define IS_UP(dev)                                        \
@@ -132,7 +135,7 @@ static inline void unblock_netpoll_tx(void)
 
 static inline int is_netpoll_tx_blocked(struct net_device *dev)
 {
-       if (unlikely(dev->priv_flags & IFF_IN_NETPOLL))
+       if (unlikely(netpoll_tx_running(dev)))
                return atomic_read(&netpoll_block_tx);
        return 0;
 }
@@ -198,6 +201,9 @@ struct slave {
        u16    queue_id;
        struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */
        struct tlb_slave_info tlb_info;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       struct netpoll *np;
+#endif
 };
 
 /*
@@ -265,7 +271,8 @@ struct bonding {
  *
  * Caller must hold bond lock for read
  */
-static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct net_device *slave_dev)
+static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
+                                                 struct net_device *slave_dev)
 {
        struct slave *slave = NULL;
        int i;
@@ -276,7 +283,7 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct n
                }
        }
 
-       return 0;
+       return NULL;
 }
 
 static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
@@ -323,6 +330,22 @@ static inline unsigned long slave_last_rx(struct bonding *bond,
        return slave->dev->last_rx;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static inline void bond_netpoll_send_skb(const struct slave *slave,
+                                        struct sk_buff *skb)
+{
+       struct netpoll *np = slave->np;
+
+       if (np)
+               netpoll_send_skb(np, skb);
+}
+#else
+static inline void bond_netpoll_send_skb(const struct slave *slave,
+                                        struct sk_buff *skb)
+{
+}
+#endif
+
 static inline void bond_set_slave_inactive_flags(struct slave *slave)
 {
        struct bonding *bond = netdev_priv(slave->dev->master);
@@ -393,6 +416,30 @@ struct bond_net {
 #endif
 };
 
+#ifdef CONFIG_PROC_FS
+void bond_create_proc_entry(struct bonding *bond);
+void bond_remove_proc_entry(struct bonding *bond);
+void bond_create_proc_dir(struct bond_net *bn);
+void bond_destroy_proc_dir(struct bond_net *bn);
+#else
+static inline void bond_create_proc_entry(struct bonding *bond)
+{
+}
+
+static inline void bond_remove_proc_entry(struct bonding *bond)
+{
+}
+
+static inline void bond_create_proc_dir(struct bond_net *bn)
+{
+}
+
+static inline void bond_destroy_proc_dir(struct bond_net *bn)
+{
+}
+#endif
+
+
 /* exported from bond_main.c */
 extern int bond_net_id;
 extern const struct bond_parm_tbl bond_lacp_tbl[];
index 5dec456fd4a4bbe98ad4ca5eae09856911c83e49..1d699e3df54704f7fc30a0c8facc757d6dbc9387 100644 (file)
@@ -115,6 +115,8 @@ source "drivers/net/can/mscan/Kconfig"
 
 source "drivers/net/can/sja1000/Kconfig"
 
+source "drivers/net/can/c_can/Kconfig"
+
 source "drivers/net/can/usb/Kconfig"
 
 source "drivers/net/can/softing/Kconfig"
index 53c82a71778e8b20c80308aa1ceca1840272ab92..24ebfe8d758adf3568ceeab95c11a60116515c16 100644 (file)
@@ -13,6 +13,7 @@ obj-y                         += softing/
 
 obj-$(CONFIG_CAN_SJA1000)      += sja1000/
 obj-$(CONFIG_CAN_MSCAN)                += mscan/
+obj-$(CONFIG_CAN_C_CAN)                += c_can/
 obj-$(CONFIG_CAN_AT91)         += at91_can.o
 obj-$(CONFIG_CAN_TI_HECC)      += ti_hecc.o
 obj-$(CONFIG_CAN_MCP251X)      += mcp251x.o
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
new file mode 100644 (file)
index 0000000..ffb9773
--- /dev/null
@@ -0,0 +1,15 @@
+menuconfig CAN_C_CAN
+       tristate "Bosch C_CAN devices"
+       depends on CAN_DEV && HAS_IOMEM
+
+if CAN_C_CAN
+
+config CAN_C_CAN_PLATFORM
+       tristate "Generic Platform Bus based C_CAN driver"
+       ---help---
+         This driver adds support for the C_CAN chips connected to
+         the "platform bus" (the Linux abstraction for devices attached
+         directly to the processor), which can be found on various
+         boards from ST Microelectronics (http://www.st.com)
+         like the SPEAr1310 and SPEAr320 evaluation boards.
+endif
diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile
new file mode 100644 (file)
index 0000000..9273f6d
--- /dev/null
@@ -0,0 +1,8 @@
+#
+#  Makefile for the Bosch C_CAN controller drivers.
+#
+
+obj-$(CONFIG_CAN_C_CAN) += c_can.o
+obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
new file mode 100644 (file)
index 0000000..1405078
--- /dev/null
@@ -0,0 +1,1158 @@
+/*
+ * CAN bus driver for Bosch C_CAN controller
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Bhupesh Sharma <bhupesh.sharma@st.com>
+ *
+ * Borrowed heavily from the C_CAN driver originally written by:
+ * Copyright (C) 2007
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
+ * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
+ *
+ * TX and RX NAPI implementation has been borrowed from at91 CAN driver
+ * written by:
+ * Copyright
+ * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
+ * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ * Bosch C_CAN controller is compliant with CAN protocol version 2.0 parts A and B.
+ * Bosch C_CAN user manual can be obtained from:
+ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
+ * users_manual_c_can.pdf
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/io.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#include "c_can.h"
+
+/* control register */
+#define CONTROL_TEST           BIT(7)
+#define CONTROL_CCE            BIT(6)
+#define CONTROL_DISABLE_AR     BIT(5)
+#define CONTROL_ENABLE_AR      (0 << 5)
+#define CONTROL_EIE            BIT(3)
+#define CONTROL_SIE            BIT(2)
+#define CONTROL_IE             BIT(1)
+#define CONTROL_INIT           BIT(0)
+
+/* test register */
+#define TEST_RX                        BIT(7)
+#define TEST_TX1               BIT(6)
+#define TEST_TX2               BIT(5)
+#define TEST_LBACK             BIT(4)
+#define TEST_SILENT            BIT(3)
+#define TEST_BASIC             BIT(2)
+
+/* status register */
+#define STATUS_BOFF            BIT(7)
+#define STATUS_EWARN           BIT(6)
+#define STATUS_EPASS           BIT(5)
+#define STATUS_RXOK            BIT(4)
+#define STATUS_TXOK            BIT(3)
+
+/* error counter register */
+#define ERR_CNT_TEC_MASK       0xff
+#define ERR_CNT_TEC_SHIFT      0
+#define ERR_CNT_REC_SHIFT      8
+#define ERR_CNT_REC_MASK       (0x7f << ERR_CNT_REC_SHIFT)
+#define ERR_CNT_RP_SHIFT       15
+#define ERR_CNT_RP_MASK                (0x1 << ERR_CNT_RP_SHIFT)
+
+/* bit-timing register */
+#define BTR_BRP_MASK           0x3f
+#define BTR_BRP_SHIFT          0
+#define BTR_SJW_SHIFT          6
+#define BTR_SJW_MASK           (0x3 << BTR_SJW_SHIFT)
+#define BTR_TSEG1_SHIFT                8
+#define BTR_TSEG1_MASK         (0xf << BTR_TSEG1_SHIFT)
+#define BTR_TSEG2_SHIFT                12
+#define BTR_TSEG2_MASK         (0x7 << BTR_TSEG2_SHIFT)
+
+/* brp extension register */
+#define BRP_EXT_BRPE_MASK      0x0f
+#define BRP_EXT_BRPE_SHIFT     0
+
+/* IFx command request */
+#define IF_COMR_BUSY           BIT(15)
+
+/* IFx command mask */
+#define IF_COMM_WR             BIT(7)
+#define IF_COMM_MASK           BIT(6)
+#define IF_COMM_ARB            BIT(5)
+#define IF_COMM_CONTROL                BIT(4)
+#define IF_COMM_CLR_INT_PND    BIT(3)
+#define IF_COMM_TXRQST         BIT(2)
+#define IF_COMM_DATAA          BIT(1)
+#define IF_COMM_DATAB          BIT(0)
+#define IF_COMM_ALL            (IF_COMM_MASK | IF_COMM_ARB | \
+                               IF_COMM_CONTROL | IF_COMM_TXRQST | \
+                               IF_COMM_DATAA | IF_COMM_DATAB)
+
+/* IFx arbitration */
+#define IF_ARB_MSGVAL          BIT(15)
+#define IF_ARB_MSGXTD          BIT(14)
+#define IF_ARB_TRANSMIT                BIT(13)
+
+/* IFx message control */
+#define IF_MCONT_NEWDAT                BIT(15)
+#define IF_MCONT_MSGLST                BIT(14)
+#define IF_MCONT_CLR_MSGLST    (0 << 14)
+#define IF_MCONT_INTPND                BIT(13)
+#define IF_MCONT_UMASK         BIT(12)
+#define IF_MCONT_TXIE          BIT(11)
+#define IF_MCONT_RXIE          BIT(10)
+#define IF_MCONT_RMTEN         BIT(9)
+#define IF_MCONT_TXRQST                BIT(8)
+#define IF_MCONT_EOB           BIT(7)
+#define IF_MCONT_DLC_MASK      0xf
+
+/*
+ * IFx register masks:
+ * allow easy operation on 16-bit registers when the
+ * argument is 32-bit instead
+ */
+#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF)
+#define IFX_WRITE_HIGH_16BIT(x)        (((x) & 0xFFFF0000) >> 16)
+
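
Not part of the patch: a quick self-contained check of the IFX_WRITE_LOW_16BIT()/IFX_WRITE_HIGH_16BIT() split defined above — a 32-bit arbitration value written as two 16-bit register halves reassembles exactly the way the driver's c_can_read_reg32() helper (further down) rebuilds it.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define IFX_WRITE_LOW_16BIT(x)  ((x) & 0xFFFF)
#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)

int main(void)
{
        uint32_t id = 0x1abcdef0;
        uint16_t lo = IFX_WRITE_LOW_16BIT(id);
        uint16_t hi = IFX_WRITE_HIGH_16BIT(id);
        uint32_t back = lo | ((uint32_t)hi << 16);

        assert(back == id);
        printf("0x%08x -> lo 0x%04x hi 0x%04x\n",
               (unsigned)id, (unsigned)lo, (unsigned)hi);
        return 0;
}
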
+/* message object split */
+#define C_CAN_NO_OF_OBJECTS    32
+#define C_CAN_MSG_OBJ_RX_NUM   16
+#define C_CAN_MSG_OBJ_TX_NUM   16
+
+#define C_CAN_MSG_OBJ_RX_FIRST 1
+#define C_CAN_MSG_OBJ_RX_LAST  (C_CAN_MSG_OBJ_RX_FIRST + \
+                               C_CAN_MSG_OBJ_RX_NUM - 1)
+
+#define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1)
+#define C_CAN_MSG_OBJ_TX_LAST  (C_CAN_MSG_OBJ_TX_FIRST + \
+                               C_CAN_MSG_OBJ_TX_NUM - 1)
+
+#define C_CAN_MSG_OBJ_RX_SPLIT 9
+#define C_CAN_MSG_RX_LOW_LAST  (C_CAN_MSG_OBJ_RX_SPLIT - 1)
+
+#define C_CAN_NEXT_MSG_OBJ_MASK        (C_CAN_MSG_OBJ_TX_NUM - 1)
+#define RECEIVE_OBJECT_BITS    0x0000ffff
+
+/* status interrupt */
+#define STATUS_INTERRUPT       0x8000
+
+/* global interrupt masks */
+#define ENABLE_ALL_INTERRUPTS  1
+#define DISABLE_ALL_INTERRUPTS 0
+
+/* minimum timeout for checking BUSY status */
+#define MIN_TIMEOUT_VALUE      6
+
+/* napi related */
+#define C_CAN_NAPI_WEIGHT      C_CAN_MSG_OBJ_RX_NUM
+
+/* c_can lec values */
+enum c_can_lec_type {
+       LEC_NO_ERROR = 0,
+       LEC_STUFF_ERROR,
+       LEC_FORM_ERROR,
+       LEC_ACK_ERROR,
+       LEC_BIT1_ERROR,
+       LEC_BIT0_ERROR,
+       LEC_CRC_ERROR,
+       LEC_UNUSED,
+};
+
+/*
+ * c_can error types:
+ * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
+ */
+enum c_can_bus_error_types {
+       C_CAN_NO_ERROR = 0,
+       C_CAN_BUS_OFF,
+       C_CAN_ERROR_WARNING,
+       C_CAN_ERROR_PASSIVE,
+};
+
+static struct can_bittiming_const c_can_bittiming_const = {
+       .name = KBUILD_MODNAME,
+       .tseg1_min = 2,         /* Time segment 1 = prop_seg + phase_seg1 */
+       .tseg1_max = 16,
+       .tseg2_min = 1,         /* Time segment 2 = phase_seg2 */
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 1024,        /* 6-bit BRP field + 4-bit BRPE field*/
+       .brp_inc = 1,
+};
+
+static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
+{
+       return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
+                       C_CAN_MSG_OBJ_TX_FIRST;
+}
+
+static inline int get_tx_echo_msg_obj(const struct c_can_priv *priv)
+{
+       return (priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) +
+                       C_CAN_MSG_OBJ_TX_FIRST;
+}
+
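
Not part of the patch: a small demo of the message-object index math in get_tx_next_msg_obj()/get_tx_echo_msg_obj() above — with 16 TX objects, masking a free-running counter with (16 - 1) wraps it into the TX object window 17..32.

#include <stdio.h>

#define TX_NUM    16
#define TX_FIRST  17                    /* C_CAN_MSG_OBJ_TX_FIRST */
#define NEXT_MASK (TX_NUM - 1)          /* C_CAN_NEXT_MSG_OBJ_MASK */

int main(void)
{
        unsigned int tx_next;

        for (tx_next = 14; tx_next <= 18; tx_next++)
                printf("tx_next=%u -> obj %u\n",
                       tx_next, (tx_next & NEXT_MASK) + TX_FIRST);
        return 0;
}
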
+static u32 c_can_read_reg32(struct c_can_priv *priv, void *reg)
+{
+       u32 val = priv->read_reg(priv, reg);
+       val |= ((u32) priv->read_reg(priv, reg + 2)) << 16;
+       return val;
+}
+
+static void c_can_enable_all_interrupts(struct c_can_priv *priv,
+                                               int enable)
+{
+       unsigned int cntrl_save = priv->read_reg(priv,
+                                               &priv->regs->control);
+
+       if (enable)
+               cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
+       else
+               cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
+
+       priv->write_reg(priv, &priv->regs->control, cntrl_save);
+}
+
+static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
+{
+       int count = MIN_TIMEOUT_VALUE;
+
+       while (count && priv->read_reg(priv,
+                               &priv->regs->ifregs[iface].com_req) &
+                               IF_COMR_BUSY) {
+               count--;
+               udelay(1);
+       }
+
+       if (!count)
+               return 1;
+
+       return 0;
+}
+
+static inline void c_can_object_get(struct net_device *dev,
+                                       int iface, int objno, int mask)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       /*
+        * As per the specs, after writing the message object number to the
+        * IF command request register, the transfer between the interface
+        * register and message RAM must complete within 6 CAN-CLK
+        * periods.
+        */
+       priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
+                       IFX_WRITE_LOW_16BIT(mask));
+       priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
+                       IFX_WRITE_LOW_16BIT(objno));
+
+       if (c_can_msg_obj_is_busy(priv, iface))
+               netdev_err(dev, "timed out in object get\n");
+}
+
+static inline void c_can_object_put(struct net_device *dev,
+                                       int iface, int objno, int mask)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       /*
+        * As per the specs, after writing the message object number to the
+        * IF command request register, the transfer between the interface
+        * register and message RAM must complete within 6 CAN-CLK
+        * periods.
+        */
+       priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
+                       (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
+       priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
+                       IFX_WRITE_LOW_16BIT(objno));
+
+       if (c_can_msg_obj_is_busy(priv, iface))
+               netdev_err(dev, "timed out in object put\n");
+}
+
+static void c_can_write_msg_object(struct net_device *dev,
+                       int iface, struct can_frame *frame, int objno)
+{
+       int i;
+       u16 flags = 0;
+       unsigned int id;
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       if (!(frame->can_id & CAN_RTR_FLAG))
+               flags |= IF_ARB_TRANSMIT;
+
+       if (frame->can_id & CAN_EFF_FLAG) {
+               id = frame->can_id & CAN_EFF_MASK;
+               flags |= IF_ARB_MSGXTD;
+       } else
+               id = ((frame->can_id & CAN_SFF_MASK) << 18);
+
+       flags |= IF_ARB_MSGVAL;
+
+       priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
+                               IFX_WRITE_LOW_16BIT(id));
+       priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, flags |
+                               IFX_WRITE_HIGH_16BIT(id));
+
+       for (i = 0; i < frame->can_dlc; i += 2) {
+               priv->write_reg(priv, &priv->regs->ifregs[iface].data[i / 2],
+                               frame->data[i] | (frame->data[i + 1] << 8));
+       }
+
+       /* enable interrupt for this message object */
+       priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+                       IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
+                       frame->can_dlc);
+       c_can_object_put(dev, iface, objno, IF_COMM_ALL);
+}
+
+static inline void c_can_mark_rx_msg_obj(struct net_device *dev,
+                                               int iface, int ctrl_mask,
+                                               int obj)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+                       ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND));
+       c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
+}
+
+static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
+                                               int iface,
+                                               int ctrl_mask)
+{
+       int i;
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
+               priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+                               ctrl_mask & ~(IF_MCONT_MSGLST |
+                                       IF_MCONT_INTPND | IF_MCONT_NEWDAT));
+               c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
+       }
+}
+
+static inline void c_can_activate_rx_msg_obj(struct net_device *dev,
+                                               int iface, int ctrl_mask,
+                                               int obj)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+                       ctrl_mask & ~(IF_MCONT_MSGLST |
+                               IF_MCONT_INTPND | IF_MCONT_NEWDAT));
+       c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
+}
+
+static void c_can_handle_lost_msg_obj(struct net_device *dev,
+                                       int iface, int objno)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+       struct sk_buff *skb;
+       struct can_frame *frame;
+
+       netdev_err(dev, "msg lost in buffer %d\n", objno);
+
+       c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
+
+       priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+                       IF_MCONT_CLR_MSGLST);
+
+       c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);
+
+       /* create an error msg */
+       skb = alloc_can_err_skb(dev, &frame);
+       if (unlikely(!skb))
+               return;
+
+       frame->can_id |= CAN_ERR_CRTL;
+       frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+       stats->rx_errors++;
+       stats->rx_over_errors++;
+
+       netif_receive_skb(skb);
+}
+
+static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
+{
+       u16 flags, data;
+       int i;
+       unsigned int val;
+       struct c_can_priv *priv = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+       struct sk_buff *skb;
+       struct can_frame *frame;
+
+       skb = alloc_can_skb(dev, &frame);
+       if (!skb) {
+               stats->rx_dropped++;
+               return -ENOMEM;
+       }
+
+       frame->can_dlc = get_can_dlc(ctrl & 0x0F);
+
+       flags = priv->read_reg(priv, &priv->regs->ifregs[iface].arb2);
+       val = priv->read_reg(priv, &priv->regs->ifregs[iface].arb1) |
+               (flags << 16);
+
+       if (flags & IF_ARB_MSGXTD)
+               frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG;
+       else
+               frame->can_id = (val >> 18) & CAN_SFF_MASK;
+
+       if (flags & IF_ARB_TRANSMIT)
+               frame->can_id |= CAN_RTR_FLAG;
+       else {
+               for (i = 0; i < frame->can_dlc; i += 2) {
+                       data = priv->read_reg(priv,
+                               &priv->regs->ifregs[iface].data[i / 2]);
+                       frame->data[i] = data;
+                       frame->data[i + 1] = data >> 8;
+               }
+       }
+
+       netif_receive_skb(skb);
+
+       stats->rx_packets++;
+       stats->rx_bytes += frame->can_dlc;
+
+       return 0;
+}
+
+static void c_can_setup_receive_object(struct net_device *dev, int iface,
+                                       int objno, unsigned int mask,
+                                       unsigned int id, unsigned int mcont)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       priv->write_reg(priv, &priv->regs->ifregs[iface].mask1,
+                       IFX_WRITE_LOW_16BIT(mask));
+       priv->write_reg(priv, &priv->regs->ifregs[iface].mask2,
+                       IFX_WRITE_HIGH_16BIT(mask));
+
+       priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
+                       IFX_WRITE_LOW_16BIT(id));
+       priv->write_reg(priv, &priv->regs->ifregs[iface].arb2,
+                       (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
+
+       priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, mcont);
+       c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
+
+       netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
+                       c_can_read_reg32(priv, &priv->regs->msgval1));
+}
+
+static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, 0);
+       priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, 0);
+       priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 0);
+
+       c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
+
+       netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
+                       c_can_read_reg32(priv, &priv->regs->msgval1));
+}
+
+static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
+{
+       int val = c_can_read_reg32(priv, &priv->regs->txrqst1);
+
+       /*
+        * The transmission request register's bit n-1 corresponds to
+        * message object n, so account for the off-by-one here.
+        */
+       if (val & (1 << (objno - 1)))
+               return 1;
+
+       return 0;
+}
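+
+/*
+ * Illustrative helper, not used by the driver: it captures the off-by-one
+ * between 1-based message object numbers and 0-based TXRQST/INTPND bit
+ * positions in one place. For example, message object 17 is reported in
+ * bit 16 of the register pair.
+ */
+static inline u32 c_can_obj_bit(int objno)
+{
+       return 1U << (objno - 1);
+}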
+
+static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
+                                       struct net_device *dev)
+{
+       u32 msg_obj_no;
+       struct c_can_priv *priv = netdev_priv(dev);
+       struct can_frame *frame = (struct can_frame *)skb->data;
+
+       if (can_dropped_invalid_skb(dev, skb))
+               return NETDEV_TX_OK;
+
+       msg_obj_no = get_tx_next_msg_obj(priv);
+
+       /* prepare message object for transmission */
+       c_can_write_msg_object(dev, 0, frame, msg_obj_no);
+       can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
+
+       /*
+        * we have to stop the queue in case of a wrap-around or
+        * if the next Tx message object is still in use
+        */
+       priv->tx_next++;
+       if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
+                       (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
+               netif_stop_queue(dev);
+
+       return NETDEV_TX_OK;
+}
+
+static int c_can_set_bittiming(struct net_device *dev)
+{
+       unsigned int reg_btr, reg_brpe, ctrl_save;
+       u8 brp, brpe, sjw, tseg1, tseg2;
+       u32 ten_bit_brp;
+       struct c_can_priv *priv = netdev_priv(dev);
+       const struct can_bittiming *bt = &priv->can.bittiming;
+
+       /* c_can provides a 6-bit brp field and a 4-bit brpe field */
+       ten_bit_brp = bt->brp - 1;
+       brp = ten_bit_brp & BTR_BRP_MASK;
+       brpe = ten_bit_brp >> 6;
+
+       sjw = bt->sjw - 1;
+       tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
+       tseg2 = bt->phase_seg2 - 1;
+       reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) |
+                       (tseg2 << BTR_TSEG2_SHIFT);
+       reg_brpe = brpe & BRP_EXT_BRPE_MASK;
+
+       netdev_info(dev,
+               "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
+
+       ctrl_save = priv->read_reg(priv, &priv->regs->control);
+       priv->write_reg(priv, &priv->regs->control,
+                       ctrl_save | CONTROL_CCE | CONTROL_INIT);
+       priv->write_reg(priv, &priv->regs->btr, reg_btr);
+       priv->write_reg(priv, &priv->regs->brp_ext, reg_brpe);
+       priv->write_reg(priv, &priv->regs->control, ctrl_save);
+
+       return 0;
+}
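+
+/*
+ * Worked example for the packing above (illustrative only; it assumes the
+ * usual C_CAN BTR layout BRP[5:0], SJW[7:6], TSEG1[11:8], TSEG2[14:12],
+ * matching the shift/mask definitions earlier in this file):
+ *
+ *   bt->brp = 4, bt->sjw = 1, bt->prop_seg + bt->phase_seg1 = 13,
+ *   bt->phase_seg2 = 2
+ *
+ *   brp   = 4  - 1 = 0x03
+ *   sjw   = 1  - 1 = 0x00 -> 0x00 << 6  = 0x0000
+ *   tseg1 = 13 - 1 = 0x0c -> 0x0c << 8  = 0x0c00
+ *   tseg2 = 2  - 1 = 0x01 -> 0x01 << 12 = 0x1000
+ *
+ *   reg_btr = 0x1c03, reg_brpe = 0x0000 (brp fits in 6 bits, no extension)
+ */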
+
+/*
+ * Configure C_CAN message objects for Tx and Rx purposes:
+ * C_CAN provides a total of 32 message objects that can be configured
+ * either for Tx or Rx purposes. Here the first 16 message objects are used
+ * as a reception FIFO; the end of the reception FIFO is marked by the EoB
+ * bit being SET. The remaining 16 message objects are reserved for Tx
+ * purposes. See the user guide for further details on configuring message
+ * objects.
+ */
+static void c_can_configure_msg_objects(struct net_device *dev)
+{
+       int i;
+
+       /* first invalidate all message objects */
+       for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
+               c_can_inval_msg_object(dev, 0, i);
+
+       /* setup receive message objects */
+       for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
+               c_can_setup_receive_object(dev, 0, i, 0, 0,
+                       (IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB);
+
+       c_can_setup_receive_object(dev, 0, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
+                       IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK);
+}
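+
+/*
+ * Resulting partition, shown for illustration and assuming the 1-based,
+ * 32-object numbering defined earlier in this file:
+ *
+ *   obj  1..15 : Rx, UMASK + RXIE, EOB clear  (body of the receive FIFO)
+ *   obj     16 : Rx, UMASK + RXIE, EOB set    (end of the receive FIFO)
+ *   obj 17..32 : Tx, invalidated here and set up on demand in start_xmit
+ */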
+
+/*
+ * Configure C_CAN chip:
+ * - enable/disable auto-retransmission
+ * - set operating mode
+ * - configure message objects
+ */
+static void c_can_chip_config(struct net_device *dev)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+               /* disable automatic retransmission */
+               priv->write_reg(priv, &priv->regs->control,
+                               CONTROL_DISABLE_AR);
+       else
+               /* enable automatic retransmission */
+               priv->write_reg(priv, &priv->regs->control,
+                               CONTROL_ENABLE_AR);
+
+       if ((priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) &&
+                       (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) {
+               /* loopback + silent mode : useful for hot self-test */
+               priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+                               CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+               priv->write_reg(priv, &priv->regs->test,
+                               TEST_LBACK | TEST_SILENT);
+       } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+               /* loopback mode : useful for self-test function */
+               priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+                               CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+               priv->write_reg(priv, &priv->regs->test, TEST_LBACK);
+       } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+               /* silent mode : bus-monitoring mode */
+               priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+                               CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+               priv->write_reg(priv, &priv->regs->test, TEST_SILENT);
+       } else
+               /* normal mode */
+               priv->write_reg(priv, &priv->regs->control,
+                               CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
+
+       /* configure message objects */
+       c_can_configure_msg_objects(dev);
+
+       /* set a `lec` value so that we can check for updates later */
+       priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
+
+       /* set bittiming params */
+       c_can_set_bittiming(dev);
+}
+
+static void c_can_start(struct net_device *dev)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       /* enable status change, error and module interrupts */
+       c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
+
+       /* basic c_can configuration */
+       c_can_chip_config(dev);
+
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+       /* reset tx helper pointers */
+       priv->tx_next = priv->tx_echo = 0;
+}
+
+static void c_can_stop(struct net_device *dev)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       /* disable all interrupts */
+       c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+
+       /* set the state as STOPPED */
+       priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
+{
+       switch (mode) {
+       case CAN_MODE_START:
+               c_can_start(dev);
+               netif_wake_queue(dev);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static int c_can_get_berr_counter(const struct net_device *dev,
+                                       struct can_berr_counter *bec)
+{
+       unsigned int reg_err_counter;
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
+       bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
+                               ERR_CNT_REC_SHIFT;
+       bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
+
+       return 0;
+}
+
+/*
+ * theory of operation:
+ *
+ * priv->tx_echo holds the number of the oldest can_frame put for
+ * transmission into the hardware, but not yet ACKed by the CAN tx
+ * complete IRQ.
+ *
+ * We iterate from priv->tx_echo to priv->tx_next and, for every frame
+ * that has been transmitted, echo it back to the CAN framework. If we
+ * discover a frame that has not been transmitted yet, we stop looking.
+ */
+static void c_can_do_tx(struct net_device *dev)
+{
+       u32 val;
+       u32 msg_obj_no;
+       struct c_can_priv *priv = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+
+       for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
+               msg_obj_no = get_tx_echo_msg_obj(priv);
+               c_can_inval_msg_object(dev, 0, msg_obj_no);
+               val = c_can_read_reg32(priv, &priv->regs->txrqst1);
+               if (!(val & (1 << msg_obj_no))) {
+                       can_get_echo_skb(dev,
+                                       msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
+                       stats->tx_bytes += priv->read_reg(priv,
+                                       &priv->regs->ifregs[0].msg_cntrl)
+                                       & IF_MCONT_DLC_MASK;
+                       stats->tx_packets++;
+               }
+       }
+
+       /* restart the queue on wrap-around or if it stalled on the last pkt */
+       if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
+                       ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
+               netif_wake_queue(dev);
+}
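+
+/*
+ * Illustrative sketch, not used by the driver: tx_next and tx_echo are
+ * free-running counters and only their low bits select a Tx message
+ * object. Assuming a 16-object Tx window starting at object 17 (as set up
+ * by the message object configuration above), counter value n maps to
+ * message object 17 + (n & 15), so the mapping wraps back to object 17
+ * after every 16 transmissions.
+ */
+static inline unsigned int c_can_counter_to_tx_obj(unsigned int counter)
+{
+       return 17 + (counter & 15);     /* 17 and 15 are assumed examples */
+}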
+
+/*
+ * theory of operation:
+ *
+ * The c_can core saves a received CAN message into the first free message
+ * object it finds (starting with the lowest). Bits NEWDAT and INTPND are
+ * set for this message object, indicating that a new message has arrived.
+ * To keep reception in order despite this, we split the receive message
+ * objects into two groups, partitioned by C_CAN_MSG_OBJ_RX_SPLIT.
+ *
+ * To ensure in-order frame reception we use the following
+ * approach while re-activating a message object to receive further
+ * frames:
+ * - if the current message object number is lower than
+ *   C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
+ *   the INTPND bit.
+ * - if the current message object number is equal to
+ *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
+ *   receive message objects.
+ * - if the current message object number is greater than
+ *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
+ *   only this message object.
+ */
+static int c_can_do_rx_poll(struct net_device *dev, int quota)
+{
+       u32 num_rx_pkts = 0;
+       unsigned int msg_obj, msg_ctrl_save;
+       struct c_can_priv *priv = netdev_priv(dev);
+       u32 val = c_can_read_reg32(priv, &priv->regs->intpnd1);
+
+       for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST;
+                       msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0;
+                       val = c_can_read_reg32(priv, &priv->regs->intpnd1),
+                       msg_obj++) {
+               /*
+                * The interrupt pending register's bit n-1 corresponds to
+                * message object n, so account for the off-by-one here.
+                */
+               if (val & (1 << (msg_obj - 1))) {
+                       c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
+                                       ~IF_COMM_TXRQST);
+                       msg_ctrl_save = priv->read_reg(priv,
+                                       &priv->regs->ifregs[0].msg_cntrl);
+
+                       if (msg_ctrl_save & IF_MCONT_EOB)
+                               return num_rx_pkts;
+
+                       if (msg_ctrl_save & IF_MCONT_MSGLST) {
+                               c_can_handle_lost_msg_obj(dev, 0, msg_obj);
+                               num_rx_pkts++;
+                               quota--;
+                               continue;
+                       }
+
+                       if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
+                               continue;
+
+                       /* read the data from the message object */
+                       c_can_read_msg_object(dev, 0, msg_ctrl_save);
+
+                       if (msg_obj < C_CAN_MSG_RX_LOW_LAST)
+                               c_can_mark_rx_msg_obj(dev, 0,
+                                               msg_ctrl_save, msg_obj);
+                       else if (msg_obj > C_CAN_MSG_RX_LOW_LAST)
+                               /* activate this msg obj */
+                               c_can_activate_rx_msg_obj(dev, 0,
+                                               msg_ctrl_save, msg_obj);
+                       else if (msg_obj == C_CAN_MSG_RX_LOW_LAST)
+                               /* activate all lower message objects */
+                               c_can_activate_all_lower_rx_msg_obj(dev,
+                                               0, msg_ctrl_save);
+
+                       num_rx_pkts++;
+                       quota--;
+               }
+       }
+
+       return num_rx_pkts;
+}
+
+static inline int c_can_has_and_handle_berr(struct c_can_priv *priv)
+{
+       return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
+               (priv->current_status & LEC_UNUSED);
+}
+
+static int c_can_handle_state_change(struct net_device *dev,
+                               enum c_can_bus_error_types error_type)
+{
+       unsigned int reg_err_counter;
+       unsigned int rx_err_passive;
+       struct c_can_priv *priv = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+       struct can_berr_counter bec;
+
+       /* propagate the error condition to the CAN stack */
+       skb = alloc_can_err_skb(dev, &cf);
+       if (unlikely(!skb))
+               return 0;
+
+       c_can_get_berr_counter(dev, &bec);
+       reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
+       rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
+                               ERR_CNT_RP_SHIFT;
+
+       switch (error_type) {
+       case C_CAN_ERROR_WARNING:
+               /* error warning state */
+               priv->can.can_stats.error_warning++;
+               priv->can.state = CAN_STATE_ERROR_WARNING;
+               cf->can_id |= CAN_ERR_CRTL;
+               cf->data[1] = (bec.txerr > bec.rxerr) ?
+                       CAN_ERR_CRTL_TX_WARNING :
+                       CAN_ERR_CRTL_RX_WARNING;
+               cf->data[6] = bec.txerr;
+               cf->data[7] = bec.rxerr;
+
+               break;
+       case C_CAN_ERROR_PASSIVE:
+               /* error passive state */
+               priv->can.can_stats.error_passive++;
+               priv->can.state = CAN_STATE_ERROR_PASSIVE;
+               cf->can_id |= CAN_ERR_CRTL;
+               if (rx_err_passive)
+                       cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+               if (bec.txerr > 127)
+                       cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+
+               cf->data[6] = bec.txerr;
+               cf->data[7] = bec.rxerr;
+               break;
+       case C_CAN_BUS_OFF:
+               /* bus-off state */
+               priv->can.state = CAN_STATE_BUS_OFF;
+               cf->can_id |= CAN_ERR_BUSOFF;
+               /*
+                * disable all interrupts in bus-off mode to ensure that
+                * the CPU is not hogged by interrupt processing
+                */
+               c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+               can_bus_off(dev);
+               break;
+       default:
+               break;
+       }
+
+       netif_receive_skb(skb);
+       stats->rx_packets++;
+       stats->rx_bytes += cf->can_dlc;
+
+       return 1;
+}
+
+static int c_can_handle_bus_err(struct net_device *dev,
+                               enum c_can_lec_type lec_type)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+
+       /*
+        * Early exit if there is no LEC update or no error.
+        * No LEC update means that no CAN bus event has been detected
+        * since the CPU wrote 0x7 (LEC_UNUSED) to the status register.
+        */
+       if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
+               return 0;
+
+       /* propagate the error condition to the CAN stack */
+       skb = alloc_can_err_skb(dev, &cf);
+       if (unlikely(!skb))
+               return 0;
+
+       /*
+        * check for 'last error code' which tells us the
+        * type of the last error to occur on the CAN bus
+        */
+
+       /* common for all type of bus errors */
+       priv->can.can_stats.bus_error++;
+       stats->rx_errors++;
+       cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+       cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+
+       switch (lec_type) {
+       case LEC_STUFF_ERROR:
+               netdev_dbg(dev, "stuff error\n");
+               cf->data[2] |= CAN_ERR_PROT_STUFF;
+               break;
+       case LEC_FORM_ERROR:
+               netdev_dbg(dev, "form error\n");
+               cf->data[2] |= CAN_ERR_PROT_FORM;
+               break;
+       case LEC_ACK_ERROR:
+               netdev_dbg(dev, "ack error\n");
+               cf->data[2] |= (CAN_ERR_PROT_LOC_ACK |
+                               CAN_ERR_PROT_LOC_ACK_DEL);
+               break;
+       case LEC_BIT1_ERROR:
+               netdev_dbg(dev, "bit1 error\n");
+               cf->data[2] |= CAN_ERR_PROT_BIT1;
+               break;
+       case LEC_BIT0_ERROR:
+               netdev_dbg(dev, "bit0 error\n");
+               cf->data[2] |= CAN_ERR_PROT_BIT0;
+               break;
+       case LEC_CRC_ERROR:
+               netdev_dbg(dev, "CRC error\n");
+               cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+                               CAN_ERR_PROT_LOC_CRC_DEL);
+               break;
+       default:
+               break;
+       }
+
+       /* set a `lec` value so that we can check for updates later */
+       priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
+
+       netif_receive_skb(skb);
+       stats->rx_packets++;
+       stats->rx_bytes += cf->can_dlc;
+
+       return 1;
+}
+
+static int c_can_poll(struct napi_struct *napi, int quota)
+{
+       u16 irqstatus;
+       int lec_type = 0;
+       int work_done = 0;
+       struct net_device *dev = napi->dev;
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+       if (!irqstatus)
+               goto end;
+
+       /* status events have the highest priority */
+       if (irqstatus == STATUS_INTERRUPT) {
+               priv->current_status = priv->read_reg(priv,
+                                       &priv->regs->status);
+
+               /* handle Tx/Rx events */
+               if (priv->current_status & STATUS_TXOK)
+                       priv->write_reg(priv, &priv->regs->status,
+                                       priv->current_status & ~STATUS_TXOK);
+
+               if (priv->current_status & STATUS_RXOK)
+                       priv->write_reg(priv, &priv->regs->status,
+                                       priv->current_status & ~STATUS_RXOK);
+
+               /* handle state changes */
+               if ((priv->current_status & STATUS_EWARN) &&
+                               (!(priv->last_status & STATUS_EWARN))) {
+                       netdev_dbg(dev, "entered error warning state\n");
+                       work_done += c_can_handle_state_change(dev,
+                                               C_CAN_ERROR_WARNING);
+               }
+               if ((priv->current_status & STATUS_EPASS) &&
+                               (!(priv->last_status & STATUS_EPASS))) {
+                       netdev_dbg(dev, "entered error passive state\n");
+                       work_done += c_can_handle_state_change(dev,
+                                               C_CAN_ERROR_PASSIVE);
+               }
+               if ((priv->current_status & STATUS_BOFF) &&
+                               (!(priv->last_status & STATUS_BOFF))) {
+                       netdev_dbg(dev, "entered bus off state\n");
+                       work_done += c_can_handle_state_change(dev,
+                                               C_CAN_BUS_OFF);
+               }
+
+               /* handle bus recovery events */
+               if ((!(priv->current_status & STATUS_BOFF)) &&
+                               (priv->last_status & STATUS_BOFF)) {
+                       netdev_dbg(dev, "left bus off state\n");
+                       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+               }
+               if ((!(priv->current_status & STATUS_EPASS)) &&
+                               (priv->last_status & STATUS_EPASS)) {
+                       netdev_dbg(dev, "left error passive state\n");
+                       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+               }
+
+               priv->last_status = priv->current_status;
+
+               /* handle lec errors on the bus */
+               lec_type = c_can_has_and_handle_berr(priv);
+               if (lec_type)
+                       work_done += c_can_handle_bus_err(dev, lec_type);
+       } else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) &&
+                       (irqstatus <= C_CAN_MSG_OBJ_RX_LAST)) {
+               /* handle events corresponding to receive message objects */
+               work_done += c_can_do_rx_poll(dev, (quota - work_done));
+       } else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) &&
+                       (irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) {
+               /* handle events corresponding to transmit message objects */
+               c_can_do_tx(dev);
+       }
+
+end:
+       if (work_done < quota) {
+               napi_complete(napi);
+               /* enable all IRQs */
+               c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
+       }
+
+       return work_done;
+}
+
+static irqreturn_t c_can_isr(int irq, void *dev_id)
+{
+       u16 irqstatus;
+       struct net_device *dev = (struct net_device *)dev_id;
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+       if (!irqstatus)
+               return IRQ_NONE;
+
+       /* disable all interrupts and schedule the NAPI */
+       c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+       napi_schedule(&priv->napi);
+
+       return IRQ_HANDLED;
+}
+
+static int c_can_open(struct net_device *dev)
+{
+       int err;
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       /* open the can device */
+       err = open_candev(dev);
+       if (err) {
+               netdev_err(dev, "failed to open can device\n");
+               return err;
+       }
+
+       /* register interrupt handler */
+       err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
+                               dev);
+       if (err < 0) {
+               netdev_err(dev, "failed to request interrupt\n");
+               goto exit_irq_fail;
+       }
+
+       /* start the c_can controller */
+       c_can_start(dev);
+
+       napi_enable(&priv->napi);
+       netif_start_queue(dev);
+
+       return 0;
+
+exit_irq_fail:
+       close_candev(dev);
+       return err;
+}
+
+static int c_can_close(struct net_device *dev)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       netif_stop_queue(dev);
+       napi_disable(&priv->napi);
+       c_can_stop(dev);
+       free_irq(dev->irq, dev);
+       close_candev(dev);
+
+       return 0;
+}
+
+struct net_device *alloc_c_can_dev(void)
+{
+       struct net_device *dev;
+       struct c_can_priv *priv;
+
+       dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
+       if (!dev)
+               return NULL;
+
+       priv = netdev_priv(dev);
+       netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
+
+       priv->dev = dev;
+       priv->can.bittiming_const = &c_can_bittiming_const;
+       priv->can.do_set_mode = c_can_set_mode;
+       priv->can.do_get_berr_counter = c_can_get_berr_counter;
+       priv->can.ctrlmode_supported = CAN_CTRLMODE_ONE_SHOT |
+                                       CAN_CTRLMODE_LOOPBACK |
+                                       CAN_CTRLMODE_LISTENONLY |
+                                       CAN_CTRLMODE_BERR_REPORTING;
+
+       return dev;
+}
+EXPORT_SYMBOL_GPL(alloc_c_can_dev);
+
+void free_c_can_dev(struct net_device *dev)
+{
+       free_candev(dev);
+}
+EXPORT_SYMBOL_GPL(free_c_can_dev);
+
+static const struct net_device_ops c_can_netdev_ops = {
+       .ndo_open = c_can_open,
+       .ndo_stop = c_can_close,
+       .ndo_start_xmit = c_can_start_xmit,
+};
+
+int register_c_can_dev(struct net_device *dev)
+{
+       dev->flags |= IFF_ECHO; /* we support local echo */
+       dev->netdev_ops = &c_can_netdev_ops;
+
+       return register_candev(dev);
+}
+EXPORT_SYMBOL_GPL(register_c_can_dev);
+
+void unregister_c_can_dev(struct net_device *dev)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       /* disable all interrupts */
+       c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+
+       unregister_candev(dev);
+}
+EXPORT_SYMBOL_GPL(unregister_c_can_dev);
+
+MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller");
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
new file mode 100644 (file)
index 0000000..9b7fbef
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * CAN bus driver for Bosch C_CAN controller
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Bhupesh Sharma <bhupesh.sharma@st.com>
+ *
+ * Borrowed heavily from the C_CAN driver originally written by:
+ * Copyright (C) 2007
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
+ * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
+ *
+ * Bosch C_CAN controller is compliant with CAN protocol version 2.0 parts A and B.
+ * Bosch C_CAN user manual can be obtained from:
+ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
+ * users_manual_c_can.pdf
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef C_CAN_H
+#define C_CAN_H
+
+/* c_can IF registers */
+struct c_can_if_regs {
+       u16 com_req;
+       u16 com_mask;
+       u16 mask1;
+       u16 mask2;
+       u16 arb1;
+       u16 arb2;
+       u16 msg_cntrl;
+       u16 data[4];
+       u16 _reserved[13];
+};
+
+/* c_can hardware registers */
+struct c_can_regs {
+       u16 control;
+       u16 status;
+       u16 err_cnt;
+       u16 btr;
+       u16 interrupt;
+       u16 test;
+       u16 brp_ext;
+       u16 _reserved1;
+       struct c_can_if_regs ifregs[2]; /* [0] = IF1 and [1] = IF2 */
+       u16 _reserved2[8];
+       u16 txrqst1;
+       u16 txrqst2;
+       u16 _reserved3[6];
+       u16 newdat1;
+       u16 newdat2;
+       u16 _reserved4[6];
+       u16 intpnd1;
+       u16 intpnd2;
+       u16 _reserved5[6];
+       u16 msgval1;
+       u16 msgval2;
+       u16 _reserved6[6];
+};
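+
+/*
+ * Byte offsets that follow from the layout above for a plain 16-bit
+ * aligned implementation (listed for illustration; a 32-bit aligned
+ * implementation doubles each offset, see c_can_platform.c):
+ *
+ *   control 0x00, status 0x02, err_cnt 0x04, btr 0x06, interrupt 0x08,
+ *   test 0x0a, brp_ext 0x0c, ifregs[0] 0x10, ifregs[1] 0x40,
+ *   txrqst1 0x80, newdat1 0x90, intpnd1 0xa0, msgval1 0xb0
+ */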
+
+/* c_can private data structure */
+struct c_can_priv {
+       struct can_priv can;    /* must be the first member */
+       struct napi_struct napi;
+       struct net_device *dev;
+       int tx_object;
+       int current_status;
+       int last_status;
+       u16 (*read_reg) (struct c_can_priv *priv, void *reg);
+       void (*write_reg) (struct c_can_priv *priv, void *reg, u16 val);
+       struct c_can_regs __iomem *regs;
+       unsigned long irq_flags; /* for request_irq() */
+       unsigned int tx_next;
+       unsigned int tx_echo;
+       void *priv;             /* for board-specific data */
+};
+
+struct net_device *alloc_c_can_dev(void);
+void free_c_can_dev(struct net_device *dev);
+int register_c_can_dev(struct net_device *dev);
+void unregister_c_can_dev(struct net_device *dev);
+
+#endif /* C_CAN_H */
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
new file mode 100644 (file)
index 0000000..e629b96
--- /dev/null
@@ -0,0 +1,215 @@
+/*
+ * Platform CAN bus driver for Bosch C_CAN controller
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Bhupesh Sharma <bhupesh.sharma@st.com>
+ *
+ * Borrowed heavily from the C_CAN driver originally written by:
+ * Copyright (C) 2007
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
+ * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
+ *
+ * Bosch C_CAN controller is compliant with CAN protocol version 2.0 parts A and B.
+ * Bosch C_CAN user manual can be obtained from:
+ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
+ * users_manual_c_can.pdf
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <linux/can/dev.h>
+
+#include "c_can.h"
+
+/*
+ * The 16-bit c_can registers can be laid out differently in the memory
+ * map of different implementations: for example, they may be aligned on
+ * a 16-bit or on a 32-bit boundary. Provide a common read/write interface
+ * to handle both cases.
+ */
+static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv,
+                                               void *reg)
+{
+       return readw(reg);
+}
+
+static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv,
+                                               void *reg, u16 val)
+{
+       writew(val, reg);
+}
+
+static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv,
+                                               void *reg)
+{
+       return readw(reg + (long)reg - (long)priv->regs);
+}
+
+static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
+                                               void *reg, u16 val)
+{
+       writew(val, reg + (long)reg - (long)priv->regs);
+}
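+
+/*
+ * Worked example for the 32-bit aligned accessors above (addresses chosen
+ * purely for illustration): struct c_can_regs packs the registers at
+ * 16-bit spacing, so on a 32-bit aligned implementation every offset must
+ * be doubled. "reg + ((long)reg - (long)priv->regs)" does exactly that:
+ *
+ *   priv->regs = 0x1000 (mapped base)
+ *   reg        = 0x1010 (struct offset 0x10, e.g. ifregs[0].com_req)
+ *   access     = 0x1010 + (0x1010 - 0x1000) = 0x1020 (offset doubled)
+ */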
+
+static int __devinit c_can_plat_probe(struct platform_device *pdev)
+{
+       int ret;
+       void __iomem *addr;
+       struct net_device *dev;
+       struct c_can_priv *priv;
+       struct resource *mem, *irq;
+#ifdef CONFIG_HAVE_CLK
+       struct clk *clk;
+
+       /* get the appropriate clk */
+       clk = clk_get(&pdev->dev, NULL);
+       if (IS_ERR(clk)) {
+               dev_err(&pdev->dev, "no clock defined\n");
+               ret = -ENODEV;
+               goto exit;
+       }
+#endif
+
+       /* get the platform data */
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!mem || !irq) {
+               ret = -ENODEV;
+               goto exit_free_clk;
+       }
+
+       if (!request_mem_region(mem->start, resource_size(mem),
+                               KBUILD_MODNAME)) {
+               dev_err(&pdev->dev, "resource unavailable\n");
+               ret = -ENODEV;
+               goto exit_free_clk;
+       }
+
+       addr = ioremap(mem->start, resource_size(mem));
+       if (!addr) {
+               dev_err(&pdev->dev, "failed to map can port\n");
+               ret = -ENOMEM;
+               goto exit_release_mem;
+       }
+
+       /* allocate the c_can device */
+       dev = alloc_c_can_dev();
+       if (!dev) {
+               ret = -ENOMEM;
+               goto exit_iounmap;
+       }
+
+       priv = netdev_priv(dev);
+
+       dev->irq = irq->start;
+       priv->regs = addr;
+#ifdef CONFIG_HAVE_CLK
+       priv->can.clock.freq = clk_get_rate(clk);
+       priv->priv = clk;
+#endif
+
+       switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
+       case IORESOURCE_MEM_32BIT:
+               priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
+               priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
+               break;
+       case IORESOURCE_MEM_16BIT:
+       default:
+               priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
+               priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+               break;
+       }
+
+       platform_set_drvdata(pdev, dev);
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       ret = register_c_can_dev(dev);
+       if (ret) {
+               dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
+                       KBUILD_MODNAME, ret);
+               goto exit_free_device;
+       }
+
+       dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
+                KBUILD_MODNAME, priv->regs, dev->irq);
+       return 0;
+
+exit_free_device:
+       platform_set_drvdata(pdev, NULL);
+       free_c_can_dev(dev);
+exit_iounmap:
+       iounmap(addr);
+exit_release_mem:
+       release_mem_region(mem->start, resource_size(mem));
+exit_free_clk:
+#ifdef CONFIG_HAVE_CLK
+       clk_put(clk);
+exit:
+#endif
+       dev_err(&pdev->dev, "probe failed\n");
+
+       return ret;
+}
+
+static int __devexit c_can_plat_remove(struct platform_device *pdev)
+{
+       struct net_device *dev = platform_get_drvdata(pdev);
+       struct c_can_priv *priv = netdev_priv(dev);
+       struct resource *mem;
+
+       unregister_c_can_dev(dev);
+       platform_set_drvdata(pdev, NULL);
+
+       free_c_can_dev(dev);
+       iounmap(priv->regs);
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       release_mem_region(mem->start, resource_size(mem));
+
+#ifdef CONFIG_HAVE_CLK
+       clk_put(priv->priv);
+#endif
+
+       return 0;
+}
+
+static struct platform_driver c_can_plat_driver = {
+       .driver = {
+               .name = KBUILD_MODNAME,
+               .owner = THIS_MODULE,
+       },
+       .probe = c_can_plat_probe,
+       .remove = __devexit_p(c_can_plat_remove),
+};
+
+static int __init c_can_plat_init(void)
+{
+       return platform_driver_register(&c_can_plat_driver);
+}
+module_init(c_can_plat_init);
+
+static void __exit c_can_plat_exit(void)
+{
+       platform_driver_unregister(&c_can_plat_driver);
+}
+module_exit(c_can_plat_exit);
+
+MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Platform CAN bus driver for Bosch C_CAN controller");
index 302be4aa69d6d4252ad2be33a5a0d55dfb128c6d..271a1f00c2240c570320a98ca743f997894e8669 100644 (file)
@@ -65,7 +65,14 @@ static LIST_HEAD(cnic_udev_list);
 static DEFINE_RWLOCK(cnic_dev_lock);
 static DEFINE_MUTEX(cnic_lock);
 
-static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+
+/* helper function, assuming cnic_lock is held */
+static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
+{
+       return rcu_dereference_protected(cnic_ulp_tbl[type],
+                                        lockdep_is_held(&cnic_lock));
+}
 
 static int cnic_service_bnx2(void *, void *);
 static int cnic_service_bnx2x(void *, void *);
@@ -435,7 +442,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
                return -EINVAL;
        }
        mutex_lock(&cnic_lock);
-       if (cnic_ulp_tbl[ulp_type]) {
+       if (cnic_ulp_tbl_prot(ulp_type)) {
                pr_err("%s: Type %d has already been registered\n",
                       __func__, ulp_type);
                mutex_unlock(&cnic_lock);
@@ -478,7 +485,7 @@ int cnic_unregister_driver(int ulp_type)
                return -EINVAL;
        }
        mutex_lock(&cnic_lock);
-       ulp_ops = cnic_ulp_tbl[ulp_type];
+       ulp_ops = cnic_ulp_tbl_prot(ulp_type);
        if (!ulp_ops) {
                pr_err("%s: Type %d has not been registered\n",
                       __func__, ulp_type);
@@ -529,7 +536,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
                return -EINVAL;
        }
        mutex_lock(&cnic_lock);
-       if (cnic_ulp_tbl[ulp_type] == NULL) {
+       if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
                pr_err("%s: Driver with type %d has not been registered\n",
                       __func__, ulp_type);
                mutex_unlock(&cnic_lock);
@@ -544,7 +551,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
 
        clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
        cp->ulp_handle[ulp_type] = ulp_ctx;
-       ulp_ops = cnic_ulp_tbl[ulp_type];
+       ulp_ops = cnic_ulp_tbl_prot(ulp_type);
        rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
        cnic_hold(dev);
 
@@ -2970,7 +2977,8 @@ static void cnic_ulp_stop(struct cnic_dev *dev)
                struct cnic_ulp_ops *ulp_ops;
 
                mutex_lock(&cnic_lock);
-               ulp_ops = cp->ulp_ops[if_type];
+               ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+                                                   lockdep_is_held(&cnic_lock));
                if (!ulp_ops) {
                        mutex_unlock(&cnic_lock);
                        continue;
@@ -2994,7 +3002,8 @@ static void cnic_ulp_start(struct cnic_dev *dev)
                struct cnic_ulp_ops *ulp_ops;
 
                mutex_lock(&cnic_lock);
-               ulp_ops = cp->ulp_ops[if_type];
+               ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+                                                   lockdep_is_held(&cnic_lock));
                if (!ulp_ops || !ulp_ops->cnic_start) {
                        mutex_unlock(&cnic_lock);
                        continue;
@@ -3058,7 +3067,7 @@ static void cnic_ulp_init(struct cnic_dev *dev)
                struct cnic_ulp_ops *ulp_ops;
 
                mutex_lock(&cnic_lock);
-               ulp_ops = cnic_ulp_tbl[i];
+               ulp_ops = cnic_ulp_tbl_prot(i);
                if (!ulp_ops || !ulp_ops->cnic_init) {
                        mutex_unlock(&cnic_lock);
                        continue;
@@ -3082,7 +3091,7 @@ static void cnic_ulp_exit(struct cnic_dev *dev)
                struct cnic_ulp_ops *ulp_ops;
 
                mutex_lock(&cnic_lock);
-               ulp_ops = cnic_ulp_tbl[i];
+               ulp_ops = cnic_ulp_tbl_prot(i);
                if (!ulp_ops || !ulp_ops->cnic_exit) {
                        mutex_unlock(&cnic_lock);
                        continue;
@@ -3405,9 +3414,12 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
        memset(&fl, 0, sizeof(fl));
        fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
 
-       err = ip_route_output_key(&init_net, &rt, &fl);
-       if (!err)
+       rt = ip_route_output_key(&init_net, &fl);
+       err = 0;
+       if (!IS_ERR(rt))
                *dst = &rt->dst;
+       else
+               err = PTR_ERR(rt);
        return err;
 #else
        return -ENETUNREACH;
@@ -4187,6 +4199,14 @@ static void cnic_enable_bnx2_int(struct cnic_dev *dev)
                BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
 }
 
+static void cnic_get_bnx2_iscsi_info(struct cnic_dev *dev)
+{
+       u32 max_conn;
+
+       max_conn = cnic_reg_rd_ind(dev, BNX2_FW_MAX_ISCSI_CONN);
+       dev->max_iscsi_conn = max_conn;
+}
+
 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
 {
        struct cnic_local *cp = dev->cnic_priv;
@@ -4511,6 +4531,8 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
                return err;
        }
 
+       cnic_get_bnx2_iscsi_info(dev);
+
        return 0;
 }
 
@@ -4722,129 +4744,6 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
        cp->rx_cons = *cp->rx_cons_ptr;
 }
 
-static int cnic_read_bnx2x_iscsi_mac(struct cnic_dev *dev, u32 upper_addr,
-                                    u32 lower_addr)
-{
-       u32 val;
-       u8 mac[6];
-
-       val = CNIC_RD(dev, upper_addr);
-
-       mac[0] = (u8) (val >> 8);
-       mac[1] = (u8) val;
-
-       val = CNIC_RD(dev, lower_addr);
-
-       mac[2] = (u8) (val >> 24);
-       mac[3] = (u8) (val >> 16);
-       mac[4] = (u8) (val >> 8);
-       mac[5] = (u8) val;
-
-       if (is_valid_ether_addr(mac)) {
-               memcpy(dev->mac_addr, mac, 6);
-               return 0;
-       } else {
-               return -EINVAL;
-       }
-}
-
-static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
-{
-       struct cnic_local *cp = dev->cnic_priv;
-       u32 base, base2, addr, addr1, val;
-       int port = CNIC_PORT(cp);
-
-       dev->max_iscsi_conn = 0;
-       base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
-       if (base == 0)
-               return;
-
-       base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 :
-                                             MISC_REG_GENERIC_CR_0));
-       addr = BNX2X_SHMEM_ADDR(base,
-               dev_info.port_hw_config[port].iscsi_mac_upper);
-
-       addr1 = BNX2X_SHMEM_ADDR(base,
-               dev_info.port_hw_config[port].iscsi_mac_lower);
-
-       cnic_read_bnx2x_iscsi_mac(dev, addr, addr1);
-
-       addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
-       val = CNIC_RD(dev, addr);
-
-       if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
-               u16 val16;
-
-               addr = BNX2X_SHMEM_ADDR(base,
-                               drv_lic_key[port].max_iscsi_init_conn);
-               val16 = CNIC_RD16(dev, addr);
-
-               if (val16)
-                       val16 ^= 0x1e1e;
-               dev->max_iscsi_conn = val16;
-       }
-
-       if (BNX2X_CHIP_IS_E2(cp->chip_id))
-               dev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
-
-       if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
-               int func = CNIC_FUNC(cp);
-               u32 mf_cfg_addr;
-
-               if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr))
-                       mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2,
-                                             mf_cfg_addr));
-               else
-                       mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;
-
-               if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
-                       /* Must determine if the MF is SD vs SI mode */
-                       addr = BNX2X_SHMEM_ADDR(base,
-                                       dev_info.shared_feature_config.config);
-                       val = CNIC_RD(dev, addr);
-                       if ((val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) ==
-                           SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT) {
-                               int rc;
-
-                               /* MULTI_FUNCTION_SI mode */
-                               addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
-                                       func_ext_config[func].func_cfg);
-                               val = CNIC_RD(dev, addr);
-                               if (!(val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD))
-                                       dev->max_iscsi_conn = 0;
-
-                               if (!(val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
-                                       dev->max_fcoe_conn = 0;
-
-                               addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
-                                       func_ext_config[func].
-                                       iscsi_mac_addr_upper);
-                               addr1 = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
-                                       func_ext_config[func].
-                                       iscsi_mac_addr_lower);
-                               rc = cnic_read_bnx2x_iscsi_mac(dev, addr,
-                                                               addr1);
-                               if (rc && func > 1)
-                                       dev->max_iscsi_conn = 0;
-
-                               return;
-                       }
-               }
-
-               addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
-                       func_mf_config[func].e1hov_tag);
-
-               val = CNIC_RD(dev, addr);
-               val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
-               if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
-                       dev->max_fcoe_conn = 0;
-                       dev->max_iscsi_conn = 0;
-               }
-       }
-       if (!is_valid_ether_addr(dev->mac_addr))
-               dev->max_iscsi_conn = 0;
-}
-
 static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
 {
        struct cnic_local *cp = dev->cnic_priv;
@@ -4926,8 +4825,6 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
 
        cnic_init_bnx2x_kcq(dev);
 
-       cnic_get_bnx2x_iscsi_info(dev);
-
        /* Only 1 EQ */
        CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
        CNIC_WR(dev, BAR_CSTRORM_INTMEM +
@@ -5281,15 +5178,11 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
 
        dev_hold(dev);
        pci_dev_get(pdev);
-       if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
-           pdev->device == PCI_DEVICE_ID_NX2_5709S) {
-               u8 rev;
-
-               pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
-               if (rev < 0x10) {
-                       pci_dev_put(pdev);
-                       goto cnic_err;
-               }
+       if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
+            pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
+           (pdev->revision < 0x10)) {
+               pci_dev_put(pdev);
+               goto cnic_err;
        }
        pci_dev_put(pdev);
 
@@ -5360,6 +5253,14 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
        cdev->pcidev = pdev;
        cp->chip_id = ethdev->chip_id;
 
+       if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
+               cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
+       if (BNX2X_CHIP_IS_E2(cp->chip_id) &&
+           !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+               cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
+
+       memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
+
        cp->cnic_ops = &cnic_bnx2x_ops;
        cp->start_hw = cnic_start_bnx2x_hw;
        cp->stop_hw = cnic_stop_bnx2x_hw;
index b328f6c924c38609e674afc3141cf1feeb5dda0b..4456260c653c3d289d0016d57af38ffa78921ded 100644 (file)
@@ -220,7 +220,7 @@ struct cnic_local {
 #define ULP_F_INIT     0
 #define ULP_F_START    1
 #define ULP_F_CALL_PENDING     2
-       struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
+       struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE];
 
        unsigned long cnic_local_flags;
 #define        CNIC_LCL_FL_KWQ_INIT            0x0
index 9f44e0ffe003bfa90d88bc2c4bec12d5c833ef0c..e01b49ee35915a623f3b2bf377bb616ead139199 100644 (file)
@@ -12,8 +12,8 @@
 #ifndef CNIC_IF_H
 #define CNIC_IF_H
 
-#define CNIC_MODULE_VERSION    "2.2.12"
-#define CNIC_MODULE_RELDATE    "Jan 03, 2011"
+#define CNIC_MODULE_VERSION    "2.2.13"
+#define CNIC_MODULE_RELDATE    "Jan 31, 2011"
 
 #define CNIC_ULP_RDMA          0
 #define CNIC_ULP_ISCSI         1
@@ -159,6 +159,9 @@ struct cnic_eth_dev {
        u32             drv_state;
 #define CNIC_DRV_STATE_REGD            0x00000001
 #define CNIC_DRV_STATE_USING_MSIX      0x00000002
+#define CNIC_DRV_STATE_NO_ISCSI_OOO    0x00000004
+#define CNIC_DRV_STATE_NO_ISCSI                0x00000008
+#define CNIC_DRV_STATE_NO_FCOE         0x00000010
        u32             chip_id;
        u32             max_kwqe_pending;
        struct pci_dev  *pdev;
@@ -176,6 +179,7 @@ struct cnic_eth_dev {
        u32             fcoe_init_cid;
        u16             iscsi_l2_client_id;
        u16             iscsi_l2_cid;
+       u8              iscsi_mac[ETH_ALEN];
 
        int             num_irq;
        struct cnic_irq irq_arr[MAX_CNIC_VEC];
index ef02aa68c9261dd7889975ed4a75c379f52cf196..862804f32b6e5d9fcced19152d69294eda439cf3 100644 (file)
@@ -186,9 +186,10 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
                                dev = NULL;
                                if (grp)
                                        dev = vlan_group_get_device(grp, vlan);
-                       } else
+                       } else if (netif_is_bond_slave(dev)) {
                                while (dev->master)
                                        dev = dev->master;
+                       }
                        return dev;
                }
        }
@@ -967,8 +968,6 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
                cxgb_neigh_update((struct neighbour *)ctx);
                break;
        }
-       case (NETEVENT_PMTU_UPDATE):
-               break;
        case (NETEVENT_REDIRECT):{
                struct netevent_redirect *nr = ctx;
                cxgb_redirect(nr->old, nr->new);
index ec35d458102c797ea7f19cf6fcacb9bff88aa408..5352c8a23f4da1fc4cfdd9a08df6d40fd886b89c 100644 (file)
@@ -2471,7 +2471,6 @@ static int netevent_cb(struct notifier_block *nb, unsigned long event,
        case NETEVENT_NEIGH_UPDATE:
                check_neigh_update(data);
                break;
-       case NETEVENT_PMTU_UPDATE:
        case NETEVENT_REDIRECT:
        default:
                break;
index 461dd6f905f78ca476b1edfed1c71a9ea49c2eac..3177081136017f5d1890a55c42dfb7eb34e2fe48 100644 (file)
@@ -1593,10 +1593,15 @@ dm9000_probe(struct platform_device *pdev)
                        ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
        }
 
-       if (!is_valid_ether_addr(ndev->dev_addr))
+       if (!is_valid_ether_addr(ndev->dev_addr)) {
                dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
                         "set using ifconfig\n", ndev->name);
 
+               random_ether_addr(ndev->dev_addr);
+               mac_src = "random";
+       }
+
+
        platform_set_drvdata(pdev, ndev);
        ret = register_netdev(ndev);
 
index e610e1369053154abf27e8665d60ca1392a2d0ff..00bf595ebd67c6485a059b51e83f2b27f951a2a5 100644 (file)
@@ -364,6 +364,7 @@ struct e1000_adapter {
        /* structs defined in e1000_hw.h */
        struct e1000_hw hw;
 
+       spinlock_t stats64_lock;
        struct e1000_hw_stats stats;
        struct e1000_phy_info phy_info;
        struct e1000_phy_stats phy_stats;
@@ -494,7 +495,9 @@ extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter);
 extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
 extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
 extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
-extern void e1000e_update_stats(struct e1000_adapter *adapter);
+extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+                                                    struct rtnl_link_stats64
+                                                    *stats);
 extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
 extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
 extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
index fa08b6336cfb0796ae901076d25460f52600bb7a..d4e51aa231b95ad6d2df068fc3f14081827e847e 100644 (file)
@@ -46,15 +46,15 @@ struct e1000_stats {
 };
 
 #define E1000_STAT(str, m) { \
-                       .stat_string = str, \
-                       .type = E1000_STATS, \
-                       .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
-                       .stat_offset = offsetof(struct e1000_adapter, m) }
+               .stat_string = str, \
+               .type = E1000_STATS, \
+               .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
+               .stat_offset = offsetof(struct e1000_adapter, m) }
 #define E1000_NETDEV_STAT(str, m) { \
-                       .stat_string = str, \
-                       .type = NETDEV_STATS, \
-                       .sizeof_stat = sizeof(((struct net_device *)0)->m), \
-                       .stat_offset = offsetof(struct net_device, m) }
+               .stat_string = str, \
+               .type = NETDEV_STATS, \
+               .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \
+               .stat_offset = offsetof(struct rtnl_link_stats64, m) }
 
 static const struct e1000_stats e1000_gstrings_stats[] = {
        E1000_STAT("rx_packets", stats.gprc),
@@ -65,21 +65,21 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
        E1000_STAT("tx_broadcast", stats.bptc),
        E1000_STAT("rx_multicast", stats.mprc),
        E1000_STAT("tx_multicast", stats.mptc),
-       E1000_NETDEV_STAT("rx_errors", stats.rx_errors),
-       E1000_NETDEV_STAT("tx_errors", stats.tx_errors),
-       E1000_NETDEV_STAT("tx_dropped", stats.tx_dropped),
+       E1000_NETDEV_STAT("rx_errors", rx_errors),
+       E1000_NETDEV_STAT("tx_errors", tx_errors),
+       E1000_NETDEV_STAT("tx_dropped", tx_dropped),
        E1000_STAT("multicast", stats.mprc),
        E1000_STAT("collisions", stats.colc),
-       E1000_NETDEV_STAT("rx_length_errors", stats.rx_length_errors),
-       E1000_NETDEV_STAT("rx_over_errors", stats.rx_over_errors),
+       E1000_NETDEV_STAT("rx_length_errors", rx_length_errors),
+       E1000_NETDEV_STAT("rx_over_errors", rx_over_errors),
        E1000_STAT("rx_crc_errors", stats.crcerrs),
-       E1000_NETDEV_STAT("rx_frame_errors", stats.rx_frame_errors),
+       E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
        E1000_STAT("rx_no_buffer_count", stats.rnbc),
        E1000_STAT("rx_missed_errors", stats.mpc),
        E1000_STAT("tx_aborted_errors", stats.ecol),
        E1000_STAT("tx_carrier_errors", stats.tncrs),
-       E1000_NETDEV_STAT("tx_fifo_errors", stats.tx_fifo_errors),
-       E1000_NETDEV_STAT("tx_heartbeat_errors", stats.tx_heartbeat_errors),
+       E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
+       E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
        E1000_STAT("tx_window_errors", stats.latecol),
        E1000_STAT("tx_abort_late_coll", stats.latecol),
        E1000_STAT("tx_deferred_ok", stats.dc),
@@ -433,13 +433,11 @@ static void e1000_get_regs(struct net_device *netdev,
        struct e1000_hw *hw = &adapter->hw;
        u32 *regs_buff = p;
        u16 phy_data;
-       u8 revision_id;
 
        memset(p, 0, E1000_REGS_LEN * sizeof(u32));
 
-       pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id);
-
-       regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device;
+       regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
+                       adapter->pdev->device;
 
        regs_buff[0]  = er32(CTRL);
        regs_buff[1]  = er32(STATUS);
@@ -684,20 +682,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
        rx_old = adapter->rx_ring;
 
        err = -ENOMEM;
-       tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+       tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL);
        if (!tx_ring)
                goto err_alloc_tx;
-       /*
-        * use a memcpy to save any previously configured
-        * items like napi structs from having to be
-        * reinitialized
-        */
-       memcpy(tx_ring, tx_old, sizeof(struct e1000_ring));
 
-       rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+       rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL);
        if (!rx_ring)
                goto err_alloc_rx;
-       memcpy(rx_ring, rx_old, sizeof(struct e1000_ring));
 
        adapter->tx_ring = tx_ring;
        adapter->rx_ring = rx_ring;
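For readers unfamiliar with kmemdup(): it is exactly the kzalloc()+memcpy() pair the removed lines open-coded, which is why the explanatory comment can go as well. A small illustrative sketch, where struct ring_cfg is a stand-in and not the driver's struct e1000_ring:

#include <linux/slab.h>
#include <linux/string.h>

struct ring_cfg {                       /* stand-in for struct e1000_ring */
        void *desc;
        unsigned int count;
};

static struct ring_cfg *clone_ring(const struct ring_cfg *old)
{
        /* One call allocates and copies, preserving previously configured
         * fields just as the old explicit memcpy() did. */
        return kmemdup(old, sizeof(*old), GFP_KERNEL);
}
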
@@ -1255,7 +1246,6 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_reg = 0;
-       u32 stat_reg = 0;
        u16 phy_reg = 0;
        s32 ret_val = 0;
 
@@ -1363,8 +1353,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
                 * Set the ILOS bit on the fiber Nic if half duplex link is
                 * detected.
                 */
-               stat_reg = er32(STATUS);
-               if ((stat_reg & E1000_STATUS_FD) == 0)
+               if ((er32(STATUS) & E1000_STATUS_FD) == 0)
                        ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
        }
 
@@ -1972,8 +1961,15 @@ static int e1000_set_coalesce(struct net_device *netdev,
 static int e1000_nway_reset(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       if (netif_running(netdev))
-               e1000e_reinit_locked(adapter);
+
+       if (!netif_running(netdev))
+               return -EAGAIN;
+
+       if (!adapter->hw.mac.autoneg)
+               return -EINVAL;
+
+       e1000e_reinit_locked(adapter);
+
        return 0;
 }
 
@@ -1982,14 +1978,15 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
                                    u64 *data)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
+       struct rtnl_link_stats64 net_stats;
        int i;
        char *p = NULL;
 
-       e1000e_update_stats(adapter);
+       e1000e_get_stats64(netdev, &net_stats);
        for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
                switch (e1000_gstrings_stats[i].type) {
                case NETDEV_STATS:
-                       p = (char *) netdev +
+                       p = (char *) &net_stats +
                                        e1000_gstrings_stats[i].stat_offset;
                        break;
                case E1000_STATS:
@@ -2014,7 +2011,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
 
        switch (stringset) {
        case ETH_SS_TEST:
-               memcpy(data, *e1000_gstrings_test, sizeof(e1000_gstrings_test));
+               memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
                break;
        case ETH_SS_STATS:
                for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
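A short sketch of the table-plus-offsetof technique the reworked E1000_NETDEV_STAT macro relies on, with illustrative names only: each entry records where a counter lives inside struct rtnl_link_stats64, so the ethtool stats handler can read it from a local snapshot instead of netdev->stats.

#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/if_link.h>

struct stat_desc {
        const char *name;
        size_t offset;
};

#define LINK_STAT(n, m) \
        { .name = n, .offset = offsetof(struct rtnl_link_stats64, m) }

static const struct stat_desc link_stats[] = {
        LINK_STAT("rx_errors", rx_errors),
        LINK_STAT("tx_dropped", tx_dropped),
};

/* Fetch one counter out of a snapshot previously filled by
 * ->ndo_get_stats64(). */
static u64 read_stat(const struct rtnl_link_stats64 *snap,
                     const struct stat_desc *d)
{
        return *(const u64 *)((const char *)snap + d->offset);
}
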
index fb46974cfec1afd122edea3f3aa8a4058b8d1928..232b42b7f7ce7e2286bedc684bb17e0cca04cba0 100644 (file)
@@ -2104,7 +2104,6 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
 {
        union ich8_hws_flash_status hsfsts;
        s32 ret_val = -E1000_ERR_NVM;
-       s32 i = 0;
 
        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
 
@@ -2140,6 +2139,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
                ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
                ret_val = 0;
        } else {
+               s32 i = 0;
+
                /*
                 * Otherwise poll for sometime so the current
                 * cycle has a chance to end before giving up.
index 68aa1749bf66f027cb58a8bfba17dbfb34101043..96921de5df2e9885e8c4dfcc133aba0a233bf8e8 100644 (file)
@@ -1978,15 +1978,15 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
 {
        struct e1000_nvm_info *nvm = &hw->nvm;
        u32 eecd = er32(EECD);
-       u16 timeout = 0;
        u8 spi_stat_reg;
 
        if (nvm->type == e1000_nvm_eeprom_spi) {
+               u16 timeout = NVM_MAX_RETRY_SPI;
+
                /* Clear SK and CS */
                eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
                ew32(EECD, eecd);
                udelay(1);
-               timeout = NVM_MAX_RETRY_SPI;
 
                /*
                 * Read "Status Register" repeatedly until the LSB is cleared.
index 2e5022849f1828c19a1bc968e7112cdf487d4145..455d5a1101ed36e7ed50240323dec79898364c4a 100644 (file)
@@ -900,8 +900,6 @@ next_desc:
 
        adapter->total_rx_bytes += total_rx_bytes;
        adapter->total_rx_packets += total_rx_packets;
-       netdev->stats.rx_bytes += total_rx_bytes;
-       netdev->stats.rx_packets += total_rx_packets;
        return cleaned;
 }
 
@@ -1060,8 +1058,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
        }
        adapter->total_tx_bytes += total_tx_bytes;
        adapter->total_tx_packets += total_tx_packets;
-       netdev->stats.tx_bytes += total_tx_bytes;
-       netdev->stats.tx_packets += total_tx_packets;
        return count < tx_ring->count;
 }
 
@@ -1248,8 +1244,6 @@ next_desc:
 
        adapter->total_rx_bytes += total_rx_bytes;
        adapter->total_rx_packets += total_rx_packets;
-       netdev->stats.rx_bytes += total_rx_bytes;
-       netdev->stats.rx_packets += total_rx_packets;
        return cleaned;
 }
 
@@ -1429,8 +1423,6 @@ next_desc:
 
        adapter->total_rx_bytes += total_rx_bytes;
        adapter->total_rx_packets += total_rx_packets;
-       netdev->stats.rx_bytes += total_rx_bytes;
-       netdev->stats.rx_packets += total_rx_packets;
        return cleaned;
 }
 
@@ -1857,7 +1849,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
        int err = 0, vector = 0;
 
        if (strlen(netdev->name) < (IFNAMSIZ - 5))
-               sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
+               snprintf(adapter->rx_ring->name,
+                        sizeof(adapter->rx_ring->name) - 1,
+                        "%s-rx-0", netdev->name);
        else
                memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
        err = request_irq(adapter->msix_entries[vector].vector,
@@ -1870,7 +1864,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
        vector++;
 
        if (strlen(netdev->name) < (IFNAMSIZ - 5))
-               sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
+               snprintf(adapter->tx_ring->name,
+                        sizeof(adapter->tx_ring->name) - 1,
+                        "%s-tx-0", netdev->name);
        else
                memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
        err = request_irq(adapter->msix_entries[vector].vector,
@@ -2734,7 +2730,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 rctl, rfctl;
-       u32 psrctl = 0;
        u32 pages = 0;
 
        /* Workaround Si errata on 82579 - configure jumbo frame flow */
@@ -2833,6 +2828,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
                adapter->rx_ps_pages = 0;
 
        if (adapter->rx_ps_pages) {
+               u32 psrctl = 0;
+
                /* Configure extra packet-split registers */
                rfctl = er32(RFCTL);
                rfctl |= E1000_RFCTL_EXTEN;
@@ -3034,7 +3031,6 @@ static void e1000_set_multi(struct net_device *netdev)
        struct netdev_hw_addr *ha;
        u8  *mta_list;
        u32 rctl;
-       int i;
 
        /* Check for Promiscuous and All Multicast modes */
 
@@ -3057,12 +3053,13 @@ static void e1000_set_multi(struct net_device *netdev)
        ew32(RCTL, rctl);
 
        if (!netdev_mc_empty(netdev)) {
+               int i = 0;
+
                mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
                if (!mta_list)
                        return;
 
                /* prepare a packed array of only addresses. */
-               i = 0;
                netdev_for_each_mc_addr(ha, netdev)
                        memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
 
@@ -3359,6 +3356,8 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
        e1e_flush();
 }
 
+static void e1000e_update_stats(struct e1000_adapter *adapter);
+
 void e1000e_down(struct e1000_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
@@ -3393,6 +3392,11 @@ void e1000e_down(struct e1000_adapter *adapter)
        del_timer_sync(&adapter->phy_info_timer);
 
        netif_carrier_off(netdev);
+
+       spin_lock(&adapter->stats64_lock);
+       e1000e_update_stats(adapter);
+       spin_unlock(&adapter->stats64_lock);
+
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
 
@@ -3437,6 +3441,8 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
        adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
+       spin_lock_init(&adapter->stats64_lock);
+
        e1000e_set_interrupt_capability(adapter);
 
        if (e1000_alloc_queues(adapter))
@@ -3918,7 +3924,7 @@ release:
  * e1000e_update_stats - Update the board statistics counters
  * @adapter: board private structure
  **/
-void e1000e_update_stats(struct e1000_adapter *adapter)
+static void e1000e_update_stats(struct e1000_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
@@ -4030,10 +4036,11 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
        struct e1000_phy_regs *phy = &adapter->phy_regs;
-       int ret_val;
 
        if ((er32(STATUS) & E1000_STATUS_LU) &&
            (adapter->hw.phy.media_type == e1000_media_type_copper)) {
+               int ret_val;
+
                ret_val  = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
                ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
                ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
@@ -4179,7 +4186,6 @@ static void e1000_watchdog_task(struct work_struct *work)
        struct e1000_ring *tx_ring = adapter->tx_ring;
        struct e1000_hw *hw = &adapter->hw;
        u32 link, tctl;
-       int tx_pending = 0;
 
        if (test_bit(__E1000_DOWN, &adapter->state))
                return;
@@ -4320,7 +4326,9 @@ static void e1000_watchdog_task(struct work_struct *work)
        }
 
 link_up:
+       spin_lock(&adapter->stats64_lock);
        e1000e_update_stats(adapter);
+       spin_unlock(&adapter->stats64_lock);
 
        mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
        adapter->tpt_old = adapter->stats.tpt;
@@ -4334,20 +4342,17 @@ link_up:
 
        e1000e_update_adaptive(&adapter->hw);
 
-       if (!netif_carrier_ok(netdev)) {
-               tx_pending = (e1000_desc_unused(tx_ring) + 1 <
-                              tx_ring->count);
-               if (tx_pending) {
-                       /*
-                        * We've lost link, so the controller stops DMA,
-                        * but we've got queued Tx work that's never going
-                        * to get done, so reset controller to flush Tx.
-                        * (Do the reset outside of interrupt context).
-                        */
-                       schedule_work(&adapter->reset_task);
-                       /* return immediately since reset is imminent */
-                       return;
-               }
+       if (!netif_carrier_ok(netdev) &&
+           (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
+               /*
+                * We've lost link, so the controller stops DMA,
+                * but we've got queued Tx work that's never going
+                * to get done, so reset controller to flush Tx.
+                * (Do the reset outside of interrupt context).
+                */
+               schedule_work(&adapter->reset_task);
+               /* return immediately since reset is imminent */
+               return;
        }
 
        /* Simple mode for Interrupt Throttle Rate (ITR) */
@@ -4411,13 +4416,13 @@ static int e1000_tso(struct e1000_adapter *adapter,
        u32 cmd_length = 0;
        u16 ipcse = 0, tucse, mss;
        u8 ipcss, ipcso, tucss, tucso, hdr_len;
-       int err;
 
        if (!skb_is_gso(skb))
                return 0;
 
        if (skb_header_cloned(skb)) {
-               err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+               int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+
                if (err)
                        return err;
        }
@@ -4928,16 +4933,55 @@ static void e1000_reset_task(struct work_struct *work)
 }
 
 /**
- * e1000_get_stats - Get System Network Statistics
+ * e1000_get_stats64 - Get System Network Statistics
  * @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
  *
  * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
  **/
-static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
+struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+                                             struct rtnl_link_stats64 *stats)
 {
-       /* only return the current stats */
-       return &netdev->stats;
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+
+       memset(stats, 0, sizeof(struct rtnl_link_stats64));
+       spin_lock(&adapter->stats64_lock);
+       e1000e_update_stats(adapter);
+       /* Fill out the OS statistics structure */
+       stats->rx_bytes = adapter->stats.gorc;
+       stats->rx_packets = adapter->stats.gprc;
+       stats->tx_bytes = adapter->stats.gotc;
+       stats->tx_packets = adapter->stats.gptc;
+       stats->multicast = adapter->stats.mprc;
+       stats->collisions = adapter->stats.colc;
+
+       /* Rx Errors */
+
+       /*
+        * RLEC on some newer hardware can be incorrect so build
+        * our own version based on RUC and ROC
+        */
+       stats->rx_errors = adapter->stats.rxerrc +
+               adapter->stats.crcerrs + adapter->stats.algnerrc +
+               adapter->stats.ruc + adapter->stats.roc +
+               adapter->stats.cexterr;
+       stats->rx_length_errors = adapter->stats.ruc +
+                                             adapter->stats.roc;
+       stats->rx_crc_errors = adapter->stats.crcerrs;
+       stats->rx_frame_errors = adapter->stats.algnerrc;
+       stats->rx_missed_errors = adapter->stats.mpc;
+
+       /* Tx Errors */
+       stats->tx_errors = adapter->stats.ecol +
+                                      adapter->stats.latecol;
+       stats->tx_aborted_errors = adapter->stats.ecol;
+       stats->tx_window_errors = adapter->stats.latecol;
+       stats->tx_carrier_errors = adapter->stats.tncrs;
+
+       /* Tx Dropped needs to be maintained elsewhere */
+
+       spin_unlock(&adapter->stats64_lock);
+       return stats;
 }
 
 /**
@@ -5507,9 +5551,10 @@ static irqreturn_t e1000_intr_msix(int irq, void *data)
 {
        struct net_device *netdev = data;
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       int vector, msix_irq;
 
        if (adapter->msix_entries) {
+               int vector, msix_irq;
+
                vector = 0;
                msix_irq = adapter->msix_entries[vector].vector;
                disable_irq(msix_irq);
@@ -5706,7 +5751,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
        .ndo_open               = e1000_open,
        .ndo_stop               = e1000_close,
        .ndo_start_xmit         = e1000_xmit_frame,
-       .ndo_get_stats          = e1000_get_stats,
+       .ndo_get_stats64        = e1000e_get_stats64,
        .ndo_set_multicast_list = e1000_set_multi,
        .ndo_set_mac_address    = e1000_set_mac,
        .ndo_change_mtu         = e1000_change_mtu,
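Taken together, the netdev.c hunks above follow the standard ndo_get_stats64 conversion: per-interrupt accumulation into netdev->stats goes away, hardware counters are folded under a dedicated spinlock, and the handler fills a caller-provided snapshot. A condensed sketch under assumed names (struct nic_priv, nic_read_hw_counters), not the e1000e code itself:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct nic_priv {
        spinlock_t stats_lock;          /* initialized at probe time */
        u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
};

/* Placeholder for reading the MAC statistics registers. */
static void nic_read_hw_counters(struct nic_priv *priv)
{
}

static struct rtnl_link_stats64 *nic_get_stats64(struct net_device *netdev,
                                                 struct rtnl_link_stats64 *stats)
{
        struct nic_priv *priv = netdev_priv(netdev);

        spin_lock(&priv->stats_lock);
        nic_read_hw_counters(priv);
        stats->rx_packets = priv->rx_packets;
        stats->rx_bytes   = priv->rx_bytes;
        stats->tx_packets = priv->tx_packets;
        stats->tx_bytes   = priv->tx_bytes;
        spin_unlock(&priv->stats_lock);

        return stats;
}
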
index 6bea051b134b5e7e48f0c85feb9a4879d956974b..6ae31fcfb6295c2585ae7fc206023a70bc3dda52 100644 (file)
@@ -2409,9 +2409,7 @@ static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
 s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
 {
        s32 ret_val;
-       u32 page_select = 0;
        u32 page = offset >> IGP_PAGE_SHIFT;
-       u32 page_shift = 0;
 
        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
@@ -2427,6 +2425,8 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
        hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
 
        if (offset > MAX_PHY_MULTI_PAGE_REG) {
+               u32 page_shift, page_select;
+
                /*
                 * Page select is register 31 for phy address 1 and 22 for
                 * phy address 2 and 3. Page select is shifted only for
@@ -2468,9 +2468,7 @@ out:
 s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
 {
        s32 ret_val;
-       u32 page_select = 0;
        u32 page = offset >> IGP_PAGE_SHIFT;
-       u32 page_shift = 0;
 
        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
@@ -2486,6 +2484,8 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
        hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
 
        if (offset > MAX_PHY_MULTI_PAGE_REG) {
+               u32 page_shift, page_select;
+
                /*
                 * Page select is register 31 for phy address 1 and 22 for
                 * phy address 2 and 3. Page select is shifted only for
index e7b6c31880bacadc85262c39324a8773353d0ae4..2e573be16c13c77d5982b42c96b7de20fdeab8ea 100644 (file)
@@ -1,5 +1,5 @@
 obj-$(CONFIG_ENIC) := enic.o
 
 enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
-       enic_res.o vnic_dev.o vnic_rq.o vnic_vic.o
+       enic_res.o enic_dev.o vnic_dev.o vnic_rq.o vnic_vic.o
 
index a937f49d9db747c3979240ffc40fa0af961a970b..e816bbb9fbf9b28d995a06cd308f893f62b200e1 100644 (file)
 
 #define DRV_NAME               "enic"
 #define DRV_DESCRIPTION                "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION            "1.4.1.10"
-#define DRV_COPYRIGHT          "Copyright 2008-2010 Cisco Systems, Inc"
+#define DRV_VERSION            "2.1.1.10"
+#define DRV_COPYRIGHT          "Copyright 2008-2011 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX          6
 
-#define ENIC_WQ_MAX            8
-#define ENIC_RQ_MAX            8
+#define ENIC_WQ_MAX            1
+#define ENIC_RQ_MAX            1
 #define ENIC_CQ_MAX            (ENIC_WQ_MAX + ENIC_RQ_MAX)
 #define ENIC_INTR_MAX          (ENIC_CQ_MAX + 2)
 
@@ -49,7 +49,7 @@ struct enic_msix_entry {
        void *devid;
 };
 
-#define ENIC_SET_APPLIED               (1 << 0)
+#define ENIC_PORT_REQUEST_APPLIED      (1 << 0)
 #define ENIC_SET_REQUEST               (1 << 1)
 #define ENIC_SET_NAME                  (1 << 2)
 #define ENIC_SET_INSTANCE              (1 << 3)
@@ -101,7 +101,6 @@ struct enic {
        /* receive queue cache line section */
        ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
        unsigned int rq_count;
-       int (*rq_alloc_buf)(struct vnic_rq *rq);
        u64 rq_truncated_pkts;
        u64 rq_bad_fcs;
        struct napi_struct napi[ENIC_RQ_MAX];
diff --git a/drivers/net/enic/enic_dev.c b/drivers/net/enic/enic_dev.c
new file mode 100644 (file)
index 0000000..37ad3a1
--- /dev/null
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2011 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+
+#include "vnic_dev.h"
+#include "vnic_vic.h"
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_dev.h"
+
+int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_fw_info(enic->vdev, fw_info);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_stats_dump(enic->vdev, vstats);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_add_station_addr(struct enic *enic)
+{
+       int err;
+
+       if (!is_valid_ether_addr(enic->netdev->dev_addr))
+               return -EADDRNOTAVAIL;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_del_station_addr(struct enic *enic)
+{
+       int err;
+
+       if (!is_valid_ether_addr(enic->netdev->dev_addr))
+               return -EADDRNOTAVAIL;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
+       int broadcast, int promisc, int allmulti)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_packet_filter(enic->vdev, directed,
+               multicast, broadcast, promisc, allmulti);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_add_addr(struct enic *enic, u8 *addr)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_add_addr(enic->vdev, addr);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_del_addr(struct enic *enic, u8 *addr)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_del_addr(enic->vdev, addr);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_notify_unset(struct enic *enic)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_notify_unset(enic->vdev);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_hang_notify(struct enic *enic)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_hang_notify(enic->vdev);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
+               IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_enable(struct enic *enic)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_enable_wait(enic->vdev);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_disable(struct enic *enic)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_disable(enic->vdev);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_vnic_dev_deinit(struct enic *enic)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_deinit(enic->vdev);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_init_prov(enic->vdev,
+               (u8 *)vp, vic_provinfo_size(vp));
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+int enic_dev_init_done(struct enic *enic, int *done, int *error)
+{
+       int err;
+
+       spin_lock(&enic->devcmd_lock);
+       err = vnic_dev_init_done(enic->vdev, done, error);
+       spin_unlock(&enic->devcmd_lock);
+
+       return err;
+}
+
+/* rtnl lock is held */
+void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+       struct enic *enic = netdev_priv(netdev);
+
+       spin_lock(&enic->devcmd_lock);
+       enic_add_vlan(enic, vid);
+       spin_unlock(&enic->devcmd_lock);
+}
+
+/* rtnl lock is held */
+void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+       struct enic *enic = netdev_priv(netdev);
+
+       spin_lock(&enic->devcmd_lock);
+       enic_del_vlan(enic, vid);
+       spin_unlock(&enic->devcmd_lock);
+}
diff --git a/drivers/net/enic/enic_dev.h b/drivers/net/enic/enic_dev.h
new file mode 100644 (file)
index 0000000..495f57f
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2011 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _ENIC_DEV_H_
+#define _ENIC_DEV_H_
+
+int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info);
+int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats);
+int enic_dev_add_station_addr(struct enic *enic);
+int enic_dev_del_station_addr(struct enic *enic);
+int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
+       int broadcast, int promisc, int allmulti);
+int enic_dev_add_addr(struct enic *enic, u8 *addr);
+int enic_dev_del_addr(struct enic *enic, u8 *addr);
+void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+int enic_dev_notify_unset(struct enic *enic);
+int enic_dev_hang_notify(struct enic *enic);
+int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
+int enic_dev_enable(struct enic *enic);
+int enic_dev_disable(struct enic *enic);
+int enic_vnic_dev_deinit(struct enic *enic);
+int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp);
+int enic_dev_init_done(struct enic *enic, int *done, int *error);
+
+#endif /* _ENIC_DEV_H_ */
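
An illustrative caller, not actual enic_main.c code, showing what the new enic_dev.[ch] split buys: call sites use the helpers and no longer open-code devcmd_lock around each vnic_dev_*() firmware command.

#include "enic.h"
#include "enic_dev.h"

static int example_bring_up(struct enic *enic)
{
        int err;

        err = enic_dev_enable(enic);    /* takes devcmd_lock internally */
        if (err)
                return err;

        /* likewise serialized by devcmd_lock inside the helper */
        return enic_dev_add_station_addr(enic);
}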
index a0af48c51fb37d1860061e6358ec4dfdc64005fc..8b9cad5e9712524df44f305cd43cbceadaba5ec8 100644 (file)
@@ -44,6 +44,7 @@
 #include "vnic_vic.h"
 #include "enic_res.h"
 #include "enic.h"
+#include "enic_dev.h"
 
 #define ENIC_NOTIFY_TIMER_PERIOD       (2 * HZ)
 #define WQ_ENET_MAX_DESC_LEN           (1 << WQ_ENET_LEN_BITS)
@@ -190,18 +191,6 @@ static int enic_get_settings(struct net_device *netdev,
        return 0;
 }
 
-static int enic_dev_fw_info(struct enic *enic,
-       struct vnic_devcmd_fw_info **fw_info)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_fw_info(enic->vdev, fw_info);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
 static void enic_get_drvinfo(struct net_device *netdev,
        struct ethtool_drvinfo *drvinfo)
 {
@@ -246,17 +235,6 @@ static int enic_get_sset_count(struct net_device *netdev, int sset)
        }
 }
 
-static int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_stats_dump(enic->vdev, vstats);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
 static void enic_get_ethtool_stats(struct net_device *netdev,
        struct ethtool_stats *stats, u64 *data)
 {
@@ -896,9 +874,10 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev)
        return net_stats;
 }
 
-static void enic_reset_multicast_list(struct enic *enic)
+static void enic_reset_addr_lists(struct enic *enic)
 {
        enic->mc_count = 0;
+       enic->uc_count = 0;
        enic->flags = 0;
 }
 
@@ -919,32 +898,6 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
        return 0;
 }
 
-static int enic_dev_add_station_addr(struct enic *enic)
-{
-       int err = 0;
-
-       if (is_valid_ether_addr(enic->netdev->dev_addr)) {
-               spin_lock(&enic->devcmd_lock);
-               err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
-               spin_unlock(&enic->devcmd_lock);
-       }
-
-       return err;
-}
-
-static int enic_dev_del_station_addr(struct enic *enic)
-{
-       int err = 0;
-
-       if (is_valid_ether_addr(enic->netdev->dev_addr)) {
-               spin_lock(&enic->devcmd_lock);
-               err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
-               spin_unlock(&enic->devcmd_lock);
-       }
-
-       return err;
-}
-
 static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
 {
        struct enic *enic = netdev_priv(netdev);
@@ -989,42 +942,7 @@ static int enic_set_mac_address(struct net_device *netdev, void *p)
        return enic_dev_add_station_addr(enic);
 }
 
-static int enic_dev_packet_filter(struct enic *enic, int directed,
-       int multicast, int broadcast, int promisc, int allmulti)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_packet_filter(enic->vdev, directed,
-               multicast, broadcast, promisc, allmulti);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
-static int enic_dev_add_addr(struct enic *enic, u8 *addr)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_add_addr(enic->vdev, addr);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
-static int enic_dev_del_addr(struct enic *enic, u8 *addr)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_del_addr(enic->vdev, addr);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
-static void enic_add_multicast_addr_list(struct enic *enic)
+static void enic_update_multicast_addr_list(struct enic *enic)
 {
        struct net_device *netdev = enic->netdev;
        struct netdev_hw_addr *ha;
@@ -1079,7 +997,7 @@ static void enic_add_multicast_addr_list(struct enic *enic)
        enic->mc_count = mc_count;
 }
 
-static void enic_add_unicast_addr_list(struct enic *enic)
+static void enic_update_unicast_addr_list(struct enic *enic)
 {
        struct net_device *netdev = enic->netdev;
        struct netdev_hw_addr *ha;
@@ -1156,9 +1074,9 @@ static void enic_set_rx_mode(struct net_device *netdev)
        }
 
        if (!promisc) {
-               enic_add_unicast_addr_list(enic);
+               enic_update_unicast_addr_list(enic);
                if (!allmulti)
-                       enic_add_multicast_addr_list(enic);
+                       enic_update_multicast_addr_list(enic);
        }
 }
 
@@ -1170,26 +1088,6 @@ static void enic_vlan_rx_register(struct net_device *netdev,
        enic->vlan_group = vlan_group;
 }
 
-/* rtnl lock is held */
-static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
-{
-       struct enic *enic = netdev_priv(netdev);
-
-       spin_lock(&enic->devcmd_lock);
-       enic_add_vlan(enic, vid);
-       spin_unlock(&enic->devcmd_lock);
-}
-
-/* rtnl lock is held */
-static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
-{
-       struct enic *enic = netdev_priv(netdev);
-
-       spin_lock(&enic->devcmd_lock);
-       enic_del_vlan(enic, vid);
-       spin_unlock(&enic->devcmd_lock);
-}
-
 /* netif_tx_lock held, BHs disabled */
 static void enic_tx_timeout(struct net_device *netdev)
 {
@@ -1197,40 +1095,6 @@ static void enic_tx_timeout(struct net_device *netdev)
        schedule_work(&enic->reset);
 }
 
-static int enic_vnic_dev_deinit(struct enic *enic)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_deinit(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
-static int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_init_prov(enic->vdev,
-               (u8 *)vp, vic_provinfo_size(vp));
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
-static int enic_dev_init_done(struct enic *enic, int *done, int *error)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_init_done(enic->vdev, done, error);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
 static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 {
        struct enic *enic = netdev_priv(netdev);
@@ -1262,6 +1126,8 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
        if (err)
                return err;
 
+       enic_reset_addr_lists(enic);
+
        switch (enic->pp.request) {
 
        case PORT_REQUEST_ASSOCIATE:
@@ -1318,18 +1184,20 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
                vic_provinfo_free(vp);
                if (err)
                        return err;
-
-               enic->pp.set |= ENIC_SET_APPLIED;
                break;
 
        case PORT_REQUEST_DISASSOCIATE:
-               enic->pp.set &= ~ENIC_SET_APPLIED;
                break;
 
        default:
                return -EINVAL;
        }
 
+       /* Set flag to indicate that the port assoc/disassoc
+        * request has been sent out to fw
+        */
+       enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;
+
        return 0;
 }
 
@@ -1379,9 +1247,6 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
 
                if (is_zero_ether_addr(netdev->dev_addr))
                        random_ether_addr(netdev->dev_addr);
-       } else if (new_pp.request == PORT_REQUEST_DISASSOCIATE) {
-               if (!is_zero_ether_addr(enic->pp.mac_addr))
-                       enic_dev_del_addr(enic, enic->pp.mac_addr);
        }
 
        memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile));
@@ -1390,9 +1255,6 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
        if (err)
                goto set_port_profile_cleanup;
 
-       if (!is_zero_ether_addr(enic->pp.mac_addr))
-               enic_dev_add_addr(enic, enic->pp.mac_addr);
-
 set_port_profile_cleanup:
        memset(enic->pp.vf_mac, 0, ETH_ALEN);
 
@@ -1411,7 +1273,7 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
        int err, error, done;
        u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
 
-       if (!(enic->pp.set & ENIC_SET_APPLIED))
+       if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
                return -ENODATA;
 
        err = enic_dev_init_done(enic, &done, &error);
@@ -1489,62 +1351,6 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
        return 0;
 }
 
-static int enic_rq_alloc_buf_a1(struct vnic_rq *rq)
-{
-       struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
-
-       if (vnic_rq_posting_soon(rq)) {
-
-               /* SW workaround for A0 HW erratum: if we're just about
-                * to write posted_index, insert a dummy desc
-                * of type resvd
-                */
-
-               rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0);
-               vnic_rq_post(rq, 0, 0, 0, 0);
-       } else {
-               return enic_rq_alloc_buf(rq);
-       }
-
-       return 0;
-}
-
-static int enic_dev_hw_version(struct enic *enic,
-       enum vnic_dev_hw_version *hw_ver)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_hw_version(enic->vdev, hw_ver);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
-static int enic_set_rq_alloc_buf(struct enic *enic)
-{
-       enum vnic_dev_hw_version hw_ver;
-       int err;
-
-       err = enic_dev_hw_version(enic, &hw_ver);
-       if (err)
-               return err;
-
-       switch (hw_ver) {
-       case VNIC_DEV_HW_VER_A1:
-               enic->rq_alloc_buf = enic_rq_alloc_buf_a1;
-               break;
-       case VNIC_DEV_HW_VER_A2:
-       case VNIC_DEV_HW_VER_UNKNOWN:
-               enic->rq_alloc_buf = enic_rq_alloc_buf;
-               break;
-       default:
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
 static void enic_rq_indicate_buf(struct vnic_rq *rq,
        struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
        int skipped, void *opaque)
@@ -1681,7 +1487,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
                        0 /* don't unmask intr */,
                        0 /* don't reset intr timer */);
 
-       err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
+       err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
 
        /* Buffer allocation failed. Stay in polling
         * mode so we can try to fill the ring again.
@@ -1731,7 +1537,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
                        0 /* don't unmask intr */,
                        0 /* don't reset intr timer */);
 
-       err = vnic_rq_fill(&enic->rq[rq], enic->rq_alloc_buf);
+       err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
 
        /* Buffer allocation failed. Stay in polling mode
         * so we can try to fill the ring again.
@@ -1901,39 +1707,6 @@ static int enic_dev_notify_set(struct enic *enic)
        return err;
 }
 
-static int enic_dev_notify_unset(struct enic *enic)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_notify_unset(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
-static int enic_dev_enable(struct enic *enic)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_enable_wait(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
-static int enic_dev_disable(struct enic *enic)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_disable(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
 static void enic_notify_timer_start(struct enic *enic)
 {
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
@@ -1967,7 +1740,7 @@ static int enic_open(struct net_device *netdev)
        }
 
        for (i = 0; i < enic->rq_count; i++) {
-               vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
+               vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
                /* Need at least one buffer on ring to get going */
                if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
                        netdev_err(netdev, "Unable to alloc receive buffers\n");
@@ -2285,29 +2058,6 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
                rss_hash_bits, rss_base_cpu, rss_enable);
 }
 
-static int enic_dev_hang_notify(struct enic *enic)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_hang_notify(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
-static int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
-{
-       int err;
-
-       spin_lock(&enic->devcmd_lock);
-       err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
-               IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
-       spin_unlock(&enic->devcmd_lock);
-
-       return err;
-}
-
 static void enic_reset(struct work_struct *work)
 {
        struct enic *enic = container_of(work, struct enic, reset);
@@ -2320,7 +2070,7 @@ static void enic_reset(struct work_struct *work)
        enic_dev_hang_notify(enic);
        enic_stop(enic->netdev);
        enic_dev_hang_reset(enic);
-       enic_reset_multicast_list(enic);
+       enic_reset_addr_lists(enic);
        enic_init_vnic_resources(enic);
        enic_set_rss_nic_cfg(enic);
        enic_dev_set_ig_vlan_rewrite_mode(enic);
@@ -2332,7 +2082,7 @@ static void enic_reset(struct work_struct *work)
 static int enic_set_intr_mode(struct enic *enic)
 {
        unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
-       unsigned int m = 1;
+       unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
        unsigned int i;
 
        /* Set interrupt mode (INTx, MSI, MSI-X) depending
@@ -2475,9 +2225,7 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
        .ndo_tx_timeout         = enic_tx_timeout,
        .ndo_set_vf_port        = enic_set_vf_port,
        .ndo_get_vf_port        = enic_get_vf_port,
-#ifdef IFLA_VF_MAX
        .ndo_set_vf_mac         = enic_set_vf_mac,
-#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = enic_poll_controller,
 #endif
@@ -2556,25 +2304,12 @@ static int enic_dev_init(struct enic *enic)
 
        enic_init_vnic_resources(enic);
 
-       err = enic_set_rq_alloc_buf(enic);
-       if (err) {
-               dev_err(dev, "Failed to set RQ buffer allocator, aborting\n");
-               goto err_out_free_vnic_resources;
-       }
-
        err = enic_set_rss_nic_cfg(enic);
        if (err) {
                dev_err(dev, "Failed to config nic, aborting\n");
                goto err_out_free_vnic_resources;
        }
 
-       err = enic_dev_set_ig_vlan_rewrite_mode(enic);
-       if (err) {
-               dev_err(dev,
-                       "Failed to set ingress vlan rewrite mode, aborting.\n");
-               goto err_out_free_vnic_resources;
-       }
-
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
        default:
                netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
@@ -2713,6 +2448,22 @@ static int __devinit enic_probe(struct pci_dev *pdev,
                goto err_out_vnic_unregister;
        }
 
+       /* Setup devcmd lock
+        */
+
+       spin_lock_init(&enic->devcmd_lock);
+
+       /*
+        * Set ingress vlan rewrite mode before vnic initialization
+        */
+
+       err = enic_dev_set_ig_vlan_rewrite_mode(enic);
+       if (err) {
+               dev_err(dev,
+                       "Failed to set ingress vlan rewrite mode, aborting.\n");
+               goto err_out_dev_close;
+       }
+
        /* Issue device init to initialize the vnic-to-switch link.
         * We'll start with carrier off and wait for link UP
         * notification later to turn on carrier.  We don't need
@@ -2736,11 +2487,6 @@ static int __devinit enic_probe(struct pci_dev *pdev,
                }
        }
 
-       /* Setup devcmd lock
-        */
-
-       spin_lock_init(&enic->devcmd_lock);
-
        err = enic_dev_init(enic);
        if (err) {
                dev_err(dev, "Device initialization failed, aborting\n");
index fb35d8b176686b75a62856d1a1da539a6113d568..c489e72107de636439fc0f0182da8346b83a3797 100644 (file)
@@ -419,25 +419,6 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
        return err;
 }
 
-int vnic_dev_hw_version(struct vnic_dev *vdev, enum vnic_dev_hw_version *hw_ver)
-{
-       struct vnic_devcmd_fw_info *fw_info;
-       int err;
-
-       err = vnic_dev_fw_info(vdev, &fw_info);
-       if (err)
-               return err;
-
-       if (strncmp(fw_info->hw_version, "A1", sizeof("A1")) == 0)
-               *hw_ver = VNIC_DEV_HW_VER_A1;
-       else if (strncmp(fw_info->hw_version, "A2", sizeof("A2")) == 0)
-               *hw_ver = VNIC_DEV_HW_VER_A2;
-       else
-               *hw_ver = VNIC_DEV_HW_VER_UNKNOWN;
-
-       return 0;
-}
-
 int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
        void *value)
 {
index 05f9a24cd45974e6463891af6b154facc0b24e0d..e837546213a8432b7a6520807a8cfaea47ba8cfa 100644 (file)
@@ -44,12 +44,6 @@ static inline void writeq(u64 val, void __iomem *reg)
 #undef pr_fmt
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-enum vnic_dev_hw_version {
-       VNIC_DEV_HW_VER_UNKNOWN,
-       VNIC_DEV_HW_VER_A1,
-       VNIC_DEV_HW_VER_A2,
-};
-
 enum vnic_dev_intr_mode {
        VNIC_DEV_INTR_MODE_UNKNOWN,
        VNIC_DEV_INTR_MODE_INTX,
@@ -93,8 +87,6 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
        u64 *a0, u64 *a1, int wait);
 int vnic_dev_fw_info(struct vnic_dev *vdev,
        struct vnic_devcmd_fw_info **fw_info);
-int vnic_dev_hw_version(struct vnic_dev *vdev,
-       enum vnic_dev_hw_version *hw_ver);
 int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
        void *value);
 int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
index 37f08de2454ada35c39ae681b04e10a607d11633..2056586f4d4b02641ec9609e3858b3c0368822ff 100644 (file)
@@ -141,11 +141,6 @@ static inline void vnic_rq_post(struct vnic_rq *rq,
        }
 }
 
-static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
-{
-       return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
-}
-
 static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
 {
        rq->ring.desc_avail += count;
index 0cb1cf9cf4b0c2c38abd69f964838f276c1a6ff9..a59cf961a436be4fc151b62160f2ad3d17429491 100644 (file)
  * Sorry, I had to rewrite most of this for 2.5.x -DaveM
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/capability.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -162,7 +164,7 @@ static void eql_timer(unsigned long param)
 }
 
 static const char version[] __initconst =
-       "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)\n";
+       "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)";
 
 static const struct net_device_ops eql_netdev_ops = {
        .ndo_open       = eql_open,
@@ -204,8 +206,8 @@ static int eql_open(struct net_device *dev)
        equalizer_t *eql = netdev_priv(dev);
 
        /* XXX We should force this off automatically for the user. */
-       printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on "
-              "your slave devices.\n", dev->name);
+       netdev_info(dev,
+                   "remember to turn off Van-Jacobson compression on your slave devices\n");
 
        BUG_ON(!list_empty(&eql->queue.all_slaves));
 
@@ -591,7 +593,7 @@ static int __init eql_init_module(void)
 {
        int err;
 
-       printk(version);
+       pr_info("%s\n", version);
 
        dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup);
        if (!dev_eql)
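The eql hunks above follow the usual printk-to-pr_* conversion. A self-contained sketch of the convention, using a throwaway demo module rather than eql itself: define pr_fmt before any include so every pr_*() line is prefixed with the module name, and prefer netdev_info() when a net_device is at hand.

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/netdevice.h>

static int __init demo_init(void)
{
        pr_info("loaded\n");            /* prints "<modname>: loaded" */
        return 0;
}

static void __exit demo_exit(void)
{
        pr_info("unloaded\n");
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
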
index cd0282d5d40f13935eeafb2a0fe5b7c2587b9f17..885d8baff7d5804b272c664fbc69fefa0cbf87b8 100644 (file)
@@ -54,7 +54,7 @@
 
 #include "fec.h"
 
-#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+#if defined(CONFIG_ARM)
 #define FEC_ALIGNMENT  0xf
 #else
 #define FEC_ALIGNMENT  0x3
@@ -148,8 +148,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
  * account when setting it.
  */
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
-    defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
 #define        OPT_FRAME_SIZE  (PKT_MAXBUF_SIZE << 16)
 #else
 #define        OPT_FRAME_SIZE  0
@@ -184,7 +183,7 @@ struct fec_enet_private {
        struct bufdesc  *rx_bd_base;
        struct bufdesc  *tx_bd_base;
        /* The next free ring entry */
-       struct bufdesc  *cur_rx, *cur_tx; 
+       struct bufdesc  *cur_rx, *cur_tx;
        /* The ring entries to be free()ed */
        struct bufdesc  *dirty_tx;
 
@@ -192,28 +191,21 @@ struct fec_enet_private {
        /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
        spinlock_t hw_lock;
 
-       struct  platform_device *pdev;
+       struct  platform_device *pdev;
 
        int     opened;
 
        /* Phylib and MDIO interface */
-       struct  mii_bus *mii_bus;
-       struct  phy_device *phy_dev;
-       int     mii_timeout;
-       uint    phy_speed;
+       struct  mii_bus *mii_bus;
+       struct  phy_device *phy_dev;
+       int     mii_timeout;
+       uint    phy_speed;
        phy_interface_t phy_interface;
        int     link;
        int     full_duplex;
        struct  completion mdio_done;
 };
 
-static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
-static void fec_enet_tx(struct net_device *dev);
-static void fec_enet_rx(struct net_device *dev);
-static int fec_enet_close(struct net_device *dev);
-static void fec_restart(struct net_device *dev, int duplex);
-static void fec_stop(struct net_device *dev);
-
 /* FEC MII MMFR bits definition */
 #define FEC_MMFR_ST            (1 << 30)
 #define FEC_MMFR_OP_READ       (2 << 28)
@@ -240,9 +232,9 @@ static void *swap_buffer(void *bufaddr, int len)
 }
 
 static netdev_tx_t
-fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        const struct platform_device_id *id_entry =
                                platform_get_device_id(fep->pdev);
        struct bufdesc *bdp;
@@ -263,9 +255,9 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        if (status & BD_ENET_TX_READY) {
                /* Ooops.  All transmit buffers are full.  Bail out.
-                * This should not happen, since dev->tbusy should be set.
+                * This should not happen, since ndev->tbusy should be set.
                 */
-               printk("%s: tx queue full!.\n", dev->name);
+               printk("%s: tx queue full!.\n", ndev->name);
                spin_unlock_irqrestore(&fep->hw_lock, flags);
                return NETDEV_TX_BUSY;
        }
@@ -285,7 +277,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
                unsigned int index;
                index = bdp - fep->tx_bd_base;
-               memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
+               memcpy(fep->tx_bounce[index], skb->data, skb->len);
                bufaddr = fep->tx_bounce[index];
        }
 
@@ -300,13 +292,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Save skb pointer */
        fep->tx_skbuff[fep->skb_cur] = skb;
 
-       dev->stats.tx_bytes += skb->len;
+       ndev->stats.tx_bytes += skb->len;
        fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
 
        /* Push the data cache so the CPM does not get stale memory
         * data.
         */
-       bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr,
+       bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
                        FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
 
        /* Send it on its way.  Tell FEC it's ready, interrupt when done,
@@ -327,7 +319,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        if (bdp == fep->dirty_tx) {
                fep->tx_full = 1;
-               netif_stop_queue(dev);
+               netif_stop_queue(ndev);
        }
 
        fep->cur_tx = bdp;
@@ -337,62 +329,170 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
+/* This function is called to start or restart the FEC during a link
+ * change.  This only happens when switching between half and full
+ * duplex.
+ */
 static void
-fec_timeout(struct net_device *dev)
+fec_restart(struct net_device *ndev, int duplex)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
+       const struct platform_device_id *id_entry =
+                               platform_get_device_id(fep->pdev);
+       int i;
+       u32 temp_mac[2];
+       u32 rcntl = OPT_FRAME_SIZE | 0x04;
 
-       dev->stats.tx_errors++;
+       /* Whack a reset.  We should wait for this. */
+       writel(1, fep->hwp + FEC_ECNTRL);
+       udelay(10);
 
-       fec_restart(dev, fep->full_duplex);
-       netif_wake_queue(dev);
-}
+       /*
+        * An enet-mac reset also clears the MAC address registers,
+        * so we need to reconfigure them here.
+        */
+       if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+               memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+               writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
+               writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+       }
 
-static irqreturn_t
-fec_enet_interrupt(int irq, void * dev_id)
-{
-       struct  net_device *dev = dev_id;
-       struct fec_enet_private *fep = netdev_priv(dev);
-       uint    int_events;
-       irqreturn_t ret = IRQ_NONE;
+       /* Clear any outstanding interrupt. */
+       writel(0xffc00000, fep->hwp + FEC_IEVENT);
 
-       do {
-               int_events = readl(fep->hwp + FEC_IEVENT);
-               writel(int_events, fep->hwp + FEC_IEVENT);
+       /* Reset all multicast. */
+       writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+       writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+#ifndef CONFIG_M5272
+       writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
+       writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
+#endif
 
-               if (int_events & FEC_ENET_RXF) {
-                       ret = IRQ_HANDLED;
-                       fec_enet_rx(dev);
-               }
+       /* Set maximum receive buffer size. */
+       writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
 
-               /* Transmit OK, or non-fatal error. Update the buffer
-                * descriptors. FEC handles all errors, we just discover
-                * them as part of the transmit process.
-                */
-               if (int_events & FEC_ENET_TXF) {
-                       ret = IRQ_HANDLED;
-                       fec_enet_tx(dev);
+       /* Set receive and transmit descriptor base. */
+       writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
+       writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
+                       fep->hwp + FEC_X_DES_START);
+
+       fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
+       fep->cur_rx = fep->rx_bd_base;
+
+       /* Reset SKB transmit buffers. */
+       fep->skb_cur = fep->skb_dirty = 0;
+       for (i = 0; i <= TX_RING_MOD_MASK; i++) {
+               if (fep->tx_skbuff[i]) {
+                       dev_kfree_skb_any(fep->tx_skbuff[i]);
+                       fep->tx_skbuff[i] = NULL;
                }
+       }
 
-               if (int_events & FEC_ENET_MII) {
-                       ret = IRQ_HANDLED;
-                       complete(&fep->mdio_done);
+       /* Enable MII mode */
+       if (duplex) {
+               /* FD enable */
+               writel(0x04, fep->hwp + FEC_X_CNTRL);
+       } else {
+               /* No Rcv on Xmit */
+               rcntl |= 0x02;
+               writel(0x0, fep->hwp + FEC_X_CNTRL);
+       }
+
+       fep->full_duplex = duplex;
+
+       /* Set MII speed */
+       writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+
+       /*
+        * The phy interface and speed need to get configured
+        * differently on enet-mac.
+        */
+       if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+               /* Enable flow control and length check */
+               rcntl |= 0x40000000 | 0x00000020;
+
+               /* MII or RMII */
+               if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+                       rcntl |= (1 << 8);
+               else
+                       rcntl &= ~(1 << 8);
+
+               /* 10M or 100M */
+               if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
+                       rcntl &= ~(1 << 9);
+               else
+                       rcntl |= (1 << 9);
+
+       } else {
+#ifdef FEC_MIIGSK_ENR
+               if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
+                       /* disable the gasket and wait */
+                       writel(0, fep->hwp + FEC_MIIGSK_ENR);
+                       while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
+                               udelay(1);
+
+                       /*
+                        * configure the gasket:
+                        *   RMII, 50 MHz, no loopback, no echo
+                        */
+                       writel(1, fep->hwp + FEC_MIIGSK_CFGR);
+
+                       /* re-enable the gasket */
+                       writel(2, fep->hwp + FEC_MIIGSK_ENR);
                }
-       } while (int_events);
+#endif
+       }
+       writel(rcntl, fep->hwp + FEC_R_CNTRL);
 
-       return ret;
+       /* And last, enable the transmit and receive processing */
+       writel(2, fep->hwp + FEC_ECNTRL);
+       writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+
+       /* Enable interrupts we wish to service */
+       writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+}
+
+static void
+fec_stop(struct net_device *ndev)
+{
+       struct fec_enet_private *fep = netdev_priv(ndev);
+
+       /* We cannot expect a graceful transmit stop without link. */
+       if (fep->link) {
+               writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
+               udelay(10);
+               if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
+                       printk("fec_stop : Graceful transmit stop did not complete !\n");
+       }
+
+       /* Whack a reset.  We should wait for this. */
+       writel(1, fep->hwp + FEC_ECNTRL);
+       udelay(10);
+       writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+       writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 }
 
 
 static void
-fec_enet_tx(struct net_device *dev)
+fec_timeout(struct net_device *ndev)
+{
+       struct fec_enet_private *fep = netdev_priv(ndev);
+
+       ndev->stats.tx_errors++;
+
+       fec_restart(ndev, fep->full_duplex);
+       netif_wake_queue(ndev);
+}
+
+static void
+fec_enet_tx(struct net_device *ndev)
 {
        struct  fec_enet_private *fep;
        struct bufdesc *bdp;
        unsigned short status;
        struct  sk_buff *skb;
 
-       fep = netdev_priv(dev);
+       fep = netdev_priv(ndev);
        spin_lock(&fep->hw_lock);
        bdp = fep->dirty_tx;
 
@@ -400,7 +500,8 @@ fec_enet_tx(struct net_device *dev)
                if (bdp == fep->cur_tx && fep->tx_full == 0)
                        break;
 
-               dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
+               dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+                               FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
                bdp->cbd_bufaddr = 0;
 
                skb = fep->tx_skbuff[fep->skb_dirty];
@@ -408,19 +509,19 @@ fec_enet_tx(struct net_device *dev)
                if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                                   BD_ENET_TX_RL | BD_ENET_TX_UN |
                                   BD_ENET_TX_CSL)) {
-                       dev->stats.tx_errors++;
+                       ndev->stats.tx_errors++;
                        if (status & BD_ENET_TX_HB)  /* No heartbeat */
-                               dev->stats.tx_heartbeat_errors++;
+                               ndev->stats.tx_heartbeat_errors++;
                        if (status & BD_ENET_TX_LC)  /* Late collision */
-                               dev->stats.tx_window_errors++;
+                               ndev->stats.tx_window_errors++;
                        if (status & BD_ENET_TX_RL)  /* Retrans limit */
-                               dev->stats.tx_aborted_errors++;
+                               ndev->stats.tx_aborted_errors++;
                        if (status & BD_ENET_TX_UN)  /* Underrun */
-                               dev->stats.tx_fifo_errors++;
+                               ndev->stats.tx_fifo_errors++;
                        if (status & BD_ENET_TX_CSL) /* Carrier lost */
-                               dev->stats.tx_carrier_errors++;
+                               ndev->stats.tx_carrier_errors++;
                } else {
-                       dev->stats.tx_packets++;
+                       ndev->stats.tx_packets++;
                }
 
                if (status & BD_ENET_TX_READY)
@@ -430,7 +531,7 @@ fec_enet_tx(struct net_device *dev)
                 * but we eventually sent the packet OK.
                 */
                if (status & BD_ENET_TX_DEF)
-                       dev->stats.collisions++;
+                       ndev->stats.collisions++;
 
                /* Free the sk buffer associated with this last transmit */
                dev_kfree_skb_any(skb);
@@ -447,8 +548,8 @@ fec_enet_tx(struct net_device *dev)
                 */
                if (fep->tx_full) {
                        fep->tx_full = 0;
-                       if (netif_queue_stopped(dev))
-                               netif_wake_queue(dev);
+                       if (netif_queue_stopped(ndev))
+                               netif_wake_queue(ndev);
                }
        }
        fep->dirty_tx = bdp;
@@ -462,9 +563,9 @@ fec_enet_tx(struct net_device *dev)
  * effectively tossing the packet.
  */
 static void
-fec_enet_rx(struct net_device *dev)
+fec_enet_rx(struct net_device *ndev)
 {
-       struct  fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        const struct platform_device_id *id_entry =
                                platform_get_device_id(fep->pdev);
        struct bufdesc *bdp;
@@ -498,17 +599,17 @@ fec_enet_rx(struct net_device *dev)
                /* Check for errors. */
                if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
                           BD_ENET_RX_CR | BD_ENET_RX_OV)) {
-                       dev->stats.rx_errors++;
+                       ndev->stats.rx_errors++;
                        if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
                                /* Frame too long or too short. */
-                               dev->stats.rx_length_errors++;
+                               ndev->stats.rx_length_errors++;
                        }
                        if (status & BD_ENET_RX_NO)     /* Frame alignment */
-                               dev->stats.rx_frame_errors++;
+                               ndev->stats.rx_frame_errors++;
                        if (status & BD_ENET_RX_CR)     /* CRC Error */
-                               dev->stats.rx_crc_errors++;
+                               ndev->stats.rx_crc_errors++;
                        if (status & BD_ENET_RX_OV)     /* FIFO overrun */
-                               dev->stats.rx_fifo_errors++;
+                               ndev->stats.rx_fifo_errors++;
                }
 
                /* Report late collisions as a frame error.
@@ -516,19 +617,19 @@ fec_enet_rx(struct net_device *dev)
                 * have in the buffer.  So, just drop this frame on the floor.
                 */
                if (status & BD_ENET_RX_CL) {
-                       dev->stats.rx_errors++;
-                       dev->stats.rx_frame_errors++;
+                       ndev->stats.rx_errors++;
+                       ndev->stats.rx_frame_errors++;
                        goto rx_processing_done;
                }
 
                /* Process the incoming frame. */
-               dev->stats.rx_packets++;
+               ndev->stats.rx_packets++;
                pkt_len = bdp->cbd_datlen;
-               dev->stats.rx_bytes += pkt_len;
+               ndev->stats.rx_bytes += pkt_len;
                data = (__u8*)__va(bdp->cbd_bufaddr);
 
-               dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
-                               DMA_FROM_DEVICE);
+               dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+                               FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
 
                if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
                        swap_buffer(data, pkt_len);
@@ -542,18 +643,18 @@ fec_enet_rx(struct net_device *dev)
 
                if (unlikely(!skb)) {
                        printk("%s: Memory squeeze, dropping packet.\n",
-                                       dev->name);
-                       dev->stats.rx_dropped++;
+                                       ndev->name);
+                       ndev->stats.rx_dropped++;
                } else {
                        skb_reserve(skb, NET_IP_ALIGN);
                        skb_put(skb, pkt_len - 4);      /* Make room */
                        skb_copy_to_linear_data(skb, data, pkt_len - 4);
-                       skb->protocol = eth_type_trans(skb, dev);
+                       skb->protocol = eth_type_trans(skb, ndev);
                        netif_rx(skb);
                }
 
-               bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen,
-                       DMA_FROM_DEVICE);
+               bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
+                               FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
 rx_processing_done:
                /* Clear the status flags for this buffer */
                status &= ~BD_ENET_RX_STATS;
@@ -578,10 +679,47 @@ rx_processing_done:
        spin_unlock(&fep->hw_lock);
 }
 
+static irqreturn_t
+fec_enet_interrupt(int irq, void *dev_id)
+{
+       struct net_device *ndev = dev_id;
+       struct fec_enet_private *fep = netdev_priv(ndev);
+       uint int_events;
+       irqreturn_t ret = IRQ_NONE;
+
+       do {
+               int_events = readl(fep->hwp + FEC_IEVENT);
+               writel(int_events, fep->hwp + FEC_IEVENT);
+
+               if (int_events & FEC_ENET_RXF) {
+                       ret = IRQ_HANDLED;
+                       fec_enet_rx(ndev);
+               }
+
+               /* Transmit OK, or non-fatal error. Update the buffer
+                * descriptors. FEC handles all errors, we just discover
+                * them as part of the transmit process.
+                */
+               if (int_events & FEC_ENET_TXF) {
+                       ret = IRQ_HANDLED;
+                       fec_enet_tx(ndev);
+               }
+
+               if (int_events & FEC_ENET_MII) {
+                       ret = IRQ_HANDLED;
+                       complete(&fep->mdio_done);
+               }
+       } while (int_events);
+
+       return ret;
+}
+
+
+
 /* ------------------------------------------------------------------------- */
-static void __inline__ fec_get_mac(struct net_device *dev)
+static void __inline__ fec_get_mac(struct net_device *ndev)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
        unsigned char *iap, tmpaddr[ETH_ALEN];
 
@@ -617,11 +755,11 @@ static void __inline__ fec_get_mac(struct net_device *dev)
                iap = &tmpaddr[0];
        }
 
-       memcpy(dev->dev_addr, iap, ETH_ALEN);
+       memcpy(ndev->dev_addr, iap, ETH_ALEN);
 
        /* Adjust MAC if using macaddr */
        if (iap == macaddr)
-                dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
+                ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
 }
 
 /* ------------------------------------------------------------------------- */
@@ -629,9 +767,9 @@ static void __inline__ fec_get_mac(struct net_device *dev)
 /*
  * Phy section
  */
-static void fec_enet_adjust_link(struct net_device *dev)
+static void fec_enet_adjust_link(struct net_device *ndev)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        struct phy_device *phy_dev = fep->phy_dev;
        unsigned long flags;
 
@@ -648,7 +786,7 @@ static void fec_enet_adjust_link(struct net_device *dev)
        /* Duplex link change */
        if (phy_dev->link) {
                if (fep->full_duplex != phy_dev->duplex) {
-                       fec_restart(dev, phy_dev->duplex);
+                       fec_restart(ndev, phy_dev->duplex);
                        status_change = 1;
                }
        }
@@ -657,9 +795,9 @@ static void fec_enet_adjust_link(struct net_device *dev)
        if (phy_dev->link != fep->link) {
                fep->link = phy_dev->link;
                if (phy_dev->link)
-                       fec_restart(dev, phy_dev->duplex);
+                       fec_restart(ndev, phy_dev->duplex);
                else
-                       fec_stop(dev);
+                       fec_stop(ndev);
                status_change = 1;
        }
 
@@ -728,9 +866,9 @@ static int fec_enet_mdio_reset(struct mii_bus *bus)
        return 0;
 }
 
-static int fec_enet_mii_probe(struct net_device *dev)
+static int fec_enet_mii_probe(struct net_device *ndev)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        struct phy_device *phy_dev = NULL;
        char mdio_bus_id[MII_BUS_ID_SIZE];
        char phy_name[MII_BUS_ID_SIZE + 3];
@@ -755,16 +893,16 @@ static int fec_enet_mii_probe(struct net_device *dev)
 
        if (phy_id >= PHY_MAX_ADDR) {
                printk(KERN_INFO "%s: no PHY, assuming direct connection "
-                       "to switch\n", dev->name);
+                       "to switch\n", ndev->name);
                strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
                phy_id = 0;
        }
 
        snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
-       phy_dev = phy_connect(dev, phy_name, &fec_enet_adjust_link, 0,
+       phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
                PHY_INTERFACE_MODE_MII);
        if (IS_ERR(phy_dev)) {
-               printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
+               printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
                return PTR_ERR(phy_dev);
        }
 
@@ -777,7 +915,7 @@ static int fec_enet_mii_probe(struct net_device *dev)
        fep->full_duplex = 0;
 
        printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
-               "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
+               "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
                fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
                fep->phy_dev->irq);
 
@@ -787,8 +925,8 @@ static int fec_enet_mii_probe(struct net_device *dev)
 static int fec_enet_mii_init(struct platform_device *pdev)
 {
        static struct mii_bus *fec0_mii_bus;
-       struct net_device *dev = platform_get_drvdata(pdev);
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        const struct platform_device_id *id_entry =
                                platform_get_device_id(fep->pdev);
        int err = -ENXIO, i;
@@ -846,8 +984,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        for (i = 0; i < PHY_MAX_ADDR; i++)
                fep->mii_bus->irq[i] = PHY_POLL;
 
-       platform_set_drvdata(dev, fep->mii_bus);
-
        if (mdiobus_register(fep->mii_bus))
                goto err_out_free_mdio_irq;
 
@@ -874,10 +1010,10 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep)
        mdiobus_free(fep->mii_bus);
 }
 
-static int fec_enet_get_settings(struct net_device *dev,
+static int fec_enet_get_settings(struct net_device *ndev,
                                  struct ethtool_cmd *cmd)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        struct phy_device *phydev = fep->phy_dev;
 
        if (!phydev)
@@ -886,10 +1022,10 @@ static int fec_enet_get_settings(struct net_device *dev,
        return phy_ethtool_gset(phydev, cmd);
 }
 
-static int fec_enet_set_settings(struct net_device *dev,
+static int fec_enet_set_settings(struct net_device *ndev,
                                 struct ethtool_cmd *cmd)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        struct phy_device *phydev = fep->phy_dev;
 
        if (!phydev)
@@ -898,14 +1034,14 @@ static int fec_enet_set_settings(struct net_device *dev,
        return phy_ethtool_sset(phydev, cmd);
 }
 
-static void fec_enet_get_drvinfo(struct net_device *dev,
+static void fec_enet_get_drvinfo(struct net_device *ndev,
                                 struct ethtool_drvinfo *info)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
 
        strcpy(info->driver, fep->pdev->dev.driver->name);
        strcpy(info->version, "Revision: 1.0");
-       strcpy(info->bus_info, dev_name(&dev->dev));
+       strcpy(info->bus_info, dev_name(&ndev->dev));
 }
 
 static struct ethtool_ops fec_enet_ethtool_ops = {
@@ -915,12 +1051,12 @@ static struct ethtool_ops fec_enet_ethtool_ops = {
        .get_link               = ethtool_op_get_link,
 };
 
-static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        struct phy_device *phydev = fep->phy_dev;
 
-       if (!netif_running(dev))
+       if (!netif_running(ndev))
                return -EINVAL;
 
        if (!phydev)
@@ -929,9 +1065,9 @@ static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        return phy_mii_ioctl(phydev, rq, cmd);
 }
 
-static void fec_enet_free_buffers(struct net_device *dev)
+static void fec_enet_free_buffers(struct net_device *ndev)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        int i;
        struct sk_buff *skb;
        struct bufdesc  *bdp;
@@ -941,7 +1077,7 @@ static void fec_enet_free_buffers(struct net_device *dev)
                skb = fep->rx_skbuff[i];
 
                if (bdp->cbd_bufaddr)
-                       dma_unmap_single(&dev->dev, bdp->cbd_bufaddr,
+                       dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
                                        FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
                if (skb)
                        dev_kfree_skb(skb);
@@ -953,9 +1089,9 @@ static void fec_enet_free_buffers(struct net_device *dev)
                kfree(fep->tx_bounce[i]);
 }
 
-static int fec_enet_alloc_buffers(struct net_device *dev)
+static int fec_enet_alloc_buffers(struct net_device *ndev)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        int i;
        struct sk_buff *skb;
        struct bufdesc  *bdp;
@@ -964,12 +1100,12 @@ static int fec_enet_alloc_buffers(struct net_device *dev)
        for (i = 0; i < RX_RING_SIZE; i++) {
                skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
                if (!skb) {
-                       fec_enet_free_buffers(dev);
+                       fec_enet_free_buffers(ndev);
                        return -ENOMEM;
                }
                fep->rx_skbuff[i] = skb;
 
-               bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
+               bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
                                FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
                bdp->cbd_sc = BD_ENET_RX_EMPTY;
                bdp++;
@@ -996,45 +1132,47 @@ static int fec_enet_alloc_buffers(struct net_device *dev)
 }
 
 static int
-fec_enet_open(struct net_device *dev)
+fec_enet_open(struct net_device *ndev)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        int ret;
 
        /* I should reset the ring buffers here, but I don't yet know
         * a simple way to do that.
         */
 
-       ret = fec_enet_alloc_buffers(dev);
+       ret = fec_enet_alloc_buffers(ndev);
        if (ret)
                return ret;
 
        /* Probe and connect to PHY when open the interface */
-       ret = fec_enet_mii_probe(dev);
+       ret = fec_enet_mii_probe(ndev);
        if (ret) {
-               fec_enet_free_buffers(dev);
+               fec_enet_free_buffers(ndev);
                return ret;
        }
        phy_start(fep->phy_dev);
-       netif_start_queue(dev);
+       netif_start_queue(ndev);
        fep->opened = 1;
        return 0;
 }
 
 static int
-fec_enet_close(struct net_device *dev)
+fec_enet_close(struct net_device *ndev)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
 
        /* Don't know what to do yet. */
        fep->opened = 0;
-       netif_stop_queue(dev);
-       fec_stop(dev);
+       netif_stop_queue(ndev);
+       fec_stop(ndev);
 
-       if (fep->phy_dev)
+       if (fep->phy_dev) {
+               phy_stop(fep->phy_dev);
                phy_disconnect(fep->phy_dev);
+       }
 
-        fec_enet_free_buffers(dev);
+       fec_enet_free_buffers(ndev);
 
        return 0;
 }
@@ -1052,14 +1190,14 @@ fec_enet_close(struct net_device *dev)
 #define HASH_BITS      6               /* #bits in hash */
 #define CRC32_POLY     0xEDB88320
 
-static void set_multicast_list(struct net_device *dev)
+static void set_multicast_list(struct net_device *ndev)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        struct netdev_hw_addr *ha;
        unsigned int i, bit, data, crc, tmp;
        unsigned char hash;
 
-       if (dev->flags & IFF_PROMISC) {
+       if (ndev->flags & IFF_PROMISC) {
                tmp = readl(fep->hwp + FEC_R_CNTRL);
                tmp |= 0x8;
                writel(tmp, fep->hwp + FEC_R_CNTRL);
@@ -1070,7 +1208,7 @@ static void set_multicast_list(struct net_device *dev)
        tmp &= ~0x8;
        writel(tmp, fep->hwp + FEC_R_CNTRL);
 
-       if (dev->flags & IFF_ALLMULTI) {
+       if (ndev->flags & IFF_ALLMULTI) {
                /* Catch all multicast addresses, so set the
                 * filter to all 1's
                 */
@@ -1085,7 +1223,7 @@ static void set_multicast_list(struct net_device *dev)
        writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
        writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
 
-       netdev_for_each_mc_addr(ha, dev) {
+       netdev_for_each_mc_addr(ha, ndev) {
                /* Only support group multicast for now */
                if (!(ha->addr[0] & 1))
                        continue;
@@ -1093,7 +1231,7 @@ static void set_multicast_list(struct net_device *dev)
                /* calculate crc32 value of mac address */
                crc = 0xffffffff;
 
-               for (i = 0; i < dev->addr_len; i++) {
+               for (i = 0; i < ndev->addr_len; i++) {
                        data = ha->addr[i];
                        for (bit = 0; bit < 8; bit++, data >>= 1) {
                                crc = (crc >> 1) ^
@@ -1120,20 +1258,20 @@ static void set_multicast_list(struct net_device *dev)
 
 /* Set a MAC change in hardware. */
 static int
-fec_set_mac_address(struct net_device *dev, void *p)
+fec_set_mac_address(struct net_device *ndev, void *p)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        struct sockaddr *addr = p;
 
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
 
-       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+       memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
 
-       writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
-               (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
+       writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
+               (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
                fep->hwp + FEC_ADDR_LOW);
-       writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
+       writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
                fep->hwp + FEC_ADDR_HIGH);
        return 0;
 }
@@ -1147,16 +1285,16 @@ static const struct net_device_ops fec_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_tx_timeout         = fec_timeout,
        .ndo_set_mac_address    = fec_set_mac_address,
-       .ndo_do_ioctl           = fec_enet_ioctl,
+       .ndo_do_ioctl           = fec_enet_ioctl,
 };
 
  /*
   * XXX:  We need to clean up on failure exits here.
   *
   */
-static int fec_enet_init(struct net_device *dev)
+static int fec_enet_init(struct net_device *ndev)
 {
-       struct fec_enet_private *fep = netdev_priv(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
        struct bufdesc *cbd_base;
        struct bufdesc *bdp;
        int i;
@@ -1171,20 +1309,19 @@ static int fec_enet_init(struct net_device *dev)
 
        spin_lock_init(&fep->hw_lock);
 
-       fep->hwp = (void __iomem *)dev->base_addr;
-       fep->netdev = dev;
+       fep->netdev = ndev;
 
        /* Get the Ethernet address */
-       fec_get_mac(dev);
+       fec_get_mac(ndev);
 
        /* Set receive and transmit descriptor base. */
        fep->rx_bd_base = cbd_base;
        fep->tx_bd_base = cbd_base + RX_RING_SIZE;
 
        /* The FEC Ethernet specific entries in the device structure */
-       dev->watchdog_timeo = TX_TIMEOUT;
-       dev->netdev_ops = &fec_netdev_ops;
-       dev->ethtool_ops = &fec_enet_ethtool_ops;
+       ndev->watchdog_timeo = TX_TIMEOUT;
+       ndev->netdev_ops = &fec_netdev_ops;
+       ndev->ethtool_ops = &fec_enet_ethtool_ops;
 
        /* Initialize the receive buffer descriptors. */
        bdp = fep->rx_bd_base;
@@ -1213,152 +1350,11 @@ static int fec_enet_init(struct net_device *dev)
        bdp--;
        bdp->cbd_sc |= BD_SC_WRAP;
 
-       fec_restart(dev, 0);
+       fec_restart(ndev, 0);
 
        return 0;
 }
 
-/* This function is called to start or restart the FEC during a link
- * change.  This only happens when switching between half and full
- * duplex.
- */
-static void
-fec_restart(struct net_device *dev, int duplex)
-{
-       struct fec_enet_private *fep = netdev_priv(dev);
-       const struct platform_device_id *id_entry =
-                               platform_get_device_id(fep->pdev);
-       int i;
-       u32 val, temp_mac[2];
-
-       /* Whack a reset.  We should wait for this. */
-       writel(1, fep->hwp + FEC_ECNTRL);
-       udelay(10);
-
-       /*
-        * enet-mac reset will reset mac address registers too,
-        * so need to reconfigure it.
-        */
-       if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
-               memcpy(&temp_mac, dev->dev_addr, ETH_ALEN);
-               writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
-               writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
-       }
-
-       /* Clear any outstanding interrupt. */
-       writel(0xffc00000, fep->hwp + FEC_IEVENT);
-
-       /* Reset all multicast. */
-       writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
-       writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-#ifndef CONFIG_M5272
-       writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
-       writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
-#endif
-
-       /* Set maximum receive buffer size. */
-       writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
-
-       /* Set receive and transmit descriptor base. */
-       writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
-       writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
-                       fep->hwp + FEC_X_DES_START);
-
-       fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
-       fep->cur_rx = fep->rx_bd_base;
-
-       /* Reset SKB transmit buffers. */
-       fep->skb_cur = fep->skb_dirty = 0;
-       for (i = 0; i <= TX_RING_MOD_MASK; i++) {
-               if (fep->tx_skbuff[i]) {
-                       dev_kfree_skb_any(fep->tx_skbuff[i]);
-                       fep->tx_skbuff[i] = NULL;
-               }
-       }
-
-       /* Enable MII mode */
-       if (duplex) {
-               /* MII enable / FD enable */
-               writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
-               writel(0x04, fep->hwp + FEC_X_CNTRL);
-       } else {
-               /* MII enable / No Rcv on Xmit */
-               writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
-               writel(0x0, fep->hwp + FEC_X_CNTRL);
-       }
-       fep->full_duplex = duplex;
-
-       /* Set MII speed */
-       writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
-
-       /*
-        * The phy interface and speed need to get configured
-        * differently on enet-mac.
-        */
-       if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
-               val = readl(fep->hwp + FEC_R_CNTRL);
-
-               /* MII or RMII */
-               if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
-                       val |= (1 << 8);
-               else
-                       val &= ~(1 << 8);
-
-               /* 10M or 100M */
-               if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
-                       val &= ~(1 << 9);
-               else
-                       val |= (1 << 9);
-
-               writel(val, fep->hwp + FEC_R_CNTRL);
-       } else {
-#ifdef FEC_MIIGSK_ENR
-               if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
-                       /* disable the gasket and wait */
-                       writel(0, fep->hwp + FEC_MIIGSK_ENR);
-                       while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
-                               udelay(1);
-
-                       /*
-                        * configure the gasket:
-                        *   RMII, 50 MHz, no loopback, no echo
-                        */
-                       writel(1, fep->hwp + FEC_MIIGSK_CFGR);
-
-                       /* re-enable the gasket */
-                       writel(2, fep->hwp + FEC_MIIGSK_ENR);
-               }
-#endif
-       }
-
-       /* And last, enable the transmit and receive processing */
-       writel(2, fep->hwp + FEC_ECNTRL);
-       writel(0, fep->hwp + FEC_R_DES_ACTIVE);
-
-       /* Enable interrupts we wish to service */
-       writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
-}
-
-static void
-fec_stop(struct net_device *dev)
-{
-       struct fec_enet_private *fep = netdev_priv(dev);
-
-       /* We cannot expect a graceful transmit stop without link !!! */
-       if (fep->link) {
-               writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
-               udelay(10);
-               if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
-                       printk("fec_stop : Graceful transmit stop did not complete !\n");
-       }
-
-       /* Whack a reset.  We should wait for this. */
-       writel(1, fep->hwp + FEC_ECNTRL);
-       udelay(10);
-       writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
-       writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
-}
-
 static int __devinit
 fec_probe(struct platform_device *pdev)
 {
@@ -1378,19 +1374,20 @@ fec_probe(struct platform_device *pdev)
 
        /* Init network device */
        ndev = alloc_etherdev(sizeof(struct fec_enet_private));
-       if (!ndev)
-               return -ENOMEM;
+       if (!ndev) {
+               ret = -ENOMEM;
+               goto failed_alloc_etherdev;
+       }
 
        SET_NETDEV_DEV(ndev, &pdev->dev);
 
        /* setup board info structure */
        fep = netdev_priv(ndev);
-       memset(fep, 0, sizeof(*fep));
 
-       ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
+       fep->hwp = ioremap(r->start, resource_size(r));
        fep->pdev = pdev;
 
-       if (!ndev->base_addr) {
+       if (!fep->hwp) {
                ret = -ENOMEM;
                goto failed_ioremap;
        }
@@ -1408,10 +1405,9 @@ fec_probe(struct platform_device *pdev)
                        break;
                ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
                if (ret) {
-                       while (i >= 0) {
+                       while (--i >= 0) {
                                irq = platform_get_irq(pdev, i);
                                free_irq(irq, ndev);
-                               i--;
                        }
                        goto failed_irq;
                }
@@ -1454,9 +1450,11 @@ failed_clk:
                        free_irq(irq, ndev);
        }
 failed_irq:
-       iounmap((void __iomem *)ndev->base_addr);
+       iounmap(fep->hwp);
 failed_ioremap:
        free_netdev(ndev);
+failed_alloc_etherdev:
+       release_mem_region(r->start, resource_size(r));
 
        return ret;
 }
@@ -1466,16 +1464,22 @@ fec_drv_remove(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct fec_enet_private *fep = netdev_priv(ndev);
-
-       platform_set_drvdata(pdev, NULL);
+       struct resource *r;
 
        fec_stop(ndev);
        fec_enet_mii_remove(fep);
        clk_disable(fep->clk);
        clk_put(fep->clk);
-       iounmap((void __iomem *)ndev->base_addr);
+       iounmap(fep->hwp);
        unregister_netdev(ndev);
        free_netdev(ndev);
+
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       BUG_ON(!r);
+       release_mem_region(r->start, resource_size(r));
+
+       platform_set_drvdata(pdev, NULL);
+
        return 0;
 }
 
@@ -1484,16 +1488,14 @@ static int
 fec_suspend(struct device *dev)
 {
        struct net_device *ndev = dev_get_drvdata(dev);
-       struct fec_enet_private *fep;
+       struct fec_enet_private *fep = netdev_priv(ndev);
 
-       if (ndev) {
-               fep = netdev_priv(ndev);
-               if (netif_running(ndev)) {
-                       fec_stop(ndev);
-                       netif_device_detach(ndev);
-               }
-               clk_disable(fep->clk);
+       if (netif_running(ndev)) {
+               fec_stop(ndev);
+               netif_device_detach(ndev);
        }
+       clk_disable(fep->clk);
+
        return 0;
 }
 
@@ -1501,16 +1503,14 @@ static int
 fec_resume(struct device *dev)
 {
        struct net_device *ndev = dev_get_drvdata(dev);
-       struct fec_enet_private *fep;
+       struct fec_enet_private *fep = netdev_priv(ndev);
 
-       if (ndev) {
-               fep = netdev_priv(ndev);
-               clk_enable(fep->clk);
-               if (netif_running(ndev)) {
-                       fec_restart(ndev, fep->full_duplex);
-                       netif_device_attach(ndev);
-               }
+       clk_enable(fep->clk);
+       if (netif_running(ndev)) {
+               fec_restart(ndev, fep->full_duplex);
+               netif_device_attach(ndev);
        }
+
        return 0;
 }
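
A note on the DMA mapping changes running through the fec.c hunks above: the
dma_map_single()/dma_unmap_single() calls are moved from &dev->dev (or a NULL
device in the receive path) to &fep->pdev->dev, so mappings are created against
the platform device that actually performs the DMA. A minimal sketch of the
resulting pattern follows; "buf" and "len" are illustrative names, not taken
from the patch itself.

	/* Sketch only: map a transmit buffer against the DMA-capable device. */
	dma_addr_t handle;

	handle = dma_map_single(&fep->pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, handle))
		return -ENOMEM;		/* mapping failed */

	/* ... the hardware owns the buffer until the matching unmap ... */
	dma_unmap_single(&fep->pdev->dev, handle, len, DMA_TO_DEVICE);
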
 
diff --git a/drivers/net/ftmac100.c b/drivers/net/ftmac100.c
new file mode 100644 (file)
index 0000000..df70368
--- /dev/null
@@ -0,0 +1,1196 @@
+/*
+ * Faraday FTMAC100 10/100 Ethernet
+ *
+ * (C) Copyright 2009-2011 Faraday Technology
+ * Po-Yu Chuang <ratbert@faraday-tech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "ftmac100.h"
+
+#define DRV_NAME       "ftmac100"
+#define DRV_VERSION    "0.2"
+
+#define RX_QUEUE_ENTRIES       128     /* must be power of 2 */
+#define TX_QUEUE_ENTRIES       16      /* must be power of 2 */
+
+#define MAX_PKT_SIZE           1518
+#define RX_BUF_SIZE            2044    /* must be smaller than 0x7ff */
+
+#if MAX_PKT_SIZE > 0x7ff
+#error invalid MAX_PKT_SIZE
+#endif
+
+#if RX_BUF_SIZE > 0x7ff || RX_BUF_SIZE > PAGE_SIZE
+#error invalid RX_BUF_SIZE
+#endif
+
+/******************************************************************************
+ * private data
+ *****************************************************************************/
+struct ftmac100_descs {
+       struct ftmac100_rxdes rxdes[RX_QUEUE_ENTRIES];
+       struct ftmac100_txdes txdes[TX_QUEUE_ENTRIES];
+};
+
+struct ftmac100 {
+       struct resource *res;
+       void __iomem *base;
+       int irq;
+
+       struct ftmac100_descs *descs;
+       dma_addr_t descs_dma_addr;
+
+       unsigned int rx_pointer;
+       unsigned int tx_clean_pointer;
+       unsigned int tx_pointer;
+       unsigned int tx_pending;
+
+       spinlock_t tx_lock;
+
+       struct net_device *netdev;
+       struct device *dev;
+       struct napi_struct napi;
+
+       struct mii_if_info mii;
+};
+
+static int ftmac100_alloc_rx_page(struct ftmac100 *priv, struct ftmac100_rxdes *rxdes);
+
+/******************************************************************************
+ * internal functions (hardware register access)
+ *****************************************************************************/
+#define INT_MASK_ALL_ENABLED   (FTMAC100_INT_RPKT_FINISH       | \
+                                FTMAC100_INT_NORXBUF           | \
+                                FTMAC100_INT_XPKT_OK           | \
+                                FTMAC100_INT_XPKT_LOST         | \
+                                FTMAC100_INT_RPKT_LOST         | \
+                                FTMAC100_INT_AHB_ERR           | \
+                                FTMAC100_INT_PHYSTS_CHG)
+
+#define INT_MASK_ALL_DISABLED  0
+
+static void ftmac100_enable_all_int(struct ftmac100 *priv)
+{
+       iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTMAC100_OFFSET_IMR);
+}
+
+static void ftmac100_disable_all_int(struct ftmac100 *priv)
+{
+       iowrite32(INT_MASK_ALL_DISABLED, priv->base + FTMAC100_OFFSET_IMR);
+}
+
+static void ftmac100_set_rx_ring_base(struct ftmac100 *priv, dma_addr_t addr)
+{
+       iowrite32(addr, priv->base + FTMAC100_OFFSET_RXR_BADR);
+}
+
+static void ftmac100_set_tx_ring_base(struct ftmac100 *priv, dma_addr_t addr)
+{
+       iowrite32(addr, priv->base + FTMAC100_OFFSET_TXR_BADR);
+}
+
+static void ftmac100_txdma_start_polling(struct ftmac100 *priv)
+{
+       iowrite32(1, priv->base + FTMAC100_OFFSET_TXPD);
+}
+
+static int ftmac100_reset(struct ftmac100 *priv)
+{
+       struct net_device *netdev = priv->netdev;
+       int i;
+
+       /* NOTE: reset clears all registers */
+       iowrite32(FTMAC100_MACCR_SW_RST, priv->base + FTMAC100_OFFSET_MACCR);
+
+       for (i = 0; i < 5; i++) {
+               unsigned int maccr;
+
+               maccr = ioread32(priv->base + FTMAC100_OFFSET_MACCR);
+               if (!(maccr & FTMAC100_MACCR_SW_RST)) {
+                       /*
+                        * FTMAC100_MACCR_SW_RST being cleared does not
+                        * guarantee that the hardware reset has completed;
+                        * we still need to wait for a while.
+                        */
+                       usleep_range(500, 1000);
+                       return 0;
+               }
+
+               usleep_range(1000, 10000);
+       }
+
+       netdev_err(netdev, "software reset failed\n");
+       return -EIO;
+}
+
+static void ftmac100_set_mac(struct ftmac100 *priv, const unsigned char *mac)
+{
+       unsigned int maddr = mac[0] << 8 | mac[1];
+       unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
+
+       iowrite32(maddr, priv->base + FTMAC100_OFFSET_MAC_MADR);
+       iowrite32(laddr, priv->base + FTMAC100_OFFSET_MAC_LADR);
+}
+
+#define MACCR_ENABLE_ALL       (FTMAC100_MACCR_XMT_EN  | \
+                                FTMAC100_MACCR_RCV_EN  | \
+                                FTMAC100_MACCR_XDMA_EN | \
+                                FTMAC100_MACCR_RDMA_EN | \
+                                FTMAC100_MACCR_CRC_APD | \
+                                FTMAC100_MACCR_FULLDUP | \
+                                FTMAC100_MACCR_RX_RUNT | \
+                                FTMAC100_MACCR_RX_BROADPKT)
+
+static int ftmac100_start_hw(struct ftmac100 *priv)
+{
+       struct net_device *netdev = priv->netdev;
+
+       if (ftmac100_reset(priv))
+               return -EIO;
+
+       /* setup ring buffer base registers */
+       ftmac100_set_rx_ring_base(priv,
+                                 priv->descs_dma_addr +
+                                 offsetof(struct ftmac100_descs, rxdes));
+       ftmac100_set_tx_ring_base(priv,
+                                 priv->descs_dma_addr +
+                                 offsetof(struct ftmac100_descs, txdes));
+
+       iowrite32(FTMAC100_APTC_RXPOLL_CNT(1), priv->base + FTMAC100_OFFSET_APTC);
+
+       ftmac100_set_mac(priv, netdev->dev_addr);
+
+       iowrite32(MACCR_ENABLE_ALL, priv->base + FTMAC100_OFFSET_MACCR);
+       return 0;
+}
+
+static void ftmac100_stop_hw(struct ftmac100 *priv)
+{
+       iowrite32(0, priv->base + FTMAC100_OFFSET_MACCR);
+}
+
+/******************************************************************************
+ * internal functions (receive descriptor)
+ *****************************************************************************/
+static bool ftmac100_rxdes_first_segment(struct ftmac100_rxdes *rxdes)
+{
+       return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FRS);
+}
+
+static bool ftmac100_rxdes_last_segment(struct ftmac100_rxdes *rxdes)
+{
+       return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_LRS);
+}
+
+static bool ftmac100_rxdes_owned_by_dma(struct ftmac100_rxdes *rxdes)
+{
+       return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN);
+}
+
+static void ftmac100_rxdes_set_dma_own(struct ftmac100_rxdes *rxdes)
+{
+       /* clear status bits */
+       rxdes->rxdes0 = cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN);
+}
+
+static bool ftmac100_rxdes_rx_error(struct ftmac100_rxdes *rxdes)
+{
+       return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ERR);
+}
+
+static bool ftmac100_rxdes_crc_error(struct ftmac100_rxdes *rxdes)
+{
+       return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_CRC_ERR);
+}
+
+static bool ftmac100_rxdes_frame_too_long(struct ftmac100_rxdes *rxdes)
+{
+       return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FTL);
+}
+
+static bool ftmac100_rxdes_runt(struct ftmac100_rxdes *rxdes)
+{
+       return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RUNT);
+}
+
+static bool ftmac100_rxdes_odd_nibble(struct ftmac100_rxdes *rxdes)
+{
+       return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ODD_NB);
+}
+
+static unsigned int ftmac100_rxdes_frame_length(struct ftmac100_rxdes *rxdes)
+{
+       return le32_to_cpu(rxdes->rxdes0) & FTMAC100_RXDES0_RFL;
+}
+
+static bool ftmac100_rxdes_multicast(struct ftmac100_rxdes *rxdes)
+{
+       return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_MULTICAST);
+}
+
+static void ftmac100_rxdes_set_buffer_size(struct ftmac100_rxdes *rxdes,
+                                          unsigned int size)
+{
+       rxdes->rxdes1 &= cpu_to_le32(FTMAC100_RXDES1_EDORR);
+       rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_RXBUF_SIZE(size));
+}
+
+static void ftmac100_rxdes_set_end_of_ring(struct ftmac100_rxdes *rxdes)
+{
+       rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_EDORR);
+}
+
+static void ftmac100_rxdes_set_dma_addr(struct ftmac100_rxdes *rxdes,
+                                       dma_addr_t addr)
+{
+       rxdes->rxdes2 = cpu_to_le32(addr);
+}
+
+static dma_addr_t ftmac100_rxdes_get_dma_addr(struct ftmac100_rxdes *rxdes)
+{
+       return le32_to_cpu(rxdes->rxdes2);
+}
+
+/*
+ * rxdes3 is not used by hardware. We use it to keep track of the receive page.
+ * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
+ */
+static void ftmac100_rxdes_set_page(struct ftmac100_rxdes *rxdes, struct page *page)
+{
+       rxdes->rxdes3 = (unsigned int)page;
+}
+
+static struct page *ftmac100_rxdes_get_page(struct ftmac100_rxdes *rxdes)
+{
+       return (struct page *)rxdes->rxdes3;
+}
+
+/******************************************************************************
+ * internal functions (receive)
+ *****************************************************************************/
+static int ftmac100_next_rx_pointer(int pointer)
+{
+       return (pointer + 1) & (RX_QUEUE_ENTRIES - 1);
+}
+
+static void ftmac100_rx_pointer_advance(struct ftmac100 *priv)
+{
+       priv->rx_pointer = ftmac100_next_rx_pointer(priv->rx_pointer);
+}
+
+static struct ftmac100_rxdes *ftmac100_current_rxdes(struct ftmac100 *priv)
+{
+       return &priv->descs->rxdes[priv->rx_pointer];
+}
+
+static struct ftmac100_rxdes *
+ftmac100_rx_locate_first_segment(struct ftmac100 *priv)
+{
+       struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv);
+
+       while (!ftmac100_rxdes_owned_by_dma(rxdes)) {
+               if (ftmac100_rxdes_first_segment(rxdes))
+                       return rxdes;
+
+               ftmac100_rxdes_set_dma_own(rxdes);
+               ftmac100_rx_pointer_advance(priv);
+               rxdes = ftmac100_current_rxdes(priv);
+       }
+
+       return NULL;
+}
+
+static bool ftmac100_rx_packet_error(struct ftmac100 *priv,
+                                    struct ftmac100_rxdes *rxdes)
+{
+       struct net_device *netdev = priv->netdev;
+       bool error = false;
+
+       if (unlikely(ftmac100_rxdes_rx_error(rxdes))) {
+               if (net_ratelimit())
+                       netdev_info(netdev, "rx err\n");
+
+               netdev->stats.rx_errors++;
+               error = true;
+       }
+
+       if (unlikely(ftmac100_rxdes_crc_error(rxdes))) {
+               if (net_ratelimit())
+                       netdev_info(netdev, "rx crc err\n");
+
+               netdev->stats.rx_crc_errors++;
+               error = true;
+       }
+
+       if (unlikely(ftmac100_rxdes_frame_too_long(rxdes))) {
+               if (net_ratelimit())
+                       netdev_info(netdev, "rx frame too long\n");
+
+               netdev->stats.rx_length_errors++;
+               error = true;
+       } else if (unlikely(ftmac100_rxdes_runt(rxdes))) {
+               if (net_ratelimit())
+                       netdev_info(netdev, "rx runt\n");
+
+               netdev->stats.rx_length_errors++;
+               error = true;
+       } else if (unlikely(ftmac100_rxdes_odd_nibble(rxdes))) {
+               if (net_ratelimit())
+                       netdev_info(netdev, "rx odd nibble\n");
+
+               netdev->stats.rx_length_errors++;
+               error = true;
+       }
+
+       return error;
+}
+
+static void ftmac100_rx_drop_packet(struct ftmac100 *priv)
+{
+       struct net_device *netdev = priv->netdev;
+       struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv);
+       bool done = false;
+
+       if (net_ratelimit())
+               netdev_dbg(netdev, "drop packet %p\n", rxdes);
+
+       do {
+               if (ftmac100_rxdes_last_segment(rxdes))
+                       done = true;
+
+               ftmac100_rxdes_set_dma_own(rxdes);
+               ftmac100_rx_pointer_advance(priv);
+               rxdes = ftmac100_current_rxdes(priv);
+       } while (!done && !ftmac100_rxdes_owned_by_dma(rxdes));
+
+       netdev->stats.rx_dropped++;
+}
+
+static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
+{
+       struct net_device *netdev = priv->netdev;
+       struct ftmac100_rxdes *rxdes;
+       struct sk_buff *skb;
+       struct page *page;
+       dma_addr_t map;
+       int length;
+
+       rxdes = ftmac100_rx_locate_first_segment(priv);
+       if (!rxdes)
+               return false;
+
+       if (unlikely(ftmac100_rx_packet_error(priv, rxdes))) {
+               ftmac100_rx_drop_packet(priv);
+               return true;
+       }
+
+       /*
+        * It is impossible to get multi-segment packets
+        * because we always provide big enough receive buffers.
+        */
+       if (unlikely(!ftmac100_rxdes_last_segment(rxdes)))
+               BUG();
+
+       /* start processing */
+       skb = netdev_alloc_skb_ip_align(netdev, 128);
+       if (unlikely(!skb)) {
+               if (net_ratelimit())
+                       netdev_err(netdev, "rx skb alloc failed\n");
+
+               ftmac100_rx_drop_packet(priv);
+               return true;
+       }
+
+       if (unlikely(ftmac100_rxdes_multicast(rxdes)))
+               netdev->stats.multicast++;
+
+       map = ftmac100_rxdes_get_dma_addr(rxdes);
+       dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+       length = ftmac100_rxdes_frame_length(rxdes);
+       page = ftmac100_rxdes_get_page(rxdes);
+       skb_fill_page_desc(skb, 0, page, 0, length);
+       skb->len += length;
+       skb->data_len += length;
+       skb->truesize += length;
+       __pskb_pull_tail(skb, min(length, 64));
+
+       ftmac100_alloc_rx_page(priv, rxdes);
+
+       ftmac100_rx_pointer_advance(priv);
+
+       skb->protocol = eth_type_trans(skb, netdev);
+
+       netdev->stats.rx_packets++;
+       netdev->stats.rx_bytes += skb->len;
+
+       /* push packet to protocol stack */
+       netif_receive_skb(skb);
+
+       (*processed)++;
+       return true;
+}
+
+/******************************************************************************
+ * internal functions (transmit descriptor)
+ *****************************************************************************/
+static void ftmac100_txdes_reset(struct ftmac100_txdes *txdes)
+{
+       /* clear all except end of ring bit */
+       txdes->txdes0 = 0;
+       txdes->txdes1 &= cpu_to_le32(FTMAC100_TXDES1_EDOTR);
+       txdes->txdes2 = 0;
+       txdes->txdes3 = 0;
+}
+
+static bool ftmac100_txdes_owned_by_dma(struct ftmac100_txdes *txdes)
+{
+       return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN);
+}
+
+static void ftmac100_txdes_set_dma_own(struct ftmac100_txdes *txdes)
+{
+       /*
+        * Make sure the dma own bit is not set before the other
+        * descriptor fields have been written.
+        */
+       wmb();
+       txdes->txdes0 |= cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN);
+}
+
+static bool ftmac100_txdes_excessive_collision(struct ftmac100_txdes *txdes)
+{
+       return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_EXSCOL);
+}
+
+static bool ftmac100_txdes_late_collision(struct ftmac100_txdes *txdes)
+{
+       return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_LATECOL);
+}
+
+static void ftmac100_txdes_set_end_of_ring(struct ftmac100_txdes *txdes)
+{
+       txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_EDOTR);
+}
+
+static void ftmac100_txdes_set_first_segment(struct ftmac100_txdes *txdes)
+{
+       txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_FTS);
+}
+
+static void ftmac100_txdes_set_last_segment(struct ftmac100_txdes *txdes)
+{
+       txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_LTS);
+}
+
+static void ftmac100_txdes_set_txint(struct ftmac100_txdes *txdes)
+{
+       txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXIC);
+}
+
+static void ftmac100_txdes_set_buffer_size(struct ftmac100_txdes *txdes,
+                                          unsigned int len)
+{
+       txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXBUF_SIZE(len));
+}
+
+static void ftmac100_txdes_set_dma_addr(struct ftmac100_txdes *txdes,
+                                       dma_addr_t addr)
+{
+       txdes->txdes2 = cpu_to_le32(addr);
+}
+
+static dma_addr_t ftmac100_txdes_get_dma_addr(struct ftmac100_txdes *txdes)
+{
+       return le32_to_cpu(txdes->txdes2);
+}
+
+/*
+ * txdes3 is not used by hardware. We use it to keep track of the socket buffer.
+ * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
+ */
+static void ftmac100_txdes_set_skb(struct ftmac100_txdes *txdes, struct sk_buff *skb)
+{
+       txdes->txdes3 = (unsigned int)skb;
+}
+
+static struct sk_buff *ftmac100_txdes_get_skb(struct ftmac100_txdes *txdes)
+{
+       return (struct sk_buff *)txdes->txdes3;
+}
+
+/******************************************************************************
+ * internal functions (transmit)
+ *****************************************************************************/
+static int ftmac100_next_tx_pointer(int pointer)
+{
+       return (pointer + 1) & (TX_QUEUE_ENTRIES - 1);
+}
+
+static void ftmac100_tx_pointer_advance(struct ftmac100 *priv)
+{
+       priv->tx_pointer = ftmac100_next_tx_pointer(priv->tx_pointer);
+}
+
+static void ftmac100_tx_clean_pointer_advance(struct ftmac100 *priv)
+{
+       priv->tx_clean_pointer = ftmac100_next_tx_pointer(priv->tx_clean_pointer);
+}
+
+static struct ftmac100_txdes *ftmac100_current_txdes(struct ftmac100 *priv)
+{
+       return &priv->descs->txdes[priv->tx_pointer];
+}
+
+static struct ftmac100_txdes *ftmac100_current_clean_txdes(struct ftmac100 *priv)
+{
+       return &priv->descs->txdes[priv->tx_clean_pointer];
+}
+
+static bool ftmac100_tx_complete_packet(struct ftmac100 *priv)
+{
+       struct net_device *netdev = priv->netdev;
+       struct ftmac100_txdes *txdes;
+       struct sk_buff *skb;
+       dma_addr_t map;
+
+       if (priv->tx_pending == 0)
+               return false;
+
+       txdes = ftmac100_current_clean_txdes(priv);
+
+       if (ftmac100_txdes_owned_by_dma(txdes))
+               return false;
+
+       skb = ftmac100_txdes_get_skb(txdes);
+       map = ftmac100_txdes_get_dma_addr(txdes);
+
+       if (unlikely(ftmac100_txdes_excessive_collision(txdes) ||
+                    ftmac100_txdes_late_collision(txdes))) {
+               /*
+                * packet transmitted to ethernet was lost due to a late
+                * or excessive collision
+                */
+               netdev->stats.tx_aborted_errors++;
+       } else {
+               netdev->stats.tx_packets++;
+               netdev->stats.tx_bytes += skb->len;
+       }
+
+       dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
+       dev_kfree_skb(skb);
+
+       ftmac100_txdes_reset(txdes);
+
+       ftmac100_tx_clean_pointer_advance(priv);
+
+       spin_lock(&priv->tx_lock);
+       priv->tx_pending--;
+       spin_unlock(&priv->tx_lock);
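+       /* A descriptor has been released, so the queue can be restarted. */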
+       netif_wake_queue(netdev);
+
+       return true;
+}
+
+static void ftmac100_tx_complete(struct ftmac100 *priv)
+{
+       while (ftmac100_tx_complete_packet(priv))
+               ;
+}
+
+static int ftmac100_xmit(struct ftmac100 *priv, struct sk_buff *skb,
+                        dma_addr_t map)
+{
+       struct net_device *netdev = priv->netdev;
+       struct ftmac100_txdes *txdes;
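+       /*
+        * The descriptor is programmed to transmit at least ETH_ZLEN bytes
+        * so that short frames are padded to the minimum Ethernet length.
+        */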
+       unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
+
+       txdes = ftmac100_current_txdes(priv);
+       ftmac100_tx_pointer_advance(priv);
+
+       /* setup TX descriptor */
+       ftmac100_txdes_set_skb(txdes, skb);
+       ftmac100_txdes_set_dma_addr(txdes, map);
+
+       ftmac100_txdes_set_first_segment(txdes);
+       ftmac100_txdes_set_last_segment(txdes);
+       ftmac100_txdes_set_txint(txdes);
+       ftmac100_txdes_set_buffer_size(txdes, len);
+
+       spin_lock(&priv->tx_lock);
+       priv->tx_pending++;
+       if (priv->tx_pending == TX_QUEUE_ENTRIES)
+               netif_stop_queue(netdev);
+
+       /* start transmit */
+       ftmac100_txdes_set_dma_own(txdes);
+       spin_unlock(&priv->tx_lock);
+
+       ftmac100_txdma_start_polling(priv);
+       return NETDEV_TX_OK;
+}
+
+/******************************************************************************
+ * internal functions (buffer)
+ *****************************************************************************/
+static int ftmac100_alloc_rx_page(struct ftmac100 *priv, struct ftmac100_rxdes *rxdes)
+{
+       struct net_device *netdev = priv->netdev;
+       struct page *page;
+       dma_addr_t map;
+
+       page = alloc_page(GFP_KERNEL);
+       if (!page) {
+               if (net_ratelimit())
+                       netdev_err(netdev, "failed to allocate rx page\n");
+               return -ENOMEM;
+       }
+
+       map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(priv->dev, map))) {
+               if (net_ratelimit())
+                       netdev_err(netdev, "failed to map rx page\n");
+               __free_page(page);
+               return -ENOMEM;
+       }
+
+       ftmac100_rxdes_set_page(rxdes, page);
+       ftmac100_rxdes_set_dma_addr(rxdes, map);
+       ftmac100_rxdes_set_buffer_size(rxdes, RX_BUF_SIZE);
+       ftmac100_rxdes_set_dma_own(rxdes);
+       return 0;
+}
+
+static void ftmac100_free_buffers(struct ftmac100 *priv)
+{
+       int i;
+
+       for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+               struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i];
+               struct page *page = ftmac100_rxdes_get_page(rxdes);
+               dma_addr_t map = ftmac100_rxdes_get_dma_addr(rxdes);
+
+               if (!page)
+                       continue;
+
+               dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+               __free_page(page);
+       }
+
+       for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
+               struct ftmac100_txdes *txdes = &priv->descs->txdes[i];
+               struct sk_buff *skb = ftmac100_txdes_get_skb(txdes);
+               dma_addr_t map = ftmac100_txdes_get_dma_addr(txdes);
+
+               if (!skb)
+                       continue;
+
+               dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
+               dev_kfree_skb(skb);
+       }
+
+       dma_free_coherent(priv->dev, sizeof(struct ftmac100_descs),
+                         priv->descs, priv->descs_dma_addr);
+}
+
+static int ftmac100_alloc_buffers(struct ftmac100 *priv)
+{
+       int i;
+
+       priv->descs = dma_alloc_coherent(priv->dev, sizeof(struct ftmac100_descs),
+                                        &priv->descs_dma_addr, GFP_KERNEL);
+       if (!priv->descs)
+               return -ENOMEM;
+
+       memset(priv->descs, 0, sizeof(struct ftmac100_descs));
+
+       /* initialize RX ring */
+       ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
+
+       for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+               struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i];
+
+               if (ftmac100_alloc_rx_page(priv, rxdes))
+                       goto err;
+       }
+
+       /* initialize TX ring */
+       ftmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
+       return 0;
+
+err:
+       ftmac100_free_buffers(priv);
+       return -ENOMEM;
+}
+
+/******************************************************************************
+ * struct mii_if_info functions
+ *****************************************************************************/
+static int ftmac100_mdio_read(struct net_device *netdev, int phy_id, int reg)
+{
+       struct ftmac100 *priv = netdev_priv(netdev);
+       unsigned int phycr;
+       int i;
+
+       phycr = FTMAC100_PHYCR_PHYAD(phy_id) |
+               FTMAC100_PHYCR_REGAD(reg) |
+               FTMAC100_PHYCR_MIIRD;
+
+       iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR);
+
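+       /* Wait for the hardware to clear MIIRD (read complete) */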
+       for (i = 0; i < 10; i++) {
+               phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR);
+
+               if ((phycr & FTMAC100_PHYCR_MIIRD) == 0)
+                       return phycr & FTMAC100_PHYCR_MIIRDATA;
+
+               usleep_range(100, 1000);
+       }
+
+       netdev_err(netdev, "mdio read timed out\n");
+       return 0;
+}
+
+static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg,
+                               int data)
+{
+       struct ftmac100 *priv = netdev_priv(netdev);
+       unsigned int phycr;
+       int i;
+
+       phycr = FTMAC100_PHYCR_PHYAD(phy_id) |
+               FTMAC100_PHYCR_REGAD(reg) |
+               FTMAC100_PHYCR_MIIWR;
+
+       data = FTMAC100_PHYWDATA_MIIWDATA(data);
+
+       iowrite32(data, priv->base + FTMAC100_OFFSET_PHYWDATA);
+       iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR);
+
+       for (i = 0; i < 10; i++) {
+               phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR);
+
+               if ((phycr & FTMAC100_PHYCR_MIIWR) == 0)
+                       return;
+
+               usleep_range(100, 1000);
+       }
+
+       netdev_err(netdev, "mdio write timed out\n");
+}
+
+/******************************************************************************
+ * struct ethtool_ops functions
+ *****************************************************************************/
+static void ftmac100_get_drvinfo(struct net_device *netdev,
+                                struct ethtool_drvinfo *info)
+{
+       strcpy(info->driver, DRV_NAME);
+       strcpy(info->version, DRV_VERSION);
+       strcpy(info->bus_info, dev_name(&netdev->dev));
+}
+
+static int ftmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+       struct ftmac100 *priv = netdev_priv(netdev);
+       return mii_ethtool_gset(&priv->mii, cmd);
+}
+
+static int ftmac100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+       struct ftmac100 *priv = netdev_priv(netdev);
+       return mii_ethtool_sset(&priv->mii, cmd);
+}
+
+static int ftmac100_nway_reset(struct net_device *netdev)
+{
+       struct ftmac100 *priv = netdev_priv(netdev);
+       return mii_nway_restart(&priv->mii);
+}
+
+static u32 ftmac100_get_link(struct net_device *netdev)
+{
+       struct ftmac100 *priv = netdev_priv(netdev);
+       return mii_link_ok(&priv->mii);
+}
+
+static const struct ethtool_ops ftmac100_ethtool_ops = {
+       .set_settings           = ftmac100_set_settings,
+       .get_settings           = ftmac100_get_settings,
+       .get_drvinfo            = ftmac100_get_drvinfo,
+       .nway_reset             = ftmac100_nway_reset,
+       .get_link               = ftmac100_get_link,
+};
+
+/******************************************************************************
+ * interrupt handler
+ *****************************************************************************/
+static irqreturn_t ftmac100_interrupt(int irq, void *dev_id)
+{
+       struct net_device *netdev = dev_id;
+       struct ftmac100 *priv = netdev_priv(netdev);
+
+       if (likely(netif_running(netdev))) {
+               /* Disable interrupts for polling */
+               ftmac100_disable_all_int(priv);
+               napi_schedule(&priv->napi);
+       }
+
+       return IRQ_HANDLED;
+}
+
+/******************************************************************************
+ * struct napi_struct functions
+ *****************************************************************************/
+static int ftmac100_poll(struct napi_struct *napi, int budget)
+{
+       struct ftmac100 *priv = container_of(napi, struct ftmac100, napi);
+       struct net_device *netdev = priv->netdev;
+       unsigned int status;
+       bool completed = true;
+       int rx = 0;
+
+       status = ioread32(priv->base + FTMAC100_OFFSET_ISR);
+
+       if (status & (FTMAC100_INT_RPKT_FINISH | FTMAC100_INT_NORXBUF)) {
+               /*
+                * FTMAC100_INT_RPKT_FINISH:
+                *      RX DMA has received packets into RX buffer successfully
+                *
+                * FTMAC100_INT_NORXBUF:
+                *      RX buffer unavailable
+                */
+               bool retry;
+
+               do {
+                       retry = ftmac100_rx_packet(priv, &rx);
+               } while (retry && rx < budget);
+
+               if (retry && rx == budget)
+                       completed = false;
+       }
+
+       if (status & (FTMAC100_INT_XPKT_OK | FTMAC100_INT_XPKT_LOST)) {
+               /*
+                * FTMAC100_INT_XPKT_OK:
+                *      packet transmitted to ethernet successfully
+                *
+                * FTMAC100_INT_XPKT_LOST:
+                *      packet transmitted to ethernet was lost due to a
+                *      late or excessive collision
+                */
+               ftmac100_tx_complete(priv);
+       }
+
+       if (status & (FTMAC100_INT_NORXBUF | FTMAC100_INT_RPKT_LOST |
+                     FTMAC100_INT_AHB_ERR | FTMAC100_INT_PHYSTS_CHG)) {
+               if (net_ratelimit())
+                       netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status,
+                                   status & FTMAC100_INT_NORXBUF ? "NORXBUF " : "",
+                                   status & FTMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
+                                   status & FTMAC100_INT_AHB_ERR ? "AHB_ERR " : "",
+                                   status & FTMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : "");
+
+               if (status & FTMAC100_INT_NORXBUF) {
+                       /* RX buffer unavailable */
+                       netdev->stats.rx_over_errors++;
+               }
+
+               if (status & FTMAC100_INT_RPKT_LOST) {
+                       /* received packet lost due to RX FIFO full */
+                       netdev->stats.rx_fifo_errors++;
+               }
+
+               if (status & FTMAC100_INT_PHYSTS_CHG) {
+                       /* PHY link status change */
+                       mii_check_link(&priv->mii);
+               }
+       }
+
+       if (completed) {
+               /* stop polling */
+               napi_complete(napi);
+               ftmac100_enable_all_int(priv);
+       }
+
+       return rx;
+}
+
+/******************************************************************************
+ * struct net_device_ops functions
+ *****************************************************************************/
+static int ftmac100_open(struct net_device *netdev)
+{
+       struct ftmac100 *priv = netdev_priv(netdev);
+       int err;
+
+       err = ftmac100_alloc_buffers(priv);
+       if (err) {
+               netdev_err(netdev, "failed to allocate buffers\n");
+               goto err_alloc;
+       }
+
+       err = request_irq(priv->irq, ftmac100_interrupt, 0, netdev->name, netdev);
+       if (err) {
+               netdev_err(netdev, "failed to request irq %d\n", priv->irq);
+               goto err_irq;
+       }
+
+       priv->rx_pointer = 0;
+       priv->tx_clean_pointer = 0;
+       priv->tx_pointer = 0;
+       priv->tx_pending = 0;
+
+       err = ftmac100_start_hw(priv);
+       if (err)
+               goto err_hw;
+
+       napi_enable(&priv->napi);
+       netif_start_queue(netdev);
+
+       ftmac100_enable_all_int(priv);
+
+       return 0;
+
+err_hw:
+       free_irq(priv->irq, netdev);
+err_irq:
+       ftmac100_free_buffers(priv);
+err_alloc:
+       return err;
+}
+
+static int ftmac100_stop(struct net_device *netdev)
+{
+       struct ftmac100 *priv = netdev_priv(netdev);
+
+       ftmac100_disable_all_int(priv);
+       netif_stop_queue(netdev);
+       napi_disable(&priv->napi);
+       ftmac100_stop_hw(priv);
+       free_irq(priv->irq, netdev);
+       ftmac100_free_buffers(priv);
+
+       return 0;
+}
+
+static int ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+       struct ftmac100 *priv = netdev_priv(netdev);
+       dma_addr_t map;
+
+       if (unlikely(skb->len > MAX_PKT_SIZE)) {
+               if (net_ratelimit())
+                       netdev_dbg(netdev, "tx packet too big\n");
+
+               netdev->stats.tx_dropped++;
+               dev_kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+
+       map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(priv->dev, map))) {
+               /* drop packet */
+               if (net_ratelimit())
+                       netdev_err(netdev, "map socket buffer failed\n");
+
+               netdev->stats.tx_dropped++;
+               dev_kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+
+       return ftmac100_xmit(priv, skb, map);
+}
+
+/* optional */
+static int ftmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+       struct ftmac100 *priv = netdev_priv(netdev);
+       struct mii_ioctl_data *data = if_mii(ifr);
+
+       return generic_mii_ioctl(&priv->mii, data, cmd, NULL);
+}
+
+static const struct net_device_ops ftmac100_netdev_ops = {
+       .ndo_open               = ftmac100_open,
+       .ndo_stop               = ftmac100_stop,
+       .ndo_start_xmit         = ftmac100_hard_start_xmit,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_do_ioctl           = ftmac100_do_ioctl,
+};
+
+/******************************************************************************
+ * struct platform_driver functions
+ *****************************************************************************/
+static int ftmac100_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       int irq;
+       struct net_device *netdev;
+       struct ftmac100 *priv;
+       int err;
+
+       if (!pdev)
+               return -ENODEV;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -ENXIO;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return irq;
+
+       /* setup net_device */
+       netdev = alloc_etherdev(sizeof(*priv));
+       if (!netdev) {
+               err = -ENOMEM;
+               goto err_alloc_etherdev;
+       }
+
+       SET_NETDEV_DEV(netdev, &pdev->dev);
+       SET_ETHTOOL_OPS(netdev, &ftmac100_ethtool_ops);
+       netdev->netdev_ops = &ftmac100_netdev_ops;
+
+       platform_set_drvdata(pdev, netdev);
+
+       /* setup private data */
+       priv = netdev_priv(netdev);
+       priv->netdev = netdev;
+       priv->dev = &pdev->dev;
+
+       spin_lock_init(&priv->tx_lock);
+
+       /* initialize NAPI */
+       netif_napi_add(netdev, &priv->napi, ftmac100_poll, 64);
+
+       /* map io memory */
+       priv->res = request_mem_region(res->start, resource_size(res),
+                                      dev_name(&pdev->dev));
+       if (!priv->res) {
+               dev_err(&pdev->dev, "Could not reserve memory region\n");
+               err = -ENOMEM;
+               goto err_req_mem;
+       }
+
+       priv->base = ioremap(res->start, resource_size(res));
+       if (!priv->base) {
+               dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
+               err = -EIO;
+               goto err_ioremap;
+       }
+
+       priv->irq = irq;
+
+       /* initialize struct mii_if_info */
+       priv->mii.phy_id        = 0;
+       priv->mii.phy_id_mask   = 0x1f;
+       priv->mii.reg_num_mask  = 0x1f;
+       priv->mii.dev           = netdev;
+       priv->mii.mdio_read     = ftmac100_mdio_read;
+       priv->mii.mdio_write    = ftmac100_mdio_write;
+
+       /* register network device */
+       err = register_netdev(netdev);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to register netdev\n");
+               goto err_register_netdev;
+       }
+
+       netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base);
+
+       if (!is_valid_ether_addr(netdev->dev_addr)) {
+               random_ether_addr(netdev->dev_addr);
+               netdev_info(netdev, "generated random MAC address %pM\n",
+                           netdev->dev_addr);
+       }
+
+       return 0;
+
+err_register_netdev:
+       iounmap(priv->base);
+err_ioremap:
+       release_resource(priv->res);
+err_req_mem:
+       netif_napi_del(&priv->napi);
+       platform_set_drvdata(pdev, NULL);
+       free_netdev(netdev);
+err_alloc_etherdev:
+       return err;
+}
+
+static int __devexit ftmac100_remove(struct platform_device *pdev)
+{
+       struct net_device *netdev;
+       struct ftmac100 *priv;
+
+       netdev = platform_get_drvdata(pdev);
+       priv = netdev_priv(netdev);
+
+       unregister_netdev(netdev);
+
+       iounmap(priv->base);
+       release_resource(priv->res);
+
+       netif_napi_del(&priv->napi);
+       platform_set_drvdata(pdev, NULL);
+       free_netdev(netdev);
+       return 0;
+}
+
+static struct platform_driver ftmac100_driver = {
+       .probe          = ftmac100_probe,
+       .remove         = __devexit_p(ftmac100_remove),
+       .driver         = {
+               .name   = DRV_NAME,
+               .owner  = THIS_MODULE,
+       },
+};
+
+/******************************************************************************
+ * initialization / finalization
+ *****************************************************************************/
+static int __init ftmac100_init(void)
+{
+       pr_info("Loading version " DRV_VERSION " ...\n");
+       return platform_driver_register(&ftmac100_driver);
+}
+
+static void __exit ftmac100_exit(void)
+{
+       platform_driver_unregister(&ftmac100_driver);
+}
+
+module_init(ftmac100_init);
+module_exit(ftmac100_exit);
+
+MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
+MODULE_DESCRIPTION("FTMAC100 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ftmac100.h b/drivers/net/ftmac100.h
new file mode 100644 (file)
index 0000000..46a0c47
--- /dev/null
@@ -0,0 +1,180 @@
+/*
+ * Faraday FTMAC100 10/100 Ethernet
+ *
+ * (C) Copyright 2009-2011 Faraday Technology
+ * Po-Yu Chuang <ratbert@faraday-tech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __FTMAC100_H
+#define __FTMAC100_H
+
+#define        FTMAC100_OFFSET_ISR             0x00
+#define        FTMAC100_OFFSET_IMR             0x04
+#define        FTMAC100_OFFSET_MAC_MADR        0x08
+#define        FTMAC100_OFFSET_MAC_LADR        0x0c
+#define        FTMAC100_OFFSET_MAHT0           0x10
+#define        FTMAC100_OFFSET_MAHT1           0x14
+#define        FTMAC100_OFFSET_TXPD            0x18
+#define        FTMAC100_OFFSET_RXPD            0x1c
+#define        FTMAC100_OFFSET_TXR_BADR        0x20
+#define        FTMAC100_OFFSET_RXR_BADR        0x24
+#define        FTMAC100_OFFSET_ITC             0x28
+#define        FTMAC100_OFFSET_APTC            0x2c
+#define        FTMAC100_OFFSET_DBLAC           0x30
+#define        FTMAC100_OFFSET_MACCR           0x88
+#define        FTMAC100_OFFSET_MACSR           0x8c
+#define        FTMAC100_OFFSET_PHYCR           0x90
+#define        FTMAC100_OFFSET_PHYWDATA        0x94
+#define        FTMAC100_OFFSET_FCR             0x98
+#define        FTMAC100_OFFSET_BPR             0x9c
+#define        FTMAC100_OFFSET_TS              0xc4
+#define        FTMAC100_OFFSET_DMAFIFOS        0xc8
+#define        FTMAC100_OFFSET_TM              0xcc
+#define        FTMAC100_OFFSET_TX_MCOL_SCOL    0xd4
+#define        FTMAC100_OFFSET_RPF_AEP         0xd8
+#define        FTMAC100_OFFSET_XM_PG           0xdc
+#define        FTMAC100_OFFSET_RUNT_TLCC       0xe0
+#define        FTMAC100_OFFSET_CRCER_FTL       0xe4
+#define        FTMAC100_OFFSET_RLC_RCC         0xe8
+#define        FTMAC100_OFFSET_BROC            0xec
+#define        FTMAC100_OFFSET_MULCA           0xf0
+#define        FTMAC100_OFFSET_RP              0xf4
+#define        FTMAC100_OFFSET_XP              0xf8
+
+/*
+ * Interrupt status register & interrupt mask register
+ */
+#define        FTMAC100_INT_RPKT_FINISH        (1 << 0)
+#define        FTMAC100_INT_NORXBUF            (1 << 1)
+#define        FTMAC100_INT_XPKT_FINISH        (1 << 2)
+#define        FTMAC100_INT_NOTXBUF            (1 << 3)
+#define        FTMAC100_INT_XPKT_OK            (1 << 4)
+#define        FTMAC100_INT_XPKT_LOST          (1 << 5)
+#define        FTMAC100_INT_RPKT_SAV           (1 << 6)
+#define        FTMAC100_INT_RPKT_LOST          (1 << 7)
+#define        FTMAC100_INT_AHB_ERR            (1 << 8)
+#define        FTMAC100_INT_PHYSTS_CHG         (1 << 9)
+
+/*
+ * Interrupt timer control register
+ */
+#define FTMAC100_ITC_RXINT_CNT(x)      (((x) & 0xf) << 0)
+#define FTMAC100_ITC_RXINT_THR(x)      (((x) & 0x7) << 4)
+#define FTMAC100_ITC_RXINT_TIME_SEL    (1 << 7)
+#define FTMAC100_ITC_TXINT_CNT(x)      (((x) & 0xf) << 8)
+#define FTMAC100_ITC_TXINT_THR(x)      (((x) & 0x7) << 12)
+#define FTMAC100_ITC_TXINT_TIME_SEL    (1 << 15)
+
+/*
+ * Automatic polling timer control register
+ */
+#define        FTMAC100_APTC_RXPOLL_CNT(x)     (((x) & 0xf) << 0)
+#define        FTMAC100_APTC_RXPOLL_TIME_SEL   (1 << 4)
+#define        FTMAC100_APTC_TXPOLL_CNT(x)     (((x) & 0xf) << 8)
+#define        FTMAC100_APTC_TXPOLL_TIME_SEL   (1 << 12)
+
+/*
+ * DMA burst length and arbitration control register
+ */
+#define FTMAC100_DBLAC_INCR4_EN                (1 << 0)
+#define FTMAC100_DBLAC_INCR8_EN                (1 << 1)
+#define FTMAC100_DBLAC_INCR16_EN       (1 << 2)
+#define FTMAC100_DBLAC_RXFIFO_LTHR(x)  (((x) & 0x7) << 3)
+#define FTMAC100_DBLAC_RXFIFO_HTHR(x)  (((x) & 0x7) << 6)
+#define FTMAC100_DBLAC_RX_THR_EN       (1 << 9)
+
+/*
+ * MAC control register
+ */
+#define        FTMAC100_MACCR_XDMA_EN          (1 << 0)
+#define        FTMAC100_MACCR_RDMA_EN          (1 << 1)
+#define        FTMAC100_MACCR_SW_RST           (1 << 2)
+#define        FTMAC100_MACCR_LOOP_EN          (1 << 3)
+#define        FTMAC100_MACCR_CRC_DIS          (1 << 4)
+#define        FTMAC100_MACCR_XMT_EN           (1 << 5)
+#define        FTMAC100_MACCR_ENRX_IN_HALFTX   (1 << 6)
+#define        FTMAC100_MACCR_RCV_EN           (1 << 8)
+#define        FTMAC100_MACCR_HT_MULTI_EN      (1 << 9)
+#define        FTMAC100_MACCR_RX_RUNT          (1 << 10)
+#define        FTMAC100_MACCR_RX_FTL           (1 << 11)
+#define        FTMAC100_MACCR_RCV_ALL          (1 << 12)
+#define        FTMAC100_MACCR_CRC_APD          (1 << 14)
+#define        FTMAC100_MACCR_FULLDUP          (1 << 15)
+#define        FTMAC100_MACCR_RX_MULTIPKT      (1 << 16)
+#define        FTMAC100_MACCR_RX_BROADPKT      (1 << 17)
+
+/*
+ * PHY control register
+ */
+#define FTMAC100_PHYCR_MIIRDATA                0xffff
+#define FTMAC100_PHYCR_PHYAD(x)                (((x) & 0x1f) << 16)
+#define FTMAC100_PHYCR_REGAD(x)                (((x) & 0x1f) << 21)
+#define FTMAC100_PHYCR_MIIRD           (1 << 26)
+#define FTMAC100_PHYCR_MIIWR           (1 << 27)
+
+/*
+ * PHY write data register
+ */
+#define FTMAC100_PHYWDATA_MIIWDATA(x)  ((x) & 0xffff)
+
+/*
+ * Transmit descriptor, aligned to 16 bytes
+ */
+struct ftmac100_txdes {
+       unsigned int    txdes0;
+       unsigned int    txdes1;
+       unsigned int    txdes2; /* TXBUF_BADR */
+       unsigned int    txdes3; /* not used by HW */
+} __attribute__ ((aligned(16)));
+
+#define        FTMAC100_TXDES0_TXPKT_LATECOL   (1 << 0)
+#define        FTMAC100_TXDES0_TXPKT_EXSCOL    (1 << 1)
+#define        FTMAC100_TXDES0_TXDMA_OWN       (1 << 31)
+
+#define        FTMAC100_TXDES1_TXBUF_SIZE(x)   ((x) & 0x7ff)
+#define        FTMAC100_TXDES1_LTS             (1 << 27)
+#define        FTMAC100_TXDES1_FTS             (1 << 28)
+#define        FTMAC100_TXDES1_TX2FIC          (1 << 29)
+#define        FTMAC100_TXDES1_TXIC            (1 << 30)
+#define        FTMAC100_TXDES1_EDOTR           (1 << 31)
+
+/*
+ * Receive descriptor, aligned to 16 bytes
+ */
+struct ftmac100_rxdes {
+       unsigned int    rxdes0;
+       unsigned int    rxdes1;
+       unsigned int    rxdes2; /* RXBUF_BADR */
+       unsigned int    rxdes3; /* not used by HW */
+} __attribute__ ((aligned(16)));
+
+#define        FTMAC100_RXDES0_RFL             0x7ff
+#define        FTMAC100_RXDES0_MULTICAST       (1 << 16)
+#define        FTMAC100_RXDES0_BROADCAST       (1 << 17)
+#define        FTMAC100_RXDES0_RX_ERR          (1 << 18)
+#define        FTMAC100_RXDES0_CRC_ERR         (1 << 19)
+#define        FTMAC100_RXDES0_FTL             (1 << 20)
+#define        FTMAC100_RXDES0_RUNT            (1 << 21)
+#define        FTMAC100_RXDES0_RX_ODD_NB       (1 << 22)
+#define        FTMAC100_RXDES0_LRS             (1 << 28)
+#define        FTMAC100_RXDES0_FRS             (1 << 29)
+#define        FTMAC100_RXDES0_RXDMA_OWN       (1 << 31)
+
+#define        FTMAC100_RXDES1_RXBUF_SIZE(x)   ((x) & 0x7ff)
+#define        FTMAC100_RXDES1_EDORR           (1 << 31)
+
+#endif /* __FTMAC100_H */
index ac1d323c5eb5112194db47a7f1170be32c81cf5a..8931168d3e743a14fb412ed7d4e32b96dbcaffb7 100644 (file)
@@ -400,13 +400,14 @@ static void *bpq_seq_start(struct seq_file *seq, loff_t *pos)
 static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        struct list_head *p;
+       struct bpqdev *bpqdev = v;
 
        ++*pos;
 
        if (v == SEQ_START_TOKEN)
-               p = rcu_dereference(bpq_devices.next);
+               p = rcu_dereference(list_next_rcu(&bpq_devices));
        else
-               p = rcu_dereference(((struct bpqdev *)v)->bpq_list.next);
+               p = rcu_dereference(list_next_rcu(&bpqdev->bpq_list));
 
        return (p == &bpq_devices) ? NULL 
                : list_entry(p, struct bpqdev, bpq_list);
index 0a2368fa6bc660906b2120a84e69570e4c1962d7..65c1833244f728f6ff5cadf87cad6f9030a73181 100644 (file)
@@ -129,6 +129,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
                break;
        case E1000_DEV_ID_82580_COPPER:
        case E1000_DEV_ID_82580_FIBER:
+       case E1000_DEV_ID_82580_QUAD_FIBER:
        case E1000_DEV_ID_82580_SERDES:
        case E1000_DEV_ID_82580_SGMII:
        case E1000_DEV_ID_82580_COPPER_DUAL:
@@ -237,9 +238,15 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
                size = 14;
        nvm->word_size = 1 << size;
 
-       /* if 82576 then initialize mailbox parameters */
-       if (mac->type == e1000_82576)
+       /* if part supports SR-IOV then initialize mailbox parameters */
+       switch (mac->type) {
+       case e1000_82576:
+       case e1000_i350:
                igb_init_mbx_params_pf(hw);
+               break;
+       default:
+               break;
+       }
 
        /* setup PHY parameters */
        if (phy->media_type != e1000_media_type_copper) {
index 6319ed902bc0e07a8347442fd03de9dfad560356..92e11da2574952ebf532722639f3b00d3773e029 100644 (file)
 /* Management Control */
 #define E1000_MANC_SMBUS_EN      0x00000001 /* SMBus Enabled - RO */
 #define E1000_MANC_ASF_EN        0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_EN_BMC2OS     0x10000000 /* OSBMC is Enabled or not */
 /* Enable Neighbor Discovery Filtering */
 #define E1000_MANC_RCV_TCO_EN    0x00020000 /* Receive TCO Packets Enabled */
 #define E1000_MANC_BLK_PHY_RST_ON_IDE   0x00040000 /* Block phy resets */
 #define E1000_PCIEMISC_LX_DECISION      0x00000080 /* Lx power decision based
                                                       on DMA coal */
 
+/* Tx Rate-Scheduler Config fields */
+#define E1000_RTTBCNRC_RS_ENA          0x80000000
+#define E1000_RTTBCNRC_RF_DEC_MASK     0x00003FFF
+#define E1000_RTTBCNRC_RF_INT_SHIFT    14
+#define E1000_RTTBCNRC_RF_INT_MASK     \
+       (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
+
 #endif
index e2638afb8cdc50ace3958484f3183d3e669e1cb4..eec9ed73558830dbd7387c9e69825c3c4f96ea83 100644 (file)
@@ -54,6 +54,7 @@ struct e1000_hw;
 #define E1000_DEV_ID_82580_SERDES             0x1510
 #define E1000_DEV_ID_82580_SGMII              0x1511
 #define E1000_DEV_ID_82580_COPPER_DUAL        0x1516
+#define E1000_DEV_ID_82580_QUAD_FIBER         0x1527
 #define E1000_DEV_ID_DH89XXCC_SGMII           0x0438
 #define E1000_DEV_ID_DH89XXCC_SERDES          0x043A
 #define E1000_DEV_ID_DH89XXCC_BACKPLANE       0x043C
@@ -247,6 +248,10 @@ struct e1000_hw_stats {
        u64 scvpc;
        u64 hrmpc;
        u64 doosync;
+       u64 o2bgptc;
+       u64 o2bspc;
+       u64 b2ospc;
+       u64 b2ogprc;
 };
 
 struct e1000_phy_stats {
index c474cdb70047c6f9cde8851a6b793c13143bdfdf..78d48c7fa8591e98024b3a4c583173ab6d5bbb54 100644 (file)
@@ -422,26 +422,24 @@ s32 igb_init_mbx_params_pf(struct e1000_hw *hw)
 {
        struct e1000_mbx_info *mbx = &hw->mbx;
 
-       if (hw->mac.type == e1000_82576) {
-               mbx->timeout = 0;
-               mbx->usec_delay = 0;
-
-               mbx->size = E1000_VFMAILBOX_SIZE;
-
-               mbx->ops.read = igb_read_mbx_pf;
-               mbx->ops.write = igb_write_mbx_pf;
-               mbx->ops.read_posted = igb_read_posted_mbx;
-               mbx->ops.write_posted = igb_write_posted_mbx;
-               mbx->ops.check_for_msg = igb_check_for_msg_pf;
-               mbx->ops.check_for_ack = igb_check_for_ack_pf;
-               mbx->ops.check_for_rst = igb_check_for_rst_pf;
-
-               mbx->stats.msgs_tx = 0;
-               mbx->stats.msgs_rx = 0;
-               mbx->stats.reqs = 0;
-               mbx->stats.acks = 0;
-               mbx->stats.rsts = 0;
-       }
+       mbx->timeout = 0;
+       mbx->usec_delay = 0;
+
+       mbx->size = E1000_VFMAILBOX_SIZE;
+
+       mbx->ops.read = igb_read_mbx_pf;
+       mbx->ops.write = igb_write_mbx_pf;
+       mbx->ops.read_posted = igb_read_posted_mbx;
+       mbx->ops.write_posted = igb_write_posted_mbx;
+       mbx->ops.check_for_msg = igb_check_for_msg_pf;
+       mbx->ops.check_for_ack = igb_check_for_ack_pf;
+       mbx->ops.check_for_rst = igb_check_for_rst_pf;
+
+       mbx->stats.msgs_tx = 0;
+       mbx->stats.msgs_rx = 0;
+       mbx->stats.reqs = 0;
+       mbx->stats.acks = 0;
+       mbx->stats.rsts = 0;
 
        return 0;
 }
index 8ac83c5190d5ea21835183eb0a944b8a569decda..61713548c0277a1efd0fbad1da37e5131d45e239 100644 (file)
 
 #define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
 
+/* TX Rate Limit Registers */
+#define E1000_RTTDQSEL 0x3604  /* Tx Desc Plane Queue Select - WO */
+#define E1000_RTTBCNRC 0x36B0  /* Tx BCN Rate-Scheduler Config - WO */
+
 /* Split and Replication RX Control - RW */
 #define E1000_RXPBS    0x02404  /* Rx Packet Buffer Size - RW */
 /*
 
 /* DMA Coalescing registers */
 #define E1000_PCIEMISC          0x05BB8 /* PCIE misc config register */
+
+/* OS2BMC Registers */
+#define E1000_B2OSPC    0x08FE0 /* BMC2OS packets sent by BMC */
+#define E1000_B2OGPRC   0x04158 /* BMC2OS packets received by host */
+#define E1000_O2BGPTC   0x08FE4 /* OS2BMC packets received by BMC */
+#define E1000_O2BSPC    0x0415C /* OS2BMC packets transmitted by host */
+
 #endif
index 92a4ef09e55c37bb6a2a98505803b6c44b5a8772..bbc5ebfe254a040ebe9fc251b3c03cf7260b1175 100644 (file)
@@ -77,6 +77,7 @@ struct vf_data_storage {
        unsigned long last_nack;
        u16 pf_vlan; /* When set, guest VLAN config not allowed. */
        u16 pf_qos;
+       u16 tx_rate;
 };
 
 #define IGB_VF_FLAG_CTS            0x00000001 /* VF is clear to send data */
@@ -323,6 +324,7 @@ struct igb_adapter {
        u16 rx_ring_count;
        unsigned int vfs_allocated_count;
        struct vf_data_storage *vf_data;
+       int vf_rate_link_speed;
        u32 rss_queues;
        u32 wvbr;
 };
index a70e16bcfa7e3fb58ac34205c3540cb306957cd7..78d420b4b2db4b0fe76bffadeb8f839d31facc6c 100644 (file)
@@ -86,6 +86,10 @@ static const struct igb_stats igb_gstrings_stats[] = {
        IGB_STAT("tx_smbus", stats.mgptc),
        IGB_STAT("rx_smbus", stats.mgprc),
        IGB_STAT("dropped_smbus", stats.mgpdc),
+       IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
+       IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
+       IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
+       IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
 };
 
 #define IGB_NETDEV_STAT(_net_stat) { \
@@ -603,7 +607,10 @@ static void igb_get_regs(struct net_device *netdev,
        regs_buff[548] = rd32(E1000_TDFT);
        regs_buff[549] = rd32(E1000_TDFHS);
        regs_buff[550] = rd32(E1000_TDFPC);
-
+       regs_buff[551] = adapter->stats.o2bgptc;
+       regs_buff[552] = adapter->stats.b2ospc;
+       regs_buff[553] = adapter->stats.o2bspc;
+       regs_buff[554] = adapter->stats.b2ogprc;
 }
 
 static int igb_get_eeprom_len(struct net_device *netdev)
@@ -727,8 +734,9 @@ static void igb_get_drvinfo(struct net_device *netdev,
        char firmware_version[32];
        u16 eeprom_data;
 
-       strncpy(drvinfo->driver,  igb_driver_name, 32);
-       strncpy(drvinfo->version, igb_driver_version, 32);
+       strncpy(drvinfo->driver,  igb_driver_name, sizeof(drvinfo->driver) - 1);
+       strncpy(drvinfo->version, igb_driver_version,
+               sizeof(drvinfo->version) - 1);
 
        /* EEPROM image version # is reported as firmware version # for
         * 82575 controllers */
@@ -738,8 +746,10 @@ static void igb_get_drvinfo(struct net_device *netdev,
                (eeprom_data & 0x0FF0) >> 4,
                eeprom_data & 0x000F);
 
-       strncpy(drvinfo->fw_version, firmware_version, 32);
-       strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+       strncpy(drvinfo->fw_version, firmware_version,
+               sizeof(drvinfo->fw_version) - 1);
+       strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info) - 1);
        drvinfo->n_stats = IGB_STATS_LEN;
        drvinfo->testinfo_len = IGB_TEST_LEN;
        drvinfo->regdump_len = igb_get_regs_len(netdev);
@@ -1070,7 +1080,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
                {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
        for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
                wr32(reg, (_test[pat] & write));
-               val = rd32(reg);
+               val = rd32(reg) & mask;
                if (val != (_test[pat] & write & mask)) {
                        dev_err(&adapter->pdev->dev, "pattern test reg %04X "
                                "failed: got 0x%08X expected 0x%08X\n",
index 58c665b7513d1cea3ff9a80464e92260def6deff..3666b967846a611e1fa9c100ab7e53d40ce4bbd2 100644 (file)
 #endif
 #include "igb.h"
 
-#define DRV_VERSION "2.1.0-k2"
+#define DRV_VERSION "2.4.13-k2"
 char igb_driver_name[] = "igb";
 char igb_driver_version[] = DRV_VERSION;
 static const char igb_driver_string[] =
                                "Intel(R) Gigabit Ethernet Network Driver";
-static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";
+static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";
 
 static const struct e1000_info *igb_info_tbl[] = {
        [board_82575] = &e1000_82575_info,
@@ -68,6 +68,7 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
@@ -149,6 +150,7 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev,
 static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
 static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
                                 struct ifla_vf_info *ivi);
+static void igb_check_vf_rate_limit(struct igb_adapter *);
 
 #ifdef CONFIG_PM
 static int igb_suspend(struct pci_dev *, pm_message_t);
@@ -2286,9 +2288,19 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 
        spin_lock_init(&adapter->stats64_lock);
 #ifdef CONFIG_PCI_IOV
-       if (hw->mac.type == e1000_82576)
-               adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs;
-
+       switch (hw->mac.type) {
+       case e1000_82576:
+       case e1000_i350:
+               if (max_vfs > 7) {
+                       dev_warn(&pdev->dev,
+                                "Maximum of 7 VFs per PF, using max\n");
+                       adapter->vfs_allocated_count = 7;
+               } else
+                       adapter->vfs_allocated_count = max_vfs;
+               break;
+       default:
+               break;
+       }
 #endif /* CONFIG_PCI_IOV */
        adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
 
@@ -3505,6 +3517,7 @@ static void igb_watchdog_task(struct work_struct *work)
                        netif_carrier_on(netdev);
 
                        igb_ping_all_vfs(adapter);
+                       igb_check_vf_rate_limit(adapter);
 
                        /* link state has changed, schedule phy info update */
                        if (!test_bit(__IGB_DOWN, &adapter->state))
@@ -4547,6 +4560,15 @@ void igb_update_stats(struct igb_adapter *adapter,
        adapter->stats.mgptc += rd32(E1000_MGTPTC);
        adapter->stats.mgprc += rd32(E1000_MGTPRC);
        adapter->stats.mgpdc += rd32(E1000_MGTPDC);
+
+       /* OS2BMC Stats */
+       reg = rd32(E1000_MANC);
+       if (reg & E1000_MANC_EN_BMC2OS) {
+               adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
+               adapter->stats.o2bspc += rd32(E1000_O2BSPC);
+               adapter->stats.b2ospc += rd32(E1000_B2OSPC);
+               adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
+       }
 }
 
 static irqreturn_t igb_msix_other(int irq, void *data)
@@ -6593,9 +6615,91 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
        return igb_set_vf_mac(adapter, vf, mac);
 }
 
+static int igb_link_mbps(int internal_link_speed)
+{
+       switch (internal_link_speed) {
+       case SPEED_100:
+               return 100;
+       case SPEED_1000:
+               return 1000;
+       default:
+               return 0;
+       }
+}
+
+static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
+                                 int link_speed)
+{
+       int rf_dec, rf_int;
+       u32 bcnrc_val;
+
+       if (tx_rate != 0) {
+               /* Calculate the rate factor values to set */
+               rf_int = link_speed / tx_rate;
+               rf_dec = (link_speed - (rf_int * tx_rate));
+               rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
+
+               bcnrc_val = E1000_RTTBCNRC_RS_ENA;
+               bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
+                              E1000_RTTBCNRC_RF_INT_MASK);
+               bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
+       } else {
+               bcnrc_val = 0;
+       }
+
+       wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
+       wr32(E1000_RTTBCNRC, bcnrc_val);
+}
+
+static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
+{
+       int actual_link_speed, i;
+       bool reset_rate = false;
+
+       /* VF TX rate limit was not set or not supported */
+       if ((adapter->vf_rate_link_speed == 0) ||
+           (adapter->hw.mac.type != e1000_82576))
+               return;
+
+       actual_link_speed = igb_link_mbps(adapter->link_speed);
+       if (actual_link_speed != adapter->vf_rate_link_speed) {
+               reset_rate = true;
+               adapter->vf_rate_link_speed = 0;
+               dev_info(&adapter->pdev->dev,
+                        "Link speed has been changed. VF Transmit "
+                        "rate is disabled\n");
+       }
+
+       for (i = 0; i < adapter->vfs_allocated_count; i++) {
+               if (reset_rate)
+                       adapter->vf_data[i].tx_rate = 0;
+
+               igb_set_vf_rate_limit(&adapter->hw, i,
+                                     adapter->vf_data[i].tx_rate,
+                                     actual_link_speed);
+       }
+}
+
 static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
 {
-       return -EOPNOTSUPP;
+       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
+       int actual_link_speed;
+
+       if (hw->mac.type != e1000_82576)
+               return -EOPNOTSUPP;
+
+       actual_link_speed = igb_link_mbps(adapter->link_speed);
+       if ((vf >= adapter->vfs_allocated_count) ||
+           (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
+           (tx_rate < 0) || (tx_rate > actual_link_speed))
+               return -EINVAL;
+
+       adapter->vf_rate_link_speed = actual_link_speed;
+       adapter->vf_data[vf].tx_rate = (u16)tx_rate;
+       igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+
+       return 0;
 }
 
 static int igb_ndo_get_vf_config(struct net_device *netdev,
@@ -6606,7 +6710,7 @@ static int igb_ndo_get_vf_config(struct net_device *netdev,
                return -EINVAL;
        ivi->vf = vf;
        memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
-       ivi->tx_rate = 0;
+       ivi->tx_rate = adapter->vf_data[vf].tx_rate;
        ivi->vlan = adapter->vf_data[vf].pf_vlan;
        ivi->qos = adapter->vf_data[vf].pf_qos;
        return 0;
index ed6e3d910247fb423f65a98063d24b48bad9f1b4..1d943aa7c7a62d6c88cadf73852285eb8171c2c9 100644 (file)
@@ -201,13 +201,11 @@ static void igbvf_get_regs(struct net_device *netdev,
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 *regs_buff = p;
-       u8 revision_id;
 
        memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
 
-       pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id);
-
-       regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device;
+       regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
+                       adapter->pdev->device;
 
        regs_buff[0] = er32(CTRL);
        regs_buff[1] = er32(STATUS);
index 990c329e6c3b47ab83916a2afde5d3e41996c22b..d5dad5d607d65b5f6d7e9edd3a17be2c6c1ef5b2 100644 (file)
@@ -201,9 +201,6 @@ struct igbvf_adapter {
        unsigned int restart_queue;
        u32 txd_cmd;
 
-       bool detect_tx_hung;
-       u8 tx_timeout_factor;
-
        u32 tx_int_delay;
        u32 tx_abs_int_delay;
 
index 6352c8158e6d92f90889fe08ced2f5185d5d6550..6ccc32fd7338e06d7d2b3ef3d1d3c10ea7b21de4 100644 (file)
@@ -396,35 +396,6 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
        buffer_info->time_stamp = 0;
 }
 
-static void igbvf_print_tx_hang(struct igbvf_adapter *adapter)
-{
-       struct igbvf_ring *tx_ring = adapter->tx_ring;
-       unsigned int i = tx_ring->next_to_clean;
-       unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
-       union e1000_adv_tx_desc *eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
-
-       /* detected Tx unit hang */
-       dev_err(&adapter->pdev->dev,
-               "Detected Tx Unit Hang:\n"
-               "  TDH                  <%x>\n"
-               "  TDT                  <%x>\n"
-               "  next_to_use          <%x>\n"
-               "  next_to_clean        <%x>\n"
-               "buffer_info[next_to_clean]:\n"
-               "  time_stamp           <%lx>\n"
-               "  next_to_watch        <%x>\n"
-               "  jiffies              <%lx>\n"
-               "  next_to_watch.status <%x>\n",
-               readl(adapter->hw.hw_addr + tx_ring->head),
-               readl(adapter->hw.hw_addr + tx_ring->tail),
-               tx_ring->next_to_use,
-               tx_ring->next_to_clean,
-               tx_ring->buffer_info[eop].time_stamp,
-               eop,
-               jiffies,
-               eop_desc->wb.status);
-}
-
 /**
  * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
  * @adapter: board private structure
@@ -771,7 +742,6 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
 static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
 {
        struct igbvf_adapter *adapter = tx_ring->adapter;
-       struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        struct igbvf_buffer *buffer_info;
        struct sk_buff *skb;
@@ -832,22 +802,6 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
                }
        }
 
-       if (adapter->detect_tx_hung) {
-               /* Detect a transmit hang in hardware, this serializes the
-                * check with the clearing of time_stamp and movement of i */
-               adapter->detect_tx_hung = false;
-               if (tx_ring->buffer_info[i].time_stamp &&
-                   time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
-                              (adapter->tx_timeout_factor * HZ)) &&
-                   !(er32(STATUS) & E1000_STATUS_TXOFF)) {
-
-                       tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
-                       /* detected Tx unit hang */
-                       igbvf_print_tx_hang(adapter);
-
-                       netif_stop_queue(netdev);
-               }
-       }
        adapter->net_stats.tx_bytes += total_bytes;
        adapter->net_stats.tx_packets += total_packets;
        return count < tx_ring->count;
@@ -1863,17 +1817,6 @@ static void igbvf_watchdog_task(struct work_struct *work)
                                                  &adapter->link_duplex);
                        igbvf_print_link_info(adapter);
 
-                       /* adjust timeout factor according to speed/duplex */
-                       adapter->tx_timeout_factor = 1;
-                       switch (adapter->link_speed) {
-                       case SPEED_10:
-                               adapter->tx_timeout_factor = 16;
-                               break;
-                       case SPEED_100:
-                               /* maybe add some timeout factor ? */
-                               break;
-                       }
-
                        netif_carrier_on(netdev);
                        netif_wake_queue(netdev);
                }
@@ -1907,9 +1850,6 @@ static void igbvf_watchdog_task(struct work_struct *work)
        /* Cause software interrupt to ensure Rx ring is cleaned */
        ew32(EICS, adapter->rx_ring->eims_value);
 
-       /* Force detection of hung controller every watchdog period */
-       adapter->detect_tx_hung = 1;
-
        /* Reset the timer */
        if (!test_bit(__IGBVF_DOWN, &adapter->state))
                mod_timer(&adapter->watchdog_timer,
@@ -2699,8 +2639,7 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
        hw->device_id = pdev->device;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_device_id = pdev->subsystem_device;
-
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+       hw->revision_id = pdev->revision;
 
        err = -EIO;
        adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
index aa93655c3aa7c1da10f8aeda9eaf2a1535b06756..a5b0f0e194bb729a71d8b1326a2eeedb34fc0310 100644 (file)
@@ -2025,7 +2025,6 @@ static void ipg_init_mii(struct net_device *dev)
 
        if (phyaddr != 0x1f) {
                u16 mii_phyctrl, mii_1000cr;
-               u8 revisionid = 0;
 
                mii_1000cr  = mdio_read(dev, phyaddr, MII_CTRL1000);
                mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
@@ -2035,8 +2034,7 @@ static void ipg_init_mii(struct net_device *dev)
                mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);
 
                /* Set default phyparam */
-               pci_read_config_byte(sp->pdev, PCI_REVISION_ID, &revisionid);
-               ipg_set_phy_default_param(revisionid, dev, phyaddr);
+               ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr);
 
                /* Reset PHY */
                mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
index 3b8c924636171ac9078ff15b0d1c1f2c29b98dad..1e546fc127d06a83126bc2857d7da2abe51790e1 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -334,6 +334,10 @@ struct ixgbe_adapter {
        u16 bd_number;
        struct work_struct reset_task;
        struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+
+       /* DCB parameters */
+       struct ieee_pfc *ixgbe_ieee_pfc;
+       struct ieee_ets *ixgbe_ieee_ets;
        struct ixgbe_dcb_config dcb_cfg;
        struct ixgbe_dcb_config temp_dcb_cfg;
        u8 dcb_set_bitmap;
@@ -521,7 +525,6 @@ extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
 extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
 extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
 extern int ethtool_ioctl(struct ifreq *ifr);
-extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);
 extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
 extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
 extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
@@ -549,6 +552,8 @@ extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
                           struct sk_buff *skb);
 extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
                               struct scatterlist *sgl, unsigned int sgc);
+extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+                                struct scatterlist *sgl, unsigned int sgc);
 extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
 extern int ixgbe_fcoe_enable(struct net_device *netdev);
 extern int ixgbe_fcoe_disable(struct net_device *netdev);
index d0f1d9d2c416e2838d0816739eceb8907ffd8c59..ff23907bde0ca8d5cca5f93d2c130d4d57bd4f93 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -280,10 +280,22 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
 {
        enum ixgbe_media_type media_type;
 
+       /* Detect if there is a copper PHY attached. */
+       switch (hw->phy.type) {
+       case ixgbe_phy_cu_unknown:
+       case ixgbe_phy_tn:
+       case ixgbe_phy_aq:
+               media_type = ixgbe_media_type_copper;
+               goto out;
+       default:
+               break;
+       }
+
        /* Media type for I82598 is based on device ID */
        switch (hw->device_id) {
        case IXGBE_DEV_ID_82598:
        case IXGBE_DEV_ID_82598_BX:
+               /* Default device ID is mezzanine card KX/KX4 */
                media_type = ixgbe_media_type_backplane;
                break;
        case IXGBE_DEV_ID_82598AF_DUAL_PORT:
@@ -306,7 +318,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
                media_type = ixgbe_media_type_unknown;
                break;
        }
-
+out:
        return media_type;
 }
 
@@ -354,7 +366,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
 
        /* Negotiate the fc mode to use */
        ret_val = ixgbe_fc_autoneg(hw);
-       if (ret_val)
+       if (ret_val == IXGBE_ERR_FLOW_CONTROL)
                goto out;
 
        /* Disable any previous flow control settings */
@@ -372,10 +384,10 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
         * 2: Tx flow control is enabled (we can send pause frames but
         *     we do not support receiving pause frames).
         * 3: Both Rx and Tx flow control (symmetric) are enabled.
-        * other: Invalid.
 #ifdef CONFIG_DCB
         * 4: Priority Flow Control is enabled.
 #endif
+        * other: Invalid.
         */
        switch (hw->fc.current_mode) {
        case ixgbe_fc_none:
@@ -432,9 +444,10 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
                reg = (rx_pba_size - hw->fc.low_water) << 6;
                if (hw->fc.send_xon)
                        reg |= IXGBE_FCRTL_XONE;
+
                IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
 
-               reg = (rx_pba_size - hw->fc.high_water) << 10;
+               reg = (rx_pba_size - hw->fc.high_water) << 6;
                reg |= IXGBE_FCRTH_FCEN;
 
                IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
@@ -627,13 +640,12 @@ out:
        return 0;
 }
 
-
 /**
  *  ixgbe_setup_mac_link_82598 - Set MAC link speed
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
  *  @autoneg: true if auto-negotiation enabled
- *  @autoneg_wait_to_complete: true if waiting is needed to complete
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
  *
  *  Set the link speed in the AUTOC register and restarts link.
  **/
@@ -672,7 +684,8 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
                 * ixgbe_hw This will write the AUTOC register based on the new
                 * stored values
                 */
-               status = ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
+               status = ixgbe_start_mac_link_82598(hw,
+                                                   autoneg_wait_to_complete);
        }
 
        return status;
@@ -698,7 +711,6 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
        /* Setup the PHY according to input speed */
        status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
                                              autoneg_wait_to_complete);
-
        /* Set up MAC */
        ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
 
@@ -770,7 +782,6 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
                else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
                        goto no_phy_reset;
 
-
                hw->phy.ops.reset(hw);
        }
 
@@ -779,12 +790,9 @@ no_phy_reset:
         * Prevent the PCI-E bus from hanging by disabling PCI-E master
         * access and verify no pending requests before reset
         */
-       status = ixgbe_disable_pcie_master(hw);
-       if (status != 0) {
-               status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
-               hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
-       }
+       ixgbe_disable_pcie_master(hw);
 
+mac_reset_top:
        /*
         * Issue global reset to the MAC.  This needs to be a SW reset.
         * If link reset is used, it might reset the MAC when mng is using it
@@ -805,6 +813,19 @@ no_phy_reset:
                hw_dbg(hw, "Reset polling failed to complete.\n");
        }
 
+       /*
+        * Double resets are required for recovery from certain error
+        * conditions.  Between resets, it is necessary to stall to allow time
+        * for any pending HW events to complete.  We use 1usec since that is
+        * what is needed for ixgbe_disable_pcie_master().  The second reset
+        * then clears out any effects of those events.
+        */
+       if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+               hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+               udelay(1);
+               goto mac_reset_top;
+       }
+
        msleep(50);
 
        gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
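
The hunk above makes the 82598 reset retry itself once: when the double-reset flag is set, the flag is cleared, the hardware is given a 1 usec stall so pending events can drain, and the reset is issued again. Below is a minimal, compilable sketch of that retry shape; only the flag handling mirrors the patch, while struct fake_mac, do_reset() and stall_1us() are hypothetical stand-ins rather than driver code.

#include <stdio.h>

#define FLAG_DOUBLE_RESET_REQUIRED 0x1  /* stands in for IXGBE_FLAGS_DOUBLE_RESET_REQUIRED */

struct fake_mac {
        unsigned int flags;
};

/* Hypothetical stand-in for issuing the MAC software reset. */
static void do_reset(struct fake_mac *mac)
{
        printf("issuing MAC software reset (flags=0x%x)\n", mac->flags);
}

/* Stands in for udelay(1): give pending hardware events time to drain. */
static void stall_1us(void)
{
}

/* Reset once, then once more if the double-reset flag was armed. */
static void reset_with_optional_double(struct fake_mac *mac)
{
again:
        do_reset(mac);

        if (mac->flags & FLAG_DOUBLE_RESET_REQUIRED) {
                mac->flags &= ~FLAG_DOUBLE_RESET_REQUIRED;
                stall_1us();
                goto again;
        }
}

int main(void)
{
        struct fake_mac mac = { .flags = FLAG_DOUBLE_RESET_REQUIRED };

        reset_with_optional_double(&mac);       /* prints the reset message twice */
        return 0;
}
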
@@ -824,15 +845,15 @@ no_phy_reset:
                IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
        }
 
+       /* Store the permanent mac address */
+       hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
        /*
         * Store MAC address from RAR0, clear receive address registers, and
         * clear the multicast table
         */
        hw->mac.ops.init_rx_addrs(hw);
 
-       /* Store the permanent mac address */
-       hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
-
 reset_hw_out:
        if (phy_status)
                status = phy_status;
@@ -849,6 +870,13 @@ reset_hw_out:
 static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
 {
        u32 rar_high;
+       u32 rar_entries = hw->mac.num_rar_entries;
+
+       /* Make sure we are using a valid rar index range */
+       if (rar >= rar_entries) {
+               hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
 
        rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
        rar_high &= ~IXGBE_RAH_VIND_MASK;
@@ -868,14 +896,17 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
        u32 rar_high;
        u32 rar_entries = hw->mac.num_rar_entries;
 
-       if (rar < rar_entries) {
-               rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
-               if (rar_high & IXGBE_RAH_VIND_MASK) {
-                       rar_high &= ~IXGBE_RAH_VIND_MASK;
-                       IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
-               }
-       } else {
+
+       /* Make sure we are using a valid rar index range */
+       if (rar >= rar_entries) {
                hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
+
+       rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+       if (rar_high & IXGBE_RAH_VIND_MASK) {
+               rar_high &= ~IXGBE_RAH_VIND_MASK;
+               IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
        }
 
        return 0;
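
The 82598 set_vmdq/clear_vmdq changes above reject an out-of-range RAR index with IXGBE_ERR_INVALID_ARGUMENT before touching IXGBE_RAH, instead of burying the check in an if/else. A small sketch of that guard-clause pattern follows; struct fake_hw, FAKE_VIND_MASK and the error value are made-up placeholders for the real registers and constants.

#include <stdio.h>

#define ERR_INVALID_ARGUMENT    (-32)   /* stands in for IXGBE_ERR_INVALID_ARGUMENT */
#define FAKE_VIND_MASK          0x003C0000u

struct fake_hw {
        unsigned int num_rar_entries;
        unsigned int rah[128];          /* stands in for the IXGBE_RAH() registers */
};

/*
 * Clear the pool-selection bits of one receive-address register,
 * rejecting out-of-range indices up front, as the hunk above does.
 */
static int clear_rar_pool_bits(struct fake_hw *hw, unsigned int rar)
{
        if (rar >= hw->num_rar_entries) {
                fprintf(stderr, "RAR index %u is out of range.\n", rar);
                return ERR_INVALID_ARGUMENT;
        }

        if (hw->rah[rar] & FAKE_VIND_MASK)
                hw->rah[rar] &= ~FAKE_VIND_MASK;

        return 0;
}

int main(void)
{
        struct fake_hw hw = { .num_rar_entries = 16 };

        hw.rah[3] = FAKE_VIND_MASK;                     /* pretend pool bits are set */
        printf("%d\n", clear_rar_pool_bits(&hw, 3));    /* 0 */
        printf("%d\n", clear_rar_pool_bits(&hw, 99));   /* -32 */
        return 0;
}
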
@@ -994,13 +1025,12 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
 }
 
 /**
- *  ixgbe_read_i2c_eeprom_82598 - Read 8 bit EEPROM word of an SFP+ module
- *  over I2C interface through an intermediate phy.
+ *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
  *  @hw: pointer to hardware structure
  *  @byte_offset: EEPROM byte offset to read
  *  @eeprom_data: value read
  *
- *  Performs byte read operation to SFP module's EEPROM over I2C interface.
+ *  Performs byte read operation to SFP module's EEPROM over I2C interface.
  **/
 static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
                                       u8 *eeprom_data)
@@ -1074,10 +1104,12 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
 
        /* Copper PHY must be checked before AUTOC LMS to determine correct
         * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
-       if (hw->phy.type == ixgbe_phy_tn ||
-           hw->phy.type == ixgbe_phy_cu_unknown) {
-               hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
-                                    &ext_ability);
+       switch (hw->phy.type) {
+       case ixgbe_phy_tn:
+       case ixgbe_phy_aq:
+       case ixgbe_phy_cu_unknown:
+               hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
+                                    &ext_ability);
                if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
                        physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
                if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
@@ -1085,6 +1117,8 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
                if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
                        physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
                goto out;
+       default:
+               break;
        }
 
        switch (autoc & IXGBE_AUTOC_LMS_MASK) {
@@ -1179,13 +1213,14 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
        .set_vmdq               = &ixgbe_set_vmdq_82598,
        .clear_vmdq             = &ixgbe_clear_vmdq_82598,
        .init_rx_addrs          = &ixgbe_init_rx_addrs_generic,
-       .update_uc_addr_list    = &ixgbe_update_uc_addr_list_generic,
        .update_mc_addr_list    = &ixgbe_update_mc_addr_list_generic,
        .enable_mc              = &ixgbe_enable_mc_generic,
        .disable_mc             = &ixgbe_disable_mc_generic,
        .clear_vfta             = &ixgbe_clear_vfta_82598,
        .set_vfta               = &ixgbe_set_vfta_82598,
        .fc_enable              = &ixgbe_fc_enable_82598,
+       .acquire_swfw_sync      = &ixgbe_acquire_swfw_sync,
+       .release_swfw_sync      = &ixgbe_release_swfw_sync,
 };
 
 static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
index a21f5817685bbc6d24df74e3a948ea81252e8865..00aeba385a2ff25409ab23ff2c6108e5f9dd56c4 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -112,7 +112,8 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
                        goto setup_sfp_out;
 
                /* PHY config will finish before releasing the semaphore */
-               ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+               ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+                                                       IXGBE_GSSR_MAC_CSR_SM);
                if (ret_val != 0) {
                        ret_val = IXGBE_ERR_SWFW_SYNC;
                        goto setup_sfp_out;
@@ -329,11 +330,14 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
        enum ixgbe_media_type media_type;
 
        /* Detect if there is a copper PHY attached. */
-       if (hw->phy.type == ixgbe_phy_cu_unknown ||
-           hw->phy.type == ixgbe_phy_tn ||
-           hw->phy.type == ixgbe_phy_aq) {
+       switch (hw->phy.type) {
+       case ixgbe_phy_cu_unknown:
+       case ixgbe_phy_tn:
+       case ixgbe_phy_aq:
                media_type = ixgbe_media_type_copper;
                goto out;
+       default:
+               break;
        }
 
        switch (hw->device_id) {
@@ -354,6 +358,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
        case IXGBE_DEV_ID_82599_CX4:
                media_type = ixgbe_media_type_cx4;
                break;
+       case IXGBE_DEV_ID_82599_T3_LOM:
+               media_type = ixgbe_media_type_copper;
+               break;
        default:
                media_type = ixgbe_media_type_unknown;
                break;
@@ -411,14 +418,14 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
        return status;
 }
 
- /**
-*  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
-*  @hw: pointer to hardware structure
-*
-*  The base drivers may require better control over SFP+ module
-*  PHY states.  This includes selectively shutting down the Tx
-*  laser on the PHY, effectively halting physical link.
-**/
+/**
+ *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  The base drivers may require better control over SFP+ module
+ *  PHY states.  This includes selectively shutting down the Tx
+ *  laser on the PHY, effectively halting physical link.
+ **/
 static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
 {
        u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
@@ -463,8 +470,6 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
  **/
 static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
 {
-       hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n");
-
        if (hw->mac.autotry_restart) {
                ixgbe_disable_tx_laser_multispeed_fiber(hw);
                ixgbe_enable_tx_laser_multispeed_fiber(hw);
@@ -487,17 +492,21 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                                           bool autoneg_wait_to_complete)
 {
        s32 status = 0;
-       ixgbe_link_speed phy_link_speed;
+       ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
        ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
        u32 speedcnt = 0;
        u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+       u32 i = 0;
        bool link_up = false;
        bool negotiation;
-       int i;
 
        /* Mask off requested but non-supported speeds */
-       hw->mac.ops.get_link_capabilities(hw, &phy_link_speed, &negotiation);
-       speed &= phy_link_speed;
+       status = hw->mac.ops.get_link_capabilities(hw, &link_speed,
+                                                  &negotiation);
+       if (status != 0)
+               return status;
+
+       speed &= link_speed;
 
        /*
         * Try each speed one by one, highest priority first.  We do this in
@@ -508,9 +517,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
 
                /* If we already have link at this speed, just jump out */
-               hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);
+               status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
+                                               false);
+               if (status != 0)
+                       return status;
 
-               if ((phy_link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
+               if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
                        goto out;
 
                /* Set the module link speed */
@@ -522,9 +534,9 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                msleep(40);
 
                status = ixgbe_setup_mac_link_82599(hw,
-                                              IXGBE_LINK_SPEED_10GB_FULL,
-                                              autoneg,
-                                              autoneg_wait_to_complete);
+                                                   IXGBE_LINK_SPEED_10GB_FULL,
+                                                   autoneg,
+                                                   autoneg_wait_to_complete);
                if (status != 0)
                        return status;
 
@@ -536,14 +548,16 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                 * Section 73.10.2, we may have to wait up to 500ms if KR is
                 * attempted.  82599 uses the same timing for 10g SFI.
                 */
-
                for (i = 0; i < 5; i++) {
                        /* Wait for the link partner to also set speed */
                        msleep(100);
 
                        /* If we have link, just jump out */
-                       hw->mac.ops.check_link(hw, &phy_link_speed,
-                                              &link_up, false);
+                       status = hw->mac.ops.check_link(hw, &link_speed,
+                                                       &link_up, false);
+                       if (status != 0)
+                               return status;
+
                        if (link_up)
                                goto out;
                }
@@ -555,9 +569,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                        highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
 
                /* If we already have link at this speed, just jump out */
-               hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);
+               status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
+                                               false);
+               if (status != 0)
+                       return status;
 
-               if ((phy_link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
+               if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
                        goto out;
 
                /* Set the module link speed */
@@ -570,9 +587,9 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                msleep(40);
 
                status = ixgbe_setup_mac_link_82599(hw,
-                                                     IXGBE_LINK_SPEED_1GB_FULL,
-                                                     autoneg,
-                                                     autoneg_wait_to_complete);
+                                                   IXGBE_LINK_SPEED_1GB_FULL,
+                                                   autoneg,
+                                                   autoneg_wait_to_complete);
                if (status != 0)
                        return status;
 
@@ -583,7 +600,11 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                msleep(100);
 
                /* If we have link, just jump out */
-               hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);
+               status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
+                                               false);
+               if (status != 0)
+                       return status;
+
                if (link_up)
                        goto out;
        }
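
Throughout the multispeed-fiber setup above, the return value of hw->mac.ops.check_link() is now propagated instead of ignored, so a failed register access aborts the attempt rather than being read as 'no link yet'. The following compilable sketch shows that poll-with-error-propagation loop; check_link_fn, wait_for_link() and fake_check() are illustrative assumptions, not driver interfaces.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical callback shape: returns 0 on success and reports link state. */
typedef int (*check_link_fn)(void *ctx, bool *link_up);

/*
 * Poll for link up to 'tries' times.  Any error from the callback is
 * returned immediately; otherwise 0 is returned with *link_up set.
 */
static int wait_for_link(check_link_fn check, void *ctx, int tries, bool *link_up)
{
        int i, status;

        *link_up = false;
        for (i = 0; i < tries; i++) {
                /* a real driver would sleep ~100 ms here between polls */
                status = check(ctx, link_up);
                if (status != 0)
                        return status;  /* propagate, do not swallow */
                if (*link_up)
                        break;
        }
        return 0;
}

/* Toy callback: link comes up on the third poll. */
static int fake_check(void *ctx, bool *link_up)
{
        int *calls = ctx;

        *link_up = (++(*calls) >= 3);
        return 0;
}

int main(void)
{
        int calls = 0;
        bool up;
        int status = wait_for_link(fake_check, &calls, 5, &up);

        printf("status=%d link_up=%d after %d polls\n", status, (int)up, calls);
        return 0;
}
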
@@ -626,13 +647,10 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
                                     bool autoneg_wait_to_complete)
 {
        s32 status = 0;
-       ixgbe_link_speed link_speed;
+       ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
        s32 i, j;
        bool link_up = false;
        u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-       struct ixgbe_adapter *adapter = hw->back;
-
-       hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");
 
         /* Set autoneg_advertised value based on input link speed */
        hw->phy.autoneg_advertised = 0;
@@ -658,7 +676,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
        for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
                status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
                                                    autoneg_wait_to_complete);
-               if (status)
+               if (status != 0)
                        goto out;
 
                /*
@@ -671,8 +689,11 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
                        mdelay(100);
 
                        /* If we have link, just jump out */
-                       hw->mac.ops.check_link(hw, &link_speed,
-                                              &link_up, false);
+                       status = hw->mac.ops.check_link(hw, &link_speed,
+                                                       &link_up, false);
+                       if (status != 0)
+                               goto out;
+
                        if (link_up)
                                goto out;
                }
@@ -690,7 +711,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
        hw->phy.smart_speed_active = true;
        status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
                                            autoneg_wait_to_complete);
-       if (status)
+       if (status != 0)
                goto out;
 
        /*
@@ -703,8 +724,11 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
                mdelay(100);
 
                /* If we have link, just jump out */
-               hw->mac.ops.check_link(hw, &link_speed,
-                                      &link_up, false);
+               status = hw->mac.ops.check_link(hw, &link_speed,
+                                               &link_up, false);
+               if (status != 0)
+                       goto out;
+
                if (link_up)
                        goto out;
        }
@@ -716,7 +740,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
 
 out:
        if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
-               e_info(hw, "Smartspeed has downgraded the link speed from "
+               hw_dbg(hw, "Smartspeed has downgraded the link speed from "
                       "the maximum advertised\n");
        return status;
 }
@@ -748,6 +772,9 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
 
        /* Check to see if speed passed in is supported. */
        hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg);
+       if (status != 0)
+               goto out;
+
        speed &= link_capabilities;
 
        if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
@@ -761,7 +788,6 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
        else
                orig_autoc = autoc;
 
-
        if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
            link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
            link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
@@ -878,7 +904,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 
        /* PHY ops must be identified and initialized prior to reset */
 
-       /* Init PHY and function pointers, perform SFP setup */
+       /* Identify PHY and related function pointers */
        status = hw->phy.ops.init(hw);
 
        if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
@@ -890,6 +916,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
                hw->phy.sfp_setup_needed = false;
        }
 
+       if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+               goto reset_hw_out;
+
        /* Reset PHY */
        if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
                hw->phy.ops.reset(hw);
@@ -898,12 +927,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
         * Prevent the PCI-E bus from hanging by disabling PCI-E master
         * access and verify no pending requests before reset
         */
-       status = ixgbe_disable_pcie_master(hw);
-       if (status != 0) {
-               status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
-               hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
-       }
+       ixgbe_disable_pcie_master(hw);
 
+mac_reset_top:
        /*
         * Issue global reset to the MAC.  This needs to be a SW reset.
         * If link reset is used, it might reset the MAC when mng is using it
@@ -924,6 +950,19 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
                hw_dbg(hw, "Reset polling failed to complete.\n");
        }
 
+       /*
+        * Double resets are required for recovery from certain error
+        * conditions.  Between resets, it is necessary to stall to allow time
+        * for any pending HW events to complete.  We use 1usec since that is
+        * what is needed for ixgbe_disable_pcie_master().  The second reset
+        * then clears out any effects of those events.
+        */
+       if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+               hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+               udelay(1);
+               goto mac_reset_top;
+       }
+
        msleep(50);
 
        /*
@@ -951,6 +990,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
                }
        }
 
+       /* Store the permanent mac address */
+       hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
        /*
         * Store MAC address from RAR0, clear receive address registers, and
         * clear the multicast table.  Also reset num_rar_entries to 128,
@@ -959,9 +1001,6 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
        hw->mac.num_rar_entries = 128;
        hw->mac.ops.init_rx_addrs(hw);
 
-       /* Store the permanent mac address */
-       hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
-
        /* Store the permanent SAN mac address */
        hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
 
@@ -1733,13 +1772,34 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
  *  @hw: pointer to hardware structure
  *
  *  Determines the physical layer module found on the current adapter.
+ *  If PHY already detected, maintains current PHY type in hw struct,
+ *  otherwise executes the PHY detection routine.
  **/
-static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
+s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
 {
        s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+
+       /* Detect PHY if not unknown - returns success if already detected. */
        status = ixgbe_identify_phy_generic(hw);
-       if (status != 0)
-               status = ixgbe_identify_sfp_module_generic(hw);
+       if (status != 0) {
+               /* 82599 10GBASE-T requires an external PHY */
+               if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
+                       goto out;
+               else
+                       status = ixgbe_identify_sfp_module_generic(hw);
+       }
+
+       /* Set PHY type none if no PHY detected */
+       if (hw->phy.type == ixgbe_phy_unknown) {
+               hw->phy.type = ixgbe_phy_none;
+               status = 0;
+       }
+
+       /* Return error if SFP module has been detected but is not supported */
+       if (hw->phy.type == ixgbe_phy_sfp_unsupported)
+               status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+out:
        return status;
 }
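
The reworked identify routine above first runs the generic MDIO probe, falls back to SFP identification only when the media is not copper (a 10GBASE-T part needs an external PHY), and then normalises the result: nothing found is reported as PHY 'none' with success, while an unsupported SFP becomes an explicit error. A self-contained sketch of that decision flow, using hypothetical enums, error values and probe stubs in place of the driver's:

#include <stdio.h>

enum fake_media { MEDIA_COPPER, MEDIA_FIBER };
enum fake_phy   { PHY_UNKNOWN, PHY_NONE, PHY_SFP_SUPPORTED, PHY_SFP_UNSUPPORTED };

#define ERR_PHY_ADDR_INVALID    (-17)   /* placeholder error codes, not the driver's values */
#define ERR_SFP_NOT_SUPPORTED   (-19)

/* Hypothetical probes: return 0 and set *type on success. */
static int probe_mdio_phy(enum fake_phy *type)
{
        (void)type;
        return ERR_PHY_ADDR_INVALID;    /* pretend no MDIO PHY answered */
}

static int probe_sfp(enum fake_phy *type)
{
        *type = PHY_SFP_SUPPORTED;
        return 0;
}

static int identify_phy(enum fake_media media, enum fake_phy *type)
{
        int status = probe_mdio_phy(type);

        if (status != 0) {
                /* A 10GBASE-T (copper) part must have an external PHY. */
                if (media == MEDIA_COPPER)
                        return status;
                status = probe_sfp(type);
        }

        if (*type == PHY_UNKNOWN) {     /* nothing found: report "none", not an error */
                *type = PHY_NONE;
                status = 0;
        }
        if (*type == PHY_SFP_UNSUPPORTED)
                status = ERR_SFP_NOT_SUPPORTED;

        return status;
}

int main(void)
{
        enum fake_phy type = PHY_UNKNOWN;
        int status = identify_phy(MEDIA_FIBER, &type);

        printf("fiber:  status %d, phy type %d\n", status, type);

        type = PHY_UNKNOWN;
        status = identify_phy(MEDIA_COPPER, &type);
        printf("copper: status %d, phy type %d\n", status, type);
        return 0;
}
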
 
@@ -1763,11 +1823,12 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
 
        hw->phy.ops.identify(hw);
 
-       if (hw->phy.type == ixgbe_phy_tn ||
-           hw->phy.type == ixgbe_phy_aq ||
-           hw->phy.type == ixgbe_phy_cu_unknown) {
+       switch (hw->phy.type) {
+       case ixgbe_phy_tn:
+       case ixgbe_phy_aq:
+       case ixgbe_phy_cu_unknown:
                hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
-                                    &ext_ability);
+                                                        &ext_ability);
                if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
                        physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
                if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
@@ -1775,6 +1836,8 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
                if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
                        physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
                goto out;
+       default:
+               break;
        }
 
        switch (autoc & IXGBE_AUTOC_LMS_MASK) {
@@ -1886,6 +1949,7 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
                if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
                        break;
                else
+                       /* Use interrupt-safe sleep just in case */
                        udelay(10);
        }
 
@@ -1995,7 +2059,6 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
        .set_vmdq               = &ixgbe_set_vmdq_generic,
        .clear_vmdq             = &ixgbe_clear_vmdq_generic,
        .init_rx_addrs          = &ixgbe_init_rx_addrs_generic,
-       .update_uc_addr_list    = &ixgbe_update_uc_addr_list_generic,
        .update_mc_addr_list    = &ixgbe_update_mc_addr_list_generic,
        .enable_mc              = &ixgbe_enable_mc_generic,
        .disable_mc             = &ixgbe_disable_mc_generic,
@@ -2006,31 +2069,34 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
        .setup_sfp              = &ixgbe_setup_sfp_modules_82599,
        .set_mac_anti_spoofing  = &ixgbe_set_mac_anti_spoofing,
        .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
+       .acquire_swfw_sync      = &ixgbe_acquire_swfw_sync,
+       .release_swfw_sync      = &ixgbe_release_swfw_sync,
+
 };
 
 static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
-       .init_params            = &ixgbe_init_eeprom_params_generic,
-       .read                   = &ixgbe_read_eerd_generic,
-       .write                  = &ixgbe_write_eeprom_generic,
-       .calc_checksum          = &ixgbe_calc_eeprom_checksum_generic,
-       .validate_checksum      = &ixgbe_validate_eeprom_checksum_generic,
-       .update_checksum        = &ixgbe_update_eeprom_checksum_generic,
+       .init_params            = &ixgbe_init_eeprom_params_generic,
+       .read                   = &ixgbe_read_eerd_generic,
+       .write                  = &ixgbe_write_eeprom_generic,
+       .calc_checksum          = &ixgbe_calc_eeprom_checksum_generic,
+       .validate_checksum      = &ixgbe_validate_eeprom_checksum_generic,
+       .update_checksum        = &ixgbe_update_eeprom_checksum_generic,
 };
 
 static struct ixgbe_phy_operations phy_ops_82599 = {
-       .identify               = &ixgbe_identify_phy_82599,
-       .identify_sfp           = &ixgbe_identify_sfp_module_generic,
-       .init                               = &ixgbe_init_phy_ops_82599,
-       .reset                  = &ixgbe_reset_phy_generic,
-       .read_reg               = &ixgbe_read_phy_reg_generic,
-       .write_reg              = &ixgbe_write_phy_reg_generic,
-       .setup_link             = &ixgbe_setup_phy_link_generic,
-       .setup_link_speed       = &ixgbe_setup_phy_link_speed_generic,
-       .read_i2c_byte          = &ixgbe_read_i2c_byte_generic,
-       .write_i2c_byte         = &ixgbe_write_i2c_byte_generic,
-       .read_i2c_eeprom        = &ixgbe_read_i2c_eeprom_generic,
-       .write_i2c_eeprom       = &ixgbe_write_i2c_eeprom_generic,
-       .check_overtemp         = &ixgbe_tn_check_overtemp,
+       .identify               = &ixgbe_identify_phy_82599,
+       .identify_sfp           = &ixgbe_identify_sfp_module_generic,
+       .init                   = &ixgbe_init_phy_ops_82599,
+       .reset                  = &ixgbe_reset_phy_generic,
+       .read_reg               = &ixgbe_read_phy_reg_generic,
+       .write_reg              = &ixgbe_write_phy_reg_generic,
+       .setup_link             = &ixgbe_setup_phy_link_generic,
+       .setup_link_speed       = &ixgbe_setup_phy_link_speed_generic,
+       .read_i2c_byte          = &ixgbe_read_i2c_byte_generic,
+       .write_i2c_byte         = &ixgbe_write_i2c_byte_generic,
+       .read_i2c_eeprom        = &ixgbe_read_i2c_eeprom_generic,
+       .write_i2c_eeprom       = &ixgbe_write_i2c_eeprom_generic,
+       .check_overtemp         = &ixgbe_tn_check_overtemp,
 };
 
 struct ixgbe_info ixgbe_82599_info = {
index ebbda7d152549323edf0c965abda6a2d686d3e28..bcd952916eb2d6d9c26d5328fcc7fa5d649b2955 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -46,10 +46,13 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
 
-static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
-static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
-static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
+static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
+static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
+static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
+static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
+static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+                             u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
 static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
 
 /**
@@ -139,17 +142,29 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
        IXGBE_READ_REG(hw, IXGBE_MRFC);
        IXGBE_READ_REG(hw, IXGBE_RLEC);
        IXGBE_READ_REG(hw, IXGBE_LXONTXC);
-       IXGBE_READ_REG(hw, IXGBE_LXONRXC);
        IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
-       IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+       if (hw->mac.type >= ixgbe_mac_82599EB) {
+               IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+               IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+       } else {
+               IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+               IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+       }
 
        for (i = 0; i < 8; i++) {
                IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
-               IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
                IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
-               IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+               if (hw->mac.type >= ixgbe_mac_82599EB) {
+                       IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+                       IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+               } else {
+                       IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+                       IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+               }
        }
-
+       if (hw->mac.type >= ixgbe_mac_82599EB)
+               for (i = 0; i < 8; i++)
+                       IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
        IXGBE_READ_REG(hw, IXGBE_PRC64);
        IXGBE_READ_REG(hw, IXGBE_PRC127);
        IXGBE_READ_REG(hw, IXGBE_PRC255);
@@ -187,9 +202,26 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
        IXGBE_READ_REG(hw, IXGBE_BPTC);
        for (i = 0; i < 16; i++) {
                IXGBE_READ_REG(hw, IXGBE_QPRC(i));
-               IXGBE_READ_REG(hw, IXGBE_QBRC(i));
                IXGBE_READ_REG(hw, IXGBE_QPTC(i));
-               IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+               if (hw->mac.type >= ixgbe_mac_82599EB) {
+                       IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
+                       IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
+                       IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
+                       IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
+                       IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+               } else {
+                       IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+                       IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+               }
+       }
+
+       if (hw->mac.type == ixgbe_mac_X540) {
+               if (hw->phy.id == 0)
+                       hw->phy.ops.identify(hw);
+               hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i);
+               hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i);
+               hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i);
+               hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i);
        }
 
        return 0;
@@ -454,8 +486,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
         * Prevent the PCI-E bus from hanging by disabling PCI-E master
         * access and verify no pending requests
         */
-       if (ixgbe_disable_pcie_master(hw) != 0)
-               hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
+       ixgbe_disable_pcie_master(hw);
 
        return 0;
 }
@@ -603,7 +634,6 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
                ixgbe_shift_out_eeprom_bits(hw, data, 16);
                ixgbe_standby_eeprom(hw);
 
-               msleep(hw->eeprom.semaphore_delay);
                /* Done with writing - release the EEPROM */
                ixgbe_release_eeprom(hw);
        }
@@ -747,10 +777,10 @@ s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
 {
        s32 status = 0;
-       u32 eec = 0;
+       u32 eec;
        u32 i;
 
-       if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
                status = IXGBE_ERR_SWFW_SYNC;
 
        if (status == 0) {
@@ -773,18 +803,18 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
                        IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
                        hw_dbg(hw, "Could not acquire EEPROM grant\n");
 
-                       ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+                       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
                        status = IXGBE_ERR_EEPROM;
                }
-       }
 
-       /* Setup EEPROM for Read/Write */
-       if (status == 0) {
-               /* Clear CS and SK */
-               eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
-               IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
-               IXGBE_WRITE_FLUSH(hw);
-               udelay(1);
+               /* Setup EEPROM for Read/Write */
+               if (status == 0) {
+                       /* Clear CS and SK */
+                       eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
+                       IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+                       IXGBE_WRITE_FLUSH(hw);
+                       udelay(1);
+               }
        }
        return status;
 }
@@ -798,13 +828,10 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
 {
        s32 status = IXGBE_ERR_EEPROM;
-       u32 timeout;
+       u32 timeout = 2000;
        u32 i;
        u32 swsm;
 
-       /* Set timeout value based on size of EEPROM */
-       timeout = hw->eeprom.word_size + 1;
-
        /* Get SMBI software semaphore between device drivers first */
        for (i = 0; i < timeout; i++) {
                /*
@@ -816,7 +843,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
                        status = 0;
                        break;
                }
-               msleep(1);
+               udelay(50);
        }
 
        /* Now get the semaphore between SW/FW through the SWESMBI bit */
@@ -844,11 +871,14 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
                 * was not granted because we don't have access to the EEPROM
                 */
                if (i >= timeout) {
-                       hw_dbg(hw, "Driver can't access the Eeprom - Semaphore "
+                       hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
                               "not granted.\n");
                        ixgbe_release_eeprom_semaphore(hw);
                        status = IXGBE_ERR_EEPROM;
                }
+       } else {
+               hw_dbg(hw, "Software semaphore SMBI between device drivers "
+                      "not granted.\n");
        }
 
        return status;
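
The semaphore hunks above poll the inter-driver SMBI bit on a fixed 2000-iteration, 50 usec cadence and only then request the SW/FW SWESMBI bit, releasing everything if the second stage is refused. A stripped-down sketch of that two-stage acquire over a fake SWSM word follows; the bit values and the register model are illustrative assumptions only.

#include <stdio.h>

#define FAKE_SMBI       0x1u    /* inter-driver software semaphore bit   */
#define FAKE_SWESMBI    0x2u    /* software/firmware semaphore bit       */

static unsigned int fake_swsm;  /* stands in for the SWSM register       */

/* Try to take both semaphore bits; 0 on success, -1 otherwise. */
static int acquire_two_stage(int timeout)
{
        int i;

        /*
         * Stage 1: wait until no other driver holds SMBI, then take it.
         * (On real hardware reading SWSM grabs SMBI as a side effect;
         * the model here just flips the bit.)  The real code polls with
         * udelay(50) between iterations.
         */
        for (i = 0; i < timeout; i++) {
                if (!(fake_swsm & FAKE_SMBI)) {
                        fake_swsm |= FAKE_SMBI;
                        break;
                }
        }
        if (i == timeout) {
                fprintf(stderr, "SMBI software semaphore not granted\n");
                return -1;
        }

        /* Stage 2: request SWESMBI and verify firmware let us keep it. */
        for (i = 0; i < timeout; i++) {
                fake_swsm |= FAKE_SWESMBI;
                if (fake_swsm & FAKE_SWESMBI)   /* firmware may clear the bit */
                        return 0;
        }

        fprintf(stderr, "SWESMBI semaphore not granted\n");
        fake_swsm &= ~(FAKE_SMBI | FAKE_SWESMBI);       /* release what we took */
        return -1;
}

int main(void)
{
        printf("%d\n", acquire_two_stage(2000));        /* 0: both stages granted */
        return 0;
}
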
@@ -1080,11 +1110,14 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
        eec &= ~IXGBE_EEC_REQ;
        IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
 
-       ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+       /* Delay before attempt to obtain semaphore again to allow FW access */
+       msleep(hw->eeprom.semaphore_delay);
 }
 
 /**
- *  ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
+ *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
  *  @hw: pointer to hardware structure
  **/
 u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
@@ -1190,7 +1223,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
        if (status == 0) {
                checksum = hw->eeprom.ops.calc_checksum(hw);
                status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
-                                           checksum);
+                                             checksum);
        } else {
                hw_dbg(hw, "EEPROM read failed\n");
        }
@@ -1238,37 +1271,37 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
        u32 rar_low, rar_high;
        u32 rar_entries = hw->mac.num_rar_entries;
 
+       /* Make sure we are using a valid rar index range */
+       if (index >= rar_entries) {
+               hw_dbg(hw, "RAR index %d is out of range.\n", index);
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
+
        /* setup VMDq pool selection before this RAR gets enabled */
        hw->mac.ops.set_vmdq(hw, index, vmdq);
 
-       /* Make sure we are using a valid rar index range */
-       if (index < rar_entries) {
-               /*
-                * HW expects these in little endian so we reverse the byte
-                * order from network order (big endian) to little endian
-                */
-               rar_low = ((u32)addr[0] |
-                          ((u32)addr[1] << 8) |
-                          ((u32)addr[2] << 16) |
-                          ((u32)addr[3] << 24));
-               /*
-                * Some parts put the VMDq setting in the extra RAH bits,
-                * so save everything except the lower 16 bits that hold part
-                * of the address and the address valid bit.
-                */
-               rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
-               rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
-               rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
+       /*
+        * HW expects these in little endian so we reverse the byte
+        * order from network order (big endian) to little endian
+        */
+       rar_low = ((u32)addr[0] |
+                  ((u32)addr[1] << 8) |
+                  ((u32)addr[2] << 16) |
+                  ((u32)addr[3] << 24));
+       /*
+        * Some parts put the VMDq setting in the extra RAH bits,
+        * so save everything except the lower 16 bits that hold part
+        * of the address and the address valid bit.
+        */
+       rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+       rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+       rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
 
-               if (enable_addr != 0)
-                       rar_high |= IXGBE_RAH_AV;
+       if (enable_addr != 0)
+               rar_high |= IXGBE_RAH_AV;
 
-               IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
-               IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
-       } else {
-               hw_dbg(hw, "RAR index %d is out of range.\n", index);
-               return IXGBE_ERR_RAR_INDEX;
-       }
+       IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+       IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
 
        return 0;
 }
@@ -1286,58 +1319,26 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
        u32 rar_entries = hw->mac.num_rar_entries;
 
        /* Make sure we are using a valid rar index range */
-       if (index < rar_entries) {
-               /*
-                * Some parts put the VMDq setting in the extra RAH bits,
-                * so save everything except the lower 16 bits that hold part
-                * of the address and the address valid bit.
-                */
-               rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
-               rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
-
-               IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
-               IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
-       } else {
+       if (index >= rar_entries) {
                hw_dbg(hw, "RAR index %d is out of range.\n", index);
-               return IXGBE_ERR_RAR_INDEX;
+               return IXGBE_ERR_INVALID_ARGUMENT;
        }
 
-       /* clear VMDq pool/queue selection for this RAR */
-       hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
-
-       return 0;
-}
-
-/**
- *  ixgbe_enable_rar - Enable Rx address register
- *  @hw: pointer to hardware structure
- *  @index: index into the RAR table
- *
- *  Enables the select receive address register.
- **/
-static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index)
-{
-       u32 rar_high;
-
+       /*
+        * Some parts put the VMDq setting in the extra RAH bits,
+        * so save everything except the lower 16 bits that hold part
+        * of the address and the address valid bit.
+        */
        rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
-       rar_high |= IXGBE_RAH_AV;
+       rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+
+       IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
        IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
-}
 
-/**
- *  ixgbe_disable_rar - Disable Rx address register
- *  @hw: pointer to hardware structure
- *  @index: index into the RAR table
- *
- *  Disables the select receive address register.
- **/
-static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index)
-{
-       u32 rar_high;
+       /* clear VMDq pool/queue selection for this RAR */
+       hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
 
-       rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
-       rar_high &= (~IXGBE_RAH_AV);
-       IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+       return 0;
 }
 
 /**
@@ -1386,7 +1387,6 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
        }
 
        /* Clear the MTA */
-       hw->addr_ctrl.mc_addr_in_rar_count = 0;
        hw->addr_ctrl.mta_in_use = 0;
        IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
 
@@ -1400,105 +1400,6 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
        return 0;
 }
 
-/**
- *  ixgbe_add_uc_addr - Adds a secondary unicast address.
- *  @hw: pointer to hardware structure
- *  @addr: new address
- *
- *  Adds it to unused receive address register or goes into promiscuous mode.
- **/
-static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
-{
-       u32 rar_entries = hw->mac.num_rar_entries;
-       u32 rar;
-
-       hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
-                 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
-
-       /*
-        * Place this address in the RAR if there is room,
-        * else put the controller into promiscuous mode
-        */
-       if (hw->addr_ctrl.rar_used_count < rar_entries) {
-               rar = hw->addr_ctrl.rar_used_count -
-                     hw->addr_ctrl.mc_addr_in_rar_count;
-               hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
-               hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
-               hw->addr_ctrl.rar_used_count++;
-       } else {
-               hw->addr_ctrl.overflow_promisc++;
-       }
-
-       hw_dbg(hw, "ixgbe_add_uc_addr Complete\n");
-}
-
-/**
- *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
- *  @hw: pointer to hardware structure
- *  @netdev: pointer to net device structure
- *
- *  The given list replaces any existing list.  Clears the secondary addrs from
- *  receive address registers.  Uses unused receive address registers for the
- *  first secondary addresses, and falls back to promiscuous mode as needed.
- *
- *  Drivers using secondary unicast addresses must set user_set_promisc when
- *  manually putting the device into promiscuous mode.
- **/
-s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
-                                     struct net_device *netdev)
-{
-       u32 i;
-       u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
-       u32 uc_addr_in_use;
-       u32 fctrl;
-       struct netdev_hw_addr *ha;
-
-       /*
-        * Clear accounting of old secondary address list,
-        * don't count RAR[0]
-        */
-       uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
-       hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
-       hw->addr_ctrl.overflow_promisc = 0;
-
-       /* Zero out the other receive addresses */
-       hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use + 1);
-       for (i = 0; i < uc_addr_in_use; i++) {
-               IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
-               IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
-       }
-
-       /* Add the new addresses */
-       netdev_for_each_uc_addr(ha, netdev) {
-               hw_dbg(hw, " Adding the secondary addresses:\n");
-               ixgbe_add_uc_addr(hw, ha->addr, 0);
-       }
-
-       if (hw->addr_ctrl.overflow_promisc) {
-               /* enable promisc if not already in overflow or set by user */
-               if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
-                       hw_dbg(hw, " Entering address overflow promisc mode\n");
-                       fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-                       fctrl |= IXGBE_FCTRL_UPE;
-                       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-                       hw->addr_ctrl.uc_set_promisc = true;
-               }
-       } else {
-               /* only disable if set by overflow, not by user */
-               if ((old_promisc_setting && hw->addr_ctrl.uc_set_promisc) &&
-                  !(hw->addr_ctrl.user_set_promisc)) {
-                       hw_dbg(hw, " Leaving address overflow promisc mode\n");
-                       fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-                       fctrl &= ~IXGBE_FCTRL_UPE;
-                       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-                       hw->addr_ctrl.uc_set_promisc = false;
-               }
-       }
-
-       hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
-       return 0;
-}
-
 /**
  *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
  *  @hw: pointer to hardware structure
@@ -1550,7 +1451,6 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
        u32 vector;
        u32 vector_bit;
        u32 vector_reg;
-       u32 mta_reg;
 
        hw->addr_ctrl.mta_in_use++;
 
@@ -1568,9 +1468,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
         */
        vector_reg = (vector >> 5) & 0x7F;
        vector_bit = vector & 0x1F;
-       mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
-       mta_reg |= (1 << vector_bit);
-       IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+       hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
 }
 
 /**
@@ -1596,18 +1494,21 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
        hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
        hw->addr_ctrl.mta_in_use = 0;
 
-       /* Clear the MTA */
+       /* Clear mta_shadow */
        hw_dbg(hw, " Clearing MTA\n");
-       for (i = 0; i < hw->mac.mcft_size; i++)
-               IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
+       memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
 
-       /* Add the new addresses */
+       /* Update mta shadow */
        netdev_for_each_mc_addr(ha, netdev) {
                hw_dbg(hw, " Adding the multicast addresses:\n");
                ixgbe_set_mta(hw, ha->addr);
        }
 
        /* Enable mta */
+       for (i = 0; i < hw->mac.mcft_size; i++)
+               IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
+                                     hw->mac.mta_shadow[i]);
+
        if (hw->addr_ctrl.mta_in_use > 0)
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
                                IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
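
Rather than read-modify-writing an MTA register per multicast address, the updated list handler accumulates hash bits in mac.mta_shadow[] and writes the whole table in one pass at the end. A compilable sketch of that batching idea, with a 128-entry shadow, fake registers and a toy hash standing in for the driver's mc_filter_type-driven vector:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MCFT_SIZE 128                   /* number of 32-bit MTA registers */

static uint32_t mta_shadow[MCFT_SIZE];  /* software copy, filled first    */
static uint32_t mta_regs[MCFT_SIZE];    /* stands in for the hardware MTA */

/*
 * Toy 12-bit vector built from the last two address bytes; the driver
 * selects bits of addr[4]/addr[5] according to mc_filter_type instead.
 */
static uint32_t mta_vector(const uint8_t *addr)
{
        return (((uint32_t)addr[4] >> 4) | ((uint32_t)addr[5] << 4)) & 0xFFF;
}

static void set_mta(const uint8_t *addr)
{
        uint32_t vector = mta_vector(addr);

        mta_shadow[(vector >> 5) & 0x7F] |= 1u << (vector & 0x1F);
}

static void update_mc_list(const uint8_t addrs[][6], size_t n)
{
        size_t i;

        memset(mta_shadow, 0, sizeof(mta_shadow));      /* clear the shadow, not HW */
        for (i = 0; i < n; i++)
                set_mta(addrs[i]);

        /* single pass over the "hardware" table at the end */
        for (i = 0; i < MCFT_SIZE; i++)
                mta_regs[i] = mta_shadow[i];
}

int main(void)
{
        const uint8_t addrs[2][6] = {
                { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
                { 0x01, 0x00, 0x5e, 0x7f, 0xff, 0xfa },
        };

        update_mc_list(addrs, 2);
        printf("mta_regs[0] = 0x%08x\n", (unsigned int)mta_regs[0]);
        return 0;
}
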
@@ -1624,15 +1525,8 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
  **/
 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
 {
-       u32 i;
-       u32 rar_entries = hw->mac.num_rar_entries;
        struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
 
-       if (a->mc_addr_in_rar_count > 0)
-               for (i = (rar_entries - a->mc_addr_in_rar_count);
-                    i < rar_entries; i++)
-                       ixgbe_enable_rar(hw, i);
-
        if (a->mta_in_use > 0)
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
                                hw->mac.mc_filter_type);
@@ -1648,15 +1542,8 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
  **/
 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
 {
-       u32 i;
-       u32 rar_entries = hw->mac.num_rar_entries;
        struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
 
-       if (a->mc_addr_in_rar_count > 0)
-               for (i = (rar_entries - a->mc_addr_in_rar_count);
-                    i < rar_entries; i++)
-                       ixgbe_disable_rar(hw, i);
-
        if (a->mta_in_use > 0)
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
 
@@ -1685,7 +1572,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
 #endif /* CONFIG_DCB */
        /* Negotiate the fc mode to use */
        ret_val = ixgbe_fc_autoneg(hw);
-       if (ret_val)
+       if (ret_val == IXGBE_ERR_FLOW_CONTROL)
                goto out;
 
        /* Disable any previous flow control settings */
@@ -1703,7 +1590,9 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
         * 2: Tx flow control is enabled (we can send pause frames but
         *    we do not support receiving pause frames).
         * 3: Both Rx and Tx flow control (symmetric) are enabled.
+#ifdef CONFIG_DCB
         * 4: Priority Flow Control is enabled.
+#endif
         * other: Invalid.
         */
        switch (hw->fc.current_mode) {
@@ -1791,12 +1680,13 @@ out:
  **/
 s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
 {
-       s32 ret_val = 0;
+       s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
        ixgbe_link_speed speed;
-       u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
-       u32 links2, anlp1_reg, autoc_reg, links;
        bool link_up;
 
+       if (hw->fc.disable_fc_autoneg)
+               goto out;
+
        /*
         * AN should have completed when the cable was plugged in.
         * Look for reasons to bail out.  Bail out if:
@@ -1807,153 +1697,199 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
         * So use link_up_wait_to_complete=false.
         */
        hw->mac.ops.check_link(hw, &speed, &link_up, false);
-
-       if (hw->fc.disable_fc_autoneg || (!link_up)) {
-               hw->fc.fc_was_autonegged = false;
-               hw->fc.current_mode = hw->fc.requested_mode;
+       if (!link_up) {
+               ret_val = IXGBE_ERR_FLOW_CONTROL;
                goto out;
        }
 
-       /*
-        * On backplane, bail out if
-        * - backplane autoneg was not completed, or if
-        * - we are 82599 and link partner is not AN enabled
-        */
-       if (hw->phy.media_type == ixgbe_media_type_backplane) {
-               links = IXGBE_READ_REG(hw, IXGBE_LINKS);
-               if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
-                       hw->fc.fc_was_autonegged = false;
-                       hw->fc.current_mode = hw->fc.requested_mode;
-                       goto out;
-               }
+       switch (hw->phy.media_type) {
+       /* Autoneg flow control on fiber adapters */
+       case ixgbe_media_type_fiber:
+               if (speed == IXGBE_LINK_SPEED_1GB_FULL)
+                       ret_val = ixgbe_fc_autoneg_fiber(hw);
+               break;
 
-               if (hw->mac.type == ixgbe_mac_82599EB) {
-                       links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
-                       if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
-                               hw->fc.fc_was_autonegged = false;
-                               hw->fc.current_mode = hw->fc.requested_mode;
-                               goto out;
-                       }
-               }
+       /* Autoneg flow control on backplane adapters */
+       case ixgbe_media_type_backplane:
+               ret_val = ixgbe_fc_autoneg_backplane(hw);
+               break;
+
+       /* Autoneg flow control on copper adapters */
+       case ixgbe_media_type_copper:
+               if (ixgbe_device_supports_autoneg_fc(hw) == 0)
+                       ret_val = ixgbe_fc_autoneg_copper(hw);
+               break;
+
+       default:
+               break;
        }
 
+out:
+       if (ret_val == 0) {
+               hw->fc.fc_was_autonegged = true;
+       } else {
+               hw->fc.fc_was_autonegged = false;
+               hw->fc.current_mode = hw->fc.requested_mode;
+       }
+       return ret_val;
+}
+
+/**
+ *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
+ *  @hw: pointer to hardware structure
+ *
+ *  Enable flow control according to the autoneg results on 1 gig fiber.
+ **/
+static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
+{
+       u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
+       s32 ret_val;
+
        /*
         * On multispeed fiber at 1g, bail out if
         * - link is up but AN did not complete, or if
         * - link is up and AN completed but timed out
         */
-       if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) {
-               linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
-               if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
-                   ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
-                       hw->fc.fc_was_autonegged = false;
-                       hw->fc.current_mode = hw->fc.requested_mode;
-                       goto out;
-               }
+
+       linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+       if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+           ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+               ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+               goto out;
        }
 
+       pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+       pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+
+       ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
+                              pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
+                              IXGBE_PCS1GANA_ASM_PAUSE,
+                              IXGBE_PCS1GANA_SYM_PAUSE,
+                              IXGBE_PCS1GANA_ASM_PAUSE);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
+ *  @hw: pointer to hardware structure
+ *
+ *  Enable flow control according to IEEE clause 37.
+ **/
+static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
+{
+       u32 links2, anlp1_reg, autoc_reg, links;
+       s32 ret_val;
+
        /*
-        * Bail out on
-        * - copper or CX4 adapters
-        * - fiber adapters running at 10gig
+        * On backplane, bail out if
+        * - backplane autoneg was not completed, or if
+        * - we are 82599 and link partner is not AN enabled
         */
-       if ((hw->phy.media_type == ixgbe_media_type_copper) ||
-            (hw->phy.media_type == ixgbe_media_type_cx4) ||
-            ((hw->phy.media_type == ixgbe_media_type_fiber) &&
-            (speed == IXGBE_LINK_SPEED_10GB_FULL))) {
+       links = IXGBE_READ_REG(hw, IXGBE_LINKS);
+       if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
                hw->fc.fc_was_autonegged = false;
                hw->fc.current_mode = hw->fc.requested_mode;
+               ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
                goto out;
        }
 
+       if (hw->mac.type == ixgbe_mac_82599EB) {
+               links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+               if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
+                       hw->fc.fc_was_autonegged = false;
+                       hw->fc.current_mode = hw->fc.requested_mode;
+                       ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+                       goto out;
+               }
+       }
        /*
-        * Read the AN advertisement and LP ability registers and resolve
+        * Read the 10g AN autoc and LP ability registers and resolve
         * local flow control settings accordingly
         */
-       if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
-           (hw->phy.media_type != ixgbe_media_type_backplane)) {
-               pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
-               pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
-               if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                   (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
-                       /*
-                        * Now we need to check if the user selected Rx ONLY
-                        * of pause frames.  In this case, we had to advertise
-                        * FULL flow control because we could not advertise RX
-                        * ONLY. Hence, we must now check to see if we need to
-                        * turn OFF the TRANSMISSION of PAUSE frames.
-                        */
-                       if (hw->fc.requested_mode == ixgbe_fc_full) {
-                               hw->fc.current_mode = ixgbe_fc_full;
-                               hw_dbg(hw, "Flow Control = FULL.\n");
-                       } else {
-                               hw->fc.current_mode = ixgbe_fc_rx_pause;
-                               hw_dbg(hw, "Flow Control=RX PAUSE only\n");
-                       }
-               } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                          (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
-                          (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                          (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
-                       hw->fc.current_mode = ixgbe_fc_tx_pause;
-                       hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
-               } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                          (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
-                          !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                          (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
-                       hw->fc.current_mode = ixgbe_fc_rx_pause;
-                       hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
-               } else {
-                       hw->fc.current_mode = ixgbe_fc_none;
-                       hw_dbg(hw, "Flow Control = NONE.\n");
-               }
-       }
+       autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
 
-       if (hw->phy.media_type == ixgbe_media_type_backplane) {
+       ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
+               anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
+               IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
+ *  @hw: pointer to hardware structure
+ *
+ *  Enable flow control according to IEEE clause 37.
+ **/
+static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
+{
+       u16 technology_ability_reg = 0;
+       u16 lp_technology_ability_reg = 0;
+
+       hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
+                            MDIO_MMD_AN,
+                            &technology_ability_reg);
+       hw->phy.ops.read_reg(hw, MDIO_AN_LPA,
+                            MDIO_MMD_AN,
+                            &lp_technology_ability_reg);
+
+       return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
+                                 (u32)lp_technology_ability_reg,
+                                 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
+                                 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
+}
+
+/**
+ *  ixgbe_negotiate_fc - Negotiate flow control
+ *  @hw: pointer to hardware structure
+ *  @adv_reg: flow control advertised settings
+ *  @lp_reg: link partner's flow control settings
+ *  @adv_sym: symmetric pause bit in advertisement
+ *  @adv_asm: asymmetric pause bit in advertisement
+ *  @lp_sym: symmetric pause bit in link partner advertisement
+ *  @lp_asm: asymmetric pause bit in link partner advertisement
+ *
+ *  Find the intersection between advertised settings and link partner's
+ *  advertised settings
+ **/
+static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+                             u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
+{
+       if (!adv_reg || !lp_reg)
+               return IXGBE_ERR_FC_NOT_NEGOTIATED;
+
+       if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
                /*
-                * Read the 10g AN autoc and LP ability registers and resolve
-                * local flow control settings accordingly
+                * Now we need to check if the user selected Rx ONLY
+                * pause frames.  In that case, we had to advertise
+                * FULL flow control because we could not advertise Rx
+                * ONLY. Hence, we must now check whether we need to
+                * turn OFF the TRANSMISSION of PAUSE frames.
                 */
-               autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-               anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
-
-               if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
-                   (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
-                       /*
-                        * Now we need to check if the user selected Rx ONLY
-                        * of pause frames.  In this case, we had to advertise
-                        * FULL flow control because we could not advertise RX
-                        * ONLY. Hence, we must now check to see if we need to
-                        * turn OFF the TRANSMISSION of PAUSE frames.
-                        */
-                       if (hw->fc.requested_mode == ixgbe_fc_full) {
-                               hw->fc.current_mode = ixgbe_fc_full;
-                               hw_dbg(hw, "Flow Control = FULL.\n");
-                       } else {
-                               hw->fc.current_mode = ixgbe_fc_rx_pause;
-                               hw_dbg(hw, "Flow Control=RX PAUSE only\n");
-                       }
-               } else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
-                          (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
-                          (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
-                          (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
-                       hw->fc.current_mode = ixgbe_fc_tx_pause;
-                       hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
-               } else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
-                          (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
-                          !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
-                          (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
-                       hw->fc.current_mode = ixgbe_fc_rx_pause;
-                       hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
+               if (hw->fc.requested_mode == ixgbe_fc_full) {
+                       hw->fc.current_mode = ixgbe_fc_full;
+                       hw_dbg(hw, "Flow Control = FULL.\n");
                } else {
-                       hw->fc.current_mode = ixgbe_fc_none;
-                       hw_dbg(hw, "Flow Control = NONE.\n");
+                       hw->fc.current_mode = ixgbe_fc_rx_pause;
+                       hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
                }
+       } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+                  (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+               hw->fc.current_mode = ixgbe_fc_tx_pause;
+               hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
+       } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+                  !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+               hw->fc.current_mode = ixgbe_fc_rx_pause;
+               hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
+       } else {
+               hw->fc.current_mode = ixgbe_fc_none;
+               hw_dbg(hw, "Flow Control = NONE.\n");
        }
-       /* Record that current_mode is the result of a successful autoneg */
-       hw->fc.fc_was_autonegged = true;
-
-out:
-       return ret_val;
+       return 0;
 }
 
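The resolution implemented by ixgbe_negotiate_fc() is the standard IEEE 802.3 pause-bit intersection. The standalone sketch below mirrors that if/else chain outside the driver so the truth table can be compiled and exercised on its own; the enum, the resolve_fc() helper and main() are illustrative additions, not part of the ixgbe code above.

    /* Standalone sketch of the pause resolution in ixgbe_negotiate_fc().
     * Build with: cc -Wall -o fc_resolve fc_resolve.c
     */
    #include <stdbool.h>
    #include <stdio.h>

    enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    /* Mirror of the driver's if/else chain: adv_* are our advertised
     * symmetric/asymmetric pause bits, lp_* are the link partner's.
     */
    static enum fc_mode resolve_fc(bool adv_sym, bool adv_asm,
                                   bool lp_sym, bool lp_asm,
                                   bool requested_full)
    {
            if (adv_sym && lp_sym)
                    /* Both sides advertise symmetric pause; drop to Rx-only
                     * if that is all the user actually asked for. */
                    return requested_full ? FC_FULL : FC_RX_PAUSE;
            if (!adv_sym && adv_asm && lp_sym && lp_asm)
                    return FC_TX_PAUSE;
            if (adv_sym && adv_asm && !lp_sym && lp_asm)
                    return FC_RX_PAUSE;
            return FC_NONE;
    }

    int main(void)
    {
            /* We advertise sym+asym, partner advertises asym only:
             * resolves to Rx pause (enum value 1). */
            printf("resolved mode = %d\n",
                   resolve_fc(true, true, false, true, true));
            return 0;
    }
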
 /**
@@ -1965,7 +1901,8 @@ out:
 static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
 {
        s32 ret_val = 0;
-       u32 reg;
+       u32 reg = 0, reg_bp = 0;
+       u16 reg_cu = 0;
 
 #ifdef CONFIG_DCB
        if (hw->fc.requested_mode == ixgbe_fc_pfc) {
@@ -1973,7 +1910,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
                goto out;
        }
 
-#endif
+#endif /* CONFIG_DCB */
        /* Validate the packetbuf configuration */
        if (packetbuf_num < 0 || packetbuf_num > 7) {
                hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
@@ -2011,11 +1948,26 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
                hw->fc.requested_mode = ixgbe_fc_full;
 
        /*
-        * Set up the 1G flow control advertisement registers so the HW will be
-        * able to do fc autoneg once the cable is plugged in.  If we end up
-        * using 10g instead, this is harmless.
+        * Set up the 1G and 10G flow control advertisement registers so the
+        * HW will be able to do fc autoneg once the cable is plugged in.  If
+        * we link at 10G, the 1G advertisement is harmless and vice versa.
         */
-       reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+
+       switch (hw->phy.media_type) {
+       case ixgbe_media_type_fiber:
+       case ixgbe_media_type_backplane:
+               reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+               reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+               break;
+
+       case ixgbe_media_type_copper:
+               hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
+                                       MDIO_MMD_AN, &reg_cu);
+               break;
+
+       default:
+               break;
+       }
 
        /*
         * The possible values of fc.requested_mode are:
@@ -2034,6 +1986,11 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
        case ixgbe_fc_none:
                /* Flow control completely disabled by software override. */
                reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+               if (hw->phy.media_type == ixgbe_media_type_backplane)
+                       reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
+                                   IXGBE_AUTOC_ASM_PAUSE);
+               else if (hw->phy.media_type == ixgbe_media_type_copper)
+                       reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
                break;
        case ixgbe_fc_rx_pause:
                /*
@@ -2045,6 +2002,11 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
                 * disable the adapter's ability to send PAUSE frames.
                 */
                reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+               if (hw->phy.media_type == ixgbe_media_type_backplane)
+                       reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
+                                  IXGBE_AUTOC_ASM_PAUSE);
+               else if (hw->phy.media_type == ixgbe_media_type_copper)
+                       reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
                break;
        case ixgbe_fc_tx_pause:
                /*
@@ -2053,10 +2015,22 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
                 */
                reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
                reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
+               if (hw->phy.media_type == ixgbe_media_type_backplane) {
+                       reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
+                       reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
+               } else if (hw->phy.media_type == ixgbe_media_type_copper) {
+                       reg_cu |= (IXGBE_TAF_ASM_PAUSE);
+                       reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
+               }
                break;
        case ixgbe_fc_full:
                /* Flow control (both Rx and Tx) is enabled by SW override. */
                reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+               if (hw->phy.media_type == ixgbe_media_type_backplane)
+                       reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
+                                  IXGBE_AUTOC_ASM_PAUSE);
+               else if (hw->phy.media_type == ixgbe_media_type_copper)
+                       reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
                break;
 #ifdef CONFIG_DCB
        case ixgbe_fc_pfc:
@@ -2070,80 +2044,37 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
                break;
        }
 
-       IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
-       reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
-
-       /* Disable AN timeout */
-       if (hw->fc.strict_ieee)
-               reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
+       if (hw->mac.type != ixgbe_mac_X540) {
+               /*
+                * Enable auto-negotiation between the MAC & PHY;
+                * the MAC will advertise clause 37 flow control.
+                */
+               IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
+               reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
 
-       IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
-       hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+               /* Disable AN timeout */
+               if (hw->fc.strict_ieee)
+                       reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
 
-       /*
-        * Set up the 10G flow control advertisement registers so the HW
-        * can do fc autoneg once the cable is plugged in.  If we end up
-        * using 1g instead, this is harmless.
-        */
-       reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+               IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
+               hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+       }
 
        /*
-        * The possible values of fc.requested_mode are:
-        * 0: Flow control is completely disabled
-        * 1: Rx flow control is enabled (we can receive pause frames,
-        *    but not send pause frames).
-        * 2: Tx flow control is enabled (we can send pause frames but
-        *    we do not support receiving pause frames).
-        * 3: Both Rx and Tx flow control (symmetric) are enabled.
-        * other: Invalid.
+        * AUTOC restart handles negotiation of 1G and 10G on backplane
+        * and copper. There is no need to set the PCS1GCTL register.
+        *
         */
-       switch (hw->fc.requested_mode) {
-       case ixgbe_fc_none:
-               /* Flow control completely disabled by software override. */
-               reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
-               break;
-       case ixgbe_fc_rx_pause:
-               /*
-                * Rx Flow control is enabled and Tx Flow control is
-                * disabled by software override. Since there really
-                * isn't a way to advertise that we are capable of RX
-                * Pause ONLY, we will advertise that we support both
-                * symmetric and asymmetric Rx PAUSE.  Later, we will
-                * disable the adapter's ability to send PAUSE frames.
-                */
-               reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
-               break;
-       case ixgbe_fc_tx_pause:
-               /*
-                * Tx Flow control is enabled, and Rx Flow control is
-                * disabled by software override.
-                */
-               reg |= (IXGBE_AUTOC_ASM_PAUSE);
-               reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
-               break;
-       case ixgbe_fc_full:
-               /* Flow control (both Rx and Tx) is enabled by SW override. */
-               reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
-               break;
-#ifdef CONFIG_DCB
-       case ixgbe_fc_pfc:
-               goto out;
-               break;
-#endif /* CONFIG_DCB */
-       default:
-               hw_dbg(hw, "Flow control param set incorrectly\n");
-               ret_val = IXGBE_ERR_CONFIG;
-               goto out;
-               break;
+       if (hw->phy.media_type == ixgbe_media_type_backplane) {
+               reg_bp |= IXGBE_AUTOC_AN_RESTART;
+               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
+       } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
+                   (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
+               hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
+                                     MDIO_MMD_AN, reg_cu);
        }
-       /*
-        * AUTOC restart handles negotiation of 1G and 10G. There is
-        * no need to set the PCS1GCTL register.
-        */
-       reg |= IXGBE_AUTOC_AN_RESTART;
-       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
-       hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
 
+       hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
 out:
        return ret_val;
 }
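
Whatever the media type, ixgbe_setup_fc() encodes fc.requested_mode into the same two advertisement bits (symmetric and asymmetric pause); only the destination register differs. The sketch below restates that mapping as a compilable reference for the switch statements above, using generic placeholder flags rather than the real IXGBE_ register bit definitions.

    /* Illustrative mapping of the requested flow-control mode onto the
     * symmetric/asymmetric pause advertisement bits set by the switch
     * statements above.  ADV_SYM and ADV_ASM are placeholder flags, not
     * the real IXGBE_ register bit definitions.
     */
    #include <stdio.h>

    #define ADV_SYM 0x1     /* advertise symmetric pause */
    #define ADV_ASM 0x2     /* advertise asymmetric pause */

    enum fc_req { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    static unsigned int fc_adv_bits(enum fc_req mode)
    {
            switch (mode) {
            case FC_NONE:
                    return 0;                 /* advertise no pause ability */
            case FC_RX_PAUSE:
                    /* Rx-only cannot be advertised directly, so advertise
                     * both and disable Tx of PAUSE frames later. */
                    return ADV_SYM | ADV_ASM;
            case FC_TX_PAUSE:
                    return ADV_ASM;           /* asymmetric toward the partner */
            case FC_FULL:
                    return ADV_SYM | ADV_ASM;
            }
            return 0;
    }

    int main(void)
    {
            printf("tx_pause advertises 0x%x\n", fc_adv_bits(FC_TX_PAUSE));
            printf("rx_pause advertises 0x%x\n", fc_adv_bits(FC_RX_PAUSE));
            return 0;
    }
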
@@ -2159,10 +2090,16 @@ out:
  **/
 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
 {
+       struct ixgbe_adapter *adapter = hw->back;
        u32 i;
        u32 reg_val;
        u32 number_of_queues;
-       s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+       s32 status = 0;
+       u16 dev_status = 0;
+
+       /* Just jump out if bus mastering is already disabled */
+       if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+               goto out;
 
        /* Disable the receive unit by stopping each queue */
        number_of_queues = hw->mac.max_rx_queues;
@@ -2179,13 +2116,43 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
        IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
 
        for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
-               if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) {
-                       status = 0;
+               if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+                       goto check_device_status;
+               udelay(100);
+       }
+
+       hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
+       status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+
+       /*
+        * Before proceeding, make sure that the PCIe block does not have
+        * transactions pending.
+        */
+check_device_status:
+       for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+               pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
+                                                        &dev_status);
+               if (!(dev_status & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
                        break;
-               }
                udelay(100);
        }
 
+       if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT)
+               hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
+       else
+               goto out;
+
+       /*
+        * Two consecutive resets are required via CTRL.RST per datasheet
+        * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
+        * of this need.  The first reset prevents new master requests from
+        * being issued by our device.  We then must wait 1usec for any
+        * remaining completions from the PCIe bus to trickle in, and then reset
+        * again to clear out any effects they may have had on our device.
+        */
+       hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+
+out:
        return status;
 }
 
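ixgbe_disable_pcie_master() only records the need for two resets; the resets themselves are issued later by the MAC reset path. The sketch below is a hypothetical illustration of how such a consumer could honor the flag, using placeholder names (fake_mac, issue_ctrl_rst(), wait_usec()) rather than real ixgbe symbols.

    /* Hypothetical consumer of a "double reset required" flag, following
     * the datasheet sequence described in the comment above: reset, wait
     * roughly 1 usec for outstanding PCIe completions, then reset again.
     * All names here are placeholders, not ixgbe symbols.
     */
    #include <stdio.h>

    #define FLAG_DOUBLE_RESET_REQUIRED 0x1

    struct fake_mac {
            unsigned int flags;
    };

    static void issue_ctrl_rst(struct fake_mac *mac)
    {
            (void)mac;      /* a real driver would set CTRL.RST here */
    }

    static void wait_usec(unsigned int us)
    {
            (void)us;       /* a real driver would udelay(us) here */
    }

    static void reset_hw(struct fake_mac *mac)
    {
            issue_ctrl_rst(mac);
            if (mac->flags & FLAG_DOUBLE_RESET_REQUIRED) {
                    wait_usec(1);           /* let pending completions drain */
                    issue_ctrl_rst(mac);    /* clear any effects they had */
                    mac->flags &= ~FLAG_DOUBLE_RESET_REQUIRED;
            }
    }

    int main(void)
    {
            struct fake_mac mac = { .flags = FLAG_DOUBLE_RESET_REQUIRED };

            reset_hw(&mac);
            printf("flags after reset: 0x%x\n", mac.flags);
            return 0;
    }
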
@@ -2195,7 +2162,7 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
  *  @hw: pointer to hardware structure
  *  @mask: Mask to specify which semaphore to acquire
  *
- *  Acquires the SWFW semaphore thought the GSSR register for the specified
+ *  Acquires the SWFW semaphore through the GSSR register for the specified
  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
  **/
 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -2206,6 +2173,10 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
        s32 timeout = 200;
 
        while (timeout) {
+               /*
+                * SW EEPROM semaphore bit is used for access to all
+                * SW_FW_SYNC/GSSR bits (not just EEPROM)
+                */
                if (ixgbe_get_eeprom_semaphore(hw))
                        return IXGBE_ERR_SWFW_SYNC;
 
@@ -2223,7 +2194,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
        }
 
        if (!timeout) {
-               hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n");
+               hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
                return IXGBE_ERR_SWFW_SYNC;
        }
 
@@ -2239,7 +2210,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
  *  @hw: pointer to hardware structure
  *  @mask: Mask to specify which semaphore to release
  *
- *  Releases the SWFW semaphore thought the GSSR register for the specified
+ *  Releases the SWFW semaphore through the GSSR register for the specified
  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
  **/
 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -2427,37 +2398,38 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
        u32 mpsar_lo, mpsar_hi;
        u32 rar_entries = hw->mac.num_rar_entries;
 
-       if (rar < rar_entries) {
-               mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
-               mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+       /* Make sure we are using a valid rar index range */
+       if (rar >= rar_entries) {
+               hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
 
-               if (!mpsar_lo && !mpsar_hi)
-                       goto done;
+       mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+       mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
 
-               if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
-                       if (mpsar_lo) {
-                               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
-                               mpsar_lo = 0;
-                       }
-                       if (mpsar_hi) {
-                               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
-                               mpsar_hi = 0;
-                       }
-               } else if (vmdq < 32) {
-                       mpsar_lo &= ~(1 << vmdq);
-                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
-               } else {
-                       mpsar_hi &= ~(1 << (vmdq - 32));
-                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
-               }
+       if (!mpsar_lo && !mpsar_hi)
+               goto done;
 
-               /* was that the last pool using this rar? */
-               if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
-                       hw->mac.ops.clear_rar(hw, rar);
+       if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
+               if (mpsar_lo) {
+                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+                       mpsar_lo = 0;
+               }
+               if (mpsar_hi) {
+                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+                       mpsar_hi = 0;
+               }
+       } else if (vmdq < 32) {
+               mpsar_lo &= ~(1 << vmdq);
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
        } else {
-               hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+               mpsar_hi &= ~(1 << (vmdq - 32));
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
        }
 
+       /* was that the last pool using this rar? */
+       if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+               hw->mac.ops.clear_rar(hw, rar);
 done:
        return 0;
 }
@@ -2473,18 +2445,20 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
        u32 mpsar;
        u32 rar_entries = hw->mac.num_rar_entries;
 
-       if (rar < rar_entries) {
-               if (vmdq < 32) {
-                       mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
-                       mpsar |= 1 << vmdq;
-                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
-               } else {
-                       mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
-                       mpsar |= 1 << (vmdq - 32);
-                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
-               }
-       } else {
+       /* Make sure we are using a valid rar index range */
+       if (rar >= rar_entries) {
                hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
+
+       if (vmdq < 32) {
+               mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+               mpsar |= 1 << vmdq;
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
+       } else {
+               mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+               mpsar |= 1 << (vmdq - 32);
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
        }
        return 0;
 }
@@ -2497,7 +2471,6 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
 {
        int i;
 
-
        for (i = 0; i < 128; i++)
                IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
 
@@ -2726,12 +2699,21 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
  *  Reads the links register to determine if link is up and the current speed
  **/
 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
-                               bool *link_up, bool link_up_wait_to_complete)
+                                bool *link_up, bool link_up_wait_to_complete)
 {
-       u32 links_reg;
+       u32 links_reg, links_orig;
        u32 i;
 
+       /* capture the initial LINKS value so a change can be reported */
+       links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
        links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+       if (links_orig != links_reg) {
+               hw_dbg(hw, "LINKS changed from %08X to %08X\n",
+                      links_orig, links_reg);
+       }
+
        if (link_up_wait_to_complete) {
                for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
                        if (links_reg & IXGBE_LINKS_UP) {
@@ -2754,10 +2736,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
            IXGBE_LINKS_SPEED_10G_82599)
                *speed = IXGBE_LINK_SPEED_10GB_FULL;
        else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
-                IXGBE_LINKS_SPEED_1G_82599)
+                IXGBE_LINKS_SPEED_1G_82599)
                *speed = IXGBE_LINK_SPEED_1GB_FULL;
-       else
+       else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+                IXGBE_LINKS_SPEED_100_82599)
                *speed = IXGBE_LINK_SPEED_100_FULL;
+       else
+               *speed = IXGBE_LINK_SPEED_UNKNOWN;
 
        /* if link is down, zero out the current_mode */
        if (*link_up == false) {
@@ -2813,6 +2798,28 @@ wwn_prefix_out:
        return 0;
 }
 
+/**
+ *  ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
+ *  control
+ *  @hw: pointer to hardware structure
+ *
+ *  There are several PHYs that do not support autoneg flow control. This
+ *  function checks the device id to see if the associated PHY supports
+ *  autoneg flow control.
+ **/
+static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+{
+
+       switch (hw->device_id) {
+       case IXGBE_DEV_ID_X540T:
+               return 0;
+       case IXGBE_DEV_ID_82599_T3_LOM:
+               return 0;
+       default:
+               return IXGBE_ERR_FC_NOT_SUPPORTED;
+       }
+}
+
 /**
  *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
  *  @hw: pointer to hardware structure
index 66ed045a8cf008e2d2ba48fefb9d8ba4f3d0f35b..508f635fc2ca731db91bee4e86918881e17816c3 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -29,6 +29,7 @@
 #define _IXGBE_COMMON_H_
 
 #include "ixgbe_type.h"
+#include "ixgbe.h"
 
 u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
@@ -62,8 +63,6 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
                                      struct net_device *netdev);
-s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
-                                     struct net_device *netdev);
 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
@@ -110,9 +109,8 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
 
 #define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
 
-extern struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw);
 #define hw_dbg(hw, format, arg...) \
-       netdev_dbg(ixgbe_get_hw_dev(hw), format, ##arg)
+       netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg)
 #define e_dev_info(format, arg...) \
        dev_info(&adapter->pdev->dev, format, ## arg)
 #define e_dev_warn(format, arg...) \
index d16c260c1f50adaf0c93aaf6cc2eae9b807c0f5b..c2ee6fcb4e91a62c347b10de89fcb0d0ea96c6b8 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
 #include "ixgbe_dcb_82598.h"
 #include "ixgbe_dcb_82599.h"
 
+/**
+ * ixgbe_ieee_credits - Calculate the IEEE traffic class
+ * credits from the configured bandwidth percentages. Credits
+ * are the smallest unit programmable into the underlying
+ * hardware. The IEEE 802.1Qaz specification does not use bandwidth
+ * groups, so this is much simplified from the CEE case.
+ */
+s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame)
+{
+       int min_percent = 100;
+       int min_credit, multiplier;
+       int i;
+
+       min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
+                       DCB_CREDIT_QUANTUM;
+
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+               if (bw[i] < min_percent && bw[i])
+                       min_percent = bw[i];
+       }
+
+       multiplier = (min_credit / min_percent) + 1;
+
+       /* Find out the hw credits for each TC */
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+               int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL);
+
+               if (val < min_credit)
+                       val = min_credit;
+               refill[i] = val;
+
+               max[i] = (bw[i] * MAX_CREDIT)/100;
+       }
+       return 0;
+}
+
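The credit calculation above is plain integer arithmetic, so it is easy to check by hand. The standalone sketch below reproduces it for a 50/30/20 bandwidth split; the constants are illustrative placeholders standing in for DCB_CREDIT_QUANTUM, MAX_CREDIT_REFILL and MAX_CREDIT, whose real values live in the driver headers.

    /* Standalone reproduction of the ixgbe_ieee_credits() arithmetic above.
     * The three constants are illustrative placeholders, not the driver's
     * actual DCB_CREDIT_QUANTUM, MAX_CREDIT_REFILL and MAX_CREDIT values.
     */
    #include <stdio.h>

    #define NUM_TC          8
    #define CREDIT_QUANTUM  64      /* assumed smallest programmable credit unit */
    #define REFILL_CEILING  511     /* assumed refill upper bound */
    #define CREDIT_CEILING  4095    /* assumed max-credit upper bound */

    static void ieee_credits(const unsigned char *bw, unsigned short *refill,
                             unsigned short *max, int max_frame)
    {
            int min_percent = 100;
            int min_credit, multiplier, i;

            /* Half a max-sized frame, rounded up to whole credit quanta */
            min_credit = ((max_frame / 2) + CREDIT_QUANTUM - 1) / CREDIT_QUANTUM;

            for (i = 0; i < NUM_TC; i++)
                    if (bw[i] && bw[i] < min_percent)
                            min_percent = bw[i];

            multiplier = (min_credit / min_percent) + 1;

            for (i = 0; i < NUM_TC; i++) {
                    int val = bw[i] * multiplier;

                    if (val > REFILL_CEILING)
                            val = REFILL_CEILING;
                    if (val < min_credit)
                            val = min_credit;
                    refill[i] = (unsigned short)val;
                    max[i] = (unsigned short)(bw[i] * CREDIT_CEILING / 100);
            }
    }

    int main(void)
    {
            unsigned char bw[NUM_TC] = { 50, 30, 20, 0, 0, 0, 0, 0 };
            unsigned short refill[NUM_TC], max[NUM_TC];
            int i;

            ieee_credits(bw, refill, max, 1518);    /* standard Ethernet frame */
            for (i = 0; i < NUM_TC; i++)
                    printf("TC%d: refill=%u max=%u\n", i, refill[i], max[i]);
            return 0;
    }
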
 /**
  * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
  * @ixgbe_dcb_config: Struct containing DCB settings.
@@ -141,6 +177,59 @@ out:
        return ret_val;
 }
 
+void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
+{
+       int i;
+
+       *pfc_en = 0;
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+               *pfc_en |= (cfg->tc_config[i].dcb_pfc & 0xF) << i;
+}
+
+void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction,
+                            u16 *refill)
+{
+       struct tc_bw_alloc *p;
+       int i;
+
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+               p = &cfg->tc_config[i].path[direction];
+               refill[i] = p->data_credits_refill;
+       }
+}
+
+void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max)
+{
+       int i;
+
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+               max[i] = cfg->tc_config[i].desc_credits_max;
+}
+
+void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction,
+                           u8 *bwgid)
+{
+       struct tc_bw_alloc *p;
+       int i;
+
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+               p = &cfg->tc_config[i].path[direction];
+               bwgid[i] = p->bwg_id;
+       }
+}
+
+void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
+                           u8 *ptype)
+{
+       struct tc_bw_alloc *p;
+       int i;
+
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+               p = &cfg->tc_config[i].path[direction];
+               ptype[i] = p->prio_type;
+       }
+}
+
 /**
  * ixgbe_dcb_hw_config - Config and enable DCB
  * @hw: pointer to hardware structure
@@ -152,13 +241,30 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
                         struct ixgbe_dcb_config *dcb_config)
 {
        s32 ret = 0;
+       u8 pfc_en;
+       u8 ptype[MAX_TRAFFIC_CLASS];
+       u8 bwgid[MAX_TRAFFIC_CLASS];
+       u16 refill[MAX_TRAFFIC_CLASS];
+       u16 max[MAX_TRAFFIC_CLASS];
+
+       /* Unpack CEE standard containers */
+       ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en);
+       ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill);
+       ixgbe_dcb_unpack_max(dcb_config, max);
+       ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid);
+       ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype);
+
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
-               ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
+               ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->rx_pba_cfg,
+                                               pfc_en, refill, max, bwgid,
+                                               ptype);
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
-               ret = ixgbe_dcb_hw_config_82599(hw, dcb_config);
+               ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->rx_pba_cfg,
+                                               pfc_en, refill, max, bwgid,
+                                               ptype);
                break;
        default:
                break;
@@ -166,3 +272,70 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
        return ret;
 }
 
+/* Helper routines to abstract HW specifics from DCB netlink ops */
+s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en)
+{
+       int ret = -EINVAL;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en);
+               break;
+       default:
+               break;
+       }
+       return ret;
+}
+
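ixgbe_dcb_hw_pfc_config() takes the PFC state as a plain bitmask with one bit per traffic class, which is also how the per-TC loops in the 82598/82599 PFC code elsewhere in this patch test it. A small illustrative example of building and inspecting such a mask (not driver code):

    /* Illustrative construction of the per-TC PFC enable bitmask consumed
     * by ixgbe_dcb_hw_pfc_config() and tested with (pfc_en & (1 << i)) in
     * the 82598/82599 PFC configuration loops elsewhere in this patch.
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned char pfc_en = 0;
            int i;

            pfc_en |= 1 << 0;       /* enable PFC on traffic class 0 */
            pfc_en |= 1 << 3;       /* enable PFC on traffic class 3 */

            for (i = 0; i < 8; i++)
                    printf("TC%d: PFC %s\n", i,
                           (pfc_en & (1 << i)) ? "enabled" : "disabled");
            return 0;
    }
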
+s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
+                           u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa)
+{
+       int i;
+       u8 prio_type[IEEE_8021QAZ_MAX_TCS];
+
+       /* Map TSA onto CEE prio type */
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               switch (tsa[i]) {
+               case IEEE_8021QAZ_TSA_STRICT:
+                       prio_type[i] = 2;
+                       break;
+               case IEEE_8021QAZ_TSA_ETS:
+                       prio_type[i] = 0;
+                       break;
+               default:
+                       /* Hardware only supports priority strict or
+                       /* Hardware only supports the strict priority and
+                        * ETS transmission selection algorithms; if we
+                        * receive some other value from dcbnl, throw
+                        * an error.
+                       return -EINVAL;
+               }
+       }
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max,
+                                                       prio_type);
+               ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
+                                                            bwg_id, prio_type);
+               ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
+                                                            bwg_id, prio_type);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
+                                                 bwg_id, prio_type);
+               ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
+                                                      bwg_id, prio_type);
+               ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
+                                                      bwg_id, prio_type);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
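The loop above accepts only the strict-priority and ETS transmission selection algorithms from dcbnl and maps them onto the hardware's prio_type encoding. A minimal standalone sketch of that validation, with placeholder constants standing in for the IEEE_8021QAZ_TSA_* values:

    /* Minimal sketch of the TSA validation above.  TSA_STRICT and TSA_ETS
     * are placeholder constants standing in for IEEE_8021QAZ_TSA_STRICT
     * and IEEE_8021QAZ_TSA_ETS; the prio_type codes are copied from the
     * loop above.
     */
    #include <stdio.h>

    #define TSA_STRICT      0       /* assumed stand-in for the IEEE constant */
    #define TSA_ETS         2       /* assumed stand-in for the IEEE constant */
    #define NUM_TCS         8

    static int map_tsa(const unsigned char *tsa, unsigned char *prio_type)
    {
            int i;

            for (i = 0; i < NUM_TCS; i++) {
                    switch (tsa[i]) {
                    case TSA_STRICT:
                            prio_type[i] = 2;       /* strict priority code */
                            break;
                    case TSA_ETS:
                            prio_type[i] = 0;       /* ETS code */
                            break;
                    default:
                            return -1;      /* unsupported selection algorithm */
                    }
            }
            return 0;
    }

    int main(void)
    {
            unsigned char tsa[NUM_TCS] = { TSA_ETS, TSA_ETS, TSA_STRICT, TSA_ETS,
                                           TSA_ETS, TSA_ETS, TSA_ETS, TSA_ETS };
            unsigned char prio_type[NUM_TCS];

            printf("map_tsa returned %d, TC2 prio_type=%u\n",
                   map_tsa(tsa, prio_type), prio_type[2]);
            return 0;
    }
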
index 1cfe38ee16440e0a7c3d6fce1fb858ef18fd68c4..515bc27477f666ee6058b07293dd24120f3059c5 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -139,7 +139,6 @@ struct ixgbe_dcb_config {
        struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
        u8     bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
        bool   pfc_mode_enable;
-       bool   round_robin_enable;
 
        enum dcb_rx_pba_cfg rx_pba_cfg;
 
@@ -148,12 +147,21 @@ struct ixgbe_dcb_config {
 };
 
 /* DCB driver APIs */
+void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en);
+void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *);
+void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *);
+void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);
 
 /* DCB credits calculation */
+s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame);
 s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
                                   struct ixgbe_dcb_config *, int, u8);
 
 /* DCB hw initialization */
+s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
+                           u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type);
+s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en);
 s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
 
 /* DCB definitions for credit calculation */
index 9a5e89c12e050281af83dbf2b143264fdf974fe3..c97cf9160dc0cc7b2e161fd511dbd1b5d7abe744 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
  *
  * Configure packet buffers for DCB mode.
  */
-static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
-                                                struct ixgbe_dcb_config *dcb_config)
+static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, u8 rx_pba)
 {
        s32 ret_val = 0;
        u32 value = IXGBE_RXPBSIZE_64KB;
        u8  i = 0;
 
        /* Setup Rx packet buffer sizes */
-       switch (dcb_config->rx_pba_cfg) {
+       switch (rx_pba) {
        case pba_80_48:
                /* Setup the first four at 80KB */
                value = IXGBE_RXPBSIZE_80KB;
@@ -78,10 +77,11 @@ static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
  *
  * Configure Rx Data Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
-                                      struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
+                                       u16 *refill,
+                                       u16 *max,
+                                       u8 *prio_type)
 {
-       struct tc_bw_alloc    *p;
        u32    reg           = 0;
        u32    credit_refill = 0;
        u32    credit_max    = 0;
@@ -102,13 +102,12 @@ static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
 
        /* Configure traffic class credits and priority */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
-               credit_refill = p->data_credits_refill;
-               credit_max    = p->data_credits_max;
+               credit_refill = refill[i];
+               credit_max    = max[i];
 
                reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
 
-               if (p->prio_type == prio_link)
+               if (prio_type[i] == prio_link)
                        reg |= IXGBE_RT2CR_LSP;
 
                IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
@@ -135,10 +134,12 @@ static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
  *
  * Configure Tx Descriptor Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
-                                           struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+                                               u16 *refill,
+                                               u16 *max,
+                                               u8 *bwg_id,
+                                               u8 *prio_type)
 {
-       struct tc_bw_alloc *p;
        u32    reg, max_credits;
        u8     i;
 
@@ -146,10 +147,8 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
 
        /* Enable arbiter */
        reg &= ~IXGBE_DPMCS_ARBDIS;
-       if (!(dcb_config->round_robin_enable)) {
-               /* Enable DFP and Recycle mode */
-               reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
-       }
+       /* Enable DFP and Recycle mode */
+       reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
        reg |= IXGBE_DPMCS_TSOEF;
        /* Configure Max TSO packet size 34KB including payload and headers */
        reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
@@ -158,16 +157,15 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
 
        /* Configure traffic class credits and priority */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
-               max_credits = dcb_config->tc_config[i].desc_credits_max;
+               max_credits = max[i];
                reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
-               reg |= p->data_credits_refill;
-               reg |= (u32)(p->bwg_id) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
+               reg |= refill[i];
+               reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
 
-               if (p->prio_type == prio_group)
+               if (prio_type[i] == prio_group)
                        reg |= IXGBE_TDTQ2TCCR_GSP;
 
-               if (p->prio_type == prio_link)
+               if (prio_type[i] == prio_link)
                        reg |= IXGBE_TDTQ2TCCR_LSP;
 
                IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
@@ -183,10 +181,12 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
  *
  * Configure Tx Data Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
-                                           struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
+                                               u16 *refill,
+                                               u16 *max,
+                                               u8 *bwg_id,
+                                               u8 *prio_type)
 {
-       struct tc_bw_alloc *p;
        u32 reg;
        u8 i;
 
@@ -200,15 +200,14 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
 
        /* Configure traffic class credits and priority */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
-               reg = p->data_credits_refill;
-               reg |= (u32)(p->data_credits_max) << IXGBE_TDPT2TCCR_MCL_SHIFT;
-               reg |= (u32)(p->bwg_id) << IXGBE_TDPT2TCCR_BWG_SHIFT;
+               reg = refill[i];
+               reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
+               reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
 
-               if (p->prio_type == prio_group)
+               if (prio_type[i] == prio_group)
                        reg |= IXGBE_TDPT2TCCR_GSP;
 
-               if (p->prio_type == prio_link)
+               if (prio_type[i] == prio_link)
                        reg |= IXGBE_TDPT2TCCR_LSP;
 
                IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
@@ -229,13 +228,12 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
  *
  * Configure Priority Flow Control for each traffic class.
  */
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
-                               struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
 {
        u32 reg, rx_pba_size;
        u8  i;
 
-       if (!dcb_config->pfc_mode_enable)
+       if (!pfc_en)
                goto out;
 
        /* Enable Transmit Priority Flow Control */
@@ -256,19 +254,20 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
         * for each traffic class.
         */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+               int enabled = pfc_en & (1 << i);
                rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
                rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
                reg = (rx_pba_size - hw->fc.low_water) << 10;
 
-               if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
-                   dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
+               if (enabled == pfc_enabled_tx ||
+                   enabled == pfc_enabled_full)
                        reg |= IXGBE_FCRTL_XONE;
 
                IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
 
                reg = (rx_pba_size - hw->fc.high_water) << 10;
-               if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
-                   dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
+               if (enabled == pfc_enabled_tx ||
+                   enabled == pfc_enabled_full)
                        reg |= IXGBE_FCRTH_FCEN;
 
                IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
@@ -292,7 +291,7 @@ out:
  * Configure queue statistics registers, all queues belonging to same traffic
  * class uses a single set of queue statistics counters.
  */
-static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
+s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
 {
        u32 reg = 0;
        u8  i   = 0;
@@ -325,13 +324,16 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
  * Configure dcb settings and enable dcb mode.
  */
 s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
-                              struct ixgbe_dcb_config *dcb_config)
+                             u8 rx_pba, u8 pfc_en, u16 *refill,
+                             u16 *max, u8 *bwg_id, u8 *prio_type)
 {
-       ixgbe_dcb_config_packet_buffers_82598(hw, dcb_config);
-       ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
-       ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
-       ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config);
-       ixgbe_dcb_config_pfc_82598(hw, dcb_config);
+       ixgbe_dcb_config_packet_buffers_82598(hw, rx_pba);
+       ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
+       ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
+                                              bwg_id, prio_type);
+       ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
+                                              bwg_id, prio_type);
+       ixgbe_dcb_config_pfc_82598(hw, pfc_en);
        ixgbe_dcb_config_tc_stats_82598(hw);
 
        return 0;
index abc03ccfa088ed40bc94fb6ff81fb3bbe0914171..1e9750c2b46b776f766eae1035d7d54e9b6004ad 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
 /* DCB hardware-specific driver APIs */
 
 /* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
 
 /* DCB hw initialization */
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
+                                       u16 *refill,
+                                       u16 *max,
+                                       u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+                                               u16 *refill,
+                                               u16 *max,
+                                               u8 *bwg_id,
+                                               u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
+                                               u16 *refill,
+                                               u16 *max,
+                                               u8 *bwg_id,
+                                               u8 *prio_type);
+
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
+                             u8 rx_pba, u8 pfc_en, u16 *refill,
+                             u16 *max, u8 *bwg_id, u8 *prio_type);
 
 #endif /* _DCB_82598_CONFIG_H */
index 374e1f74d0f51c20bbfe6e245d8500f90a015dab..beaa1c1c1e671c948687fef61b448ee3caefc673 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
 /**
  * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @rx_pba: method to distribute packet buffer
  *
  * Configure packet buffers for DCB mode.
  */
-static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
-                                          struct ixgbe_dcb_config *dcb_config)
+static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba)
 {
        s32 ret_val = 0;
        u32 value = IXGBE_RXPBSIZE_64KB;
        u8  i = 0;
 
        /* Setup Rx packet buffer sizes */
-       switch (dcb_config->rx_pba_cfg) {
+       switch (rx_pba) {
        case pba_80_48:
                /* Setup the first four at 80KB */
                value = IXGBE_RXPBSIZE_80KB;
@@ -75,14 +74,19 @@ static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
 /**
  * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
  *
  * Configure Rx Packet Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
-                                      struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+                                     u16 *refill,
+                                     u16 *max,
+                                     u8 *bwg_id,
+                                     u8 *prio_type)
 {
-       struct tc_bw_alloc    *p;
        u32    reg           = 0;
        u32    credit_refill = 0;
        u32    credit_max    = 0;
@@ -103,15 +107,13 @@ static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
 
        /* Configure traffic class credits and priority */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
-
-               credit_refill = p->data_credits_refill;
-               credit_max    = p->data_credits_max;
+               credit_refill = refill[i];
+               credit_max    = max[i];
                reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
 
-               reg |= (u32)(p->bwg_id) << IXGBE_RTRPT4C_BWG_SHIFT;
+               reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
 
-               if (p->prio_type == prio_link)
+               if (prio_type[i] == prio_link)
                        reg |= IXGBE_RTRPT4C_LSP;
 
                IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
@@ -130,14 +132,19 @@ static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
 /**
  * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
  *
  * Configure Tx Descriptor Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
-                                           struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+                                          u16 *refill,
+                                          u16 *max,
+                                          u8 *bwg_id,
+                                          u8 *prio_type)
 {
-       struct tc_bw_alloc *p;
        u32    reg, max_credits;
        u8     i;
 
@@ -149,16 +156,15 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
 
        /* Configure traffic class credits and priority */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
-               max_credits = dcb_config->tc_config[i].desc_credits_max;
+               max_credits = max[i];
                reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
-               reg |= p->data_credits_refill;
-               reg |= (u32)(p->bwg_id) << IXGBE_RTTDT2C_BWG_SHIFT;
+               reg |= refill[i];
+               reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
 
-               if (p->prio_type == prio_group)
+               if (prio_type[i] == prio_group)
                        reg |= IXGBE_RTTDT2C_GSP;
 
-               if (p->prio_type == prio_link)
+               if (prio_type[i] == prio_link)
                        reg |= IXGBE_RTTDT2C_LSP;
 
                IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
@@ -177,14 +183,19 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
 /**
  * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
  *
  * Configure Tx Packet Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
-                                           struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+                                          u16 *refill,
+                                          u16 *max,
+                                          u8 *bwg_id,
+                                          u8 *prio_type)
 {
-       struct tc_bw_alloc *p;
        u32 reg;
        u8 i;
 
@@ -205,15 +216,14 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
 
        /* Configure traffic class credits and priority */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
-               reg = p->data_credits_refill;
-               reg |= (u32)(p->data_credits_max) << IXGBE_RTTPT2C_MCL_SHIFT;
-               reg |= (u32)(p->bwg_id) << IXGBE_RTTPT2C_BWG_SHIFT;
+               reg = refill[i];
+               reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
+               reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
 
-               if (p->prio_type == prio_group)
+               if (prio_type[i] == prio_group)
                        reg |= IXGBE_RTTPT2C_GSP;
 
-               if (p->prio_type == prio_link)
+               if (prio_type[i] == prio_link)
                        reg |= IXGBE_RTTPT2C_LSP;
 
                IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
@@ -233,17 +243,16 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
 /**
  * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @pfc_en: enabled pfc bitmask
  *
  * Configure Priority Flow Control (PFC) for each traffic class.
  */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
-                               struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en)
 {
        u32 i, reg, rx_pba_size;
 
        /* If PFC is disabled globally then fall back to LFC. */
-       if (!dcb_config->pfc_mode_enable) {
+       if (!pfc_en) {
                for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
                        hw->mac.ops.fc_enable(hw, i);
                goto out;
@@ -251,19 +260,18 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
 
        /* Configure PFC Tx thresholds per TC */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+               int enabled = pfc_en & (1 << i);
                rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
                rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
 
                reg = (rx_pba_size - hw->fc.low_water) << 10;
 
-               if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
-                   dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
+               if (enabled)
                        reg |= IXGBE_FCRTL_XONE;
                IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
 
                reg = (rx_pba_size - hw->fc.high_water) << 10;
-               if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
-                   dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
+               if (enabled)
                        reg |= IXGBE_FCRTH_FCEN;
                IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
        }
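
The per-TC loop above is driven by a plain pfc_en bitmask instead of the old dcb_config lookups. A minimal sketch of how such a bitmask can be derived from a CEE config, assuming the ixgbe DCB types visible in the removed lines (the helper name is illustrative; in the driver ixgbe_dcb_unpack_pfc() fills this role):

static void example_unpack_pfc(const struct ixgbe_dcb_config *cfg, u8 *pfc_en)
{
	int tc;

	*pfc_en = 0;
	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
		/* a TC sets its bit when PFC is enabled for Tx or both directions */
		if (cfg->tc_config[tc].dcb_pfc == pfc_enabled_full ||
		    cfg->tc_config[tc].dcb_pfc == pfc_enabled_tx)
			*pfc_en |= 1 << tc;
	}
}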
@@ -349,7 +357,6 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
 /**
  * ixgbe_dcb_config_82599 - Configure general DCB parameters
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
  *
  * Configure general DCB parameters.
  */
@@ -406,19 +413,27 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
 /**
  * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @rx_pba: method to distribute packet buffer
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
+ * @pfc_en: enabled pfc bitmask
  *
  * Configure dcb settings and enable dcb mode.
  */
 s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
-                              struct ixgbe_dcb_config *dcb_config)
+                             u8 rx_pba, u8 pfc_en, u16 *refill,
+                             u16 *max, u8 *bwg_id, u8 *prio_type)
 {
-       ixgbe_dcb_config_packet_buffers_82599(hw, dcb_config);
+       ixgbe_dcb_config_packet_buffers_82599(hw, rx_pba);
        ixgbe_dcb_config_82599(hw);
-       ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config);
-       ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config);
-       ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config);
-       ixgbe_dcb_config_pfc_82599(hw, dcb_config);
+       ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, prio_type);
+       ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
+                                              bwg_id, prio_type);
+       ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
+                                              bwg_id, prio_type);
+       ixgbe_dcb_config_pfc_82599(hw, pfc_en);
        ixgbe_dcb_config_tc_stats_82599(hw);
 
        return 0;
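
For clarity, the register value each arbiter loop writes per traffic class can be read as a single packing step. A hedged sketch for the Tx-descriptor arbiter case, using the macros from the hunk above (the helper itself is hypothetical):

static u32 example_pack_tx_desc_tc(u16 refill, u16 max, u8 bwg_id, u8 prio_type)
{
	u32 reg = (u32)max << IXGBE_RTTDT2C_MCL_SHIFT;	/* max credits */

	reg |= refill;					/* refill credits */
	reg |= (u32)bwg_id << IXGBE_RTTDT2C_BWG_SHIFT;	/* bandwidth group */
	if (prio_type == prio_group)
		reg |= IXGBE_RTTDT2C_GSP;		/* group strict priority */
	if (prio_type == prio_link)
		reg |= IXGBE_RTTDT2C_LSP;		/* link strict priority */

	return reg;	/* written to IXGBE_RTTDT2C(i) for traffic class i */
}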
index 3841649fb9545235756a8f647a6a6c260e58c9ab..0b39ab4ffc707c7cad1b5ec6cb27f2cd25209e57 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
 /* DCB hardware-specific driver APIs */
 
 /* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
-                               struct ixgbe_dcb_config *dcb_config);
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en);
 
 /* DCB hw initialization */
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+                                       u16 *refill,
+                                       u16 *max,
+                                       u8 *bwg_id,
+                                       u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+                                               u16 *refill,
+                                               u16 *max,
+                                               u8 *bwg_id,
+                                               u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+                                               u16 *refill,
+                                               u16 *max,
+                                               u8 *bwg_id,
+                                               u8 *prio_type);
+
 s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
-                              struct ixgbe_dcb_config *config);
+                             u8 rx_pba, u8 pfc_en, u16 *refill,
+                             u16 *max, u8 *bwg_id, u8 *prio_type);
 
 #endif /* _DCB_82599_CONFIG_H */
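
A minimal caller-side sketch of the array-based prototypes declared above, assuming the per-TC values have already been computed (for example via ixgbe_dcb_calculate_tc_credits() and the unpack helpers used by the dcbnl code below); the wrapper name is illustrative:

static s32 example_dcb_bringup(struct ixgbe_hw *hw)
{
	u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
	u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];

	/* ... fill the four arrays, one entry per traffic class ... */

	return ixgbe_dcb_hw_config_82599(hw, pba_equal /* rx_pba */,
					 0xff /* pfc_en: all TCs */,
					 refill, max, bwg_id, prio_type);
}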
index bf566e8a455e485978c66d8478166b0c885e64af..d7f0024014b1cc6968d179ff2802e35766d81ec5 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -37,7 +37,6 @@
 #define BIT_PG_RX      0x04
 #define BIT_PG_TX      0x08
 #define BIT_APP_UPCHG  0x10
-#define BIT_RESETLINK   0x40
 #define BIT_LINKSPEED   0x80
 
 /* Responses for the DCB_C_SET_ALL command */
@@ -225,10 +224,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
            (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
             adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
            (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
-            adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) {
+            adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
                adapter->dcb_set_bitmap |= BIT_PG_TX;
-               adapter->dcb_set_bitmap |= BIT_RESETLINK;
-       }
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -239,10 +236,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
        adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
 
        if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
-           adapter->dcb_cfg.bw_percentage[0][bwg_id]) {
+           adapter->dcb_cfg.bw_percentage[0][bwg_id])
                adapter->dcb_set_bitmap |= BIT_PG_TX;
-               adapter->dcb_set_bitmap |= BIT_RESETLINK;
-       }
 }
 
 static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
@@ -269,10 +264,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
            (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
             adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
            (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
-            adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) {
+            adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
                adapter->dcb_set_bitmap |= BIT_PG_RX;
-               adapter->dcb_set_bitmap |= BIT_RESETLINK;
-       }
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
@@ -283,10 +276,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
        adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
 
        if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
-           adapter->dcb_cfg.bw_percentage[1][bwg_id]) {
+           adapter->dcb_cfg.bw_percentage[1][bwg_id])
                adapter->dcb_set_bitmap |= BIT_PG_RX;
-               adapter->dcb_set_bitmap |= BIT_RESETLINK;
-       }
 }
 
 static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
@@ -365,21 +356,17 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
                return DCB_NO_HW_CHG;
 
        /*
-        * Only take down the adapter if the configuration change
-        * requires a reset.
+        * Only take down the adapter if an app change occurred. FCoE
+        * may shuffle tx rings in this case and this cannot be done
+        * without a reset currently.
         */
-       if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
+       if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
                while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
                        msleep(1);
 
-               if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
-                       if (netif_running(netdev))
-                               netdev->netdev_ops->ndo_stop(netdev);
-                       ixgbe_clear_interrupt_scheme(adapter);
-               } else {
-                       if (netif_running(netdev))
-                               ixgbe_down(adapter);
-               }
+               if (netif_running(netdev))
+                       netdev->netdev_ops->ndo_stop(netdev);
+               ixgbe_clear_interrupt_scheme(adapter);
        }
 
        if (adapter->dcb_cfg.pfc_mode_enable) {
@@ -408,29 +395,51 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
                }
        }
 
-       if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
-               if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
-                       ixgbe_init_interrupt_scheme(adapter);
-                       if (netif_running(netdev))
-                               netdev->netdev_ops->ndo_open(netdev);
-               } else {
-                       if (netif_running(netdev))
-                               ixgbe_up(adapter);
-               }
+       if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
+               ixgbe_init_interrupt_scheme(adapter);
+               if (netif_running(netdev))
+                       netdev->netdev_ops->ndo_open(netdev);
                ret = DCB_HW_CHG_RST;
-       } else if (adapter->dcb_set_bitmap & BIT_PFC) {
-               if (adapter->hw.mac.type == ixgbe_mac_82598EB)
-                       ixgbe_dcb_config_pfc_82598(&adapter->hw,
-                                                  &adapter->dcb_cfg);
-               else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-                       ixgbe_dcb_config_pfc_82599(&adapter->hw,
-                                                  &adapter->dcb_cfg);
+       }
+
+       if (adapter->dcb_set_bitmap & BIT_PFC) {
+               u8 pfc_en;
+               ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
+               ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en);
                ret = DCB_HW_CHG;
        }
+
+       if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
+               u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
+               u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
+               int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+#ifdef CONFIG_FCOE
+               if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+                       max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif
+
+               ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
+                                              max_frame, DCB_TX_CONFIG);
+               ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
+                                              max_frame, DCB_RX_CONFIG);
+
+               ixgbe_dcb_unpack_refill(&adapter->dcb_cfg,
+                                       DCB_TX_CONFIG, refill);
+               ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max);
+               ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg,
+                                      DCB_TX_CONFIG, bwg_id);
+               ixgbe_dcb_unpack_prio(&adapter->dcb_cfg,
+                                     DCB_TX_CONFIG, prio_type);
+
+               ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
+                                       bwg_id, prio_type);
+       }
+
        if (adapter->dcb_cfg.pfc_mode_enable)
                adapter->hw.fc.current_mode = ixgbe_fc_pfc;
 
-       if (adapter->dcb_set_bitmap & BIT_RESETLINK)
+       if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
                clear_bit(__IXGBE_RESETTING, &adapter->state);
        adapter->dcb_set_bitmap = 0x00;
        return ret;
@@ -568,18 +577,29 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
        case DCB_APP_IDTYPE_ETHTYPE:
 #ifdef IXGBE_FCOE
                if (id == ETH_P_FCOE) {
-                       u8 tc;
-                       struct ixgbe_adapter *adapter;
+                       u8 old_tc;
+                       struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-                       adapter = netdev_priv(netdev);
-                       tc = adapter->fcoe.tc;
+                       /* Get current programmed tc */
+                       old_tc = adapter->fcoe.tc;
                        rval = ixgbe_fcoe_setapp(adapter, up);
-                       if ((!rval) && (tc != adapter->fcoe.tc) &&
-                           (adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
-                           (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
+
+                       if (rval ||
+                          !(adapter->flags & IXGBE_FLAG_DCB_ENABLED) ||
+                          !(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+                               break;
+
+                       /* The FCoE application priority may be changed multiple
+                        * times in quick succession with switches that build up
+                        * TLVs. To avoid creating unneeded device resets this
+                        * checks the actual HW configuration and clears
+                        * BIT_APP_UPCHG if a HW configuration change is not
+                        * needed.
+                        */
+                       if (old_tc == adapter->fcoe.tc)
+                               adapter->dcb_set_bitmap &= ~BIT_APP_UPCHG;
+                       else
                                adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
-                               adapter->dcb_set_bitmap |= BIT_RESETLINK;
-                       }
                }
 #endif
                break;
@@ -591,7 +611,98 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
        return rval;
 }
 
+static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
+                                  struct ieee_ets *ets)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets;
+
+       /* No IEEE ETS settings available */
+       if (!my_ets)
+               return -EINVAL;
+
+       ets->ets_cap = MAX_TRAFFIC_CLASS;
+       ets->cbs = my_ets->cbs;
+       memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
+       memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
+       memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
+       memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
+       return 0;
+}
+
+static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
+                                  struct ieee_ets *ets)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
+       int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+       int err;
+       /* naively give each TC a bwg to map onto CEE hardware */
+       __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+       if (!adapter->ixgbe_ieee_ets) {
+               adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets),
+                                                 GFP_KERNEL);
+               if (!adapter->ixgbe_ieee_ets)
+                       return -ENOMEM;
+       }
+
+       memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));
+
+       ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame);
+       err = ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
+                                     bwg_id, ets->tc_tsa);
+       return err;
+}
+
+static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
+                                  struct ieee_pfc *pfc)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc;
+       int i;
+
+       /* No IEEE PFC settings available */
+       if (!my_pfc)
+               return -EINVAL;
+
+       pfc->pfc_cap = MAX_TRAFFIC_CLASS;
+       pfc->pfc_en = my_pfc->pfc_en;
+       pfc->mbc = my_pfc->mbc;
+       pfc->delay = my_pfc->delay;
+
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+               pfc->requests[i] = adapter->stats.pxoffrxc[i];
+               pfc->indications[i] = adapter->stats.pxofftxc[i];
+       }
+
+       return 0;
+}
+
+static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
+                                  struct ieee_pfc *pfc)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       int err;
+
+       if (!adapter->ixgbe_ieee_pfc) {
+               adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc),
+                                                 GFP_KERNEL);
+               if (!adapter->ixgbe_ieee_pfc)
+                       return -ENOMEM;
+       }
+
+       memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc));
+       err = ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en);
+       return err;
+}
+
 const struct dcbnl_rtnl_ops dcbnl_ops = {
+       .ieee_getets    = ixgbe_dcbnl_ieee_getets,
+       .ieee_setets    = ixgbe_dcbnl_ieee_setets,
+       .ieee_getpfc    = ixgbe_dcbnl_ieee_getpfc,
+       .ieee_setpfc    = ixgbe_dcbnl_ieee_setpfc,
        .getstate       = ixgbe_dcbnl_get_state,
        .setstate       = ixgbe_dcbnl_set_state,
        .getpermhwaddr  = ixgbe_dcbnl_get_perm_hw_addr,
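
For context, this ops table only takes effect once attached to the net_device. The registration is not part of this hunk; in the driver's probe path it amounts to roughly the following, shown here as an assumption (under CONFIG_DCB):

#ifdef CONFIG_DCB
	/* expose the CEE and new IEEE 802.1Qaz hooks to the dcbnl layer */
	netdev->dcbnl_ops = &dcbnl_ops;
#endif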
index 2002ea88ca2af8377240eebd60a177a8abc641db..76380a2b35aa6aeafe05c20f1b056a1ea355a240 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -152,20 +152,35 @@ static int ixgbe_get_settings(struct net_device *netdev,
                ecmd->supported |= (SUPPORTED_1000baseT_Full |
                                    SUPPORTED_Autoneg);
 
+               switch (hw->mac.type) {
+               case ixgbe_mac_X540:
+                       ecmd->supported |= SUPPORTED_100baseT_Full;
+                       break;
+               default:
+                       break;
+               }
+
                ecmd->advertising = ADVERTISED_Autoneg;
-               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
-                       ecmd->advertising |= ADVERTISED_10000baseT_Full;
-               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
-                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
-               /*
-                * It's possible that phy.autoneg_advertised may not be
-                * set yet.  If so display what the default would be -
-                * both 1G and 10G supported.
-                */
-               if (!(ecmd->advertising & (ADVERTISED_1000baseT_Full |
-                                          ADVERTISED_10000baseT_Full)))
+               if (hw->phy.autoneg_advertised) {
+                       if (hw->phy.autoneg_advertised &
+                           IXGBE_LINK_SPEED_100_FULL)
+                               ecmd->advertising |= ADVERTISED_100baseT_Full;
+                       if (hw->phy.autoneg_advertised &
+                           IXGBE_LINK_SPEED_10GB_FULL)
+                               ecmd->advertising |= ADVERTISED_10000baseT_Full;
+                       if (hw->phy.autoneg_advertised &
+                           IXGBE_LINK_SPEED_1GB_FULL)
+                               ecmd->advertising |= ADVERTISED_1000baseT_Full;
+               } else {
+                       /*
+                        * Default advertised modes in case
+                        * phy.autoneg_advertised isn't set.
+                        */
                        ecmd->advertising |= (ADVERTISED_10000baseT_Full |
                                              ADVERTISED_1000baseT_Full);
+                       if (hw->mac.type == ixgbe_mac_X540)
+                               ecmd->advertising |= ADVERTISED_100baseT_Full;
+               }
 
                if (hw->phy.media_type == ixgbe_media_type_copper) {
                        ecmd->supported |= SUPPORTED_TP;
@@ -271,8 +286,19 @@ static int ixgbe_get_settings(struct net_device *netdev,
 
        hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
        if (link_up) {
-               ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
-                              SPEED_10000 : SPEED_1000;
+               switch (link_speed) {
+               case IXGBE_LINK_SPEED_10GB_FULL:
+                       ecmd->speed = SPEED_10000;
+                       break;
+               case IXGBE_LINK_SPEED_1GB_FULL:
+                       ecmd->speed = SPEED_1000;
+                       break;
+               case IXGBE_LINK_SPEED_100_FULL:
+                       ecmd->speed = SPEED_100;
+                       break;
+               default:
+                       break;
+               }
                ecmd->duplex = DUPLEX_FULL;
        } else {
                ecmd->speed = -1;
@@ -306,6 +332,9 @@ static int ixgbe_set_settings(struct net_device *netdev,
                if (ecmd->advertising & ADVERTISED_1000baseT_Full)
                        advertised |= IXGBE_LINK_SPEED_1GB_FULL;
 
+               if (ecmd->advertising & ADVERTISED_100baseT_Full)
+                       advertised |= IXGBE_LINK_SPEED_100_FULL;
+
                if (old == advertised)
                        return err;
                /* this sets the link speed and restarts auto-neg */
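
The advertising translation extended above (now including 100 Mb/s full duplex for X540) can be summarised in one place; a sketch using the same ethtool and ixgbe speed constants, with a hypothetical helper name:

static u32 example_advertising_to_speeds(u32 advertising)
{
	u32 speeds = 0;

	if (advertising & ADVERTISED_10000baseT_Full)
		speeds |= IXGBE_LINK_SPEED_10GB_FULL;
	if (advertising & ADVERTISED_1000baseT_Full)
		speeds |= IXGBE_LINK_SPEED_1GB_FULL;
	if (advertising & ADVERTISED_100baseT_Full)	/* new: X540 copper */
		speeds |= IXGBE_LINK_SPEED_100_FULL;

	return speeds;	/* handed to the link setup path */
}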
index c54a88274d5177b3706ba7d9e1fc2a0f3f8700d7..00af15a9cdc66ade900df22eaa3f717dd77884e6 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -135,22 +135,19 @@ out_ddp_put:
        return len;
 }
 
+
 /**
- * ixgbe_fcoe_ddp_get - called to set up ddp context
+ * ixgbe_fcoe_ddp_setup - called to set up ddp context
  * @netdev: the corresponding net_device
  * @xid: the exchange id requesting ddp
  * @sgl: the scatter-gather list for this request
  * @sgc: the number of scatter-gather items
  *
- * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
- * and is expected to be called from ULD, e.g., FCP layer of libfc
- * to set up ddp for the corresponding xid of the given sglist for
- * the corresponding I/O.
- *
  * Returns : 1 for success and 0 for no ddp
  */
-int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
-                      struct scatterlist *sgl, unsigned int sgc)
+static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
+                               struct scatterlist *sgl, unsigned int sgc,
+                               int target_mode)
 {
        struct ixgbe_adapter *adapter;
        struct ixgbe_hw *hw;
@@ -164,7 +161,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
        unsigned int lastsize;
        unsigned int thisoff = 0;
        unsigned int thislen = 0;
-       u32 fcbuff, fcdmarw, fcfltrw;
+       u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
        dma_addr_t addr = 0;
 
        if (!netdev || !sgl)
@@ -275,6 +272,9 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
        fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
        fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
        fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
+       /* Set WRCONTX bit to allow DDP for target */
+       if (target_mode)
+               fcbuff |= (IXGBE_FCBUFF_WRCONTX);
        fcbuff |= (IXGBE_FCBUFF_VALID);
 
        fcdmarw = xid;
@@ -287,6 +287,16 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
        /* program DMA context */
        hw = &adapter->hw;
        spin_lock_bh(&fcoe->lock);
+
+       /* turn on last frame indication for target mode, as the target is
+        * supposed to send FCP_RSP when it is done. */
+       if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
+               set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
+               fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
+               fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
+               IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
+       }
+
        IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
        IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
        IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
@@ -295,6 +305,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
        IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
        IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
        IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
+
        spin_unlock_bh(&fcoe->lock);
 
        return 1;
@@ -308,6 +319,47 @@ out_noddp_unmap:
        return 0;
 }
 
+/**
+ * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
+ * and is expected to be called from ULD, e.g., FCP layer of libfc
+ * to set up ddp for the corresponding xid of the given sglist for
+ * the corresponding I/O.
+ *
+ * Returns : 1 for success and 0 for no ddp
+ */
+int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+                      struct scatterlist *sgl, unsigned int sgc)
+{
+       return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
+}
+
+/**
+ * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
+ * and is expected to be called from ULD, e.g., FCP layer of libfc
+ * to set up ddp for the corresponding xid of the given sglist for
+ * the corresponding I/O. The DDP in target mode is a write I/O request
+ * from the initiator.
+ *
+ * Returns : 1 for success and 0 for no ddp
+ */
+int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+                           struct scatterlist *sgl, unsigned int sgc)
+{
+       return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
+}
+
 /**
  * ixgbe_fcoe_ddp - check ddp status and mark it done
  * @adapter: ixgbe adapter
@@ -331,6 +383,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
        struct ixgbe_fcoe *fcoe;
        struct ixgbe_fcoe_ddp *ddp;
        struct fc_frame_header *fh;
+       struct fcoe_crc_eof *crc;
 
        if (!ixgbe_rx_is_fcoe(rx_desc))
                goto ddp_out;
@@ -384,7 +437,18 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
                else if (ddp->len)
                        rc = ddp->len;
        }
-
+       /* In target mode, check the last data frame of the sequence.
+        * For DDP in target mode, data is already DDPed but the header
+        * indication of the last data frame would allow us to tell if we
+        * got all the data and the ULP can send FCP_RSP back. As this is
+        * not a full fcoe frame, we fill the trailer here so it won't be
+        * dropped by the ULP stack.
+        */
+       if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
+           (fctl & FC_FC_END_SEQ)) {
+               crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
+               crc->fcoe_eof = FC_EOF_T;
+       }
 ddp_out:
        return rc;
 }
@@ -840,5 +904,3 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
        }
        return rc;
 }
-
-
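
A hedged sketch of how an upper-layer driver would reach the two DDP entry points through net_device_ops under CONFIG_FCOE (the target hook is wired into ixgbe_netdev_ops later in this patch); the wrapper itself is illustrative only:

static int example_uld_ddp_setup(struct net_device *dev, u16 xid,
				 struct scatterlist *sgl, unsigned int sgc,
				 bool target)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* a write I/O from the initiator is DDPed via the target-mode hook */
	if (target && ops->ndo_fcoe_ddp_target)
		return ops->ndo_fcoe_ddp_target(dev, xid, sgl, sgc);
	if (!target && ops->ndo_fcoe_ddp_setup)
		return ops->ndo_fcoe_ddp_setup(dev, xid, sgl, sgc);

	return 0;	/* no ddp */
}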
index 65cc8fb14fe7d040b1418d9f9bb0c9c0df0e6603..5a650a4ace66ef2e8575872ee5814b521fb58d5a 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -52,6 +52,9 @@
 /* fcerr */
 #define IXGBE_FCERR_BADCRC       0x00100000
 
+/* FCoE DDP for target mode */
+#define __IXGBE_FCOE_TARGET    1
+
 struct ixgbe_fcoe_ddp {
        int len;
        u32 err;
@@ -66,6 +69,7 @@ struct ixgbe_fcoe {
        u8 tc;
        u8 up;
 #endif
+       unsigned long mode;
        atomic_t refcnt;
        spinlock_t lock;
        struct pci_pool *pool;
index 30f9ccfb4f8700efceb9b1b8eceaadea2bf9f8c7..5998dc94dd5c44cd1f02d7bc63665bc8ac068fee 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -54,7 +54,8 @@ static const char ixgbe_driver_string[] =
 
 #define DRV_VERSION "3.2.9-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
-static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
+static const char ixgbe_copyright[] =
+                               "Copyright (c) 1999-2011 Intel Corporation.";
 
 static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598] = &ixgbe_82598_info,
@@ -648,7 +649,7 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
  *
  * Returns : a tc index for use in range 0-7, or 0-3
  */
-u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
+static u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
 {
        int tc = -1;
        int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
@@ -2597,6 +2598,11 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 
                i--;
                for (; i >= 0; i--) {
+                       /* free only the irqs that were actually requested */
+                       if (!adapter->q_vector[i]->rxr_count &&
+                           !adapter->q_vector[i]->txr_count)
+                               continue;
+
                        free_irq(adapter->msix_entries[i].vector,
                                 adapter->q_vector[i]);
                }
@@ -3077,6 +3083,14 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
        ixgbe_configure_srrctl(adapter, ring);
        ixgbe_configure_rscctl(adapter, ring);
 
+       /* If operating in IOV mode set RLPML for X540 */
+       if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
+           hw->mac.type == ixgbe_mac_X540) {
+               rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+               rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
+                           ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
+       }
+
        if (hw->mac.type == ixgbe_mac_82598EB) {
                /*
                 * enable cache line friendly hardware writes:
@@ -3761,7 +3775,8 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
        if (ret)
                goto link_cfg_out;
 
-       if (hw->mac.ops.get_link_capabilities)
+       autoneg = hw->phy.autoneg_advertised;
+       if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
                ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
                                                        &negotiation);
        if (ret)
@@ -3876,7 +3891,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
         * If we're not hot-pluggable SFP+, we just need to configure link
         * and bring it up.
         */
-       if (hw->phy.type == ixgbe_phy_unknown)
+       if (hw->phy.type == ixgbe_phy_none)
                schedule_work(&adapter->sfp_config_module_task);
 
        /* enable transmits */
@@ -5174,7 +5189,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
        adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
        adapter->dcb_cfg.rx_pba_cfg = pba_equal;
        adapter->dcb_cfg.pfc_mode_enable = false;
-       adapter->dcb_cfg.round_robin_enable = false;
        adapter->dcb_set_bitmap = 0x00;
        ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
                           adapter->ring_feature[RING_F_DCB].indices);
@@ -5442,8 +5456,14 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
        /* MTU < 68 is an error and causes problems on some kernels */
-       if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
-               return -EINVAL;
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED &&
+           hw->mac.type != ixgbe_mac_X540) {
+               if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+                       return -EINVAL;
+       } else {
+               if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
+                       return -EINVAL;
+       }
 
        e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
        /* must set new MTU before calling down or up */
@@ -5611,6 +5631,10 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
        }
 
        ixgbe_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_DCB
+       kfree(adapter->ixgbe_ieee_pfc);
+       kfree(adapter->ixgbe_ieee_ets);
+#endif
 
 #ifdef CONFIG_PM
        retval = pci_save_state(pdev);
@@ -6101,7 +6125,10 @@ static void ixgbe_watchdog_task(struct work_struct *work)
                               (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
                               "10 Gbps" :
                               (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
-                              "1 Gbps" : "unknown speed")),
+                              "1 Gbps" :
+                              (link_speed == IXGBE_LINK_SPEED_100_FULL ?
+                              "100 Mbps" :
+                              "unknown speed"))),
                               ((flow_rx && flow_tx) ? "RX/TX" :
                               (flow_rx ? "RX" :
                               (flow_tx ? "TX" : "None"))));
@@ -6993,6 +7020,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 #endif
 #ifdef IXGBE_FCOE
        .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
+       .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
        .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
        .ndo_fcoe_enable = ixgbe_fcoe_enable,
        .ndo_fcoe_disable = ixgbe_fcoe_disable,
@@ -7706,16 +7734,6 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
 
 #endif /* CONFIG_IXGBE_DCA */
 
-/**
- * ixgbe_get_hw_dev return device
- * used by hardware layer to print debugging information
- **/
-struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
-{
-       struct ixgbe_adapter *adapter = hw->back;
-       return adapter->netdev;
-}
-
 module_exit(ixgbe_exit_module);
 
 /* ixgbe_main.c */
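
The new MTU bound applied above when SR-IOV is active reads more easily as a predicate; a sketch using the same constants (helper name hypothetical):

static bool example_mtu_valid(struct ixgbe_adapter *adapter, int new_mtu)
{
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	if (new_mtu < 68)	/* MTU < 68 causes problems on some kernels */
		return false;

	/* VFs on pre-X540 hardware cannot receive jumbo frames */
	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
	    adapter->hw.mac.type != ixgbe_mac_X540)
		return max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE;

	return max_frame <= IXGBE_MAX_JUMBO_FRAME_SIZE;
}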
index ea82c5a1cd3ead752938593b1e59db8ba887f333..c7ed82eb2539aae89bb63589a23c68dce72bb898 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -437,6 +437,7 @@ out_no_read:
        return ret_val;
 }
 
+#ifdef CONFIG_PCI_IOV
 /**
  *  ixgbe_init_mbx_params_pf - set initial values for pf mailbox
  *  @hw: pointer to the HW structure
@@ -447,24 +448,22 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
 
-       switch (hw->mac.type) {
-       case ixgbe_mac_82599EB:
-       case ixgbe_mac_X540:
-               mbx->timeout = 0;
-               mbx->usec_delay = 0;
+       if (hw->mac.type != ixgbe_mac_82599EB &&
+           hw->mac.type != ixgbe_mac_X540)
+               return;
 
-               mbx->size = IXGBE_VFMAILBOX_SIZE;
+       mbx->timeout = 0;
+       mbx->usec_delay = 0;
 
-               mbx->stats.msgs_tx = 0;
-               mbx->stats.msgs_rx = 0;
-               mbx->stats.reqs = 0;
-               mbx->stats.acks = 0;
-               mbx->stats.rsts = 0;
-               break;
-       default:
-               break;
-       }
+       mbx->stats.msgs_tx = 0;
+       mbx->stats.msgs_rx = 0;
+       mbx->stats.reqs = 0;
+       mbx->stats.acks = 0;
+       mbx->stats.rsts = 0;
+
+       mbx->size = IXGBE_VFMAILBOX_SIZE;
 }
+#endif /* CONFIG_PCI_IOV */
 
 struct ixgbe_mbx_operations mbx_ops_generic = {
        .read                   = ixgbe_read_mbx_pf,
index 3df9b15902186e18879fe51fccd5d327fa7e2c0f..fe6ea81dc7f8f3a7787e6db1567236f282eb1291 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -86,7 +86,9 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
 s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
 s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
 s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+#ifdef CONFIG_PCI_IOV
 void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+#endif /* CONFIG_PCI_IOV */
 
 extern struct ixgbe_mbx_operations mbx_ops_generic;
 
index 8f7123e8fc0a8b1ae06ce8c54374ee8f6fd78889..9190a8fca4273cd533560f2d6f294f088b423c69 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -57,6 +57,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
 {
        s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
        u32 phy_addr;
+       u16 ext_ability = 0;
 
        if (hw->phy.type == ixgbe_phy_unknown) {
                for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
@@ -65,12 +66,29 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
                                ixgbe_get_phy_id(hw);
                                hw->phy.type =
                                        ixgbe_get_phy_type_from_id(hw->phy.id);
+
+                               if (hw->phy.type == ixgbe_phy_unknown) {
+                                       hw->phy.ops.read_reg(hw,
+                                                            MDIO_PMA_EXTABLE,
+                                                            MDIO_MMD_PMAPMD,
+                                                            &ext_ability);
+                                       if (ext_ability &
+                                           (MDIO_PMA_EXTABLE_10GBT |
+                                            MDIO_PMA_EXTABLE_1000BT))
+                                               hw->phy.type =
+                                                        ixgbe_phy_cu_unknown;
+                                       else
+                                               hw->phy.type =
+                                                        ixgbe_phy_generic;
+                               }
+
                                status = 0;
                                break;
                        }
                }
                /* clear value if nothing found */
-               hw->phy.mdio.prtad = 0;
+               if (status != 0)
+                       hw->phy.mdio.prtad = 0;
        } else {
                status = 0;
        }
@@ -138,17 +156,51 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
  **/
 s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
 {
+       u32 i;
+       u16 ctrl = 0;
+       s32 status = 0;
+
+       if (hw->phy.type == ixgbe_phy_unknown)
+               status = ixgbe_identify_phy_generic(hw);
+
+       if (status != 0 || hw->phy.type == ixgbe_phy_none)
+               goto out;
+
        /* Don't reset PHY if it's shut down due to overtemp. */
        if (!hw->phy.reset_if_overtemp &&
            (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
-               return 0;
+               goto out;
 
        /*
         * Perform soft PHY reset to the PHY_XS.
         * This will cause a soft reset to the PHY
         */
-       return hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
-                                    MDIO_CTRL1_RESET);
+       hw->phy.ops.write_reg(hw, MDIO_CTRL1,
+                             MDIO_MMD_PHYXS,
+                             MDIO_CTRL1_RESET);
+
+       /*
+        * Poll for reset bit to self-clear indicating reset is complete.
+        * Some PHYs could take up to 3 seconds to complete and need about
+        * 1.7 usec delay after the reset is complete.
+        */
+       for (i = 0; i < 30; i++) {
+               msleep(100);
+               hw->phy.ops.read_reg(hw, MDIO_CTRL1,
+                                    MDIO_MMD_PHYXS, &ctrl);
+               if (!(ctrl & MDIO_CTRL1_RESET)) {
+                       udelay(2);
+                       break;
+               }
+       }
+
+       if (ctrl & MDIO_CTRL1_RESET) {
+               status = IXGBE_ERR_RESET_FAILED;
+               hw_dbg(hw, "PHY reset polling failed to complete.\n");
+       }
+
+out:
+       return status;
 }
 
 /**
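
The reset path above introduces a poll-until-self-clear loop; distilled into a stand-alone helper it looks like the sketch below (timing mirrors the comment: up to 3 s in 100 ms steps, then a short settle delay):

static s32 example_wait_for_phy_reset(struct ixgbe_hw *hw)
{
	u16 ctrl = 0;
	u32 i;

	for (i = 0; i < 30; i++) {		/* 30 x 100 ms = 3 s budget */
		msleep(100);
		hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &ctrl);
		if (!(ctrl & MDIO_CTRL1_RESET)) {
			udelay(2);		/* PHY needs ~1.7 usec after reset */
			return 0;
		}
	}

	return IXGBE_ERR_RESET_FAILED;		/* reset bit never cleared */
}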
@@ -171,7 +223,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
        else
                gssr = IXGBE_GSSR_PHY0_SM;
 
-       if (ixgbe_acquire_swfw_sync(hw, gssr) != 0)
+       if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
                status = IXGBE_ERR_SWFW_SYNC;
 
        if (status == 0) {
@@ -243,7 +295,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
                        }
                }
 
-               ixgbe_release_swfw_sync(hw, gssr);
+               hw->mac.ops.release_swfw_sync(hw, gssr);
        }
 
        return status;
@@ -269,7 +321,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
        else
                gssr = IXGBE_GSSR_PHY0_SM;
 
-       if (ixgbe_acquire_swfw_sync(hw, gssr) != 0)
+       if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
                status = IXGBE_ERR_SWFW_SYNC;
 
        if (status == 0) {
@@ -336,7 +388,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
                        }
                }
 
-               ixgbe_release_swfw_sync(hw, gssr);
+               hw->mac.ops.release_swfw_sync(hw, gssr);
        }
 
        return status;
@@ -556,11 +608,10 @@ out:
 }
 
 /**
- *  ixgbe_identify_sfp_module_generic - Identifies SFP module and assigns
- *                                      the PHY type.
+ *  ixgbe_identify_sfp_module_generic - Identifies SFP modules
  *  @hw: pointer to hardware structure
  *
- *  Searches for and indentifies the SFP module.  Assings appropriate PHY type.
+ *  Searches for and identifies the SFP module and assigns appropriate PHY type.
  **/
 s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 {
@@ -581,41 +632,62 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                goto out;
        }
 
-       status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
+       status = hw->phy.ops.read_i2c_eeprom(hw,
+                                            IXGBE_SFF_IDENTIFIER,
                                             &identifier);
 
-       if (status == IXGBE_ERR_SFP_NOT_PRESENT || status == IXGBE_ERR_I2C) {
-               status = IXGBE_ERR_SFP_NOT_PRESENT;
-               hw->phy.sfp_type = ixgbe_sfp_type_not_present;
-               if (hw->phy.type != ixgbe_phy_nl) {
-                       hw->phy.id = 0;
-                       hw->phy.type = ixgbe_phy_unknown;
-               }
-               goto out;
-       }
+       if (status == IXGBE_ERR_SWFW_SYNC ||
+           status == IXGBE_ERR_I2C ||
+           status == IXGBE_ERR_SFP_NOT_PRESENT)
+               goto err_read_i2c_eeprom;
 
-       if (identifier == IXGBE_SFF_IDENTIFIER_SFP) {
-               hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES,
-                                           &comp_codes_1g);
-               hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES,
-                                           &comp_codes_10g);
-               hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY,
-                                           &cable_tech);
-
-               /* ID Module
-                * =========
-                * 0    SFP_DA_CU
-                * 1    SFP_SR
-                * 2    SFP_LR
-                * 3    SFP_DA_CORE0 - 82599-specific
-                * 4    SFP_DA_CORE1 - 82599-specific
-                * 5    SFP_SR/LR_CORE0 - 82599-specific
-                * 6    SFP_SR/LR_CORE1 - 82599-specific
-                * 7    SFP_act_lmt_DA_CORE0 - 82599-specific
-                * 8    SFP_act_lmt_DA_CORE1 - 82599-specific
-                * 9    SFP_1g_cu_CORE0 - 82599-specific
-                * 10   SFP_1g_cu_CORE1 - 82599-specific
-                */
+       /* LAN ID is needed for sfp_type determination */
+       hw->mac.ops.set_lan_id(hw);
+
+       if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
+               hw->phy.type = ixgbe_phy_sfp_unsupported;
+               status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+       } else {
+               status = hw->phy.ops.read_i2c_eeprom(hw,
+                                                    IXGBE_SFF_1GBE_COMP_CODES,
+                                                    &comp_codes_1g);
+
+               if (status == IXGBE_ERR_SWFW_SYNC ||
+                   status == IXGBE_ERR_I2C ||
+                   status == IXGBE_ERR_SFP_NOT_PRESENT)
+                       goto err_read_i2c_eeprom;
+
+               status = hw->phy.ops.read_i2c_eeprom(hw,
+                                                    IXGBE_SFF_10GBE_COMP_CODES,
+                                                    &comp_codes_10g);
+
+               if (status == IXGBE_ERR_SWFW_SYNC ||
+                   status == IXGBE_ERR_I2C ||
+                   status == IXGBE_ERR_SFP_NOT_PRESENT)
+                       goto err_read_i2c_eeprom;
+               status = hw->phy.ops.read_i2c_eeprom(hw,
+                                                    IXGBE_SFF_CABLE_TECHNOLOGY,
+                                                    &cable_tech);
+
+               if (status == IXGBE_ERR_SWFW_SYNC ||
+                   status == IXGBE_ERR_I2C ||
+                   status == IXGBE_ERR_SFP_NOT_PRESENT)
+                       goto err_read_i2c_eeprom;
+
+                /* ID Module
+                 * =========
+                 * 0   SFP_DA_CU
+                 * 1   SFP_SR
+                 * 2   SFP_LR
+                 * 3   SFP_DA_CORE0 - 82599-specific
+                 * 4   SFP_DA_CORE1 - 82599-specific
+                 * 5   SFP_SR/LR_CORE0 - 82599-specific
+                 * 6   SFP_SR/LR_CORE1 - 82599-specific
+                 * 7   SFP_act_lmt_DA_CORE0 - 82599-specific
+                 * 8   SFP_act_lmt_DA_CORE1 - 82599-specific
+                 * 9   SFP_1g_cu_CORE0 - 82599-specific
+                 * 10  SFP_1g_cu_CORE1 - 82599-specific
+                 */
                if (hw->mac.type == ixgbe_mac_82598EB) {
                        if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
                                hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
@@ -647,31 +719,27 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                                                ixgbe_sfp_type_da_act_lmt_core1;
                                } else {
                                        hw->phy.sfp_type =
-                                               ixgbe_sfp_type_unknown;
+                                                       ixgbe_sfp_type_unknown;
                                }
-                       } else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
-                               if (hw->bus.lan_id == 0)
-                                       hw->phy.sfp_type =
-                                                     ixgbe_sfp_type_srlr_core0;
-                               else
-                                       hw->phy.sfp_type =
-                                                     ixgbe_sfp_type_srlr_core1;
-                       else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+                       } else if (comp_codes_10g &
+                                  (IXGBE_SFF_10GBASESR_CAPABLE |
+                                   IXGBE_SFF_10GBASELR_CAPABLE)) {
                                if (hw->bus.lan_id == 0)
                                        hw->phy.sfp_type =
                                                      ixgbe_sfp_type_srlr_core0;
                                else
                                        hw->phy.sfp_type =
                                                      ixgbe_sfp_type_srlr_core1;
-                       else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
+                       } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
                                if (hw->bus.lan_id == 0)
                                        hw->phy.sfp_type =
                                                ixgbe_sfp_type_1g_cu_core0;
                                else
                                        hw->phy.sfp_type =
                                                ixgbe_sfp_type_1g_cu_core1;
-                       else
+                       } else {
                                hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+                       }
                }
 
                if (hw->phy.sfp_type != stored_sfp_type)
@@ -688,16 +756,33 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                /* Determine PHY vendor */
                if (hw->phy.type != ixgbe_phy_nl) {
                        hw->phy.id = identifier;
-                       hw->phy.ops.read_i2c_eeprom(hw,
+                       status = hw->phy.ops.read_i2c_eeprom(hw,
                                                    IXGBE_SFF_VENDOR_OUI_BYTE0,
                                                    &oui_bytes[0]);
-                       hw->phy.ops.read_i2c_eeprom(hw,
+
+                       if (status == IXGBE_ERR_SWFW_SYNC ||
+                           status == IXGBE_ERR_I2C ||
+                           status == IXGBE_ERR_SFP_NOT_PRESENT)
+                               goto err_read_i2c_eeprom;
+
+                       status = hw->phy.ops.read_i2c_eeprom(hw,
                                                    IXGBE_SFF_VENDOR_OUI_BYTE1,
                                                    &oui_bytes[1]);
-                       hw->phy.ops.read_i2c_eeprom(hw,
+
+                       if (status == IXGBE_ERR_SWFW_SYNC ||
+                           status == IXGBE_ERR_I2C ||
+                           status == IXGBE_ERR_SFP_NOT_PRESENT)
+                               goto err_read_i2c_eeprom;
+
+                       status = hw->phy.ops.read_i2c_eeprom(hw,
                                                    IXGBE_SFF_VENDOR_OUI_BYTE2,
                                                    &oui_bytes[2]);
 
+                       if (status == IXGBE_ERR_SWFW_SYNC ||
+                           status == IXGBE_ERR_I2C ||
+                           status == IXGBE_ERR_SFP_NOT_PRESENT)
+                               goto err_read_i2c_eeprom;
+
                        vendor_oui =
                          ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
                           (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
@@ -707,7 +792,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                        case IXGBE_SFF_VENDOR_OUI_TYCO:
                                if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
                                        hw->phy.type =
-                                               ixgbe_phy_sfp_passive_tyco;
+                                                   ixgbe_phy_sfp_passive_tyco;
                                break;
                        case IXGBE_SFF_VENDOR_OUI_FTL:
                                if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
@@ -724,7 +809,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                        default:
                                if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
                                        hw->phy.type =
-                                               ixgbe_phy_sfp_passive_unknown;
+                                                ixgbe_phy_sfp_passive_unknown;
                                else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
                                        hw->phy.type =
                                                ixgbe_phy_sfp_active_unknown;
@@ -734,7 +819,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                        }
                }
 
-               /* All passive DA cables are supported */
+               /* Allow any DA cable vendor */
                if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
                    IXGBE_SFF_DA_ACTIVE_CABLE)) {
                        status = 0;
@@ -756,7 +841,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                        goto out;
                }
 
-               /* This is guaranteed to be 82599, no need to check for NULL */
                hw->mac.ops.get_device_caps(hw, &enforce_sfp);
                if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
                    !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
@@ -776,15 +860,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 
 out:
        return status;
+
+err_read_i2c_eeprom:
+       hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+       if (hw->phy.type != ixgbe_phy_nl) {
+               hw->phy.id = 0;
+               hw->phy.type = ixgbe_phy_unknown;
+       }
+       return IXGBE_ERR_SFP_NOT_PRESENT;
 }
 
 /**
- *  ixgbe_get_sfp_init_sequence_offsets - Checks the MAC's EEPROM to see
- *  if it supports a given SFP+ module type, if so it returns the offsets to the
- *  phy init sequence block.
+ *  ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
  *  @hw: pointer to hardware structure
  *  @list_offset: offset to the SFP ID list
  *  @data_offset: offset to the SFP data block
+ *
+ *  Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if
+ *  so it returns the offsets to the phy init sequence block.
  **/
 s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
                                         u16 *list_offset,
@@ -899,11 +992,22 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
                                 u8 dev_addr, u8 *data)
 {
        s32 status = 0;
-       u32 max_retry = 1;
+       u32 max_retry = 10;
        u32 retry = 0;
+       u16 swfw_mask = 0;
        bool nack = 1;
 
+       if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+               swfw_mask = IXGBE_GSSR_PHY1_SM;
+       else
+               swfw_mask = IXGBE_GSSR_PHY0_SM;
+
        do {
+               if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) {
+                       status = IXGBE_ERR_SWFW_SYNC;
+                       goto read_byte_out;
+               }
+
                ixgbe_i2c_start(hw);
 
                /* Device Address and write indication */
@@ -946,6 +1050,8 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
                break;
 
 fail:
+               ixgbe_release_swfw_sync(hw, swfw_mask);
+               msleep(100);
                ixgbe_i2c_bus_clear(hw);
                retry++;
                if (retry < max_retry)
@@ -955,6 +1061,9 @@ fail:
 
        } while (retry < max_retry);
 
+       ixgbe_release_swfw_sync(hw, swfw_mask);
+
+read_byte_out:
        return status;
 }
 
@@ -973,6 +1082,17 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
        s32 status = 0;
        u32 max_retry = 1;
        u32 retry = 0;
+       u16 swfw_mask = 0;
+
+       if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+               swfw_mask = IXGBE_GSSR_PHY1_SM;
+       else
+               swfw_mask = IXGBE_GSSR_PHY0_SM;
+
+       if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) {
+               status = IXGBE_ERR_SWFW_SYNC;
+               goto write_byte_out;
+       }
 
        do {
                ixgbe_i2c_start(hw);
@@ -1013,6 +1133,9 @@ fail:
                        hw_dbg(hw, "I2C byte write error.\n");
        } while (retry < max_retry);
 
+       ixgbe_release_swfw_sync(hw, swfw_mask);
+
+write_byte_out:
        return status;
 }
 
@@ -1331,6 +1454,8 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
        u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
        u32 i;
 
+       ixgbe_i2c_start(hw);
+
        ixgbe_set_i2c_data(hw, &i2cctl, 1);
 
        for (i = 0; i < 9; i++) {
@@ -1345,6 +1470,8 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
                udelay(IXGBE_I2C_T_LOW);
        }
 
+       ixgbe_i2c_start(hw);
+
        /* Put the i2c bus back to default state */
        ixgbe_i2c_stop(hw);
 }
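
The ixgbe_phy.c hunks above share one pattern: every bit-banged I2C transaction now takes the PHY software/firmware semaphore for the duration of a single attempt, and a failed attempt releases it, clears the bus, and retries. A minimal standalone sketch of that acquire/try/release loop follows; the hw_* helpers and error codes are hypothetical stand-ins, not the driver's real accessors.

#include <stdbool.h>
#include <stdio.h>

#define ERR_SWFW_SYNC  (-1)
#define ERR_I2C        (-2)
#define MAX_RETRY      10

/* Hypothetical stand-ins for the driver's semaphore and bus helpers. */
static bool hw_acquire_semaphore(void) { return true; }
static void hw_release_semaphore(void) { }
static void hw_i2c_bus_clear(void) { }
static int hw_i2c_transfer(unsigned char offset, unsigned char *data)
{
        (void)offset;
        *data = 0xA5;           /* pretend the device answered */
        return 0;
}

/* Read one byte, holding the semaphore only for the duration of each try. */
static int i2c_read_byte(unsigned char offset, unsigned char *data)
{
        int retry;

        for (retry = 0; retry < MAX_RETRY; retry++) {
                if (!hw_acquire_semaphore())
                        return ERR_SWFW_SYNC;

                if (hw_i2c_transfer(offset, data) == 0) {
                        hw_release_semaphore();
                        return 0;
                }

                /* Failed attempt: drop the semaphore, recover the bus, retry. */
                hw_release_semaphore();
                hw_i2c_bus_clear();
        }
        return ERR_I2C;
}

int main(void)
{
        unsigned char byte;

        if (i2c_read_byte(0x25, &byte) == 0)
                printf("read 0x%02x\n", byte);
        return 0;
}
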
index e2c6b7eac641d069e45fb3fdc6ea4aad58d88e87..9bf2783d7a740045dcb5643f53cf29b89bfafc60 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
 #define IXGBE_I2C_EEPROM_STATUS_FAIL         0x2
 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS  0x3
 
+/* Flow control defines */
+#define IXGBE_TAF_SYM_PAUSE                  0x400
+#define IXGBE_TAF_ASM_PAUSE                  0x800
+
 /* Bit-shift macros */
 #define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT    24
 #define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT    16
index 187b3a16ec1ff6327689d1ca93e15b814336ac6a..58c9b45989ff38c4477ed948aedcc7632578fb03 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -110,6 +110,33 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
        return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
 }
 
+void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int new_mtu = msgbuf[1];
+       u32 max_frs;
+       int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+       /* Only X540 supports jumbo frames in IOV mode */
+       if (adapter->hw.mac.type != ixgbe_mac_X540)
+               return;
+
+       /* MTU < 68 is an error and causes problems on some kernels */
+       if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
+               e_err(drv, "VF mtu %d out of range\n", new_mtu);
+               return;
+       }
+
+       max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
+                  IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
+       if (max_frs < new_mtu) {
+               max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
+               IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
+       }
+
+       e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
+}
+
 static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
 {
        u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
@@ -302,7 +329,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
                                                 hash_list, vf);
                break;
        case IXGBE_VF_SET_LPE:
-               WARN_ON((msgbuf[0] & 0xFFFF) == IXGBE_VF_SET_LPE);
+               ixgbe_set_vf_lpe(adapter, msgbuf);
                break;
        case IXGBE_VF_SET_VLAN:
                add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
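
The SR-IOV hunk replaces the old WARN_ON for IXGBE_VF_SET_LPE with a handler that sanity-checks the VF's requested MTU before growing the PF's maximum frame size. A rough sketch of that validation step, with assumed constants in place of the ixgbe defines (the real handler also writes the result into the MAXFRS register):

#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN              14
#define ETH_FCS_LEN            4
#define MAX_JUMBO_FRAME_SIZE  9728      /* assumed limit, for illustration */

/* Return the frame-size limit to program, or 0 to reject the request. */
static uint32_t validate_vf_mtu(int requested_mtu, uint32_t current_max_frs)
{
        uint32_t max_frame = (uint32_t)(requested_mtu + ETH_HLEN + ETH_FCS_LEN);

        /* Mirror the PF-side checks: reject absurd or oversized requests. */
        if (requested_mtu < 68 || max_frame > MAX_JUMBO_FRAME_SIZE)
                return 0;

        /* Only ever grow the shared limit; never shrink it under other VFs. */
        return (current_max_frs < max_frame) ? max_frame : current_max_frs;
}

int main(void)
{
        printf("accepted, new limit %u\n", validate_vf_mtu(9000, 1518));
        printf("rejected: %u\n", validate_vf_mtu(20, 1518));
        return 0;
}
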
index 49dc14debef7b82bff70a07d4c9416a56c3ab655..e7dd029d576ae4d6af1370f9aef0b3743a38b088 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index fd3358f54139e1a9f1eaad1a6a6588009b97dda4..f190a4a8faf4a58d4cf020af20dbb5764d1ec2ac 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -91,7 +91,7 @@
 
 /* General Receive Control */
 #define IXGBE_GRC_MNG  0x00000001 /* Manageability Enable */
-#define IXGBE_GRC_APME 0x00000002 /* Advanced Power Management Enable */
+#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
 
 #define IXGBE_VPDDIAG0  0x10204
 #define IXGBE_VPDDIAG1  0x10208
 /* Wake Up Control */
 #define IXGBE_WUC_PME_EN     0x00000002 /* PME Enable */
 #define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
-#define IXGBE_WUC_ADVD3WUC   0x00000010 /* D3Cold wake up cap. enable*/
+#define IXGBE_WUC_WKEN       0x00000010 /* Enable PE_WAKE_N pin assertion  */
 
 /* Wake Up Filter Control */
 #define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
 #define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
 #define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
 #define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */
 #define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
 #define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
 #define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
 #define IXGBE_FCOEDWRC  0x0242C /* Number of FCoE DWords Received */
 #define IXGBE_FCOEPTC   0x08784 /* Number of FCoE Packets Transmitted */
 #define IXGBE_FCOEDWTC  0x08788 /* Number of FCoE DWords Transmitted */
+#define IXGBE_PCRC8ECL  0x0E810
+#define IXGBE_PCRC8ECH  0x0E811
+#define IXGBE_PCRC8ECH_MASK     0x1F
+#define IXGBE_LDPCECL   0x0E820
+#define IXGBE_LDPCECH   0x0E821
 
 /* Management */
 #define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
 #define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN  0x1 /* Alt. WWN base exists */
 
 /* PCI Bus Info */
+#define IXGBE_PCI_DEVICE_STATUS   0xAA
+#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING   0x0020
 #define IXGBE_PCI_LINK_STATUS     0xB2
 #define IXGBE_PCI_DEVICE_CONTROL2 0xC8
 #define IXGBE_PCI_LINK_WIDTH      0x3F0
 #define IXGBE_RXCTRL_DMBYPS     0x00000002  /* Descriptor Monitor Bypass */
 #define IXGBE_RXDCTL_ENABLE     0x02000000  /* Enable specific Rx Queue */
 #define IXGBE_RXDCTL_VME        0x40000000  /* VLAN mode enable */
+#define IXGBE_RXDCTL_RLPMLMASK  0x00003FFF  /* Only supported on the X540 */
+#define IXGBE_RXDCTL_RLPML_EN   0x00008000
 
 #define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
 #define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
@@ -2240,6 +2251,7 @@ enum ixgbe_mac_type {
 
 enum ixgbe_phy_type {
        ixgbe_phy_unknown = 0,
+       ixgbe_phy_none,
        ixgbe_phy_tn,
        ixgbe_phy_aq,
        ixgbe_phy_cu_unknown,
@@ -2328,32 +2340,31 @@ enum ixgbe_bus_type {
 /* PCI bus speeds */
 enum ixgbe_bus_speed {
        ixgbe_bus_speed_unknown = 0,
-       ixgbe_bus_speed_33,
-       ixgbe_bus_speed_66,
-       ixgbe_bus_speed_100,
-       ixgbe_bus_speed_120,
-       ixgbe_bus_speed_133,
-       ixgbe_bus_speed_2500,
-       ixgbe_bus_speed_5000,
+       ixgbe_bus_speed_33      = 33,
+       ixgbe_bus_speed_66      = 66,
+       ixgbe_bus_speed_100     = 100,
+       ixgbe_bus_speed_120     = 120,
+       ixgbe_bus_speed_133     = 133,
+       ixgbe_bus_speed_2500    = 2500,
+       ixgbe_bus_speed_5000    = 5000,
        ixgbe_bus_speed_reserved
 };
 
 /* PCI bus widths */
 enum ixgbe_bus_width {
        ixgbe_bus_width_unknown = 0,
-       ixgbe_bus_width_pcie_x1,
-       ixgbe_bus_width_pcie_x2,
+       ixgbe_bus_width_pcie_x1 = 1,
+       ixgbe_bus_width_pcie_x2 = 2,
        ixgbe_bus_width_pcie_x4 = 4,
        ixgbe_bus_width_pcie_x8 = 8,
-       ixgbe_bus_width_32,
-       ixgbe_bus_width_64,
+       ixgbe_bus_width_32      = 32,
+       ixgbe_bus_width_64      = 64,
        ixgbe_bus_width_reserved
 };
 
 struct ixgbe_addr_filter_info {
        u32 num_mc_addrs;
        u32 rar_used_count;
-       u32 mc_addr_in_rar_count;
        u32 mta_in_use;
        u32 overflow_promisc;
        bool uc_set_promisc;
@@ -2491,6 +2502,8 @@ struct ixgbe_mac_operations {
        s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
        s32 (*setup_sfp)(struct ixgbe_hw *);
        s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
+       s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
+       void (*release_swfw_sync)(struct ixgbe_hw *, u16);
 
        /* Link */
        void (*disable_tx_laser)(struct ixgbe_hw *);
@@ -2513,7 +2526,6 @@ struct ixgbe_mac_operations {
        s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
        s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
        s32 (*init_rx_addrs)(struct ixgbe_hw *);
-       s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
        s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
        s32 (*enable_mc)(struct ixgbe_hw *);
        s32 (*disable_mc)(struct ixgbe_hw *);
@@ -2554,6 +2566,7 @@ struct ixgbe_eeprom_info {
        u16                             address_bits;
 };
 
+#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED      0x01
 struct ixgbe_mac_info {
        struct ixgbe_mac_operations     ops;
        enum ixgbe_mac_type             type;
@@ -2564,6 +2577,8 @@ struct ixgbe_mac_info {
        u16                             wwnn_prefix;
        /* prefix for World Wide Port Name (WWPN) */
        u16                             wwpn_prefix;
+#define IXGBE_MAX_MTA                  128
+       u32                             mta_shadow[IXGBE_MAX_MTA];
        s32                             mc_filter_type;
        u32                             mcft_size;
        u32                             vft_size;
@@ -2576,6 +2591,7 @@ struct ixgbe_mac_info {
        u32                             orig_autoc2;
        bool                            orig_link_settings_stored;
        bool                            autotry_restart;
+       u8                              flags;
 };
 
 struct ixgbe_phy_info {
@@ -2682,7 +2698,9 @@ struct ixgbe_info {
 #define IXGBE_ERR_EEPROM_VERSION                -24
 #define IXGBE_ERR_NO_SPACE                      -25
 #define IXGBE_ERR_OVERTEMP                      -26
-#define IXGBE_ERR_RAR_INDEX                     -27
+#define IXGBE_ERR_FC_NOT_NEGOTIATED             -27
+#define IXGBE_ERR_FC_NOT_SUPPORTED              -28
+#define IXGBE_ERR_FLOW_CONTROL                  -29
 #define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE        -30
 #define IXGBE_ERR_PBA_SECTION                   -31
 #define IXGBE_ERR_INVALID_ARGUMENT              -32
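
One small but useful ixgbe_type.h change is giving the PCI bus speed and width enums their literal values, so an enumerator can be logged or compared numerically without a translation table. The enum below only mirrors the idea, not the exact ixgbe definitions:

#include <stdio.h>

/* Illustrative only: explicit values let the enumerator double as the number. */
enum bus_width {
        bus_width_unknown = 0,
        bus_width_pcie_x1 = 1,
        bus_width_pcie_x2 = 2,
        bus_width_pcie_x4 = 4,
        bus_width_pcie_x8 = 8,
};

int main(void)
{
        enum bus_width w = bus_width_pcie_x8;

        printf("PCIe x%d link\n", (int)w);      /* no lookup table needed */
        return 0;
}
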
index f2518b01067d166f0971212f3c0f621ec1289de5..f47e93fe32bef9572e9efc53a1eb02f7d02a45ae 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -31,7 +31,6 @@
 
 #include "ixgbe.h"
 #include "ixgbe_phy.h"
-//#include "ixgbe_mbx.h"
 
 #define IXGBE_X540_MAX_TX_QUEUES 128
 #define IXGBE_X540_MAX_RX_QUEUES 128
@@ -110,12 +109,9 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
         * Prevent the PCI-E bus from hanging by disabling PCI-E master
         * access and verify no pending requests before reset
         */
-       status = ixgbe_disable_pcie_master(hw);
-       if (status != 0) {
-               status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
-               hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
-       }
+       ixgbe_disable_pcie_master(hw);
 
+mac_reset_top:
        /*
         * Issue global reset to the MAC.  Needs to be SW reset if link is up.
         * If link reset is used when link is up, it might reset the PHY when
@@ -148,6 +144,19 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
                hw_dbg(hw, "Reset polling failed to complete.\n");
        }
 
+       /*
+        * Double resets are required for recovery from certain error
+        * conditions.  Between resets, it is necessary to stall to allow time
+        * for any pending HW events to complete.  We use 1usec since that is
+        * what is needed for ixgbe_disable_pcie_master().  The second reset
+        * then clears out any effects of those events.
+        */
+       if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+               hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+               udelay(1);
+               goto mac_reset_top;
+       }
+
        /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
@@ -191,7 +200,7 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
         * clear the multicast table.  Also reset num_rar_entries to 128,
         * since we modify this value when programming the SAN MAC address.
         */
-       hw->mac.num_rar_entries = 128;
+       hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES;
        hw->mac.ops.init_rx_addrs(hw);
 
        /* Store the permanent mac address */
@@ -242,8 +251,11 @@ static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
 }
 
 /**
- * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
- * @hw: pointer to hardware structure
+ *  ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
+ *  @hw: pointer to hardware structure
+ *
+ *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ *  ixgbe_hw struct in order to set up EEPROM access.
  **/
 static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
 {
@@ -262,7 +274,7 @@ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
                                          IXGBE_EEPROM_WORD_SIZE_SHIFT);
 
                hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
-                       eeprom->type, eeprom->word_size);
+                      eeprom->type, eeprom->word_size);
        }
 
        return 0;
@@ -278,7 +290,7 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
 {
        s32 status;
 
-       if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0)
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
                status = ixgbe_read_eerd_generic(hw, offset, data);
        else
                status = IXGBE_ERR_SWFW_SYNC;
@@ -311,7 +323,7 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
               (data << IXGBE_EEPROM_RW_REG_DATA) |
               IXGBE_EEPROM_RW_REG_START;
 
-       if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) {
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
                status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
                if (status != 0) {
                        hw_dbg(hw, "Eeprom write EEWR timed out\n");
@@ -676,7 +688,6 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
        .set_vmdq               = &ixgbe_set_vmdq_generic,
        .clear_vmdq             = &ixgbe_clear_vmdq_generic,
        .init_rx_addrs          = &ixgbe_init_rx_addrs_generic,
-       .update_uc_addr_list    = &ixgbe_update_uc_addr_list_generic,
        .update_mc_addr_list    = &ixgbe_update_mc_addr_list_generic,
        .enable_mc              = &ixgbe_enable_mc_generic,
        .disable_mc             = &ixgbe_disable_mc_generic,
@@ -687,6 +698,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
        .setup_sfp              = NULL,
        .set_mac_anti_spoofing  = &ixgbe_set_mac_anti_spoofing,
        .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
+       .acquire_swfw_sync      = &ixgbe_acquire_swfw_sync_X540,
+       .release_swfw_sync      = &ixgbe_release_swfw_sync_X540,
 };
 
 static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
@@ -702,7 +715,7 @@ static struct ixgbe_phy_operations phy_ops_X540 = {
        .identify               = &ixgbe_identify_phy_generic,
        .identify_sfp           = &ixgbe_identify_sfp_module_generic,
        .init                   = NULL,
-       .reset                  = &ixgbe_reset_phy_generic,
+       .reset                  = NULL,
        .read_reg               = &ixgbe_read_phy_reg_generic,
        .write_reg              = &ixgbe_write_phy_reg_generic,
        .setup_link             = &ixgbe_setup_phy_link_generic,
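
The X540 reset path introduces a double-reset recovery: if a flag set elsewhere in the driver marks the last error as needing two resets, the code clears the flag, stalls briefly, and jumps back to the reset label. A standalone sketch of that flag-and-retry shape, where reset_mac() is a hypothetical stand-in for the real MAC reset:

#include <stdio.h>

#define FLAG_DOUBLE_RESET_REQUIRED 0x01

static unsigned int mac_flags = FLAG_DOUBLE_RESET_REQUIRED;

static void reset_mac(void)             /* stand-in for the real MAC reset */
{
        puts("MAC reset issued");
}

static void reset_hw(void)
{
reset_top:
        reset_mac();

        /*
         * Some error conditions need two back-to-back resets; between them
         * only a short stall is needed for pending events to drain (the
         * driver uses udelay(1) here).
         */
        if (mac_flags & FLAG_DOUBLE_RESET_REQUIRED) {
                mac_flags &= ~FLAG_DOUBLE_RESET_REQUIRED;
                goto reset_top;
        }
}

int main(void)
{
        reset_hw();     /* prints two resets because the flag was set */
        return 0;
}
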
index de643eb2ada657bf0de8e923c3bf9ce658ad052f..78abb6f1a866d1190b7894b12c411e20c1128005 100644 (file)
@@ -65,6 +65,8 @@ typedef u32 ixgbe_link_speed;
 #define IXGBE_RXCTRL_DMBYPS     0x00000002  /* Descriptor Monitor Bypass */
 #define IXGBE_RXDCTL_ENABLE     0x02000000  /* Enable specific Rx Queue */
 #define IXGBE_RXDCTL_VME        0x40000000  /* VLAN mode enable */
+#define IXGBE_RXDCTL_RLPMLMASK  0x00003FFF  /* Only supported on the X540 */
+#define IXGBE_RXDCTL_RLPML_EN   0x00008000
 
 /* DCA Control */
 #define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
index 464e6c9d3fc21a46c75b91b324617bec02e17772..82768812552de98a464cfa8eb4ef5a0b41ac72bb 100644 (file)
@@ -51,7 +51,7 @@ char ixgbevf_driver_name[] = "ixgbevf";
 static const char ixgbevf_driver_string[] =
        "Intel(R) 82599 Virtual Function";
 
-#define DRV_VERSION "1.0.19-k0"
+#define DRV_VERSION "1.1.0-k0"
 const char ixgbevf_driver_version[] = DRV_VERSION;
 static char ixgbevf_copyright[] =
        "Copyright (c) 2009 - 2010 Intel Corporation.";
@@ -107,7 +107,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
 }
 
 /*
- * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
+ * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
  * @adapter: pointer to adapter struct
  * @direction: 0 for Rx, 1 for Tx, -1 for other causes
  * @queue: queue to map the corresponding interrupt to
@@ -178,8 +178,6 @@ static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
            tx_ring->tx_buffer_info[eop].time_stamp &&
            time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
                /* detected Tx unit hang */
-               union ixgbe_adv_tx_desc *tx_desc;
-               tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
                printk(KERN_ERR "Detected Tx Unit Hang\n"
                       "  Tx Queue             <%d>\n"
                       "  TDH, TDT             <%x>, <%x>\n"
@@ -334,7 +332,6 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        bool is_vlan = (status & IXGBE_RXD_STAT_VP);
        u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
-       int ret;
 
        if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
                if (adapter->vlgrp && is_vlan)
@@ -345,9 +342,9 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
                        napi_gro_receive(&q_vector->napi, skb);
        } else {
                if (adapter->vlgrp && is_vlan)
-                       ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
+                       vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
                else
-                       ret = netif_rx(skb);
+                       netif_rx(skb);
        }
 }
 
@@ -1017,7 +1014,7 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
 }
 
 /**
- * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
+ * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
  * @irq: unused
  * @data: pointer to our q_vector struct for this interrupt vector
  **/
@@ -1665,6 +1662,11 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
                j = adapter->rx_ring[i].reg_idx;
                rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
                rxdctl |= IXGBE_RXDCTL_ENABLE;
+               if (hw->mac.type == ixgbe_mac_X540_vf) {
+                       rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+                       rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
+                                  IXGBE_RXDCTL_RLPML_EN);
+               }
                IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
                ixgbevf_rx_desc_queue_enable(adapter, i);
        }
@@ -1967,7 +1969,7 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
 }
 
 /*
- * ixgbe_set_num_queues: Allocate queues for device, feature dependant
+ * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
  * @adapter: board private structure to initialize
  *
  * This is the top level queue allocation routine.  The order here is very
@@ -2216,7 +2218,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
 
        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+       hw->revision_id = pdev->revision;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_device_id = pdev->subsystem_device;
 
@@ -3217,10 +3219,16 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+       int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
+       u32 msg[2];
+
+       if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+               max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
 
        /* MTU < 68 is an error and causes problems on some kernels */
-       if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+       if ((new_mtu < 68) || (max_frame > max_possible_frame))
                return -EINVAL;
 
        hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
@@ -3228,6 +3236,10 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
        /* must set new MTU before calling down or up */
        netdev->mtu = new_mtu;
 
+       msg[0] = IXGBE_VF_SET_LPE;
+       msg[1] = max_frame;
+       hw->mbx.ops.write_posted(hw, msg, 2);
+
        if (netif_running(netdev))
                ixgbevf_reinit_locked(adapter);
 
@@ -3272,8 +3284,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 
 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
 {
-       struct ixgbevf_adapter *adapter;
-       adapter = netdev_priv(dev);
        dev->netdev_ops = &ixgbe_netdev_ops;
        ixgbevf_set_ethtool_ops(dev);
        dev->watchdog_timeo = 5 * HZ;
@@ -3519,9 +3529,9 @@ static struct pci_driver ixgbevf_driver = {
 };
 
 /**
- * ixgbe_init_module - Driver Registration Routine
+ * ixgbevf_init_module - Driver Registration Routine
  *
- * ixgbe_init_module is the first routine called when the driver is
+ * ixgbevf_init_module is the first routine called when the driver is
  * loaded. All it does is register with the PCI subsystem.
  **/
 static int __init ixgbevf_init_module(void)
@@ -3539,9 +3549,9 @@ static int __init ixgbevf_init_module(void)
 module_init(ixgbevf_init_module);
 
 /**
- * ixgbe_exit_module - Driver Exit Cleanup Routine
+ * ixgbevf_exit_module - Driver Exit Cleanup Routine
  *
- * ixgbe_exit_module is called just before the driver is removed
+ * ixgbevf_exit_module is called just before the driver is removed
  * from memory.
  **/
 static void __exit ixgbevf_exit_module(void)
@@ -3551,7 +3561,7 @@ static void __exit ixgbevf_exit_module(void)
 
 #ifdef DEBUG
 /**
- * ixgbe_get_hw_dev_name - return device name string
+ * ixgbevf_get_hw_dev_name - return device name string
  * used by hardware layer to print debugging information
  **/
 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
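
On the VF side, the MTU change now does two things for X540 VFs: it folds the maximum frame length into the per-queue RXDCTL receive packet length limit, and it posts an IXGBE_VF_SET_LPE mailbox message so the PF can widen its own limit. A compact sketch of the register math, using assumed mask values rather than the real ixgbe defines:

#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN        14
#define ETH_FCS_LEN      4
#define RLPML_MASK  0x3FFFu     /* assumed: low bits hold the length limit */
#define RLPML_EN    0x8000u     /* assumed: enable bit for that limit */

/* Fold the max frame length for a given MTU into an RXDCTL-style value. */
static uint32_t set_rx_length_limit(uint32_t rxdctl, int mtu)
{
        uint32_t max_frame = (uint32_t)(mtu + ETH_HLEN + ETH_FCS_LEN);

        rxdctl &= ~RLPML_MASK;                  /* clear the old limit */
        rxdctl |= max_frame | RLPML_EN;         /* set and enable the new one */
        return rxdctl;
}

int main(void)
{
        printf("rxdctl = 0x%08x\n",
               (unsigned)set_rx_length_limit(0x02000000u, 9000));
        return 0;
}
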
index e97ebef3cf47c140a5fb5925d3e47bacbe262aad..f690474f44092ae1cc1872053bf28bdd92669278 100644 (file)
@@ -160,6 +160,67 @@ jme_setup_wakeup_frame(struct jme_adapter *jme,
        }
 }
 
+static inline void
+jme_mac_rxclk_off(struct jme_adapter *jme)
+{
+       jme->reg_gpreg1 |= GPREG1_RXCLKOFF;
+       jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
+}
+
+static inline void
+jme_mac_rxclk_on(struct jme_adapter *jme)
+{
+       jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF;
+       jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
+}
+
+static inline void
+jme_mac_txclk_off(struct jme_adapter *jme)
+{
+       jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC);
+       jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_mac_txclk_on(struct jme_adapter *jme)
+{
+       u32 speed = jme->reg_ghc & GHC_SPEED;
+       if (speed == GHC_SPEED_1000M)
+               jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
+       else
+               jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
+       jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_reset_ghc_speed(struct jme_adapter *jme)
+{
+       jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX);
+       jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_reset_250A2_workaround(struct jme_adapter *jme)
+{
+       jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
+                            GPREG1_RSSPATCH);
+       jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
+}
+
+static inline void
+jme_assert_ghc_reset(struct jme_adapter *jme)
+{
+       jme->reg_ghc |= GHC_SWRST;
+       jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_clear_ghc_reset(struct jme_adapter *jme)
+{
+       jme->reg_ghc &= ~GHC_SWRST;
+       jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
 static inline void
 jme_reset_mac_processor(struct jme_adapter *jme)
 {
@@ -168,9 +229,24 @@ jme_reset_mac_processor(struct jme_adapter *jme)
        u32 gpreg0;
        int i;
 
-       jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
-       udelay(2);
-       jwrite32(jme, JME_GHC, jme->reg_ghc);
+       jme_reset_ghc_speed(jme);
+       jme_reset_250A2_workaround(jme);
+
+       jme_mac_rxclk_on(jme);
+       jme_mac_txclk_on(jme);
+       udelay(1);
+       jme_assert_ghc_reset(jme);
+       udelay(1);
+       jme_mac_rxclk_off(jme);
+       jme_mac_txclk_off(jme);
+       udelay(1);
+       jme_clear_ghc_reset(jme);
+       udelay(1);
+       jme_mac_rxclk_on(jme);
+       jme_mac_txclk_on(jme);
+       udelay(1);
+       jme_mac_rxclk_off(jme);
+       jme_mac_txclk_off(jme);
 
        jwrite32(jme, JME_RXDBA_LO, 0x00000000);
        jwrite32(jme, JME_RXDBA_HI, 0x00000000);
@@ -190,14 +266,6 @@ jme_reset_mac_processor(struct jme_adapter *jme)
        else
                gpreg0 = GPREG0_DEFAULT;
        jwrite32(jme, JME_GPREG0, gpreg0);
-       jwrite32(jme, JME_GPREG1, GPREG1_DEFAULT);
-}
-
-static inline void
-jme_reset_ghc_speed(struct jme_adapter *jme)
-{
-       jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX);
-       jwrite32(jme, JME_GHC, jme->reg_ghc);
 }
 
 static inline void
@@ -336,13 +404,13 @@ jme_linkstat_from_phy(struct jme_adapter *jme)
 }
 
 static inline void
-jme_set_phyfifoa(struct jme_adapter *jme)
+jme_set_phyfifo_5level(struct jme_adapter *jme)
 {
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
 }
 
 static inline void
-jme_set_phyfifob(struct jme_adapter *jme)
+jme_set_phyfifo_8level(struct jme_adapter *jme)
 {
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
 }
@@ -351,7 +419,7 @@ static int
 jme_check_link(struct net_device *netdev, int testonly)
 {
        struct jme_adapter *jme = netdev_priv(netdev);
-       u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr, gpreg1;
+       u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr;
        char linkmsg[64];
        int rc = 0;
 
@@ -414,23 +482,21 @@ jme_check_link(struct net_device *netdev, int testonly)
 
                jme->phylink = phylink;
 
-               ghc = jme->reg_ghc & ~(GHC_SPEED | GHC_DPX |
-                               GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE |
-                               GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY);
+               /*
+                * The speed/duplex setting of jme->reg_ghc was already cleared
+                * by jme_reset_mac_processor()
+                */
                switch (phylink & PHY_LINK_SPEED_MASK) {
                case PHY_LINK_SPEED_10M:
-                       ghc |= GHC_SPEED_10M |
-                               GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
+                       jme->reg_ghc |= GHC_SPEED_10M;
                        strcat(linkmsg, "10 Mbps, ");
                        break;
                case PHY_LINK_SPEED_100M:
-                       ghc |= GHC_SPEED_100M |
-                               GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
+                       jme->reg_ghc |= GHC_SPEED_100M;
                        strcat(linkmsg, "100 Mbps, ");
                        break;
                case PHY_LINK_SPEED_1000M:
-                       ghc |= GHC_SPEED_1000M |
-                               GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
+                       jme->reg_ghc |= GHC_SPEED_1000M;
                        strcat(linkmsg, "1000 Mbps, ");
                        break;
                default:
@@ -439,42 +505,40 @@ jme_check_link(struct net_device *netdev, int testonly)
 
                if (phylink & PHY_LINK_DUPLEX) {
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
-                       ghc |= GHC_DPX;
+                       jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX);
+                       jme->reg_ghc |= GHC_DPX;
                } else {
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
                                                TXMCS_BACKOFF |
                                                TXMCS_CARRIERSENSE |
                                                TXMCS_COLLISION);
-                       jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
-                               ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
-                               TXTRHD_TXREN |
-                               ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
+                       jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX);
                }
 
-               gpreg1 = GPREG1_DEFAULT;
+               jwrite32(jme, JME_GHC, jme->reg_ghc);
+
                if (is_buggy250(jme->pdev->device, jme->chiprev)) {
+                       jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
+                                            GPREG1_RSSPATCH);
                        if (!(phylink & PHY_LINK_DUPLEX))
-                               gpreg1 |= GPREG1_HALFMODEPATCH;
+                               jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH;
                        switch (phylink & PHY_LINK_SPEED_MASK) {
                        case PHY_LINK_SPEED_10M:
-                               jme_set_phyfifoa(jme);
-                               gpreg1 |= GPREG1_RSSPATCH;
+                               jme_set_phyfifo_8level(jme);
+                               jme->reg_gpreg1 |= GPREG1_RSSPATCH;
                                break;
                        case PHY_LINK_SPEED_100M:
-                               jme_set_phyfifob(jme);
-                               gpreg1 |= GPREG1_RSSPATCH;
+                               jme_set_phyfifo_5level(jme);
+                               jme->reg_gpreg1 |= GPREG1_RSSPATCH;
                                break;
                        case PHY_LINK_SPEED_1000M:
-                               jme_set_phyfifoa(jme);
+                               jme_set_phyfifo_8level(jme);
                                break;
                        default:
                                break;
                        }
                }
-
-               jwrite32(jme, JME_GPREG1, gpreg1);
-               jwrite32(jme, JME_GHC, ghc);
-               jme->reg_ghc = ghc;
+               jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
 
                strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
                                        "Full-Duplex, " :
@@ -613,10 +677,14 @@ jme_enable_tx_engine(struct jme_adapter *jme)
         * Enable TX Engine
         */
        wmb();
-       jwrite32(jme, JME_TXCS, jme->reg_txcs |
+       jwrite32f(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
                                TXCS_ENABLE);
 
+       /*
+        * Start clock for TX MAC Processor
+        */
+       jme_mac_txclk_on(jme);
 }
 
 static inline void
@@ -651,6 +719,11 @@ jme_disable_tx_engine(struct jme_adapter *jme)
 
        if (!i)
                pr_err("Disable TX engine timeout\n");
+
+       /*
+        * Stop clock for TX MAC Processor
+        */
+       jme_mac_txclk_off(jme);
 }
 
 static void
@@ -825,16 +898,22 @@ jme_enable_rx_engine(struct jme_adapter *jme)
        /*
         * Setup Unicast Filter
         */
+       jme_set_unicastaddr(jme->dev);
        jme_set_multi(jme->dev);
 
        /*
         * Enable RX Engine
         */
        wmb();
-       jwrite32(jme, JME_RXCS, jme->reg_rxcs |
+       jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
                                RXCS_QUEUESEL_Q0 |
                                RXCS_ENABLE |
                                RXCS_QST);
+
+       /*
+        * Start clock for RX MAC Processor
+        */
+       jme_mac_rxclk_on(jme);
 }
 
 static inline void
@@ -871,10 +950,40 @@ jme_disable_rx_engine(struct jme_adapter *jme)
        if (!i)
                pr_err("Disable RX engine timeout\n");
 
+       /*
+        * Stop clock for RX MAC Processor
+        */
+       jme_mac_rxclk_off(jme);
+}
+
+static u16
+jme_udpsum(struct sk_buff *skb)
+{
+       u16 csum = 0xFFFFu;
+
+       if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
+               return csum;
+       if (skb->protocol != htons(ETH_P_IP))
+               return csum;
+       skb_set_network_header(skb, ETH_HLEN);
+       if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
+           (skb->len < (ETH_HLEN +
+                       (ip_hdr(skb)->ihl << 2) +
+                       sizeof(struct udphdr)))) {
+               skb_reset_network_header(skb);
+               return csum;
+       }
+       skb_set_transport_header(skb,
+                       ETH_HLEN + (ip_hdr(skb)->ihl << 2));
+       csum = udp_hdr(skb)->check;
+       skb_reset_transport_header(skb);
+       skb_reset_network_header(skb);
+
+       return csum;
 }
 
 static int
-jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
+jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
 {
        if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
                return false;
@@ -887,7 +996,7 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
        }
 
        if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
-                       == RXWBFLAG_UDPON)) {
+                       == RXWBFLAG_UDPON) && jme_udpsum(skb)) {
                if (flags & RXWBFLAG_IPV4)
                        netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
                return false;
@@ -935,7 +1044,7 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
                skb_put(skb, framesize);
                skb->protocol = eth_type_trans(skb, jme->dev);
 
-               if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags)))
+               if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb_checksum_none_assert(skb);
@@ -1207,7 +1316,6 @@ jme_link_change_tasklet(unsigned long arg)
        tasklet_disable(&jme->rxempty_task);
 
        if (netif_carrier_ok(netdev)) {
-               jme_reset_ghc_speed(jme);
                jme_disable_rx_engine(jme);
                jme_disable_tx_engine(jme);
                jme_reset_mac_processor(jme);
@@ -1576,6 +1684,38 @@ jme_free_irq(struct jme_adapter *jme)
        }
 }
 
+static inline void
+jme_new_phy_on(struct jme_adapter *jme)
+{
+       u32 reg;
+
+       reg = jread32(jme, JME_PHY_PWR);
+       reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
+                PHY_PWR_DWN2 | PHY_PWR_CLKSEL);
+       jwrite32(jme, JME_PHY_PWR, reg);
+
+       pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
+       reg &= ~PE1_GPREG0_PBG;
+       reg |= PE1_GPREG0_ENBG;
+       pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
+}
+
+static inline void
+jme_new_phy_off(struct jme_adapter *jme)
+{
+       u32 reg;
+
+       reg = jread32(jme, JME_PHY_PWR);
+       reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
+              PHY_PWR_DWN2 | PHY_PWR_CLKSEL;
+       jwrite32(jme, JME_PHY_PWR, reg);
+
+       pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
+       reg &= ~PE1_GPREG0_PBG;
+       reg |= PE1_GPREG0_PDD3COLD;
+       pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
+}
+
 static inline void
 jme_phy_on(struct jme_adapter *jme)
 {
@@ -1584,6 +1724,22 @@ jme_phy_on(struct jme_adapter *jme)
        bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
        bmcr &= ~BMCR_PDOWN;
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+
+       if (new_phy_power_ctrl(jme->chip_main_rev))
+               jme_new_phy_on(jme);
+}
+
+static inline void
+jme_phy_off(struct jme_adapter *jme)
+{
+       u32 bmcr;
+
+       bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
+       bmcr |= BMCR_PDOWN;
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+
+       if (new_phy_power_ctrl(jme->chip_main_rev))
+               jme_new_phy_off(jme);
 }
 
 static int
@@ -1606,12 +1762,11 @@ jme_open(struct net_device *netdev)
 
        jme_start_irq(jme);
 
-       if (test_bit(JME_FLAG_SSET, &jme->flags)) {
-               jme_phy_on(jme);
+       jme_phy_on(jme);
+       if (test_bit(JME_FLAG_SSET, &jme->flags))
                jme_set_settings(netdev, &jme->old_ecmd);
-       } else {
+       else
                jme_reset_phy_processor(jme);
-       }
 
        jme_reset_link(jme);
 
@@ -1657,12 +1812,6 @@ jme_wait_link(struct jme_adapter *jme)
        }
 }
 
-static inline void
-jme_phy_off(struct jme_adapter *jme)
-{
-       jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
-}
-
 static void
 jme_powersave_phy(struct jme_adapter *jme)
 {
@@ -1696,7 +1845,6 @@ jme_close(struct net_device *netdev)
        tasklet_disable(&jme->rxclean_task);
        tasklet_disable(&jme->rxempty_task);
 
-       jme_reset_ghc_speed(jme);
        jme_disable_rx_engine(jme);
        jme_disable_tx_engine(jme);
        jme_reset_mac_processor(jme);
@@ -1993,27 +2141,34 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        return NETDEV_TX_OK;
 }
 
+static void
+jme_set_unicastaddr(struct net_device *netdev)
+{
+       struct jme_adapter *jme = netdev_priv(netdev);
+       u32 val;
+
+       val = (netdev->dev_addr[3] & 0xff) << 24 |
+             (netdev->dev_addr[2] & 0xff) << 16 |
+             (netdev->dev_addr[1] & 0xff) <<  8 |
+             (netdev->dev_addr[0] & 0xff);
+       jwrite32(jme, JME_RXUMA_LO, val);
+       val = (netdev->dev_addr[5] & 0xff) << 8 |
+             (netdev->dev_addr[4] & 0xff);
+       jwrite32(jme, JME_RXUMA_HI, val);
+}
+
 static int
 jme_set_macaddr(struct net_device *netdev, void *p)
 {
        struct jme_adapter *jme = netdev_priv(netdev);
        struct sockaddr *addr = p;
-       u32 val;
 
        if (netif_running(netdev))
                return -EBUSY;
 
        spin_lock_bh(&jme->macaddr_lock);
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-
-       val = (addr->sa_data[3] & 0xff) << 24 |
-             (addr->sa_data[2] & 0xff) << 16 |
-             (addr->sa_data[1] & 0xff) <<  8 |
-             (addr->sa_data[0] & 0xff);
-       jwrite32(jme, JME_RXUMA_LO, val);
-       val = (addr->sa_data[5] & 0xff) << 8 |
-             (addr->sa_data[4] & 0xff);
-       jwrite32(jme, JME_RXUMA_HI, val);
+       jme_set_unicastaddr(netdev);
        spin_unlock_bh(&jme->macaddr_lock);
 
        return 0;
@@ -2731,6 +2886,8 @@ jme_check_hw_ver(struct jme_adapter *jme)
 
        jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
        jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
+       jme->chip_main_rev = jme->chiprev & 0xF;
+       jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF;
 }
 
 static const struct net_device_ops jme_netdev_ops = {
@@ -2880,6 +3037,7 @@ jme_init_one(struct pci_dev *pdev,
        jme->reg_rxmcs = RXMCS_DEFAULT;
        jme->reg_txpfc = 0;
        jme->reg_pmcs = PMCS_MFEN;
+       jme->reg_gpreg1 = GPREG1_DEFAULT;
        set_bit(JME_FLAG_TXCSUM, &jme->flags);
        set_bit(JME_FLAG_TSO, &jme->flags);
 
@@ -2936,8 +3094,8 @@ jme_init_one(struct pci_dev *pdev,
        jme->mii_if.mdio_write = jme_mdio_write;
 
        jme_clear_pm(jme);
-       jme_set_phyfifoa(jme);
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->rev);
+       jme_set_phyfifo_5level(jme);
+       jme->pcirev = pdev->revision;
        if (!jme->fpgaver)
                jme_phy_init(jme);
        jme_phy_off(jme);
@@ -2964,14 +3122,14 @@ jme_init_one(struct pci_dev *pdev,
                goto err_out_unmap;
        }
 
-       netif_info(jme, probe, jme->dev, "%s%s ver:%x rev:%x macaddr:%pM\n",
+       netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
                   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
                   "JMC250 Gigabit Ethernet" :
                   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
                   "JMC260 Fast Ethernet" : "Unknown",
                   (jme->fpgaver != 0) ? " (FPGA)" : "",
                   (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
-                  jme->rev, netdev->dev_addr);
+                  jme->pcirev, netdev->dev_addr);
 
        return 0;
 
@@ -3035,7 +3193,6 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
                        jme_polling_mode(jme);
 
                jme_stop_pcc_timer(jme);
-               jme_reset_ghc_speed(jme);
                jme_disable_rx_engine(jme);
                jme_disable_tx_engine(jme);
                jme_reset_mac_processor(jme);
@@ -3066,12 +3223,11 @@ jme_resume(struct pci_dev *pdev)
        jme_clear_pm(jme);
        pci_restore_state(pdev);
 
-       if (test_bit(JME_FLAG_SSET, &jme->flags)) {
-               jme_phy_on(jme);
+       jme_phy_on(jme);
+       if (test_bit(JME_FLAG_SSET, &jme->flags))
                jme_set_settings(netdev, &jme->old_ecmd);
-       } else {
+       else
                jme_reset_phy_processor(jme);
-       }
 
        jme_start_irq(jme);
        netif_device_attach(netdev);
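
Among the jme.c changes, the receive checksum path now consults the packet before trusting a hardware UDP-checksum error: a UDP checksum field of zero means the sender never computed one, so the error is ignored. A tiny sketch of that decision (the real jme_udpsum() walks the skb headers to find the field):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Decide whether a hardware "UDP checksum bad" flag should be honoured.
 * A UDP checksum field of zero means the sender did not compute one, so
 * hardware errors on such packets are ignored.
 */
static bool udp_csum_error_is_real(bool hw_flagged_bad, uint16_t udp_csum_field)
{
        if (!hw_flagged_bad)
                return false;
        return udp_csum_field != 0;     /* zero checksum: nothing to verify */
}

int main(void)
{
        printf("%d\n", udp_csum_error_is_real(true, 0));        /* 0: ignore */
        printf("%d\n", udp_csum_error_is_real(true, 0x1c2b));   /* 1: real  */
        return 0;
}
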
index eac09264bf2a0da8d48247df6b8e482835a5a249..8bf30451e8217eecef52cfc2595416e7600359cb 100644 (file)
@@ -26,7 +26,7 @@
 #define __JME_H_INCLUDED__
 
 #define DRV_NAME       "jme"
-#define DRV_VERSION    "1.0.7"
+#define DRV_VERSION    "1.0.8"
 #define PFX            DRV_NAME ": "
 
 #define PCI_DEVICE_ID_JMICRON_JMC250   0x0250
@@ -103,6 +103,37 @@ enum jme_spi_op_bits {
 #define HALF_US 500    /* 500 ns */
 #define JMESPIIOCTL    SIOCDEVPRIVATE
 
+#define PCI_PRIV_PE1           0xE4
+
+enum pci_priv_pe1_bit_masks {
+       PE1_ASPMSUPRT   = 0x00000003, /*
+                                      * RW:
+                                      * Aspm_support[1:0]
+                                      * (R/W Port of 5C[11:10])
+                                      */
+       PE1_MULTIFUN    = 0x00000004, /* RW: Multi_fun_bit */
+       PE1_RDYDMA      = 0x00000008, /* RO: ~link.rdy_for_dma */
+       PE1_ASPMOPTL    = 0x00000030, /* RW: link.rx10s_option[1:0] */
+       PE1_ASPMOPTH    = 0x000000C0, /* RW: 10_req=[3]?HW:[2] */
+       PE1_GPREG0      = 0x0000FF00, /*
+                                      * SRW:
+                                      * Cfg_gp_reg0
+                                      * [7:6] phy_giga BG control
+                                      * [5] CREQ_N as CREQ_N1 (CPPE# as CREQ#)
+                                      * [4:0] Reserved
+                                      */
+       PE1_GPREG0_PBG  = 0x0000C000, /* phy_giga BG control */
+       PE1_GPREG1      = 0x00FF0000, /* RW: Cfg_gp_reg1 */
+       PE1_REVID       = 0xFF000000, /* RO: Rev ID */
+};
+
+enum pci_priv_pe1_values {
+       PE1_GPREG0_ENBG         = 0x00000000, /* en BG */
+       PE1_GPREG0_PDD3COLD     = 0x00004000, /* giga_PD + d3cold */
+       PE1_GPREG0_PDPCIESD     = 0x00008000, /* giga_PD + pcie_shutdown */
+       PE1_GPREG0_PDPCIEIDDQ   = 0x0000C000, /* giga_PD + pcie_iddq */
+};
+
 /*
  * Dynamic(adaptive)/Static PCC values
  */
@@ -403,6 +434,7 @@ struct jme_adapter {
        u32                     reg_rxmcs;
        u32                     reg_ghc;
        u32                     reg_pmcs;
+       u32                     reg_gpreg1;
        u32                     phylink;
        u32                     tx_ring_size;
        u32                     tx_ring_mask;
@@ -411,8 +443,10 @@ struct jme_adapter {
        u32                     rx_ring_mask;
        u8                      mrrs;
        unsigned int            fpgaver;
-       unsigned int            chiprev;
-       u8                      rev;
+       u8                      chiprev;
+       u8                      chip_main_rev;
+       u8                      chip_sub_rev;
+       u8                      pcirev;
        u32                     msg_enable;
        struct ethtool_cmd      old_ecmd;
        unsigned int            old_mtu;
@@ -497,6 +531,7 @@ enum jme_iomap_regs {
        JME_PMCS        = JME_MAC | 0x60, /* Power Management Control/Stat */
 
 
+       JME_PHY_PWR     = JME_PHY | 0x24, /* New PHY Power Ctrl Register */
        JME_PHY_CS      = JME_PHY | 0x28, /* PHY Ctrl and Status Register */
        JME_PHY_LINK    = JME_PHY | 0x30, /* PHY Link Status Register */
        JME_SMBCSR      = JME_PHY | 0x40, /* SMB Control and Status */
@@ -624,6 +659,14 @@ enum jme_txtrhd_shifts {
        TXTRHD_TXRL_SHIFT       = 0,
 };
 
+enum jme_txtrhd_values {
+       TXTRHD_FULLDUPLEX       = 0x00000000,
+       TXTRHD_HALFDUPLEX       = TXTRHD_TXPEN |
+                                 ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
+                                 TXTRHD_TXREN |
+                                 ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL),
+};
+
 /*
  * RX Control/Status Bits
  */
@@ -779,6 +822,8 @@ static inline u32 smi_phy_addr(int x)
  */
 enum jme_ghc_bit_mask {
        GHC_SWRST               = 0x40000000,
+       GHC_TO_CLK_SRC          = 0x00C00000,
+       GHC_TXMAC_CLK_SRC       = 0x00300000,
        GHC_DPX                 = 0x00000040,
        GHC_SPEED               = 0x00000030,
        GHC_LINK_POLL           = 0x00000001,
@@ -832,6 +877,21 @@ enum jme_pmcs_bit_masks {
        PMCS_MFEN       = 0x00000001,
 };
 
+/*
+ * New PHY Power Control Register
+ */
+enum jme_phy_pwr_bit_masks {
+       PHY_PWR_DWN1SEL = 0x01000000, /* Phy_giga.p_PWR_DOWN1_SEL */
+       PHY_PWR_DWN1SW  = 0x02000000, /* Phy_giga.p_PWR_DOWN1_SW */
+       PHY_PWR_DWN2    = 0x04000000, /* Phy_giga.p_PWR_DOWN2 */
+       PHY_PWR_CLKSEL  = 0x08000000, /*
+                                      * XTL_OUT Clock select
+                                      * (an internal free-running clock)
+                                      * 0: xtl_out = phy_giga.A_XTL25_O
+                                      * 1: xtl_out = phy_giga.PD_OSC
+                                      */
+};
+
 /*
  * Giga PHY Status Registers
  */
@@ -942,18 +1002,17 @@ enum jme_gpreg0_vals {
 
 /*
  * General Purpose REG-1
- * Note: All theses bits defined here are for
- *       Chip mode revision 0x11 only
  */
-enum jme_gpreg1_masks {
+enum jme_gpreg1_bit_masks {
+       GPREG1_RXCLKOFF         = 0x04000000,
+       GPREG1_PCREQN           = 0x00020000,
+       GPREG1_HALFMODEPATCH    = 0x00000040, /* For Chip revision 0x11 only */
+       GPREG1_RSSPATCH         = 0x00000020, /* For Chip revision 0x11 only */
        GPREG1_INTRDELAYUNIT    = 0x00000018,
        GPREG1_INTRDELAYENABLE  = 0x00000007,
 };
 
 enum jme_gpreg1_vals {
-       GPREG1_RSSPATCH         = 0x00000040,
-       GPREG1_HALFMODEPATCH    = 0x00000020,
-
        GPREG1_INTDLYUNIT_16NS  = 0x00000000,
        GPREG1_INTDLYUNIT_256NS = 0x00000008,
        GPREG1_INTDLYUNIT_1US   = 0x00000010,
@@ -967,7 +1026,7 @@ enum jme_gpreg1_vals {
        GPREG1_INTDLYEN_6U      = 0x00000006,
        GPREG1_INTDLYEN_7U      = 0x00000007,
 
-       GPREG1_DEFAULT          = 0x00000000,
+       GPREG1_DEFAULT          = GPREG1_PCREQN,
 };
 
 /*
@@ -1184,16 +1243,22 @@ enum jme_phy_reg17_vals {
 /*
  * Workaround
  */
-static inline int is_buggy250(unsigned short device, unsigned int chiprev)
+static inline int is_buggy250(unsigned short device, u8 chiprev)
 {
        return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11;
 }
 
+static inline int new_phy_power_ctrl(u8 chip_main_rev)
+{
+       return chip_main_rev >= 5;
+}
+
 /*
  * Function prototypes
  */
 static int jme_set_settings(struct net_device *netdev,
                                struct ethtool_cmd *ecmd);
+static void jme_set_unicastaddr(struct net_device *netdev);
 static void jme_set_multi(struct net_device *netdev);
 
 #endif
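
jme.h now expresses the half-duplex TXTRHD programming as a single enum value assembled from shifted, masked fields. The general (val << SHIFT) & MASK composition is easy to verify in isolation; the register layout below is invented purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Invented 32-bit register layout: an enable bit plus an 8-bit count field. */
#define REG_EN          0x00000001u
#define REG_CNT_MASK    0x0000FF00u
#define REG_CNT_SHIFT   8

/* Compose a default the same way TXTRHD_HALFDUPLEX is built from fields. */
#define REG_DEFAULT     (REG_EN | \
                         (((uint32_t)8 << REG_CNT_SHIFT) & REG_CNT_MASK))

int main(void)
{
        printf("REG_DEFAULT = 0x%08x\n", (unsigned)REG_DEFAULT); /* 0x00000801 */
        return 0;
}
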
index 2d9663a1c54d91b3e25246f671ab35c51b606921..ea0dc451da9c874716c689a051cb967082c8bf6b 100644 (file)
@@ -129,10 +129,6 @@ static u32 always_on(struct net_device *dev)
 
 static const struct ethtool_ops loopback_ethtool_ops = {
        .get_link               = always_on,
-       .set_tso                = ethtool_op_set_tso,
-       .get_tx_csum            = always_on,
-       .get_sg                 = always_on,
-       .get_rx_csum            = always_on,
 };
 
 static int loopback_dev_init(struct net_device *dev)
@@ -169,9 +165,12 @@ static void loopback_setup(struct net_device *dev)
        dev->type               = ARPHRD_LOOPBACK;      /* 0x0001*/
        dev->flags              = IFF_LOOPBACK;
        dev->priv_flags        &= ~IFF_XMIT_DST_RELEASE;
+       dev->hw_features        = NETIF_F_ALL_TSO | NETIF_F_UFO;
        dev->features           = NETIF_F_SG | NETIF_F_FRAGLIST
-               | NETIF_F_TSO
+               | NETIF_F_ALL_TSO
+               | NETIF_F_UFO
                | NETIF_F_NO_CSUM
+               | NETIF_F_RXCSUM
                | NETIF_F_HIGHDMA
                | NETIF_F_LLTX
                | NETIF_F_NETNS_LOCAL;
index fc27a9926d9e52c28a63d6be8852518e5cebb4da..6696e56e63206024c7f529331f7088c72406982d 100644 (file)
@@ -39,7 +39,7 @@ struct macvtap_queue {
        struct socket sock;
        struct socket_wq wq;
        int vnet_hdr_sz;
-       struct macvlan_dev *vlan;
+       struct macvlan_dev __rcu *vlan;
        struct file *file;
        unsigned int flags;
 };
@@ -141,7 +141,8 @@ static void macvtap_put_queue(struct macvtap_queue *q)
        struct macvlan_dev *vlan;
 
        spin_lock(&macvtap_lock);
-       vlan = rcu_dereference(q->vlan);
+       vlan = rcu_dereference_protected(q->vlan,
+                                        lockdep_is_held(&macvtap_lock));
        if (vlan) {
                int index = get_slot(vlan, q);
 
@@ -219,7 +220,8 @@ static void macvtap_del_queues(struct net_device *dev)
        /* macvtap_put_queue can free some slots, so go through all slots */
        spin_lock(&macvtap_lock);
        for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) {
-               q = rcu_dereference(vlan->taps[i]);
+               q = rcu_dereference_protected(vlan->taps[i],
+                                             lockdep_is_held(&macvtap_lock));
                if (q) {
                        qlist[j++] = q;
                        rcu_assign_pointer(vlan->taps[i], NULL);
@@ -570,7 +572,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
        }
 
        rcu_read_lock_bh();
-       vlan = rcu_dereference(q->vlan);
+       vlan = rcu_dereference_bh(q->vlan);
        if (vlan)
                macvlan_start_xmit(skb, vlan->dev);
        else
@@ -584,7 +586,7 @@ err_kfree:
 
 err:
        rcu_read_lock_bh();
-       vlan = rcu_dereference(q->vlan);
+       vlan = rcu_dereference_bh(q->vlan);
        if (vlan)
                vlan->dev->stats.tx_dropped++;
        rcu_read_unlock_bh();
@@ -632,7 +634,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
        ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
 
        rcu_read_lock_bh();
-       vlan = rcu_dereference(q->vlan);
+       vlan = rcu_dereference_bh(q->vlan);
        if (vlan)
                macvlan_count_rx(vlan, len, ret == 0, 0);
        rcu_read_unlock_bh();
@@ -728,7 +730,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
 
        case TUNGETIFF:
                rcu_read_lock_bh();
-               vlan = rcu_dereference(q->vlan);
+               vlan = rcu_dereference_bh(q->vlan);
                if (vlan)
                        dev_hold(vlan->dev);
                rcu_read_unlock_bh();
@@ -737,7 +739,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
                        return -ENOLINK;
 
                ret = 0;
-               if (copy_to_user(&ifr->ifr_name, q->vlan->dev->name, IFNAMSIZ) ||
+               if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
                    put_user(q->flags, &ifr->ifr_flags))
                        ret = -EFAULT;
                dev_put(vlan->dev);
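
The macvtap hunks annotate q->vlan as __rcu and pick the dereference flavor that matches the calling context: rcu_dereference_protected() on the update side under macvtap_lock, rcu_dereference_bh() inside rcu_read_lock_bh() on the fast paths. A minimal sketch of that pattern with placeholder names (freeing of the old object after a grace period is left to the caller):

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct item {
	int value;
};

static DEFINE_SPINLOCK(item_lock);
static struct item __rcu *cur_item;	/* sparse tracks the __rcu annotation */

/* Writer: serialized by item_lock, so no RCU read-side lock is needed.
 * Returns the old item; the caller frees it only after a grace period. */
static struct item *item_replace(struct item *newi)
{
	struct item *old;

	spin_lock(&item_lock);
	old = rcu_dereference_protected(cur_item,
					lockdep_is_held(&item_lock));
	rcu_assign_pointer(cur_item, newi);
	spin_unlock(&item_lock);

	return old;
}

/* Reader in BH-disabled context, as on the transmit-style paths above. */
static int item_read_value(void)
{
	struct item *p;
	int val = -1;

	rcu_read_lock_bh();
	p = rcu_dereference_bh(cur_item);
	if (p)
		val = p->value;
	rcu_read_unlock_bh();

	return val;
}
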
index 210b2b164b303b9283e11c5d7802571f108ef29c..0a6c6a2e7550f17235827811be88140ba891a0f6 100644 (file)
@@ -354,7 +354,7 @@ unsigned int mii_check_media (struct mii_if_info *mii,
        if (!new_carrier) {
                netif_carrier_off(mii->dev);
                if (ok_to_print)
-                       printk(KERN_INFO "%s: link down\n", mii->dev->name);
+                       netdev_info(mii->dev, "link down\n");
                return 0; /* duplex did not change */
        }
 
@@ -381,12 +381,12 @@ unsigned int mii_check_media (struct mii_if_info *mii,
                duplex = 1;
 
        if (ok_to_print)
-               printk(KERN_INFO "%s: link up, %sMbps, %s-duplex, lpa 0x%04X\n",
-                      mii->dev->name,
-                      lpa2 & (LPA_1000FULL | LPA_1000HALF) ? "1000" :
-                      media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? "100" : "10",
-                      duplex ? "full" : "half",
-                      lpa);
+               netdev_info(mii->dev, "link up, %uMbps, %s-duplex, lpa 0x%04X\n",
+                           lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 :
+                           media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ?
+                           100 : 10,
+                           duplex ? "full" : "half",
+                           lpa);
 
        if ((init_media) || (mii->full_duplex != duplex)) {
                mii->full_duplex = duplex;
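
The mii hunk is a straight conversion from printk(KERN_INFO "%s: ...", dev->name, ...) to netdev_info(), which prefixes the message with the driver and interface name automatically; it also prints the speed as an unsigned integer instead of a string. A small before/after sketch with an invented message:

#include <linux/netdevice.h>

static void ex_report_link(struct net_device *dev, unsigned int mbps, bool full)
{
	/* Old style: the caller spells out the interface name by hand. */
	printk(KERN_INFO "%s: link up, %uMbps, %s-duplex\n",
	       dev->name, mbps, full ? "full" : "half");

	/* New style: netdev_info() adds the "<driver> <ifname>: " prefix. */
	netdev_info(dev, "link up, %uMbps, %s-duplex\n",
		    mbps, full ? "full" : "half");
}
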
index 02076e16542a30b2a4e98e5debd667c53eea77e3..34425b94452f33837758c1852fd4c27a3ebddc10 100644 (file)
@@ -35,6 +35,8 @@
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/dma-mapping.h>
 #include <linux/in.h>
@@ -627,9 +629,8 @@ err:
                if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
                        (RX_FIRST_DESC | RX_LAST_DESC)) {
                        if (net_ratelimit())
-                               dev_printk(KERN_ERR, &mp->dev->dev,
-                                          "received packet spanning "
-                                          "multiple descriptors\n");
+                               netdev_err(mp->dev,
+                                          "received packet spanning multiple descriptors\n");
                }
 
                if (cmd_sts & ERROR_SUMMARY)
@@ -868,15 +869,14 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 
        if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
                txq->tx_dropped++;
-               dev_printk(KERN_DEBUG, &dev->dev,
-                          "failed to linearize skb with tiny "
-                          "unaligned fragment\n");
+               netdev_printk(KERN_DEBUG, dev,
+                             "failed to linearize skb with tiny unaligned fragment\n");
                return NETDEV_TX_BUSY;
        }
 
        if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
                if (net_ratelimit())
-                       dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
+                       netdev_err(dev, "tx queue full?!\n");
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }
@@ -959,7 +959,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
                        skb = __skb_dequeue(&txq->tx_skb);
 
                if (cmd_sts & ERROR_SUMMARY) {
-                       dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
+                       netdev_info(mp->dev, "tx error\n");
                        mp->dev->stats.tx_errors++;
                }
 
@@ -1122,20 +1122,20 @@ static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
        int ret;
 
        if (smi_wait_ready(msp)) {
-               printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
+               pr_warn("SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }
 
        writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
 
        if (smi_wait_ready(msp)) {
-               printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
+               pr_warn("SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }
 
        ret = readl(smi_reg);
        if (!(ret & SMI_READ_VALID)) {
-               printk(KERN_WARNING "mv643xx_eth: SMI bus read not valid\n");
+               pr_warn("SMI bus read not valid\n");
                return -ENODEV;
        }
 
@@ -1148,7 +1148,7 @@ static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
        void __iomem *smi_reg = msp->base + SMI_REG;
 
        if (smi_wait_ready(msp)) {
-               printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
+               pr_warn("SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }
 
@@ -1156,7 +1156,7 @@ static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
                (addr << 16) | (val & 0xffff), smi_reg);
 
        if (smi_wait_ready(msp)) {
-               printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
+               pr_warn("SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }
 
@@ -1566,9 +1566,8 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
        if (netif_running(dev)) {
                mv643xx_eth_stop(dev);
                if (mv643xx_eth_open(dev)) {
-                       dev_printk(KERN_ERR, &dev->dev,
-                                  "fatal error on re-opening device after "
-                                  "ring param change\n");
+                       netdev_err(dev,
+                                  "fatal error on re-opening device after ring param change\n");
                        return -ENOMEM;
                }
        }
@@ -1874,7 +1873,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
        }
 
        if (rxq->rx_desc_area == NULL) {
-               dev_printk(KERN_ERR, &mp->dev->dev,
+               netdev_err(mp->dev,
                           "can't allocate rx ring (%d bytes)\n", size);
                goto out;
        }
@@ -1884,8 +1883,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
        rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
                                                                GFP_KERNEL);
        if (rxq->rx_skb == NULL) {
-               dev_printk(KERN_ERR, &mp->dev->dev,
-                          "can't allocate rx skb ring\n");
+               netdev_err(mp->dev, "can't allocate rx skb ring\n");
                goto out_free;
        }
 
@@ -1944,8 +1942,7 @@ static void rxq_deinit(struct rx_queue *rxq)
        }
 
        if (rxq->rx_desc_count) {
-               dev_printk(KERN_ERR, &mp->dev->dev,
-                          "error freeing rx ring -- %d skbs stuck\n",
+               netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
                           rxq->rx_desc_count);
        }
 
@@ -1987,7 +1984,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
        }
 
        if (txq->tx_desc_area == NULL) {
-               dev_printk(KERN_ERR, &mp->dev->dev,
+               netdev_err(mp->dev,
                           "can't allocate tx ring (%d bytes)\n", size);
                return -ENOMEM;
        }
@@ -2093,7 +2090,7 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
                if (netif_carrier_ok(dev)) {
                        int i;
 
-                       printk(KERN_INFO "%s: link down\n", dev->name);
+                       netdev_info(dev, "link down\n");
 
                        netif_carrier_off(dev);
 
@@ -2124,10 +2121,8 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
        duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
        fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
 
-       printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
-                        "flow control %sabled\n", dev->name,
-                        speed, duplex ? "full" : "half",
-                        fc ? "en" : "dis");
+       netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
+                   speed, duplex ? "full" : "half", fc ? "en" : "dis");
 
        if (!netif_carrier_ok(dev))
                netif_carrier_on(dev);
@@ -2337,7 +2332,7 @@ static int mv643xx_eth_open(struct net_device *dev)
        err = request_irq(dev->irq, mv643xx_eth_irq,
                          IRQF_SHARED, dev->name, dev);
        if (err) {
-               dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
+               netdev_err(dev, "can't assign irq\n");
                return -EAGAIN;
        }
 
@@ -2483,9 +2478,8 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
         */
        mv643xx_eth_stop(dev);
        if (mv643xx_eth_open(dev)) {
-               dev_printk(KERN_ERR, &dev->dev,
-                          "fatal error on re-opening device after "
-                          "MTU change\n");
+               netdev_err(dev,
+                          "fatal error on re-opening device after MTU change\n");
        }
 
        return 0;
@@ -2508,7 +2502,7 @@ static void mv643xx_eth_tx_timeout(struct net_device *dev)
 {
        struct mv643xx_eth_private *mp = netdev_priv(dev);
 
-       dev_printk(KERN_INFO, &dev->dev, "tx timeout\n");
+       netdev_info(dev, "tx timeout\n");
 
        schedule_work(&mp->tx_timeout_task);
 }
@@ -2603,8 +2597,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
        int ret;
 
        if (!mv643xx_eth_version_printed++)
-               printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet "
-                       "driver version %s\n", mv643xx_eth_driver_version);
+               pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
+                         mv643xx_eth_driver_version);
 
        ret = -EINVAL;
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2871,14 +2865,12 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
        pd = pdev->dev.platform_data;
        if (pd == NULL) {
-               dev_printk(KERN_ERR, &pdev->dev,
-                          "no mv643xx_eth_platform_data\n");
+               dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
                return -ENODEV;
        }
 
        if (pd->shared == NULL) {
-               dev_printk(KERN_ERR, &pdev->dev,
-                          "no mv643xx_eth_platform_data->shared\n");
+               dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
                return -ENODEV;
        }
 
@@ -2957,11 +2949,11 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
        if (err)
                goto out;
 
-       dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %pM\n",
-                  mp->port_num, dev->dev_addr);
+       netdev_notice(dev, "port %d with MAC address %pM\n",
+                     mp->port_num, dev->dev_addr);
 
        if (mp->tx_desc_sram_size > 0)
-               dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");
+               netdev_notice(dev, "configured with sram\n");
 
        return 0;
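
For messages with no net_device at hand (the SMI helpers, the probe banner), the patch defines pr_fmt() before the includes so every pr_warn()/pr_notice() in the file is prefixed with the module name instead of repeating "mv643xx_eth: " in each string. A minimal sketch of the convention (message text is illustrative):

/* Must be defined before the first include that pulls in printk.h. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init ex_init(void)
{
	pr_notice("driver loaded\n");		/* "<modname>: driver loaded" */
	pr_warn("bus busy timeout\n");		/* "<modname>: bus busy timeout" */
	return 0;
}
module_init(ex_init);
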
 
index ea5cfe2c3a040aec2815c8020f15feb745064039..a7f2eed9a08ac390cdab1e96c477be05aaa1978c 100644 (file)
@@ -253,7 +253,7 @@ struct myri10ge_priv {
        unsigned long serial_number;
        int vendor_specific_offset;
        int fw_multicast_support;
-       unsigned long features;
+       u32 features;
        u32 max_tso6;
        u32 read_dma;
        u32 write_dma;
@@ -1776,7 +1776,7 @@ static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
 static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled)
 {
        struct myri10ge_priv *mgp = netdev_priv(netdev);
-       unsigned long flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
+       u32 flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
 
        if (tso_enabled)
                netdev->features |= flags;
index 35fda5ac8120c056eb94d0f14bea376494d2d59c..392a6c4b72e5e4decda29324c4b82708311ec7dd 100644 (file)
@@ -77,7 +77,6 @@ config NATIONAL_PHY
          Currently supports the DP83865 PHY.
 
 config STE10XP
-       depends on PHYLIB
        tristate "Driver for STMicroelectronics STe10Xp PHYs"
        ---help---
          This is the driver for the STe100p and STe101p PHYs.
index 0fd1678bc5a928eac50ec9862567ed2019f5bb4b..590f902deb6ba29f348c78dad429275cac23a276 100644 (file)
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/phy.h>
-
-#define        PHY_ID_KSZ9021                  0x00221611
-#define        PHY_ID_KS8737                   0x00221720
-#define        PHY_ID_KS8041                   0x00221510
-#define        PHY_ID_KS8051                   0x00221550
-/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */
-#define        PHY_ID_KS8001                   0x0022161A
+#include <linux/micrel_phy.h>
 
 /* general Interrupt control/status reg in vendor specific block. */
 #define MII_KSZPHY_INTCS                       0x1B
@@ -46,6 +40,7 @@
 #define KSZPHY_CTRL_INT_ACTIVE_HIGH            (1 << 9)
 #define KSZ9021_CTRL_INT_ACTIVE_HIGH           (1 << 14)
 #define KS8737_CTRL_INT_ACTIVE_HIGH            (1 << 14)
+#define KSZ8051_RMII_50MHZ_CLK                 (1 << 7)
 
 static int kszphy_ack_interrupt(struct phy_device *phydev)
 {
@@ -106,6 +101,19 @@ static int kszphy_config_init(struct phy_device *phydev)
        return 0;
 }
 
+static int ks8051_config_init(struct phy_device *phydev)
+{
+       int regval;
+
+       if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
+               regval = phy_read(phydev, MII_KSZPHY_CTRL);
+               regval |= KSZ8051_RMII_50MHZ_CLK;
+               phy_write(phydev, MII_KSZPHY_CTRL, regval);
+       }
+
+       return 0;
+}
+
 static struct phy_driver ks8737_driver = {
        .phy_id         = PHY_ID_KS8737,
        .phy_id_mask    = 0x00fffff0,
@@ -142,7 +150,7 @@ static struct phy_driver ks8051_driver = {
        .features       = (PHY_BASIC_FEATURES | SUPPORTED_Pause
                                | SUPPORTED_Asym_Pause),
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
-       .config_init    = kszphy_config_init,
+       .config_init    = ks8051_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
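
ks8051_config_init() is a guarded read-modify-write of a vendor PHY register, keyed off a flag the board code passes in phydev->dev_flags. A minimal sketch of that shape; the register, bit, and flag names below are placeholders rather than the Micrel definitions, and the sketch adds error checks the hunk above omits:

#include <linux/phy.h>

#define EX_VENDOR_REG		0x1b		/* placeholder register */
#define EX_VENDOR_FEATURE_BIT	(1 << 7)	/* placeholder register bit */
#define EX_BOARD_WANTS_FEATURE	(1 << 0)	/* placeholder dev_flags bit */

static int ex_phy_config_init(struct phy_device *phydev)
{
	int regval;

	if (phydev->dev_flags & EX_BOARD_WANTS_FEATURE) {
		regval = phy_read(phydev, EX_VENDOR_REG);
		if (regval < 0)
			return regval;		/* MDIO read failed */
		regval |= EX_VENDOR_FEATURE_BIT;
		return phy_write(phydev, EX_VENDOR_REG, regval);
	}

	return 0;
}
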
index c7a6c4466978d620353b4db6fa8058f377d228a4..9f6d670748d11f3c6198e2792373ff6c33bce76c 100644 (file)
@@ -592,8 +592,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        ppp_release(NULL, file);
                        err = 0;
                } else
-                       printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n",
-                              atomic_long_read(&file->f_count));
+                       pr_warn("PPPIOCDETACH file->f_count=%ld\n",
+                               atomic_long_read(&file->f_count));
                mutex_unlock(&ppp_mutex);
                return err;
        }
@@ -630,7 +630,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
        if (pf->kind != INTERFACE) {
                /* can't happen */
-               printk(KERN_ERR "PPP: not interface or channel??\n");
+               pr_err("PPP: not interface or channel??\n");
                return -EINVAL;
        }
 
@@ -704,7 +704,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                }
                vj = slhc_init(val2+1, val+1);
                if (!vj) {
-                       printk(KERN_ERR "PPP: no memory (VJ compressor)\n");
+                       netdev_err(ppp->dev,
+                                  "PPP: no memory (VJ compressor)\n");
                        err = -ENOMEM;
                        break;
                }
@@ -898,17 +899,17 @@ static int __init ppp_init(void)
 {
        int err;
 
-       printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n");
+       pr_info("PPP generic driver version " PPP_VERSION "\n");
 
        err = register_pernet_device(&ppp_net_ops);
        if (err) {
-               printk(KERN_ERR "failed to register PPP pernet device (%d)\n", err);
+               pr_err("failed to register PPP pernet device (%d)\n", err);
                goto out;
        }
 
        err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
        if (err) {
-               printk(KERN_ERR "failed to register PPP device (%d)\n", err);
+               pr_err("failed to register PPP device (%d)\n", err);
                goto out_net;
        }
 
@@ -1078,7 +1079,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
        new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
        if (!new_skb) {
                if (net_ratelimit())
-                       printk(KERN_ERR "PPP: no memory (comp pkt)\n");
+                       netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
                return NULL;
        }
        if (ppp->dev->hard_header_len > PPP_HDRLEN)
@@ -1108,7 +1109,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
                 * the same number.
                 */
                if (net_ratelimit())
-                       printk(KERN_ERR "ppp: compressor dropped pkt\n");
+                       netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
                kfree_skb(skb);
                kfree_skb(new_skb);
                new_skb = NULL;
@@ -1138,7 +1139,9 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
                if (ppp->pass_filter &&
                    sk_run_filter(skb, ppp->pass_filter) == 0) {
                        if (ppp->debug & 1)
-                               printk(KERN_DEBUG "PPP: outbound frame not passed\n");
+                               netdev_printk(KERN_DEBUG, ppp->dev,
+                                             "PPP: outbound frame "
+                                             "not passed\n");
                        kfree_skb(skb);
                        return;
                }
@@ -1164,7 +1167,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
                new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
                                    GFP_ATOMIC);
                if (!new_skb) {
-                       printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n");
+                       netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
                        goto drop;
                }
                skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
@@ -1202,7 +1205,9 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
            proto != PPP_LCP && proto != PPP_CCP) {
                if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
                        if (net_ratelimit())
-                               printk(KERN_ERR "ppp: compression required but down - pkt dropped.\n");
+                               netdev_err(ppp->dev,
+                                          "ppp: compression required but "
+                                          "down - pkt dropped.\n");
                        goto drop;
                }
                skb = pad_compress_skb(ppp, skb);
@@ -1505,7 +1510,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
  noskb:
        spin_unlock_bh(&pch->downl);
        if (ppp->debug & 1)
-               printk(KERN_ERR "PPP: no memory (fragment)\n");
+               netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
        ++ppp->dev->stats.tx_errors;
        ++ppp->nxseq;
        return 1;       /* abandon the frame */
@@ -1686,7 +1691,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
                        /* copy to a new sk_buff with more tailroom */
                        ns = dev_alloc_skb(skb->len + 128);
                        if (!ns) {
-                               printk(KERN_ERR"PPP: no memory (VJ decomp)\n");
+                               netdev_err(ppp->dev, "PPP: no memory "
+                                          "(VJ decomp)\n");
                                goto err;
                        }
                        skb_reserve(ns, 2);
@@ -1699,7 +1705,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 
                len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
                if (len <= 0) {
-                       printk(KERN_DEBUG "PPP: VJ decompression error\n");
+                       netdev_printk(KERN_DEBUG, ppp->dev,
+                                     "PPP: VJ decompression error\n");
                        goto err;
                }
                len += 2;
@@ -1721,7 +1728,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
                        goto err;
 
                if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
-                       printk(KERN_ERR "PPP: VJ uncompressed error\n");
+                       netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
                        goto err;
                }
                proto = PPP_IP;
@@ -1762,8 +1769,9 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
                        if (ppp->pass_filter &&
                            sk_run_filter(skb, ppp->pass_filter) == 0) {
                                if (ppp->debug & 1)
-                                       printk(KERN_DEBUG "PPP: inbound frame "
-                                              "not passed\n");
+                                       netdev_printk(KERN_DEBUG, ppp->dev,
+                                                     "PPP: inbound frame "
+                                                     "not passed\n");
                                kfree_skb(skb);
                                return;
                        }
@@ -1821,7 +1829,8 @@ ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
 
                ns = dev_alloc_skb(obuff_size);
                if (!ns) {
-                       printk(KERN_ERR "ppp_decompress_frame: no memory\n");
+                       netdev_err(ppp->dev, "ppp_decompress_frame: "
+                                  "no memory\n");
                        goto err;
                }
                /* the decompressor still expects the A/C bytes in the hdr */
@@ -1989,7 +1998,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
        u32 seq = ppp->nextseq;
        u32 minseq = ppp->minseq;
        struct sk_buff_head *list = &ppp->mrq;
-       struct sk_buff *p, *next;
+       struct sk_buff *p, *tmp;
        struct sk_buff *head, *tail;
        struct sk_buff *skb = NULL;
        int lost = 0, len = 0;
@@ -1998,13 +2007,15 @@ ppp_mp_reconstruct(struct ppp *ppp)
                return NULL;
        head = list->next;
        tail = NULL;
-       for (p = head; p != (struct sk_buff *) list; p = next) {
-               next = p->next;
+       skb_queue_walk_safe(list, p, tmp) {
+       again:
                if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
                        /* this can't happen, anyway ignore the skb */
-                       printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n",
-                              PPP_MP_CB(p)->sequence, seq);
-                       head = next;
+                       netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
+                                  "seq %u < %u\n",
+                                  PPP_MP_CB(p)->sequence, seq);
+                       __skb_unlink(p, list);
+                       kfree_skb(p);
                        continue;
                }
                if (PPP_MP_CB(p)->sequence != seq) {
@@ -2016,8 +2027,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
                        lost = 1;
                        seq = seq_before(minseq, PPP_MP_CB(p)->sequence)?
                                minseq + 1: PPP_MP_CB(p)->sequence;
-                       next = p;
-                       continue;
+                       goto again;
                }
 
                /*
@@ -2042,17 +2052,9 @@ ppp_mp_reconstruct(struct ppp *ppp)
                    (PPP_MP_CB(head)->BEbits & B)) {
                        if (len > ppp->mrru + 2) {
                                ++ppp->dev->stats.rx_length_errors;
-                               printk(KERN_DEBUG "PPP: reconstructed packet"
-                                      " is too long (%d)\n", len);
-                       } else if (p == head) {
-                               /* fragment is complete packet - reuse skb */
-                               tail = p;
-                               skb = skb_get(p);
-                               break;
-                       } else if ((skb = dev_alloc_skb(len)) == NULL) {
-                               ++ppp->dev->stats.rx_missed_errors;
-                               printk(KERN_DEBUG "PPP: no memory for "
-                                      "reconstructed packet");
+                               netdev_printk(KERN_DEBUG, ppp->dev,
+                                             "PPP: reconstructed packet"
+                                             " is too long (%d)\n", len);
                        } else {
                                tail = p;
                                break;
@@ -2065,9 +2067,17 @@ ppp_mp_reconstruct(struct ppp *ppp)
                 * and we haven't found a complete valid packet yet,
                 * we can discard up to and including this fragment.
                 */
-               if (PPP_MP_CB(p)->BEbits & E)
-                       head = next;
+               if (PPP_MP_CB(p)->BEbits & E) {
+                       struct sk_buff *tmp2;
 
+                       skb_queue_reverse_walk_from_safe(list, p, tmp2) {
+                               __skb_unlink(p, list);
+                               kfree_skb(p);
+                       }
+                       head = skb_peek(list);
+                       if (!head)
+                               break;
+               }
                ++seq;
        }
 
@@ -2077,26 +2087,37 @@ ppp_mp_reconstruct(struct ppp *ppp)
                   signal a receive error. */
                if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
                        if (ppp->debug & 1)
-                               printk(KERN_DEBUG "  missed pkts %u..%u\n",
-                                      ppp->nextseq,
-                                      PPP_MP_CB(head)->sequence-1);
+                               netdev_printk(KERN_DEBUG, ppp->dev,
+                                             "  missed pkts %u..%u\n",
+                                             ppp->nextseq,
+                                             PPP_MP_CB(head)->sequence-1);
                        ++ppp->dev->stats.rx_dropped;
                        ppp_receive_error(ppp);
                }
 
-               if (head != tail)
-                       /* copy to a single skb */
-                       for (p = head; p != tail->next; p = p->next)
-                               skb_copy_bits(p, 0, skb_put(skb, p->len), p->len);
-               ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
-               head = tail->next;
-       }
+               skb = head;
+               if (head != tail) {
+                       struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
+                       p = skb_queue_next(list, head);
+                       __skb_unlink(skb, list);
+                       skb_queue_walk_from_safe(list, p, tmp) {
+                               __skb_unlink(p, list);
+                               *fragpp = p;
+                               p->next = NULL;
+                               fragpp = &p->next;
+
+                               skb->len += p->len;
+                               skb->data_len += p->len;
+                               skb->truesize += p->len;
+
+                               if (p == tail)
+                                       break;
+                       }
+               } else {
+                       __skb_unlink(skb, list);
+               }
 
-       /* Discard all the skbuffs that we have copied the data out of
-          or that we can't use. */
-       while ((p = list->next) != head) {
-               __skb_unlink(p, list);
-               kfree_skb(p);
+               ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
        }
 
        return skb;
@@ -2617,8 +2638,8 @@ ppp_create_interface(struct net *net, int unit, int *retp)
        ret = register_netdev(dev);
        if (ret != 0) {
                unit_put(&pn->units_idr, unit);
-               printk(KERN_ERR "PPP: couldn't register device %s (%d)\n",
-                      dev->name, ret);
+               netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
+                          dev->name, ret);
                goto out2;
        }
 
@@ -2690,9 +2711,9 @@ static void ppp_destroy_interface(struct ppp *ppp)
 
        if (!ppp->file.dead || ppp->n_channels) {
                /* "can't happen" */
-               printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d "
-                      "n_channels=%d !\n", ppp, ppp->file.dead,
-                      ppp->n_channels);
+               netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
+                          "but dead=%d n_channels=%d !\n",
+                          ppp, ppp->file.dead, ppp->n_channels);
                return;
        }
 
@@ -2834,8 +2855,7 @@ static void ppp_destroy_channel(struct channel *pch)
 
        if (!pch->file.dead) {
                /* "can't happen" */
-               printk(KERN_ERR "ppp: destroying undead channel %p !\n",
-                      pch);
+               pr_err("ppp: destroying undead channel %p !\n", pch);
                return;
        }
        skb_queue_purge(&pch->file.xq);
@@ -2847,7 +2867,7 @@ static void __exit ppp_cleanup(void)
 {
        /* should never happen */
        if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
-               printk(KERN_ERR "PPP: removing module but units remain!\n");
+               pr_err("PPP: removing module but units remain!\n");
        unregister_chrdev(PPP_MAJOR, "ppp");
        device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
        class_destroy(ppp_class);
@@ -2865,7 +2885,7 @@ static int __unit_alloc(struct idr *p, void *ptr, int n)
 
 again:
        if (!idr_pre_get(p, GFP_KERNEL)) {
-               printk(KERN_ERR "PPP: No free memory for idr\n");
+               pr_err("PPP: No free memory for idr\n");
                return -ENOMEM;
        }
 
index 164cfad6ce798ec9fdd6e16cf5dcc1b1635017dc..1af549c89d517b781180a0b8e51deb09b2725b4e 100644 (file)
@@ -175,7 +175,6 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
        struct pptp_opt *opt = &po->proto.pptp;
        struct pptp_gre_header *hdr;
        unsigned int header_len = sizeof(*hdr);
-       int err = 0;
        int islcp;
        int len;
        unsigned char *data;
@@ -198,8 +197,8 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
                                        .saddr = opt->src_addr.sin_addr.s_addr,
                                        .tos = RT_TOS(0) } },
                        .proto = IPPROTO_GRE };
-               err = ip_route_output_key(&init_net, &rt, &fl);
-               if (err)
+               rt = ip_route_output_key(&init_net, &fl);
+               if (IS_ERR(rt))
                        goto tx_error;
        }
        tdev = rt->dst.dev;
@@ -477,7 +476,8 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
                                        .tos = RT_CONN_FLAGS(sk) } },
                        .proto = IPPROTO_GRE };
                security_sk_classify_flow(sk, &fl);
-               if (ip_route_output_key(&init_net, &rt, &fl)) {
+               rt = ip_route_output_key(&init_net, &fl);
+               if (IS_ERR(rt)) {
                        error = -EHOSTUNREACH;
                        goto end;
                }
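
Both pptp hunks adapt to the routing API change where ip_route_output_key() returns the struct rtable directly (an ERR_PTR on failure) instead of filling in a pointer argument and returning an error code. A minimal sketch of the calling convention as used in this tree; the flowi setup is elided and the wrapper is hypothetical:

#include <linux/err.h>
#include <net/route.h>

static int ex_resolve_route(struct net *net, struct flowi *fl,
			    struct rtable **rtp)
{
	struct rtable *rt;

	rt = ip_route_output_key(net, fl);	/* returns the route itself */
	if (IS_ERR(rt))
		return PTR_ERR(rt);		/* e.g. -EHOSTUNREACH */

	*rtp = rt;
	return 0;
}
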
index 1a3584edd79cb5b88633360a5866f19b413172e6..2d21c60085bcf9f7c92721a004bc8f13b986e07f 100644 (file)
@@ -379,7 +379,7 @@ static void fm93c56a_select(struct ql3_adapter *qdev)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
-       u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+       __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 
        qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
        ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
@@ -398,7 +398,7 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
        u32 previousBit;
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
-       u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+       __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 
        /* Clock in a zero, then do the start bit */
        ql_write_nvram_reg(qdev, spir,
@@ -467,7 +467,7 @@ static void fm93c56a_deselect(struct ql3_adapter *qdev)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
-       u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+       __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 
        qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
        ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
@@ -483,7 +483,7 @@ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
        u32 dataBit;
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
-       u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+       __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 
        /* Read the data bits */
        /* The first bit is a dummy.  Clock right over it. */
@@ -3011,7 +3011,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
        u32 value;
        struct ql3xxx_port_registers __iomem *port_regs =
                qdev->mem_map_registers;
-       u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+       __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
        struct ql3xxx_host_memory_registers __iomem *hmem_regs =
                (void __iomem *)port_regs;
        u32 delay = 10;
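
The qla3xxx hunks only add the missing __iomem qualifier so sparse stops warning about the register pointer; the pointer still comes from an ioremap()ed BAR and is only touched through the MMIO accessors. A minimal sketch of the annotation with a hypothetical register offset:

#include <linux/io.h>
#include <linux/types.h>

/* MMIO pointers carry __iomem so sparse can flag plain dereferences. */
static u32 ex_read_status(void __iomem *base)
{
	u32 __iomem *status_reg = (u32 __iomem *)(base + 0x10);

	return readl(status_reg);	/* always via accessors, never *status_reg */
}
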
index 44e316fd67b85917b6bb3af2abd48501507d3b5b..dc44564ef6f9984fdc9a214491d9bdced015eb80 100644 (file)
@@ -867,7 +867,6 @@ struct qlcnic_nic_intr_coalesce {
 #define LINKEVENT_LINKSPEED_MBPS       0
 #define LINKEVENT_LINKSPEED_ENCODED    1
 
-#define AUTO_FW_RESET_ENABLED  0x01
 /* firmware response header:
  *     63:58 - message type
  *     57:56 - owner
@@ -1133,14 +1132,10 @@ struct qlcnic_eswitch {
 #define MAX_BW                 100     /* % of link speed */
 #define MAX_VLAN_ID            4095
 #define MIN_VLAN_ID            2
-#define MAX_TX_QUEUES          1
-#define MAX_RX_QUEUES          4
 #define DEFAULT_MAC_LEARN      1
 
 #define IS_VALID_VLAN(vlan)    (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
 #define IS_VALID_BW(bw)                (bw <= MAX_BW)
-#define IS_VALID_TX_QUEUES(que)        (que > 0 && que <= MAX_TX_QUEUES)
-#define IS_VALID_RX_QUEUES(que)        (que > 0 && que <= MAX_RX_QUEUES)
 
 struct qlcnic_pci_func_cfg {
        u16     func_type;
index 37c04b4fade3bc2710fcd024790dbbdc06c969a0..cd88c7e1bfa9c76c3ae156da021a020f27346be5 100644 (file)
@@ -42,7 +42,7 @@ static int use_msi_x = 1;
 module_param(use_msi_x, int, 0444);
 MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled");
 
-static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
+static int auto_fw_reset = 1;
 module_param(auto_fw_reset, int, 0644);
 MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
 
@@ -2959,8 +2959,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
                if (adapter->need_fw_reset)
                        goto detach;
 
-               if (adapter->reset_context &&
-                   auto_fw_reset == AUTO_FW_RESET_ENABLED) {
+               if (adapter->reset_context && auto_fw_reset) {
                        qlcnic_reset_hw_context(adapter);
                        adapter->netdev->trans_start = jiffies;
                }
@@ -2973,7 +2972,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
 
        qlcnic_dev_request_reset(adapter);
 
-       if ((auto_fw_reset == AUTO_FW_RESET_ENABLED))
+       if (auto_fw_reset)
                clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
 
        dev_info(&netdev->dev, "firmware hang detected\n");
@@ -2982,7 +2981,7 @@ detach:
        adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
                QLCNIC_DEV_NEED_RESET;
 
-       if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
+       if (auto_fw_reset &&
                !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
 
                qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
@@ -3654,10 +3653,8 @@ validate_npar_config(struct qlcnic_adapter *adapter,
                if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
                        return QL_STATUS_INVALID_PARAM;
 
-               if (!IS_VALID_BW(np_cfg[i].min_bw)
-                               || !IS_VALID_BW(np_cfg[i].max_bw)
-                               || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
-                               || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
+               if (!IS_VALID_BW(np_cfg[i].min_bw) ||
+                   !IS_VALID_BW(np_cfg[i].max_bw))
                        return QL_STATUS_INVALID_PARAM;
        }
        return 0;
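
With AUTO_FW_RESET_ENABLED gone, auto_fw_reset becomes a plain 0/1 module parameter that is simply tested for truth. A minimal sketch of that idiom (the parameter name is reused for illustration only; the action is a placeholder):

#include <linux/module.h>
#include <linux/moduleparam.h>

static int auto_fw_reset = 1;
module_param(auto_fw_reset, int, 0644);
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");

static void ex_handle_fw_hang(void)
{
	if (auto_fw_reset)
		pr_info("scheduling firmware reset\n");	/* placeholder action */
}
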
index 7ffdb80adf40fd7ffd0e716e777b7b2e9f7e35ef..5e403511289de11b17f189c9649cdeb66cfdc1ee 100644 (file)
@@ -37,6 +37,7 @@
 
 #define FIRMWARE_8168D_1       "rtl_nic/rtl8168d-1.fw"
 #define FIRMWARE_8168D_2       "rtl_nic/rtl8168d-2.fw"
+#define FIRMWARE_8105E_1       "rtl_nic/rtl8105e-1.fw"
 
 #ifdef RTL8169_DEBUG
 #define assert(expr) \
@@ -124,6 +125,8 @@ enum mac_version {
        RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D
        RTL_GIGA_MAC_VER_27 = 0x1b, // 8168DP
        RTL_GIGA_MAC_VER_28 = 0x1c, // 8168DP
+       RTL_GIGA_MAC_VER_29 = 0x1d, // 8105E
+       RTL_GIGA_MAC_VER_30 = 0x1e, // 8105E
 };
 
 #define _R(NAME,MAC,MASK) \
@@ -161,7 +164,9 @@ static const struct {
        _R("RTL8168d/8111d",    RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E
        _R("RTL8168d/8111d",    RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E
        _R("RTL8168dp/8111dp",  RTL_GIGA_MAC_VER_27, 0xff7e1880), // PCI-E
-       _R("RTL8168dp/8111dp",  RTL_GIGA_MAC_VER_28, 0xff7e1880)  // PCI-E
+       _R("RTL8168dp/8111dp",  RTL_GIGA_MAC_VER_28, 0xff7e1880), // PCI-E
+       _R("RTL8105e",          RTL_GIGA_MAC_VER_29, 0xff7e1880), // PCI-E
+       _R("RTL8105e",          RTL_GIGA_MAC_VER_30, 0xff7e1880)  // PCI-E
 };
 #undef _R
 
@@ -268,9 +273,15 @@ enum rtl8168_8101_registers {
 #define        EPHYAR_REG_MASK                 0x1f
 #define        EPHYAR_REG_SHIFT                16
 #define        EPHYAR_DATA_MASK                0xffff
+       DLLPR                   = 0xd0,
+#define        PM_SWITCH                       (1 << 6)
        DBG_REG                 = 0xd1,
 #define        FIX_NAK_1                       (1 << 4)
 #define        FIX_NAK_2                       (1 << 3)
+       TWSI                    = 0xd2,
+       MCU                     = 0xd3,
+#define        EN_NDP                          (1 << 3)
+#define        EN_OOB_RESET                    (1 << 2)
        EFUSEAR                 = 0xdc,
 #define        EFUSEAR_FLAG                    0x80000000
 #define        EFUSEAR_WRITE_CMD               0x80000000
@@ -527,9 +538,6 @@ struct rtl8169_private {
        u16 napi_event;
        u16 intr_mask;
        int phy_1000_ctrl_reg;
-#ifdef CONFIG_R8169_VLAN
-       struct vlan_group *vlgrp;
-#endif
 
        struct mdio_ops {
                void (*write)(void __iomem *, int, int);
@@ -541,7 +549,7 @@ struct rtl8169_private {
                void (*up)(struct rtl8169_private *);
        } pll_power_ops;
 
-       int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
+       int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
        int (*get_settings)(struct net_device *, struct ethtool_cmd *);
        void (*phy_reset_enable)(struct rtl8169_private *tp);
        void (*hw_start)(struct net_device *);
@@ -569,6 +577,7 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(RTL8169_VERSION);
 MODULE_FIRMWARE(FIRMWARE_8168D_1);
 MODULE_FIRMWARE(FIRMWARE_8168D_2);
+MODULE_FIRMWARE(FIRMWARE_8105E_1);
 
 static int rtl8169_open(struct net_device *dev);
 static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
@@ -1098,7 +1107,7 @@ static int rtl8169_get_regs_len(struct net_device *dev)
 }
 
 static int rtl8169_set_speed_tbi(struct net_device *dev,
-                                u8 autoneg, u16 speed, u8 duplex)
+                                u8 autoneg, u16 speed, u8 duplex, u32 ignored)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
@@ -1121,17 +1130,30 @@ static int rtl8169_set_speed_tbi(struct net_device *dev,
 }
 
 static int rtl8169_set_speed_xmii(struct net_device *dev,
-                                 u8 autoneg, u16 speed, u8 duplex)
+                                 u8 autoneg, u16 speed, u8 duplex, u32 adv)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
        int giga_ctrl, bmcr;
+       int rc = -EINVAL;
+
+       rtl_writephy(tp, 0x1f, 0x0000);
 
        if (autoneg == AUTONEG_ENABLE) {
                int auto_nego;
 
                auto_nego = rtl_readphy(tp, MII_ADVERTISE);
-               auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
-                             ADVERTISE_100HALF | ADVERTISE_100FULL);
+               auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
+                               ADVERTISE_100HALF | ADVERTISE_100FULL);
+
+               if (adv & ADVERTISED_10baseT_Half)
+                       auto_nego |= ADVERTISE_10HALF;
+               if (adv & ADVERTISED_10baseT_Full)
+                       auto_nego |= ADVERTISE_10FULL;
+               if (adv & ADVERTISED_100baseT_Half)
+                       auto_nego |= ADVERTISE_100HALF;
+               if (adv & ADVERTISED_100baseT_Full)
+                       auto_nego |= ADVERTISE_100FULL;
+
                auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
 
                giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
@@ -1145,27 +1167,22 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
                    (tp->mac_version != RTL_GIGA_MAC_VER_13) &&
                    (tp->mac_version != RTL_GIGA_MAC_VER_14) &&
                    (tp->mac_version != RTL_GIGA_MAC_VER_15) &&
-                   (tp->mac_version != RTL_GIGA_MAC_VER_16)) {
-                       giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
-               } else {
+                   (tp->mac_version != RTL_GIGA_MAC_VER_16) &&
+                   (tp->mac_version != RTL_GIGA_MAC_VER_29) &&
+                   (tp->mac_version != RTL_GIGA_MAC_VER_30)) {
+                       if (adv & ADVERTISED_1000baseT_Half)
+                               giga_ctrl |= ADVERTISE_1000HALF;
+                       if (adv & ADVERTISED_1000baseT_Full)
+                               giga_ctrl |= ADVERTISE_1000FULL;
+               } else if (adv & (ADVERTISED_1000baseT_Half |
+                                 ADVERTISED_1000baseT_Full)) {
                        netif_info(tp, link, dev,
                                   "PHY does not support 1000Mbps\n");
+                       goto out;
                }
 
                bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
 
-               if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
-                   (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
-                   (tp->mac_version >= RTL_GIGA_MAC_VER_17)) {
-                       /*
-                        * Wake up the PHY.
-                        * Vendor specific (0x1f) and reserved (0x0e) MII
-                        * registers.
-                        */
-                       rtl_writephy(tp, 0x1f, 0x0000);
-                       rtl_writephy(tp, 0x0e, 0x0000);
-               }
-
                rtl_writephy(tp, MII_ADVERTISE, auto_nego);
                rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
        } else {
@@ -1176,12 +1193,10 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
                else if (speed == SPEED_100)
                        bmcr = BMCR_SPEED100;
                else
-                       return -EINVAL;
+                       goto out;
 
                if (duplex == DUPLEX_FULL)
                        bmcr |= BMCR_FULLDPLX;
-
-               rtl_writephy(tp, 0x1f, 0x0000);
        }
 
        tp->phy_1000_ctrl_reg = giga_ctrl;
@@ -1199,16 +1214,18 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
                }
        }
 
-       return 0;
+       rc = 0;
+out:
+       return rc;
 }
 
 static int rtl8169_set_speed(struct net_device *dev,
-                            u8 autoneg, u16 speed, u8 duplex)
+                            u8 autoneg, u16 speed, u8 duplex, u32 advertising)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
        int ret;
 
-       ret = tp->set_speed(dev, autoneg, speed, duplex);
+       ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
 
        if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
                mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
@@ -1223,7 +1240,8 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        int ret;
 
        spin_lock_irqsave(&tp->lock, flags);
-       ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex);
+       ret = rtl8169_set_speed(dev,
+               cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
        spin_unlock_irqrestore(&tp->lock, flags);
 
        return ret;
@@ -1257,8 +1275,6 @@ static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
        return 0;
 }
 
-#ifdef CONFIG_R8169_VLAN
-
 static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
                                      struct sk_buff *skb)
 {
@@ -1266,64 +1282,37 @@ static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
                TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
 }
 
-static void rtl8169_vlan_rx_register(struct net_device *dev,
-                                    struct vlan_group *grp)
+#define NETIF_F_HW_VLAN_TX_RX  (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX)
+
+static void rtl8169_vlan_mode(struct net_device *dev)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
        unsigned long flags;
 
        spin_lock_irqsave(&tp->lock, flags);
-       tp->vlgrp = grp;
-       /*
-        * Do not disable RxVlan on 8110SCd.
-        */
-       if (tp->vlgrp || (tp->mac_version == RTL_GIGA_MAC_VER_05))
+       if (dev->features & NETIF_F_HW_VLAN_RX)
                tp->cp_cmd |= RxVlan;
        else
                tp->cp_cmd &= ~RxVlan;
        RTL_W16(CPlusCmd, tp->cp_cmd);
+       /* PCI commit */
        RTL_R16(CPlusCmd);
        spin_unlock_irqrestore(&tp->lock, flags);
+
+       dev->vlan_features = dev->features & ~NETIF_F_HW_VLAN_TX_RX;
 }
 
-static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
-                              struct sk_buff *skb, int polling)
+static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
 {
        u32 opts2 = le32_to_cpu(desc->opts2);
-       struct vlan_group *vlgrp = tp->vlgrp;
-       int ret;
 
-       if (vlgrp && (opts2 & RxVlanTag)) {
-               u16 vtag = swab16(opts2 & 0xffff);
+       if (opts2 & RxVlanTag)
+               __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
 
-               if (likely(polling))
-                       vlan_gro_receive(&tp->napi, vlgrp, vtag, skb);
-               else
-                       __vlan_hwaccel_rx(skb, vlgrp, vtag, polling);
-               ret = 0;
-       } else
-               ret = -1;
        desc->opts2 = 0;
-       return ret;
 }
 
-#else /* !CONFIG_R8169_VLAN */
-
-static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
-                                     struct sk_buff *skb)
-{
-       return 0;
-}
-
-static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
-                              struct sk_buff *skb, int polling)
-{
-       return -1;
-}
-
-#endif
-
 static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
@@ -1494,6 +1483,28 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
        }
 }
 
+static int rtl8169_set_flags(struct net_device *dev, u32 data)
+{
+       struct rtl8169_private *tp = netdev_priv(dev);
+       unsigned long old_feat = dev->features;
+       int rc;
+
+       if ((tp->mac_version == RTL_GIGA_MAC_VER_05) &&
+           !(data & ETH_FLAG_RXVLAN)) {
+               netif_info(tp, drv, dev, "8110SCd requires hardware Rx VLAN\n");
+               return -EINVAL;
+       }
+
+       rc = ethtool_op_set_flags(dev, data, ETH_FLAG_TXVLAN | ETH_FLAG_RXVLAN);
+       if (rc)
+               return rc;
+
+       if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX)
+               rtl8169_vlan_mode(dev);
+
+       return 0;
+}
+
 static const struct ethtool_ops rtl8169_ethtool_ops = {
        .get_drvinfo            = rtl8169_get_drvinfo,
        .get_regs_len           = rtl8169_get_regs_len,
@@ -1513,6 +1524,8 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
        .get_strings            = rtl8169_get_strings,
        .get_sset_count         = rtl8169_get_sset_count,
        .get_ethtool_stats      = rtl8169_get_ethtool_stats,
+       .set_flags              = rtl8169_set_flags,
+       .get_flags              = ethtool_op_get_flags,
 };
 
 static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -1561,6 +1574,9 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
                { 0x7c800000, 0x30000000,       RTL_GIGA_MAC_VER_11 },
 
                /* 8101 family. */
+               { 0x7cf00000, 0x40a00000,       RTL_GIGA_MAC_VER_30 },
+               { 0x7cf00000, 0x40900000,       RTL_GIGA_MAC_VER_29 },
+               { 0x7c800000, 0x40800000,       RTL_GIGA_MAC_VER_30 },
                { 0x7cf00000, 0x34a00000,       RTL_GIGA_MAC_VER_09 },
                { 0x7cf00000, 0x24a00000,       RTL_GIGA_MAC_VER_09 },
                { 0x7cf00000, 0x34900000,       RTL_GIGA_MAC_VER_08 },
@@ -2437,6 +2453,33 @@ static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
        rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 }
 
+static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
+{
+       static const struct phy_reg phy_reg_init[] = {
+               { 0x1f, 0x0005 },
+               { 0x1a, 0x0000 },
+               { 0x1f, 0x0000 },
+
+               { 0x1f, 0x0004 },
+               { 0x1c, 0x0000 },
+               { 0x1f, 0x0000 },
+
+               { 0x1f, 0x0001 },
+               { 0x15, 0x7701 },
+               { 0x1f, 0x0000 }
+       };
+
+       /* Disable ALDPS before ram code */
+       rtl_writephy(tp, 0x1f, 0x0000);
+       rtl_writephy(tp, 0x18, 0x0310);
+       msleep(100);
+
+       if (rtl_apply_firmware(tp, FIRMWARE_8105E_1) < 0)
+               netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
+
+       rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
 static void rtl_hw_phy_config(struct net_device *dev)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
@@ -2504,6 +2547,10 @@ static void rtl_hw_phy_config(struct net_device *dev)
        case RTL_GIGA_MAC_VER_28:
                rtl8168d_4_hw_phy_config(tp);
                break;
+       case RTL_GIGA_MAC_VER_29:
+       case RTL_GIGA_MAC_VER_30:
+               rtl8105e_hw_phy_config(tp);
+               break;
 
        default:
                break;
@@ -2635,11 +2682,12 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
 
        rtl8169_phy_reset(dev, tp);
 
-       /*
-        * rtl8169_set_speed_xmii takes good care of the Fast Ethernet
-        * only 8101. Don't panic.
-        */
-       rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL);
+       rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
+               ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+               ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
+               tp->mii.supports_gmii ?
+                       ADVERTISED_1000baseT_Half |
+                       ADVERTISED_1000baseT_Full : 0);
 
        if (RTL_R8(PHYstatus) & TBI_Enable)
                netif_info(tp, link, dev, "TBI auto-negotiating\n");
@@ -2795,9 +2843,6 @@ static const struct net_device_ops rtl8169_netdev_ops = {
        .ndo_set_mac_address    = rtl_set_mac_address,
        .ndo_do_ioctl           = rtl8169_ioctl,
        .ndo_set_multicast_list = rtl_set_rx_mode,
-#ifdef CONFIG_R8169_VLAN
-       .ndo_vlan_rx_register   = rtl8169_vlan_rx_register,
-#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = rtl8169_netpoll,
 #endif
@@ -2952,6 +2997,8 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
        case RTL_GIGA_MAC_VER_09:
        case RTL_GIGA_MAC_VER_10:
        case RTL_GIGA_MAC_VER_16:
+       case RTL_GIGA_MAC_VER_29:
+       case RTL_GIGA_MAC_VER_30:
                ops->down       = r810x_pll_power_down;
                ops->up         = r810x_pll_power_up;
                break;
@@ -3104,6 +3151,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Identify chip attached to board */
        rtl8169_get_mac_version(tp, ioaddr);
 
+       /*
+        * Pretend we are using VLANs; this bypasses a nasty bug where
+        * interrupts stop flowing on high load on 8110SCd controllers.
+        */
+       if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+               tp->cp_cmd |= RxVlan;
+
        rtl_init_mdio_ops(tp);
        rtl_init_pll_power_ops(tp);
 
@@ -3172,10 +3226,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
 
-#ifdef CONFIG_R8169_VLAN
-       dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-#endif
-       dev->features |= NETIF_F_GRO;
+       dev->features |= NETIF_F_HW_VLAN_TX_RX | NETIF_F_GRO;
 
        tp->intr_mask = 0xffff;
        tp->hw_start = cfg->hw_start;
@@ -3293,12 +3344,7 @@ static int rtl8169_open(struct net_device *dev)
 
        rtl8169_init_phy(dev, tp);
 
-       /*
-        * Pretend we are using VLANs; This bypasses a nasty bug where
-        * Interrupts stop flowing on high load on 8110SCd controllers.
-        */
-       if (tp->mac_version == RTL_GIGA_MAC_VER_05)
-               RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
+       rtl8169_vlan_mode(dev);
 
        rtl_pll_power_up(tp);
 
@@ -3915,6 +3961,37 @@ static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
        rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
 }
 
+static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+       static const struct ephy_info e_info_8105e_1[] = {
+               { 0x07, 0, 0x4000 },
+               { 0x19, 0, 0x0200 },
+               { 0x19, 0, 0x0020 },
+               { 0x1e, 0, 0x2000 },
+               { 0x03, 0, 0x0001 },
+               { 0x19, 0, 0x0100 },
+               { 0x19, 0, 0x0004 },
+               { 0x0a, 0, 0x0020 }
+       };
+
+       /* Force LAN exit from ASPM if Rx/Tx are not idle */
+       RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
+
+       /* disable Early Tally Counter */
+       RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
+
+       RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
+       RTL_W8(DLLPR, RTL_R8(DLLPR) | PM_SWITCH);
+
+       rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
+}
+
+static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+       rtl_hw_start_8105e_1(ioaddr, pdev);
+       rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
+}
+
 static void rtl_hw_start_8101(struct net_device *dev)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
@@ -3945,6 +4022,13 @@ static void rtl_hw_start_8101(struct net_device *dev)
        case RTL_GIGA_MAC_VER_09:
                rtl_hw_start_8102e_2(ioaddr, pdev);
                break;
+
+       case RTL_GIGA_MAC_VER_29:
+               rtl_hw_start_8105e_1(ioaddr, pdev);
+               break;
+       case RTL_GIGA_MAC_VER_30:
+               rtl_hw_start_8105e_2(ioaddr, pdev);
+               break;
        }
 
        RTL_W8(Cfg9346, Cfg9346_Lock);
@@ -4603,12 +4687,12 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
                        skb_put(skb, pkt_size);
                        skb->protocol = eth_type_trans(skb, dev);
 
-                       if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) {
-                               if (likely(polling))
-                                       napi_gro_receive(&tp->napi, skb);
-                               else
-                                       netif_rx(skb);
-                       }
+                       rtl8169_rx_vlan_tag(desc, skb);
+
+                       if (likely(polling))
+                               napi_gro_receive(&tp->napi, skb);
+                       else
+                               netif_rx(skb);
 
                        dev->stats.rx_bytes += pkt_size;
                        dev->stats.rx_packets++;
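
A note on the parenthesized ternary in the rtl8169 advertising mask near the top of this file's diff: in C the '|' operator binds more tightly than '?:', so without the parentheses the 10/100 advertising bits become part of the ternary's condition and are dropped from the result. A small self-contained demo, with illustrative names only (not driver code):

/*
 * '|' binds tighter than '?:'. Without parentheses the OR chain becomes
 * the condition and the intended low bits are lost from the result.
 */
#include <stdio.h>

#define BIT_A 0x01
#define BIT_B 0x02
#define BIT_C 0x04

int main(void)
{
	int cond = 0;

	/* Parses as (BIT_A | BIT_B | cond) ? BIT_C : 0  ->  always BIT_C */
	int wrong = BIT_A | BIT_B | cond ? BIT_C : 0;

	/* Keeps BIT_A and BIT_B, adds BIT_C only when cond is true */
	int right = BIT_A | BIT_B | (cond ? BIT_C : 0);

	printf("wrong=0x%x right=0x%x\n", wrong, right);	/* 0x4 vs 0x3 */
	return 0;
}
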
index 39c17cecb8b98f5258bd9245ad451b14d6b2473b..2ad6364103ea45db5c0f8094172d1a51c0401479 100644 (file)
@@ -7556,7 +7556,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
                         */
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        if (ring_data->lro) {
-                               u32 tcp_len;
+                               u32 tcp_len = 0;
                                u8 *tcp;
                                int ret = 0;
 
index 002bac7438434f8eb240a891815e1223b55b7c5c..b8bd936374f29e5142deb4d75f4ec700af42a5bf 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -21,6 +21,7 @@
 #include <linux/ethtool.h>
 #include <linux/topology.h>
 #include <linux/gfp.h>
+#include <linux/cpu_rmap.h>
 #include "net_driver.h"
 #include "efx.h"
 #include "nic.h"
@@ -307,6 +308,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
                        channel->irq_mod_score = 0;
                }
 
+               efx_filter_rfs_expire(channel);
+
                /* There is no race here; although napi_disable() will
                 * only wait for napi_complete(), this isn't a problem
                 * since efx_channel_processed() will have no effect if
@@ -673,7 +676,7 @@ static void efx_fini_channels(struct efx_nic *efx)
 
                efx_for_each_channel_rx_queue(rx_queue, channel)
                        efx_fini_rx_queue(rx_queue);
-               efx_for_each_channel_tx_queue(tx_queue, channel)
+               efx_for_each_possible_channel_tx_queue(tx_queue, channel)
                        efx_fini_tx_queue(tx_queue);
                efx_fini_eventq(channel);
        }
@@ -689,7 +692,7 @@ static void efx_remove_channel(struct efx_channel *channel)
 
        efx_for_each_channel_rx_queue(rx_queue, channel)
                efx_remove_rx_queue(rx_queue);
-       efx_for_each_channel_tx_queue(tx_queue, channel)
+       efx_for_each_possible_channel_tx_queue(tx_queue, channel)
                efx_remove_tx_queue(tx_queue);
        efx_remove_eventq(channel);
 }
@@ -1101,8 +1104,8 @@ static int efx_init_io(struct efx_nic *efx)
                rc = -EIO;
                goto fail3;
        }
-       efx->membase = ioremap_nocache(efx->membase_phys,
-                                      efx->type->mem_map_size);
+       efx->membase = ioremap_wc(efx->membase_phys,
+                                 efx->type->mem_map_size);
        if (!efx->membase) {
                netif_err(efx, probe, efx->net_dev,
                          "could not map memory BAR at %llx+%x\n",
@@ -1175,10 +1178,32 @@ static int efx_wanted_channels(void)
        return count;
 }
 
+static int
+efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
+{
+#ifdef CONFIG_RFS_ACCEL
+       int i, rc;
+
+       efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
+       if (!efx->net_dev->rx_cpu_rmap)
+               return -ENOMEM;
+       for (i = 0; i < efx->n_rx_channels; i++) {
+               rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
+                                     xentries[i].vector);
+               if (rc) {
+                       free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+                       efx->net_dev->rx_cpu_rmap = NULL;
+                       return rc;
+               }
+       }
+#endif
+       return 0;
+}
+
 /* Probe the number and type of interrupts we are able to obtain, and
  * the resulting numbers of channels and RX queues.
  */
-static void efx_probe_interrupts(struct efx_nic *efx)
+static int efx_probe_interrupts(struct efx_nic *efx)
 {
        int max_channels =
                min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
@@ -1220,6 +1245,11 @@ static void efx_probe_interrupts(struct efx_nic *efx)
                                efx->n_tx_channels = efx->n_channels;
                                efx->n_rx_channels = efx->n_channels;
                        }
+                       rc = efx_init_rx_cpu_rmap(efx, xentries);
+                       if (rc) {
+                               pci_disable_msix(efx->pci_dev);
+                               return rc;
+                       }
                        for (i = 0; i < n_channels; i++)
                                efx_get_channel(efx, i)->irq =
                                        xentries[i].vector;
@@ -1253,6 +1283,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
                efx->n_tx_channels = 1;
                efx->legacy_irq = efx->pci_dev->irq;
        }
+
+       return 0;
 }
 
 static void efx_remove_interrupts(struct efx_nic *efx)
@@ -1271,21 +1303,8 @@ static void efx_remove_interrupts(struct efx_nic *efx)
 
 static void efx_set_channels(struct efx_nic *efx)
 {
-       struct efx_channel *channel;
-       struct efx_tx_queue *tx_queue;
-
        efx->tx_channel_offset =
                separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
-
-       /* Channel pointers were set in efx_init_struct() but we now
-        * need to clear them for TX queues in any RX-only channels. */
-       efx_for_each_channel(channel, efx) {
-               if (channel->channel - efx->tx_channel_offset >=
-                   efx->n_tx_channels) {
-                       efx_for_each_channel_tx_queue(tx_queue, channel)
-                               tx_queue->channel = NULL;
-               }
-       }
 }
 
 static int efx_probe_nic(struct efx_nic *efx)
@@ -1302,7 +1321,9 @@ static int efx_probe_nic(struct efx_nic *efx)
 
        /* Determine the number of channels and queues by trying to hook
         * in MSI-X interrupts. */
-       efx_probe_interrupts(efx);
+       rc = efx_probe_interrupts(efx);
+       if (rc)
+               goto fail;
 
        if (efx->n_channels > 1)
                get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
@@ -1317,6 +1338,10 @@ static int efx_probe_nic(struct efx_nic *efx)
        efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
 
        return 0;
+
+fail:
+       efx->type->remove(efx);
+       return rc;
 }
 
 static void efx_remove_nic(struct efx_nic *efx)
@@ -1531,9 +1556,9 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
        efx->irq_rx_adaptive = rx_adaptive;
        efx->irq_rx_moderation = rx_ticks;
        efx_for_each_channel(channel, efx) {
-               if (efx_channel_get_rx_queue(channel))
+               if (efx_channel_has_rx_queue(channel))
                        channel->irq_moderation = rx_ticks;
-               else if (efx_channel_get_tx_queue(channel, 0))
+               else if (efx_channel_has_tx_queues(channel))
                        channel->irq_moderation = tx_ticks;
        }
 }
@@ -1848,6 +1873,10 @@ static const struct net_device_ops efx_netdev_ops = {
        .ndo_set_multicast_list = efx_set_multicast_list,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = efx_netpoll,
+#endif
+       .ndo_setup_tc           = efx_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+       .ndo_rx_flow_steer      = efx_filter_rfs,
 #endif
 };
 
@@ -1910,10 +1939,8 @@ static int efx_register_netdev(struct efx_nic *efx)
 
        efx_for_each_channel(channel, efx) {
                struct efx_tx_queue *tx_queue;
-               efx_for_each_channel_tx_queue(tx_queue, channel) {
-                       tx_queue->core_txq = netdev_get_tx_queue(
-                               efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
-               }
+               efx_for_each_channel_tx_queue(tx_queue, channel)
+                       efx_init_tx_queue_core_txq(tx_queue);
        }
 
        /* Always start with carrier off; PHY events will detect the link */
@@ -2288,6 +2315,10 @@ static void efx_fini_struct(struct efx_nic *efx)
  */
 static void efx_pci_remove_main(struct efx_nic *efx)
 {
+#ifdef CONFIG_RFS_ACCEL
+       free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+       efx->net_dev->rx_cpu_rmap = NULL;
+#endif
        efx_nic_fini_interrupt(efx);
        efx_fini_channels(efx);
        efx_fini_port(efx);
@@ -2401,7 +2432,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
        int i, rc;
 
        /* Allocate and initialise a struct net_device and struct efx_nic */
-       net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
+       net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
+                                    EFX_MAX_RX_QUEUES);
        if (!net_dev)
                return -ENOMEM;
        net_dev->features |= (type->offload_features | NETIF_F_SG |
index d43a7e5212b1d0b4d5d5068bf8e3d71c8bb7a1f0..3d83a1f74fef4557315a7d7ae9c29f6fe61f86ac 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -29,6 +29,7 @@
 extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
 extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
 extern netdev_tx_t
@@ -36,6 +37,7 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
 extern netdev_tx_t
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
 
 /* RX */
 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -74,6 +76,21 @@ extern int efx_filter_remove_filter(struct efx_nic *efx,
                                    struct efx_filter_spec *spec);
 extern void efx_filter_clear_rx(struct efx_nic *efx,
                                enum efx_filter_priority priority);
+#ifdef CONFIG_RFS_ACCEL
+extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+                         u16 rxq_index, u32 flow_id);
+extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
+static inline void efx_filter_rfs_expire(struct efx_channel *channel)
+{
+       if (channel->rfs_filters_added >= 60 &&
+           __efx_filter_rfs_expire(channel->efx, 100))
+               channel->rfs_filters_added -= 60;
+}
+#define efx_filter_rfs_enabled() 1
+#else
+static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
+#define efx_filter_rfs_enabled() 0
+#endif
 
 /* Channels */
 extern void efx_process_channel_now(struct efx_channel *channel);
index ca886d98bdc78e6b5a2a618ee7afb6ee271b329e..807178ef65ad1110d125dd05e37a0d6dd326c261 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -28,7 +28,8 @@ struct efx_ethtool_stat {
        enum {
                EFX_ETHTOOL_STAT_SOURCE_mac_stats,
                EFX_ETHTOOL_STAT_SOURCE_nic,
-               EFX_ETHTOOL_STAT_SOURCE_channel
+               EFX_ETHTOOL_STAT_SOURCE_channel,
+               EFX_ETHTOOL_STAT_SOURCE_tx_queue
        } source;
        unsigned offset;
        u64(*get_stat) (void *field); /* Reader function */
@@ -86,6 +87,10 @@ static u64 efx_get_atomic_stat(void *field)
        EFX_ETHTOOL_STAT(field, channel, n_##field,             \
                         unsigned int, efx_get_uint_stat)
 
+#define EFX_ETHTOOL_UINT_TXQ_STAT(field)                       \
+       EFX_ETHTOOL_STAT(tx_##field, tx_queue, field,           \
+                        unsigned int, efx_get_uint_stat)
+
 static struct efx_ethtool_stat efx_ethtool_stats[] = {
        EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
        EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
@@ -116,6 +121,10 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
        EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp),
        EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error),
        EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error),
+       EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
+       EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
+       EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
+       EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
        EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
        EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
        EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
@@ -237,8 +246,8 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
        if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
-               siena_print_fwver(efx, info->fw_version,
-                                 sizeof(info->fw_version));
+               efx_mcdi_print_fwver(efx, info->fw_version,
+                                    sizeof(info->fw_version));
        strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
 }
 
@@ -470,6 +479,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
        struct efx_mac_stats *mac_stats = &efx->mac_stats;
        struct efx_ethtool_stat *stat;
        struct efx_channel *channel;
+       struct efx_tx_queue *tx_queue;
        struct rtnl_link_stats64 temp;
        int i;
 
@@ -495,6 +505,15 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
                                data[i] += stat->get_stat((void *)channel +
                                                          stat->offset);
                        break;
+               case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
+                       data[i] = 0;
+                       efx_for_each_channel(channel, efx) {
+                               efx_for_each_channel_tx_queue(tx_queue, channel)
+                                       data[i] +=
+                                               stat->get_stat((void *)tx_queue
+                                                              + stat->offset);
+                       }
+                       break;
                }
        }
 }
@@ -502,7 +521,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
 static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
 {
        struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev);
-       unsigned long features;
+       u32 features;
 
        features = NETIF_F_TSO;
        if (efx->type->offload_features & NETIF_F_V6_CSUM)
@@ -519,7 +538,7 @@ static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
 static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
-       unsigned long features = efx->type->offload_features & NETIF_F_ALL_CSUM;
+       u32 features = efx->type->offload_features & NETIF_F_ALL_CSUM;
 
        if (enable)
                net_dev->features |= features;
@@ -635,7 +654,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
        /* Find lowest IRQ moderation across all used TX queues */
        coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
        efx_for_each_channel(channel, efx) {
-               if (!efx_channel_get_tx_queue(channel, 0))
+               if (!efx_channel_has_tx_queues(channel))
                        continue;
                if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
                        if (channel->channel < efx->n_rx_channels)
@@ -680,8 +699,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
 
        /* If the channel is shared only allow RX parameters to be set */
        efx_for_each_channel(channel, efx) {
-               if (efx_channel_get_rx_queue(channel) &&
-                   efx_channel_get_tx_queue(channel, 0) &&
+               if (efx_channel_has_rx_queue(channel) &&
+                   efx_channel_has_tx_queues(channel) &&
                    tx_usecs) {
                        netif_err(efx, drv, efx->net_dev, "Channel is shared. "
                                  "Only RX coalescing may be set\n");
index 61ddd2c6e750cfd5fe93725612d3af256832acfb..734fcfb52e856aa67fa08e28e3df82d3663a9aab 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -1478,36 +1478,26 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
        /* RX control FIFO thresholds (32 entries) */
        const unsigned ctrl_xon_thr = 20;
        const unsigned ctrl_xoff_thr = 25;
-       /* RX data FIFO thresholds (256-byte units; size varies) */
-       int data_xon_thr = efx_nic_rx_xon_thresh >> 8;
-       int data_xoff_thr = efx_nic_rx_xoff_thresh >> 8;
        efx_oword_t reg;
 
        efx_reado(efx, &reg, FR_AZ_RX_CFG);
        if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
                /* Data FIFO size is 5.5K */
-               if (data_xon_thr < 0)
-                       data_xon_thr = 512 >> 8;
-               if (data_xoff_thr < 0)
-                       data_xoff_thr = 2048 >> 8;
                EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
                EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
                                    huge_buf_size);
-               EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr);
-               EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr);
+               EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
+               EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
                EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
                EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
        } else {
                /* Data FIFO size is 80K; register fields moved */
-               if (data_xon_thr < 0)
-                       data_xon_thr = 27648 >> 8; /* ~3*max MTU */
-               if (data_xoff_thr < 0)
-                       data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */
                EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
                EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
                                    huge_buf_size);
-               EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr);
-               EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr);
+               /* Send XON and XOFF at ~3 * max MTU away from empty/full */
+               EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
+               EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
                EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
                EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
                EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
index 2dd16f0b3ced007ff2aeb761373ce98d40c76af3..b9cc846811d642b7ac87ff832eb90c7c1ec5b4cf 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2009 Solarflare Communications Inc.
+ * Copyright 2007-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index b49e84394641a1493c2afd6b896402eb8f588821..2c9ee5db3bf779bfacc563533d0c0a2263776673 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index d4722c41c4cefa39abf95ab1c23374200a30d8ff..95a980fd63d5d9c195c683512808fa43d6d91a2d 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <linux/in.h>
+#include <net/ip.h>
 #include "efx.h"
 #include "filter.h"
 #include "io.h"
  */
 #define FILTER_CTL_SRCH_MAX 200
 
+/* Don't try very hard to find space for performance hints, as this is
+ * counter-productive. */
+#define FILTER_CTL_SRCH_HINT_MAX 5
+
 enum efx_filter_table_id {
        EFX_FILTER_TABLE_RX_IP = 0,
        EFX_FILTER_TABLE_RX_MAC,
@@ -47,6 +52,10 @@ struct efx_filter_table {
 struct efx_filter_state {
        spinlock_t      lock;
        struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
+#ifdef CONFIG_RFS_ACCEL
+       u32             *rps_flow_id;
+       unsigned        rps_expire_index;
+#endif
 };
 
 /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
@@ -325,15 +334,16 @@ static int efx_filter_search(struct efx_filter_table *table,
                             struct efx_filter_spec *spec, u32 key,
                             bool for_insert, int *depth_required)
 {
-       unsigned hash, incr, filter_idx, depth;
+       unsigned hash, incr, filter_idx, depth, depth_max;
        struct efx_filter_spec *cmp;
 
        hash = efx_filter_hash(key);
        incr = efx_filter_increment(key);
+       depth_max = (spec->priority <= EFX_FILTER_PRI_HINT ?
+                    FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX);
 
        for (depth = 1, filter_idx = hash & (table->size - 1);
-            depth <= FILTER_CTL_SRCH_MAX &&
-                    test_bit(filter_idx, table->used_bitmap);
+            depth <= depth_max && test_bit(filter_idx, table->used_bitmap);
             ++depth) {
                cmp = &table->spec[filter_idx];
                if (efx_filter_equal(spec, cmp))
@@ -342,7 +352,7 @@ static int efx_filter_search(struct efx_filter_table *table,
        }
        if (!for_insert)
                return -ENOENT;
-       if (depth > FILTER_CTL_SRCH_MAX)
+       if (depth > depth_max)
                return -EBUSY;
 found:
        *depth_required = depth;
@@ -562,6 +572,13 @@ int efx_probe_filters(struct efx_nic *efx)
        spin_lock_init(&state->lock);
 
        if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+#ifdef CONFIG_RFS_ACCEL
+               state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
+                                            sizeof(*state->rps_flow_id),
+                                            GFP_KERNEL);
+               if (!state->rps_flow_id)
+                       goto fail;
+#endif
                table = &state->table[EFX_FILTER_TABLE_RX_IP];
                table->id = EFX_FILTER_TABLE_RX_IP;
                table->offset = FR_BZ_RX_FILTER_TBL0;
@@ -607,5 +624,97 @@ void efx_remove_filters(struct efx_nic *efx)
                kfree(state->table[table_id].used_bitmap);
                vfree(state->table[table_id].spec);
        }
+#ifdef CONFIG_RFS_ACCEL
+       kfree(state->rps_flow_id);
+#endif
        kfree(state);
 }
+
+#ifdef CONFIG_RFS_ACCEL
+
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+                  u16 rxq_index, u32 flow_id)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       struct efx_channel *channel;
+       struct efx_filter_state *state = efx->filter_state;
+       struct efx_filter_spec spec;
+       const struct iphdr *ip;
+       const __be16 *ports;
+       int nhoff;
+       int rc;
+
+       nhoff = skb_network_offset(skb);
+
+       if (skb->protocol != htons(ETH_P_IP))
+               return -EPROTONOSUPPORT;
+
+       /* RFS must validate the IP header length before calling us */
+       EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + sizeof(*ip)));
+       ip = (const struct iphdr *)(skb->data + nhoff);
+       if (ip->frag_off & htons(IP_MF | IP_OFFSET))
+               return -EPROTONOSUPPORT;
+       EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + 4 * ip->ihl + 4));
+       ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+
+       efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
+       rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
+                                     ip->daddr, ports[1], ip->saddr, ports[0]);
+       if (rc)
+               return rc;
+
+       rc = efx_filter_insert_filter(efx, &spec, true);
+       if (rc < 0)
+               return rc;
+
+       /* Remember this so we can check whether to expire the filter later */
+       state->rps_flow_id[rc] = flow_id;
+       channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+       ++channel->rfs_filters_added;
+
+       netif_info(efx, rx_status, efx->net_dev,
+                  "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+                  (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
+                  &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
+                  rxq_index, flow_id, rc);
+
+       return rc;
+}
+
+bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
+{
+       struct efx_filter_state *state = efx->filter_state;
+       struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
+       unsigned mask = table->size - 1;
+       unsigned index;
+       unsigned stop;
+
+       if (!spin_trylock_bh(&state->lock))
+               return false;
+
+       index = state->rps_expire_index;
+       stop = (index + quota) & mask;
+
+       while (index != stop) {
+               if (test_bit(index, table->used_bitmap) &&
+                   table->spec[index].priority == EFX_FILTER_PRI_HINT &&
+                   rps_may_expire_flow(efx->net_dev,
+                                       table->spec[index].dmaq_id,
+                                       state->rps_flow_id[index], index)) {
+                       netif_info(efx, rx_status, efx->net_dev,
+                                  "expiring filter %d [flow %u]\n",
+                                  index, state->rps_flow_id[index]);
+                       efx_filter_table_clear_entry(efx, table, index);
+               }
+               index = (index + 1) & mask;
+       }
+
+       state->rps_expire_index = stop;
+       if (table->used == 0)
+               efx_filter_table_reset_search_depth(table);
+
+       spin_unlock_bh(&state->lock);
+       return true;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
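
__efx_filter_rfs_expire() above walks the RX IP filter table in quota-sized slices: the table size is a power of two, so indices wrap with "index & (size - 1)", and rps_expire_index records where the next call should resume, so successive scans eventually cover the whole table. A minimal standalone sketch of that pattern, assuming a toy table size and a quota smaller than the table (names are illustrative, not driver code):

/*
 * Quota-bounded circular scan with a persistent resume index. Each call
 * visits at most 'quota' entries and the next call continues where the
 * previous one stopped, wrapping via the power-of-two mask.
 */
#include <stdio.h>

#define TABLE_SIZE 16			/* must be a power of two */

static unsigned int resume_index;	/* akin to rps_expire_index */

static void visit(unsigned int index)
{
	printf("checking entry %u\n", index);	/* expiry test would go here */
}

static void scan_slice(unsigned int quota)
{
	unsigned int mask = TABLE_SIZE - 1;
	unsigned int index = resume_index;
	unsigned int stop = (index + quota) & mask;

	while (index != stop) {
		visit(index);
		index = (index + 1) & mask;
	}
	resume_index = stop;
}

int main(void)
{
	/* Three slices of 6 cover all 16 entries, then begin wrapping. */
	scan_slice(6);
	scan_slice(6);
	scan_slice(6);
	return 0;
}
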
index 6da4ae20a039df235f135d39a51e8e7213ba374f..d9d8c2ef1074a3de6278eff70d7894cc8cc65991 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -48,9 +48,9 @@
  *   replacing the low 96 bits with zero does not affect functionality.
  * - If the host writes to the last dword address of such a register
  *   (i.e. the high 32 bits) the underlying register will always be
- *   written.  If the collector does not hold values for the low 96
- *   bits of the register, they will be written as zero.  Writing to
- *   the last qword does not have this effect and must not be done.
+ *   written.  If the collector and the current write together do not
+ *   provide values for all 128 bits of the register, the low 96 bits
+ *   will be written as zero.
  * - If the host writes to the address of any other part of such a
  *   register while the collector already holds values for some other
  *   register, the write is discarded and the collector maintains its
@@ -103,6 +103,7 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
        _efx_writed(efx, value->u32[2], reg + 8);
        _efx_writed(efx, value->u32[3], reg + 12);
 #endif
+       wmb();
        mmiowb();
        spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
@@ -125,6 +126,7 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
        __raw_writel((__force u32)value->u32[0], membase + addr);
        __raw_writel((__force u32)value->u32[1], membase + addr + 4);
 #endif
+       wmb();
        mmiowb();
        spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
@@ -139,6 +141,7 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
 
        /* No lock required */
        _efx_writed(efx, value->u32[0], reg);
+       wmb();
 }
 
 /* Read a 128-bit CSR, locking as appropriate. */
@@ -237,12 +240,14 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
 
 #ifdef EFX_USE_QWORD_IO
        _efx_writeq(efx, value->u64[0], reg + 0);
+       _efx_writeq(efx, value->u64[1], reg + 8);
 #else
        _efx_writed(efx, value->u32[0], reg + 0);
        _efx_writed(efx, value->u32[1], reg + 4);
-#endif
        _efx_writed(efx, value->u32[2], reg + 8);
        _efx_writed(efx, value->u32[3], reg + 12);
+#endif
+       wmb();
 }
 #define efx_writeo_page(efx, value, reg, page)                         \
        _efx_writeo_page(efx, value,                                    \
index b716e827b291bca53dab4464543553c1cdaa9673..5e118f0d2479231e1f0069062c6cd9d5582d423a 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2008-2009 Solarflare Communications Inc.
+ * Copyright 2008-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -94,14 +94,15 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
 
        efx_writed(efx, &hdr, pdu);
 
-       for (i = 0; i < inlen; i += 4)
+       for (i = 0; i < inlen; i += 4) {
                _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
-
-       /* Ensure the payload is written out before the header */
-       wmb();
+               /* use wmb() within the loop to inhibit write combining */
+               wmb();
+       }
 
        /* ring the doorbell with a distinctive value */
        _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
+       wmb();
 }
 
 static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
@@ -602,7 +603,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
  **************************************************************************
  */
 
-int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
+void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
 {
        u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)];
        size_t outlength;
@@ -616,29 +617,20 @@ int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
        if (rc)
                goto fail;
 
-       if (outlength == MC_CMD_GET_VERSION_V0_OUT_LEN) {
-               *version = 0;
-               *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
-               return 0;
-       }
-
        if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
                rc = -EIO;
                goto fail;
        }
 
        ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
-       *version = (((u64)le16_to_cpu(ver_words[0]) << 48) |
-                   ((u64)le16_to_cpu(ver_words[1]) << 32) |
-                   ((u64)le16_to_cpu(ver_words[2]) << 16) |
-                   le16_to_cpu(ver_words[3]));
-       *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
-
-       return 0;
+       snprintf(buf, len, "%u.%u.%u.%u",
+                le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
+                le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
+       return;
 
 fail:
        netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
-       return rc;
+       buf[0] = 0;
 }
 
 int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
index c792f1d65e4880d84e85a136289e945e1ca863f1..aced2a7856fcbc60310524a86eb44745c90f791a 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2008-2009 Solarflare Communications Inc.
+ * Copyright 2008-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -93,7 +93,7 @@ extern void efx_mcdi_process_event(struct efx_channel *channel,
 #define MCDI_EVENT_FIELD(_ev, _field)                  \
        EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
 
-extern int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build);
+extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
 extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
                               bool *was_attached_out);
 extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
index f88f4bf986ff423d56ea6c8507b1254dc1dfec86..33f7294edb478350fa49f4808229b06e1e70167c 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009 Solarflare Communications Inc.
+ * Copyright 2009-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index 90359e6440063bc232a4fd2a6b40143b0eb77347..b86a15f221ade983c075e18205f7187860c57f3e 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009 Solarflare Communications Inc.
+ * Copyright 2009-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index 0e97eed663c6bce54e708d94f2e5cf90d3ef0a09..ec3f740f54655fa206e1763276554f406e5d8865 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009 Solarflare Communications Inc.
+ * Copyright 2009-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index 56b0266b441fcb9f880e53896c7f35a569058a05..19e68c26d1030a64bda6acec5486e363763b47b9 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -51,13 +51,10 @@ int efx_mdio_reset_mmd(struct efx_nic *port, int mmd,
        return spins ? spins : -ETIMEDOUT;
 }
 
-static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal)
+static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd)
 {
        int status;
 
-       if (LOOPBACK_INTERNAL(efx))
-               return 0;
-
        if (mmd != MDIO_MMD_AN) {
                /* Read MMD STATUS2 to check it is responding. */
                status = efx_mdio_read(efx, mmd, MDIO_STAT2);
@@ -68,20 +65,6 @@ static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal)
                }
        }
 
-       /* Read MMD STATUS 1 to check for fault. */
-       status = efx_mdio_read(efx, mmd, MDIO_STAT1);
-       if (status & MDIO_STAT1_FAULT) {
-               if (fault_fatal) {
-                       netif_err(efx, hw, efx->net_dev,
-                                 "PHY MMD %d reporting fatal"
-                                 " fault: status %x\n", mmd, status);
-                       return -EIO;
-               } else {
-                       netif_dbg(efx, hw, efx->net_dev,
-                                 "PHY MMD %d reporting status"
-                                 " %x (expected)\n", mmd, status);
-               }
-       }
        return 0;
 }
 
@@ -130,8 +113,7 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
        return rc;
 }
 
-int efx_mdio_check_mmds(struct efx_nic *efx,
-                       unsigned int mmd_mask, unsigned int fatal_mask)
+int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask)
 {
        int mmd = 0, probe_mmd, devs1, devs2;
        u32 devices;
@@ -161,13 +143,9 @@ int efx_mdio_check_mmds(struct efx_nic *efx,
 
        /* Check all required MMDs are responding and happy. */
        while (mmd_mask) {
-               if (mmd_mask & 1) {
-                       int fault_fatal = fatal_mask & 1;
-                       if (efx_mdio_check_mmd(efx, mmd, fault_fatal))
-                               return -EIO;
-               }
+               if ((mmd_mask & 1) && efx_mdio_check_mmd(efx, mmd))
+                       return -EIO;
                mmd_mask = mmd_mask >> 1;
-               fatal_mask = fatal_mask >> 1;
                mmd++;
        }
 
@@ -337,7 +315,7 @@ int efx_mdio_test_alive(struct efx_nic *efx)
                          "no MDIO PHY present with ID %d\n", efx->mdio.prtad);
                rc = -EINVAL;
        } else {
-               rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
+               rc = efx_mdio_check_mmds(efx, efx->mdio.mmds);
        }
 
        mutex_unlock(&efx->mac_lock);
index 75791d3d4963f0a1a445c1fc269b938bd3499ccb..df0703940c837094b57ac266529520445ec1f151 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -68,8 +68,7 @@ extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd,
                              int spins, int spintime);
 
 /* As efx_mdio_check_mmd but for multiple MMDs */
-int efx_mdio_check_mmds(struct efx_nic *efx,
-                       unsigned int mmd_mask, unsigned int fatal_mask);
+int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
 
 /* Check the link status of specified mmds in bit mask */
 extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
index d38627448c221a69a4420c0db71213557beb7dc7..e646bfce2d84933f8f1d43df8344c3d1d3bdc5b8 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index 28df8665256a2167a5081fe4a69e3903ac22b1b5..215d5c51bfa0c424e2030347690dded37e5433ee 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -41,7 +41,7 @@
  *
  **************************************************************************/
 
-#define EFX_DRIVER_VERSION     "3.0"
+#define EFX_DRIVER_VERSION     "3.1"
 
 #ifdef EFX_ENABLE_DEBUG
 #define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
 /* Checksum generation is a per-queue option in hardware, so each
  * queue visible to the networking core is backed by two hardware TX
  * queues. */
-#define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS
-#define EFX_TXQ_TYPE_OFFLOAD   1
-#define EFX_TXQ_TYPES          2
-#define EFX_MAX_TX_QUEUES      (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES)
+#define EFX_MAX_TX_TC          2
+#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
+#define EFX_TXQ_TYPE_OFFLOAD   1       /* flag */
+#define EFX_TXQ_TYPE_HIGHPRI   2       /* flag */
+#define EFX_TXQ_TYPES          4
+#define EFX_MAX_TX_QUEUES      (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
 
 /**
  * struct efx_special_buffer - An Efx special buffer
@@ -140,6 +142,7 @@ struct efx_tx_buffer {
  * @buffer: The software buffer ring
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
+ * @initialised: Has hardware queue been initialised?
  * @flushed: Used when handling queue flushing
  * @read_count: Current read pointer.
  *     This is the number of buffers that have been removed from both rings.
@@ -182,6 +185,7 @@ struct efx_tx_queue {
        struct efx_tx_buffer *buffer;
        struct efx_special_buffer txd;
        unsigned int ptr_mask;
+       bool initialised;
        enum efx_flush_state flushed;
 
        /* Members used mainly on the completion path */
@@ -210,15 +214,17 @@ struct efx_tx_queue {
  *     If both this and page are %NULL, the buffer slot is currently free.
  * @page: The associated page buffer, if any.
  *     If both this and skb are %NULL, the buffer slot is currently free.
- * @data: Pointer to ethernet header
  * @len: Buffer length, in bytes.
+ * @is_page: Indicates if @page is valid. If false, @skb is valid.
  */
 struct efx_rx_buffer {
        dma_addr_t dma_addr;
-       struct sk_buff *skb;
-       struct page *page;
-       char *data;
+       union {
+               struct sk_buff *skb;
+               struct page *page;
+       } u;
        unsigned int len;
+       bool is_page;
 };
 
 /**
@@ -358,6 +364,9 @@ struct efx_channel {
 
        unsigned int irq_count;
        unsigned int irq_mod_score;
+#ifdef CONFIG_RFS_ACCEL
+       unsigned int rfs_filters_added;
+#endif
 
        int rx_alloc_level;
        int rx_alloc_push_pages;
@@ -377,7 +386,7 @@ struct efx_channel {
        bool rx_pkt_csummed;
 
        struct efx_rx_queue rx_queue;
-       struct efx_tx_queue tx_queue[2];
+       struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
 };
 
 enum efx_led_mode {
@@ -906,7 +915,7 @@ struct efx_nic_type {
        unsigned int phys_addr_channels;
        unsigned int tx_dc_base;
        unsigned int rx_dc_base;
-       unsigned long offload_features;
+       u32 offload_features;
        u32 reset_world_flags;
 };
 
@@ -938,18 +947,40 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
        return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
 }
 
+static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
+{
+       return channel->channel - channel->efx->tx_channel_offset <
+               channel->efx->n_tx_channels;
+}
+
 static inline struct efx_tx_queue *
 efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
 {
-       struct efx_tx_queue *tx_queue = channel->tx_queue;
-       EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES);
-       return tx_queue->channel ? tx_queue + type : NULL;
+       EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
+                           type >= EFX_TXQ_TYPES);
+       return &channel->tx_queue[type];
+}
+
+static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
+{
+       return !(tx_queue->efx->net_dev->num_tc < 2 &&
+                tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
 }
 
 /* Iterate over all TX queues belonging to a channel */
 #define efx_for_each_channel_tx_queue(_tx_queue, _channel)             \
-       for (_tx_queue = efx_channel_get_tx_queue(channel, 0);          \
-            _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
+       if (!efx_channel_has_tx_queues(_channel))                       \
+               ;                                                       \
+       else                                                            \
+               for (_tx_queue = (_channel)->tx_queue;                  \
+                    _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
+                            efx_tx_queue_used(_tx_queue);              \
+                    _tx_queue++)
+
+/* Iterate over all possible TX queues belonging to a channel */
+#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel)    \
+       for (_tx_queue = (_channel)->tx_queue;                          \
+            _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;          \
             _tx_queue++)
 
 static inline struct efx_rx_queue *
@@ -959,18 +990,26 @@ efx_get_rx_queue(struct efx_nic *efx, unsigned index)
        return &efx->channel[index]->rx_queue;
 }
 
+static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
+{
+       return channel->channel < channel->efx->n_rx_channels;
+}
+
 static inline struct efx_rx_queue *
 efx_channel_get_rx_queue(struct efx_channel *channel)
 {
-       return channel->channel < channel->efx->n_rx_channels ?
-               &channel->rx_queue : NULL;
+       EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
+       return &channel->rx_queue;
 }
 
 /* Iterate over all RX queues belonging to a channel */
 #define efx_for_each_channel_rx_queue(_rx_queue, _channel)             \
-       for (_rx_queue = efx_channel_get_rx_queue(channel);             \
-            _rx_queue;                                                 \
-            _rx_queue = NULL)
+       if (!efx_channel_has_rx_queue(_channel))                        \
+               ;                                                       \
+       else                                                            \
+               for (_rx_queue = &(_channel)->rx_queue;                 \
+                    _rx_queue;                                         \
+                    _rx_queue = NULL)
 
 static inline struct efx_channel *
 efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
index da386599ab68d0a42686eaf4c7af2837fa4b04a9..e8396614daf38b4e7d7c923a65d2bb109f066d98 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
 #define RX_DC_ENTRIES 64
 #define RX_DC_ENTRIES_ORDER 3
 
-/* RX FIFO XOFF watermark
- *
- * When the amount of the RX FIFO increases used increases past this
- * watermark send XOFF. Only used if RX flow control is enabled (ethtool -A)
- * This also has an effect on RX/TX arbitration
- */
-int efx_nic_rx_xoff_thresh = -1;
-module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644);
-MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
-
-/* RX FIFO XON watermark
- *
- * When the amount of the RX FIFO used decreases below this
- * watermark send XON. Only used if TX flow control is enabled (ethtool -A)
- * This also has an effect on RX/TX arbitration
- */
-int efx_nic_rx_xon_thresh = -1;
-module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644);
-MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
-
 /* If EFX_MAX_INT_ERRORS internal errors occur within
  * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
  * disable it.
@@ -445,8 +425,8 @@ int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
 
 void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 {
-       efx_oword_t tx_desc_ptr;
        struct efx_nic *efx = tx_queue->efx;
+       efx_oword_t reg;
 
        tx_queue->flushed = FLUSH_NONE;
 
@@ -454,7 +434,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
        efx_init_special_buffer(efx, &tx_queue->txd);
 
        /* Push TX descriptor ring to card */
-       EFX_POPULATE_OWORD_10(tx_desc_ptr,
+       EFX_POPULATE_OWORD_10(reg,
                              FRF_AZ_TX_DESCQ_EN, 1,
                              FRF_AZ_TX_ISCSI_DDIG_EN, 0,
                              FRF_AZ_TX_ISCSI_HDIG_EN, 0,
@@ -470,17 +450,15 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 
        if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
                int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
-               EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
-               EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
+               EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+               EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
                                    !csum);
        }
 
-       efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+       efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
                         tx_queue->queue);
 
        if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
-               efx_oword_t reg;
-
                /* Only 128 bits in this register */
                BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
 
@@ -491,6 +469,16 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
                        set_bit_le(tx_queue->queue, (void *)&reg);
                efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
        }
+
+       if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+               EFX_POPULATE_OWORD_1(reg,
+                                    FRF_BZ_TX_PACE,
+                                    (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+                                    FFE_BZ_TX_PACE_OFF :
+                                    FFE_BZ_TX_PACE_RESERVED);
+               efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+                                tx_queue->queue);
+       }
 }
 
 static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1238,8 +1226,10 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 
        /* Flush all tx queues in parallel */
        efx_for_each_channel(channel, efx) {
-               efx_for_each_channel_tx_queue(tx_queue, channel)
-                       efx_flush_tx_queue(tx_queue);
+               efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+                       if (tx_queue->initialised)
+                               efx_flush_tx_queue(tx_queue);
+               }
        }
 
        /* The hardware supports four concurrent rx flushes, each of which may
@@ -1262,8 +1252,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
                                        ++rx_pending;
                                }
                        }
-                       efx_for_each_channel_tx_queue(tx_queue, channel) {
-                               if (tx_queue->flushed != FLUSH_DONE)
+                       efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+                               if (tx_queue->initialised &&
+                                   tx_queue->flushed != FLUSH_DONE)
                                        ++tx_pending;
                        }
                }
@@ -1278,8 +1269,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
        /* Mark the queues as all flushed. We're going to return failure
         * leading to a reset, or fake up success anyway */
        efx_for_each_channel(channel, efx) {
-               efx_for_each_channel_tx_queue(tx_queue, channel) {
-                       if (tx_queue->flushed != FLUSH_DONE)
+               efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+                       if (tx_queue->initialised &&
+                           tx_queue->flushed != FLUSH_DONE)
                                netif_err(efx, hw, efx->net_dev,
                                          "tx queue %d flush command timed out\n",
                                          tx_queue->queue);
@@ -1682,6 +1674,19 @@ void efx_nic_init_common(struct efx_nic *efx)
        if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
                EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
        efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+       if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+               EFX_POPULATE_OWORD_4(temp,
+                                    /* Default values */
+                                    FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+                                    FRF_BZ_TX_PACE_SB_AF, 0xb,
+                                    FRF_BZ_TX_PACE_FB_BASE, 0,
+                                    /* Allow large pace values in the
+                                     * fast bin. */
+                                    FRF_BZ_TX_PACE_BIN_TH,
+                                    FFE_BZ_TX_PACE_RESERVED);
+               efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+       }
 }
 
 /* Register dump */
index eb0586925b51175bcc9cda3a39991c2a2e29521d..d9de1b647d416b601f62cb1e6a1a6f56f09a4830 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -142,20 +142,14 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
 
 /**
  * struct siena_nic_data - Siena NIC state
- * @fw_version: Management controller firmware version
- * @fw_build: Firmware build number
  * @mcdi: Management-Controller-to-Driver Interface
  * @wol_filter_id: Wake-on-LAN packet filter id
  */
 struct siena_nic_data {
-       u64 fw_version;
-       u32 fw_build;
        struct efx_mcdi_iface mcdi;
        int wol_filter_id;
 };
 
-extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len);
-
 extern struct efx_nic_type falcon_a1_nic_type;
 extern struct efx_nic_type falcon_b0_nic_type;
 extern struct efx_nic_type siena_a0_nic_type;
@@ -194,7 +188,6 @@ extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
 /* MAC/PHY */
 extern void falcon_drain_tx_fifo(struct efx_nic *efx);
 extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
-extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh;
 
 /* Interrupts and test events */
 extern int efx_nic_init_interrupt(struct efx_nic *efx);
index 1dab609757fb5490a7bf8af68f34acc81aac99d4..b3b79472421eaedf1bb9369f4b552d17c39a2714 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2009 Solarflare Communications Inc.
+ * Copyright 2007-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index ea3ae00893156b49e0a564cb80a1d460934e0ca2..55f90924247e270f9ec83fdfaef0321834e9bc67 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index 96430ed81c36712a3dec2134f35f241f91466a07..cc2c86b76a7bd466133bb5990ef37e5f8768287b 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
 #define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44
 #define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16
 
+/* TX_PACE_TBL */
+/* Values >20 are documented as reserved, but will result in a queue going
+ * into the fast bin with a pace value of zero. */
+#define FFE_BZ_TX_PACE_OFF 0
+#define FFE_BZ_TX_PACE_RESERVED 21
+
 /* DRIVER_EV */
 /* Sub-fields of an RX flush completion event */
 #define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
index 3925fd62117754b7b764cbec2c0d3d9fb3ee4970..c0fdb59030fb4b9a3a2907f7742284a42949f5d7 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -89,24 +89,37 @@ static unsigned int rx_refill_limit = 95;
  */
 #define EFX_RXD_HEAD_ROOM 2
 
-static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
+/* Offset of ethernet header within page */
+static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
+                                            struct efx_rx_buffer *buf)
 {
        /* Offset is always within one page, so we don't need to consider
         * the page order.
         */
-       return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
+       return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) +
+               efx->type->rx_buffer_hash_size);
 }
 static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
 {
        return PAGE_SIZE << efx->rx_buffer_order;
 }
 
-static inline u32 efx_rx_buf_hash(struct efx_rx_buffer *buf)
+static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
 {
+       if (buf->is_page)
+               return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
+       else
+               return ((u8 *)buf->u.skb->data +
+                       efx->type->rx_buffer_hash_size);
+}
+
+static inline u32 efx_rx_buf_hash(const u8 *eh)
+{
+       /* The ethernet header is always directly after any hash. */
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
-       return __le32_to_cpup((const __le32 *)(buf->data - 4));
+       return __le32_to_cpup((const __le32 *)(eh - 4));
 #else
-       const u8 *data = (const u8 *)(buf->data - 4);
+       const u8 *data = eh - 4;
        return ((u32)data[0]       |
                (u32)data[1] << 8  |
                (u32)data[2] << 16 |
@@ -129,6 +142,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
        struct efx_nic *efx = rx_queue->efx;
        struct net_device *net_dev = efx->net_dev;
        struct efx_rx_buffer *rx_buf;
+       struct sk_buff *skb;
        int skb_len = efx->rx_buffer_len;
        unsigned index, count;
 
@@ -136,24 +150,23 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
                index = rx_queue->added_count & rx_queue->ptr_mask;
                rx_buf = efx_rx_buffer(rx_queue, index);
 
-               rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
-               if (unlikely(!rx_buf->skb))
+               rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
+               if (unlikely(!skb))
                        return -ENOMEM;
-               rx_buf->page = NULL;
 
                /* Adjust the SKB for padding and checksum */
-               skb_reserve(rx_buf->skb, NET_IP_ALIGN);
+               skb_reserve(skb, NET_IP_ALIGN);
                rx_buf->len = skb_len - NET_IP_ALIGN;
-               rx_buf->data = (char *)rx_buf->skb->data;
-               rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
+               rx_buf->is_page = false;
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
 
                rx_buf->dma_addr = pci_map_single(efx->pci_dev,
-                                                 rx_buf->data, rx_buf->len,
+                                                 skb->data, rx_buf->len,
                                                  PCI_DMA_FROMDEVICE);
                if (unlikely(pci_dma_mapping_error(efx->pci_dev,
                                                   rx_buf->dma_addr))) {
-                       dev_kfree_skb_any(rx_buf->skb);
-                       rx_buf->skb = NULL;
+                       dev_kfree_skb_any(skb);
+                       rx_buf->u.skb = NULL;
                        return -EIO;
                }
 
@@ -211,10 +224,9 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
                index = rx_queue->added_count & rx_queue->ptr_mask;
                rx_buf = efx_rx_buffer(rx_queue, index);
                rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
-               rx_buf->skb = NULL;
-               rx_buf->page = page;
-               rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN;
+               rx_buf->u.page = page;
                rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
+               rx_buf->is_page = true;
                ++rx_queue->added_count;
                ++rx_queue->alloc_page_count;
                ++state->refcnt;
@@ -235,19 +247,17 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
                                struct efx_rx_buffer *rx_buf)
 {
-       if (rx_buf->page) {
+       if (rx_buf->is_page && rx_buf->u.page) {
                struct efx_rx_page_state *state;
 
-               EFX_BUG_ON_PARANOID(rx_buf->skb);
-
-               state = page_address(rx_buf->page);
+               state = page_address(rx_buf->u.page);
                if (--state->refcnt == 0) {
                        pci_unmap_page(efx->pci_dev,
                                       state->dma_addr,
                                       efx_rx_buf_size(efx),
                                       PCI_DMA_FROMDEVICE);
                }
-       } else if (likely(rx_buf->skb)) {
+       } else if (!rx_buf->is_page && rx_buf->u.skb) {
                pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
                                 rx_buf->len, PCI_DMA_FROMDEVICE);
        }
@@ -256,12 +266,12 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 static void efx_free_rx_buffer(struct efx_nic *efx,
                               struct efx_rx_buffer *rx_buf)
 {
-       if (rx_buf->page) {
-               __free_pages(rx_buf->page, efx->rx_buffer_order);
-               rx_buf->page = NULL;
-       } else if (likely(rx_buf->skb)) {
-               dev_kfree_skb_any(rx_buf->skb);
-               rx_buf->skb = NULL;
+       if (rx_buf->is_page && rx_buf->u.page) {
+               __free_pages(rx_buf->u.page, efx->rx_buffer_order);
+               rx_buf->u.page = NULL;
+       } else if (!rx_buf->is_page && rx_buf->u.skb) {
+               dev_kfree_skb_any(rx_buf->u.skb);
+               rx_buf->u.skb = NULL;
        }
 }
 
@@ -277,7 +287,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
 static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
                                    struct efx_rx_buffer *rx_buf)
 {
-       struct efx_rx_page_state *state = page_address(rx_buf->page);
+       struct efx_rx_page_state *state = page_address(rx_buf->u.page);
        struct efx_rx_buffer *new_buf;
        unsigned fill_level, index;
 
@@ -292,16 +302,14 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
        }
 
        ++state->refcnt;
-       get_page(rx_buf->page);
+       get_page(rx_buf->u.page);
 
        index = rx_queue->added_count & rx_queue->ptr_mask;
        new_buf = efx_rx_buffer(rx_queue, index);
        new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
-       new_buf->skb = NULL;
-       new_buf->page = rx_buf->page;
-       new_buf->data = (void *)
-               ((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1));
+       new_buf->u.page = rx_buf->u.page;
        new_buf->len = rx_buf->len;
+       new_buf->is_page = true;
        ++rx_queue->added_count;
 }
 
@@ -315,16 +323,15 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
        struct efx_rx_buffer *new_buf;
        unsigned index;
 
-       if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
-           page_count(rx_buf->page) == 1)
+       if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
+           page_count(rx_buf->u.page) == 1)
                efx_resurrect_rx_buffer(rx_queue, rx_buf);
 
        index = rx_queue->added_count & rx_queue->ptr_mask;
        new_buf = efx_rx_buffer(rx_queue, index);
 
        memcpy(new_buf, rx_buf, sizeof(*new_buf));
-       rx_buf->page = NULL;
-       rx_buf->skb = NULL;
+       rx_buf->u.page = NULL;
        ++rx_queue->added_count;
 }
 
@@ -428,7 +435,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
                 * data at the end of the skb will be trashed. So
                 * we have no choice but to leak the fragment.
                 */
-               *leak_packet = (rx_buf->skb != NULL);
+               *leak_packet = !rx_buf->is_page;
                efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
        } else {
                if (net_ratelimit())
@@ -448,19 +455,18 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
  */
 static void efx_rx_packet_gro(struct efx_channel *channel,
                              struct efx_rx_buffer *rx_buf,
-                             bool checksummed)
+                             const u8 *eh, bool checksummed)
 {
        struct napi_struct *napi = &channel->napi_str;
        gro_result_t gro_result;
 
        /* Pass the skb/page into the GRO engine */
-       if (rx_buf->page) {
+       if (rx_buf->is_page) {
                struct efx_nic *efx = channel->efx;
-               struct page *page = rx_buf->page;
+               struct page *page = rx_buf->u.page;
                struct sk_buff *skb;
 
-               EFX_BUG_ON_PARANOID(rx_buf->skb);
-               rx_buf->page = NULL;
+               rx_buf->u.page = NULL;
 
                skb = napi_get_frags(napi);
                if (!skb) {
@@ -469,11 +475,11 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
                }
 
                if (efx->net_dev->features & NETIF_F_RXHASH)
-                       skb->rxhash = efx_rx_buf_hash(rx_buf);
+                       skb->rxhash = efx_rx_buf_hash(eh);
 
                skb_shinfo(skb)->frags[0].page = page;
                skb_shinfo(skb)->frags[0].page_offset =
-                       efx_rx_buf_offset(rx_buf);
+                       efx_rx_buf_offset(efx, rx_buf);
                skb_shinfo(skb)->frags[0].size = rx_buf->len;
                skb_shinfo(skb)->nr_frags = 1;
 
@@ -487,11 +493,10 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
 
                gro_result = napi_gro_frags(napi);
        } else {
-               struct sk_buff *skb = rx_buf->skb;
+               struct sk_buff *skb = rx_buf->u.skb;
 
-               EFX_BUG_ON_PARANOID(!skb);
                EFX_BUG_ON_PARANOID(!checksummed);
-               rx_buf->skb = NULL;
+               rx_buf->u.skb = NULL;
 
                gro_result = napi_gro_receive(napi, skb);
        }
@@ -513,9 +518,6 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
        bool leak_packet = false;
 
        rx_buf = efx_rx_buffer(rx_queue, index);
-       EFX_BUG_ON_PARANOID(!rx_buf->data);
-       EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
-       EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));
 
        /* This allows the refill path to post another buffer.
         * EFX_RXD_HEAD_ROOM ensures that the slot we are using
@@ -554,12 +556,12 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.
         */
-       prefetch(rx_buf->data);
+       prefetch(efx_rx_buf_eh(efx, rx_buf));
 
        /* Pipeline receives so that we give time for packet headers to be
         * prefetched into cache.
         */
-       rx_buf->len = len;
+       rx_buf->len = len - efx->type->rx_buffer_hash_size;
 out:
        if (channel->rx_pkt)
                __efx_rx_packet(channel,
@@ -574,45 +576,43 @@ void __efx_rx_packet(struct efx_channel *channel,
 {
        struct efx_nic *efx = channel->efx;
        struct sk_buff *skb;
-
-       rx_buf->data += efx->type->rx_buffer_hash_size;
-       rx_buf->len -= efx->type->rx_buffer_hash_size;
+       u8 *eh = efx_rx_buf_eh(efx, rx_buf);
 
        /* If we're in loopback test, then pass the packet directly to the
         * loopback layer, and free the rx_buf here
         */
        if (unlikely(efx->loopback_selftest)) {
-               efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
+               efx_loopback_rx_packet(efx, eh, rx_buf->len);
                efx_free_rx_buffer(efx, rx_buf);
                return;
        }
 
-       if (rx_buf->skb) {
-               prefetch(skb_shinfo(rx_buf->skb));
+       if (!rx_buf->is_page) {
+               skb = rx_buf->u.skb;
 
-               skb_reserve(rx_buf->skb, efx->type->rx_buffer_hash_size);
-               skb_put(rx_buf->skb, rx_buf->len);
+               prefetch(skb_shinfo(skb));
+
+               skb_reserve(skb, efx->type->rx_buffer_hash_size);
+               skb_put(skb, rx_buf->len);
 
                if (efx->net_dev->features & NETIF_F_RXHASH)
-                       rx_buf->skb->rxhash = efx_rx_buf_hash(rx_buf);
+                       skb->rxhash = efx_rx_buf_hash(eh);
 
                /* Move past the ethernet header. rx_buf->data still points
                 * at the ethernet header */
-               rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
-                                                      efx->net_dev);
+               skb->protocol = eth_type_trans(skb, efx->net_dev);
 
-               skb_record_rx_queue(rx_buf->skb, channel->channel);
+               skb_record_rx_queue(skb, channel->channel);
        }
 
-       if (likely(checksummed || rx_buf->page)) {
-               efx_rx_packet_gro(channel, rx_buf, checksummed);
+       if (likely(checksummed || rx_buf->is_page)) {
+               efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
                return;
        }
 
        /* We now own the SKB */
-       skb = rx_buf->skb;
-       rx_buf->skb = NULL;
-       EFX_BUG_ON_PARANOID(!skb);
+       skb = rx_buf->u.skb;
+       rx_buf->u.skb = NULL;
 
        /* Set the SKB flags */
        skb_checksum_none_assert(skb);
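
The rx.c hunks above make two related changes: struct efx_rx_buffer now carries a single union (skb or page) selected by an is_page flag instead of two nullable pointers, and the RX hash is read from the four bytes immediately before the Ethernet header returned by efx_rx_buf_eh(). The standalone sketch that follows uses hypothetical types rather than the driver's own; the only detail carried over from the hunks is the 4-byte little-endian hash prefix handled byte-wise in the fallback branch of efx_rx_buf_hash().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified analogue of struct efx_rx_buffer after this
 * patch: one flag records which union member is currently valid. */
struct rx_buf {
        bool is_page;
        union {
                void *page;     /* page-backed receive buffer */
                void *skb;      /* skb-backed receive buffer  */
        } u;
};

/* Read the little-endian 32-bit hash stored in the four bytes before the
 * Ethernet header, one byte at a time so alignment never matters (the
 * fallback branch of efx_rx_buf_hash() in the hunk above). */
static uint32_t hash_before_header(const uint8_t *eh)
{
        const uint8_t *d = eh - 4;

        return (uint32_t)d[0]       |
               (uint32_t)d[1] << 8  |
               (uint32_t)d[2] << 16 |
               (uint32_t)d[3] << 24;
}

int main(void)
{
        /* Four hash bytes followed by the start of an Ethernet frame. */
        uint8_t frame[64] = { 0x78, 0x56, 0x34, 0x12,   /* hash 0x12345678 */
                              0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        const uint8_t *eh = frame + 4;  /* position of the Ethernet header */
        struct rx_buf buf = { .is_page = false, .u.skb = frame };

        printf("buffer holds %s data\n", buf.is_page ? "page" : "skb");
        printf("rx hash: 0x%08x\n", (unsigned)hash_before_header(eh));
        return 0;
}

Using one discriminator plus a union also removes the need for the EFX_BUG_ON_PARANOID cross-checks the patch deletes, since a buffer can no longer claim to be both an skb and a page at once.
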
index 0ebfb99f12991fc0e4a7bfe3b17799c736834798..a0f49b348d62872be7512019083badfc50ab9db8 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -644,7 +644,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
                        goto out;
                }
 
-               /* Test both types of TX queue */
+               /* Test all enabled types of TX queue */
                efx_for_each_channel_tx_queue(tx_queue, channel) {
                        state->offload_csum = (tx_queue->queue &
                                               EFX_TXQ_TYPE_OFFLOAD);
index aed495a4dad7c7756a7dd6e67a0ba4d5156aad1e..dba5456e70f354796f57591f8d9cd0eff100273f 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index bf8456176443a5a12bc2fb4bdb31a6c704d37f7e..e4dd8986b1feaa6c5b848dc8f8c948c4305592d8 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -227,13 +227,6 @@ static int siena_probe_nic(struct efx_nic *efx)
        if (rc)
                goto fail1;
 
-       rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build);
-       if (rc) {
-               netif_err(efx, probe, efx->net_dev,
-                         "Failed to read MCPU firmware version - rc %d\n", rc);
-               goto fail1; /* MCPU absent? */
-       }
-
        /* Let the BMC know that the driver is now in charge of link and
         * filter settings. We must do this before we reset the NIC */
        rc = efx_mcdi_drv_attach(efx, true, &already_attached);
@@ -348,11 +341,6 @@ static int siena_init_nic(struct efx_nic *efx)
               FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
        efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
 
-       if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
-               /* No MCDI operation has been defined to set thresholds */
-               netif_err(efx, hw, efx->net_dev,
-                         "ignoring RX flow control thresholds\n");
-
        /* Enable event logging */
        rc = efx_mcdi_log_ctrl(efx, true, false, 0);
        if (rc)
@@ -514,16 +502,6 @@ static void siena_stop_nic_stats(struct efx_nic *efx)
        efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
 }
 
-void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len)
-{
-       struct siena_nic_data *nic_data = efx->nic_data;
-       snprintf(buf, len, "%u.%u.%u.%u",
-                (unsigned int)(nic_data->fw_version >> 48),
-                (unsigned int)(nic_data->fw_version >> 32 & 0xffff),
-                (unsigned int)(nic_data->fw_version >> 16 & 0xffff),
-                (unsigned int)(nic_data->fw_version & 0xffff));
-}
-
 /**************************************************************************
  *
  * Wake on LAN
index 879b7f6bde3dcf90d2433a8b202d3de791683360..71f2e3ebe1c7a41b6ea644041949434d310231cf 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005 Fen Systems Ltd.
- * Copyright 2006 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index f102912eba910552791a5199ae833bd8dc35706a..efdceb35aaae6a406b8d4d8f366e7e3187d0beef 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2009 Solarflare Communications Inc.
+ * Copyright 2007-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -196,7 +196,7 @@ static int tenxpress_phy_init(struct efx_nic *efx)
                if (rc < 0)
                        return rc;
 
-               rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
+               rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS);
                if (rc < 0)
                        return rc;
        }
index 2f5e9da657bf7cc66d1ad945d0c69340f19dada0..13980190821762aa47a31729b688e5b36001ef77 100644 (file)
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -336,17 +336,91 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
 {
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_tx_queue *tx_queue;
+       unsigned index, type;
 
        if (unlikely(efx->port_inhibited))
                return NETDEV_TX_BUSY;
 
-       tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
-                                   skb->ip_summed == CHECKSUM_PARTIAL ?
-                                   EFX_TXQ_TYPE_OFFLOAD : 0);
+       index = skb_get_queue_mapping(skb);
+       type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
+       if (index >= efx->n_tx_channels) {
+               index -= efx->n_tx_channels;
+               type |= EFX_TXQ_TYPE_HIGHPRI;
+       }
+       tx_queue = efx_get_tx_queue(efx, index, type);
 
        return efx_enqueue_skb(tx_queue, skb);
 }
 
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
+{
+       struct efx_nic *efx = tx_queue->efx;
+
+       /* Must be inverse of queue lookup in efx_hard_start_xmit() */
+       tx_queue->core_txq =
+               netdev_get_tx_queue(efx->net_dev,
+                                   tx_queue->queue / EFX_TXQ_TYPES +
+                                   ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+                                    efx->n_tx_channels : 0));
+}
+
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       struct efx_channel *channel;
+       struct efx_tx_queue *tx_queue;
+       unsigned tc;
+       int rc;
+
+       if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
+               return -EINVAL;
+
+       if (num_tc == net_dev->num_tc)
+               return 0;
+
+       for (tc = 0; tc < num_tc; tc++) {
+               net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
+               net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
+       }
+
+       if (num_tc > net_dev->num_tc) {
+               /* Initialise high-priority queues as necessary */
+               efx_for_each_channel(channel, efx) {
+                       efx_for_each_possible_channel_tx_queue(tx_queue,
+                                                              channel) {
+                               if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
+                                       continue;
+                               if (!tx_queue->buffer) {
+                                       rc = efx_probe_tx_queue(tx_queue);
+                                       if (rc)
+                                               return rc;
+                               }
+                               if (!tx_queue->initialised)
+                                       efx_init_tx_queue(tx_queue);
+                               efx_init_tx_queue_core_txq(tx_queue);
+                       }
+               }
+       } else {
+               /* Reduce number of classes before number of queues */
+               net_dev->num_tc = num_tc;
+       }
+
+       rc = netif_set_real_num_tx_queues(net_dev,
+                                         max_t(int, num_tc, 1) *
+                                         efx->n_tx_channels);
+       if (rc)
+               return rc;
+
+       /* Do not destroy high-priority queues when they become
+        * unused.  We would have to flush them first, and it is
+        * fairly difficult to flush a subset of TX queues.  Leave
+        * it to efx_fini_channels().
+        */
+
+       net_dev->num_tc = num_tc;
+       return 0;
+}
+
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
        unsigned fill_level;
@@ -430,6 +504,8 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
 
        /* Set up TX descriptor ring */
        efx_nic_init_tx(tx_queue);
+
+       tx_queue->initialised = true;
 }
 
 void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -452,9 +528,14 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
 
 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 {
+       if (!tx_queue->initialised)
+               return;
+
        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "shutting down TX queue %d\n", tx_queue->queue);
 
+       tx_queue->initialised = false;
+
        /* Flush TX queue, remove descriptor ring */
        efx_nic_fini_tx(tx_queue);
 
@@ -466,6 +547,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
+       if (!tx_queue->buffer)
+               return;
+
        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "destroying TX queue %d\n", tx_queue->queue);
        efx_nic_remove_tx(tx_queue);
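
In the tx.c hunks above, efx_hard_start_xmit() folds the core netdev queue index into a (channel, type) pair, where indices at or above efx->n_tx_channels select the high-priority variant, and efx_init_tx_queue_core_txq() is documented as the exact inverse of that lookup. The small check that follows mirrors the arithmetic with made-up constants standing in for EFX_TXQ_TYPES, EFX_TXQ_TYPE_OFFLOAD and EFX_TXQ_TYPE_HIGHPRI (the real values live in the driver headers and are not part of this diff), so treat it as a sketch of the mapping rather than the driver's implementation.

#include <assert.h>
#include <stdio.h>

/* Assumed constants, chosen only so the arithmetic mirrors the
 * queue / EFX_TXQ_TYPES and queue & EFX_TXQ_TYPE_HIGHPRI expressions in
 * the hunks above; the driver's real values are defined in its headers. */
#define TXQ_TYPES     4
#define TYPE_OFFLOAD  1
#define TYPE_HIGHPRI  2

static const unsigned n_tx_channels = 8;

/* Forward mapping, as in efx_hard_start_xmit(): core netdev queue index
 * plus checksum requirement select a (channel, type) pair, flattened here
 * into a single hardware queue number. */
static unsigned hw_queue(unsigned core_index, int csum_offload)
{
        unsigned type = csum_offload ? TYPE_OFFLOAD : 0;

        if (core_index >= n_tx_channels) {
                core_index -= n_tx_channels;
                type |= TYPE_HIGHPRI;
        }
        return core_index * TXQ_TYPES + type;
}

/* Inverse mapping, as in efx_init_tx_queue_core_txq(): recover the core
 * netdev queue index from the hardware queue number. */
static unsigned core_queue(unsigned hw)
{
        return hw / TXQ_TYPES + ((hw & TYPE_HIGHPRI) ? n_tx_channels : 0);
}

int main(void)
{
        unsigned core;
        int csum;

        /* The round trip must hold for every core queue and checksum type. */
        for (core = 0; core < 2 * n_tx_channels; core++)
                for (csum = 0; csum <= 1; csum++)
                        assert(core_queue(hw_queue(core, csum)) == core);

        printf("core -> hw -> core mapping is consistent\n");
        return 0;
}

efx_setup_tc() relies on the same layout when it points tc_to_txq[tc] at offset tc * efx->n_tx_channels with count efx->n_tx_channels, so each traffic class addresses one contiguous band of core queues.
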
index 351794a79215d071aa39ab468b93681d0c7ff205..d9886addcc995f03420d43fb6bff2d06c62f4551 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -193,7 +193,7 @@ static int txc_reset_phy(struct efx_nic *efx)
                goto fail;
 
        /* Check that all the MMDs we expect are present and responding. */
-       rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS, 0);
+       rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS);
        if (rc < 0)
                goto fail;
 
index e0d63083c3a866081c8fcea67760cca575a65dc7..e4dd3a7f304b5b70e874530fda18d251cc4515b1 100644 (file)
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
index 819c1750e2abf750cbb73ad779dd695a1464326d..095e525808841843f6075b9a21171aaa3573b703 100644 (file)
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
+#include <linux/ethtool.h>
 #include <asm/cacheflush.h>
 
 #include "sh_eth.h"
 
+#define SH_ETH_DEF_MSG_ENABLE \
+               (NETIF_MSG_LINK | \
+               NETIF_MSG_TIMER | \
+               NETIF_MSG_RX_ERR | \
+               NETIF_MSG_TX_ERR)
+
 /* There is CPU dependent code */
 #if defined(CONFIG_CPU_SUBTYPE_SH7724)
 #define SH_ETH_RESET_DEFAULT   1
@@ -817,6 +824,20 @@ static int sh_eth_rx(struct net_device *ndev)
        return 0;
 }
 
+static void sh_eth_rcv_snd_disable(u32 ioaddr)
+{
+       /* disable tx and rx */
+       writel(readl(ioaddr + ECMR) &
+               ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
+}
+
+static void sh_eth_rcv_snd_enable(u32 ioaddr)
+{
+       /* enable tx and rx */
+       writel(readl(ioaddr + ECMR) |
+               (ECMR_RE | ECMR_TE), ioaddr + ECMR);
+}
+
 /* error control function */
 static void sh_eth_error(struct net_device *ndev, int intr_status)
 {
@@ -843,11 +864,9 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
                                if (mdp->ether_link_active_low)
                                        link_stat = ~link_stat;
                        }
-                       if (!(link_stat & PHY_ST_LINK)) {
-                               /* Link Down : disable tx and rx */
-                               writel(readl(ioaddr + ECMR) &
-                                         ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
-                       } else {
+                       if (!(link_stat & PHY_ST_LINK)) {
+                               sh_eth_rcv_snd_disable(ioaddr);
+                       } else {
                                /* Link Up */
                                writel(readl(ioaddr + EESIPR) &
                                          ~DMAC_M_ECI, ioaddr + EESIPR);
@@ -857,8 +876,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
                                writel(readl(ioaddr + EESIPR) |
                                          DMAC_M_ECI, ioaddr + EESIPR);
                                /* enable tx and rx */
-                               writel(readl(ioaddr + ECMR) |
-                                         (ECMR_RE | ECMR_TE), ioaddr + ECMR);
+                               sh_eth_rcv_snd_enable(ioaddr);
                        }
                }
        }
@@ -867,6 +885,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
                /* Write buck end. unused write back interrupt */
-               if (intr_status & EESR_TABT)    /* Transmit Abort int */
+               if (intr_status & EESR_TABT) {  /* Transmit Abort int */
                        mdp->stats.tx_aborted_errors++;
+                       if (netif_msg_tx_err(mdp))
+                               dev_err(&ndev->dev, "Transmit Abort\n");
+               }
        }
 
        if (intr_status & EESR_RABT) {
@@ -874,14 +894,23 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
                if (intr_status & EESR_RFRMER) {
                        /* Receive Frame Overflow int */
                        mdp->stats.rx_frame_errors++;
-                       dev_err(&ndev->dev, "Receive Frame Overflow\n");
+                       if (netif_msg_rx_err(mdp))
+                               dev_err(&ndev->dev, "Receive Abort\n");
                }
        }
 
-       if (!mdp->cd->no_ade) {
-               if (intr_status & EESR_ADE && intr_status & EESR_TDE &&
-                   intr_status & EESR_TFE)
-                       mdp->stats.tx_fifo_errors++;
+       if (intr_status & EESR_TDE) {
+               /* Transmit Descriptor Empty int */
+               mdp->stats.tx_fifo_errors++;
+               if (netif_msg_tx_err(mdp))
+                       dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
+       }
+
+       if (intr_status & EESR_TFE) {
+               /* FIFO underflow */
+               mdp->stats.tx_fifo_errors++;
+               if (netif_msg_tx_err(mdp))
+                       dev_err(&ndev->dev, "Transmit FIFO Underflow\n");
        }
 
        if (intr_status & EESR_RDE) {
@@ -890,12 +919,22 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
 
                if (readl(ioaddr + EDRRR) ^ EDRRR_R)
                        writel(EDRRR_R, ioaddr + EDRRR);
-               dev_err(&ndev->dev, "Receive Descriptor Empty\n");
+               if (netif_msg_rx_err(mdp))
+                       dev_err(&ndev->dev, "Receive Descriptor Empty\n");
        }
+
        if (intr_status & EESR_RFE) {
                /* Receive FIFO Overflow int */
                mdp->stats.rx_fifo_errors++;
-               dev_err(&ndev->dev, "Receive FIFO Overflow\n");
+               if (netif_msg_rx_err(mdp))
+                       dev_err(&ndev->dev, "Receive FIFO Overflow\n");
+       }
+
+       if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
+               /* Address Error */
+               mdp->stats.tx_fifo_errors++;
+               if (netif_msg_tx_err(mdp))
+                       dev_err(&ndev->dev, "Address Error\n");
        }
 
        mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
@@ -1012,7 +1051,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
                mdp->duplex = -1;
        }
 
-       if (new_state)
+       if (new_state && netif_msg_link(mdp))
                phy_print_status(phydev);
 }
 
@@ -1063,6 +1102,132 @@ static int sh_eth_phy_start(struct net_device *ndev)
        return 0;
 }
 
+static int sh_eth_get_settings(struct net_device *ndev,
+                       struct ethtool_cmd *ecmd)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&mdp->lock, flags);
+       ret = phy_ethtool_gset(mdp->phydev, ecmd);
+       spin_unlock_irqrestore(&mdp->lock, flags);
+
+       return ret;
+}
+
+static int sh_eth_set_settings(struct net_device *ndev,
+               struct ethtool_cmd *ecmd)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       unsigned long flags;
+       int ret;
+       u32 ioaddr = ndev->base_addr;
+
+       spin_lock_irqsave(&mdp->lock, flags);
+
+       /* disable tx and rx */
+       sh_eth_rcv_snd_disable(ioaddr);
+
+       ret = phy_ethtool_sset(mdp->phydev, ecmd);
+       if (ret)
+               goto error_exit;
+
+       if (ecmd->duplex == DUPLEX_FULL)
+               mdp->duplex = 1;
+       else
+               mdp->duplex = 0;
+
+       if (mdp->cd->set_duplex)
+               mdp->cd->set_duplex(ndev);
+
+error_exit:
+       mdelay(1);
+
+       /* enable tx and rx */
+       sh_eth_rcv_snd_enable(ioaddr);
+
+       spin_unlock_irqrestore(&mdp->lock, flags);
+
+       return ret;
+}
+
+static int sh_eth_nway_reset(struct net_device *ndev)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&mdp->lock, flags);
+       ret = phy_start_aneg(mdp->phydev);
+       spin_unlock_irqrestore(&mdp->lock, flags);
+
+       return ret;
+}
+
+static u32 sh_eth_get_msglevel(struct net_device *ndev)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       return mdp->msg_enable;
+}
+
+static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       mdp->msg_enable = value;
+}
+
+static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
+       "rx_current", "tx_current",
+       "rx_dirty", "tx_dirty",
+};
+#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
+
+static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return SH_ETH_STATS_LEN;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void sh_eth_get_ethtool_stats(struct net_device *ndev,
+                       struct ethtool_stats *stats, u64 *data)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       int i = 0;
+
+       /* device-specific stats */
+       data[i++] = mdp->cur_rx;
+       data[i++] = mdp->cur_tx;
+       data[i++] = mdp->dirty_rx;
+       data[i++] = mdp->dirty_tx;
+}
+
+static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+       switch (stringset) {
+       case ETH_SS_STATS:
+               memcpy(data, *sh_eth_gstrings_stats,
+                                       sizeof(sh_eth_gstrings_stats));
+               break;
+       }
+}
+
+static struct ethtool_ops sh_eth_ethtool_ops = {
+       .get_settings   = sh_eth_get_settings,
+       .set_settings   = sh_eth_set_settings,
+       .nway_reset             = sh_eth_nway_reset,
+       .get_msglevel   = sh_eth_get_msglevel,
+       .set_msglevel   = sh_eth_set_msglevel,
+       .get_link               = ethtool_op_get_link,
+       .get_strings    = sh_eth_get_strings,
+       .get_ethtool_stats  = sh_eth_get_ethtool_stats,
+       .get_sset_count     = sh_eth_get_sset_count,
+};
+
 /* network device open function */
 static int sh_eth_open(struct net_device *ndev)
 {
@@ -1073,8 +1238,8 @@ static int sh_eth_open(struct net_device *ndev)
 
        ret = request_irq(ndev->irq, sh_eth_interrupt,
 #if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7764) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7757)
+       defined(CONFIG_CPU_SUBTYPE_SH7764) || \
+       defined(CONFIG_CPU_SUBTYPE_SH7757)
                                IRQF_SHARED,
 #else
                                0,
@@ -1123,8 +1288,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
 
        netif_stop_queue(ndev);
 
-       /* worning message out. */
-       printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
+       if (netif_msg_timer(mdp))
+               dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
               " resetting...\n", ndev->name, (int)readl(ioaddr + EESR));
 
        /* tx_errors count up */
@@ -1167,6 +1332,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        spin_lock_irqsave(&mdp->lock, flags);
        if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
                if (!sh_eth_txfree(ndev)) {
+                       if (netif_msg_tx_queued(mdp))
+                               dev_warn(&ndev->dev, "TxFD exhausted.\n");
                        netif_stop_queue(ndev);
                        spin_unlock_irqrestore(&mdp->lock, flags);
                        return NETDEV_TX_BUSY;
@@ -1497,8 +1664,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 
        /* set function */
        ndev->netdev_ops = &sh_eth_netdev_ops;
+       SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
        ndev->watchdog_timeo = TX_TIMEOUT;
 
+       /* debug message level */
+       mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
        mdp->post_rx = POST_RX >> (devno << 1);
        mdp->post_fw = POST_FW >> (devno << 1);
 
index 640e368ebeee9f60f7543e9290f2130ad1f21c41..84d4167eee9ad83a3c40b99f4221201aacde40f3 100644 (file)
@@ -495,7 +495,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
        sis_priv->mii_info.reg_num_mask = 0x1f;
 
        /* Get Mac address according to the chip revision */
-       pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &(sis_priv->chipset_rev));
+       sis_priv->chipset_rev = pci_dev->revision;
        if(netif_msg_probe(sis_priv))
                printk(KERN_DEBUG "%s: detected revision %2.2x, "
                                "trying to get MAC address...\n",
@@ -532,7 +532,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
        /* save our host bridge revision */
        dev = pci_get_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_630, NULL);
        if (dev) {
-               pci_read_config_byte(dev, PCI_CLASS_REVISION, &sis_priv->host_bridge_rev);
+               sis_priv->host_bridge_rev = dev->revision;
                pci_dev_put(dev);
        }
 
index 726df611ee17da997b31a8219de2554080fdeed7..43654a3bb0ec81591cb6d55302b984fab7782066 100644 (file)
@@ -81,6 +81,7 @@ static const char version[] =
 #include <linux/ethtool.h>
 #include <linux/mii.h>
 #include <linux/workqueue.h>
+#include <linux/of.h>
 
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -2394,6 +2395,15 @@ static int smc_drv_resume(struct device *dev)
        return 0;
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id smc91x_match[] = {
+       { .compatible = "smsc,lan91c94", },
+       { .compatible = "smsc,lan91c111", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, smc91x_match);
+#endif
+
 static struct dev_pm_ops smc_drv_pm_ops = {
        .suspend        = smc_drv_suspend,
        .resume         = smc_drv_resume,
@@ -2406,6 +2416,9 @@ static struct platform_driver smc_driver = {
                .name   = CARDNAME,
                .owner  = THIS_MODULE,
                .pm     = &smc_drv_pm_ops,
+#ifdef CONFIG_OF
+               .of_match_table = smc91x_match,
+#endif
        },
 };
 
index 1c5408f83937a47716ed561181e262b0009c95b7..c1a344829b54827b6a39391e558c9ed63f334e3d 100644 (file)
@@ -320,28 +320,28 @@ static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
 
        if (txmac_stat & MAC_TXSTAT_URUN) {
                netdev_err(dev, "TX MAC xmit underrun\n");
-               gp->net_stats.tx_fifo_errors++;
+               dev->stats.tx_fifo_errors++;
        }
 
        if (txmac_stat & MAC_TXSTAT_MPE) {
                netdev_err(dev, "TX MAC max packet size error\n");
-               gp->net_stats.tx_errors++;
+               dev->stats.tx_errors++;
        }
 
        /* The rest are all cases of one of the 16-bit TX
         * counters expiring.
         */
        if (txmac_stat & MAC_TXSTAT_NCE)
-               gp->net_stats.collisions += 0x10000;
+               dev->stats.collisions += 0x10000;
 
        if (txmac_stat & MAC_TXSTAT_ECE) {
-               gp->net_stats.tx_aborted_errors += 0x10000;
-               gp->net_stats.collisions += 0x10000;
+               dev->stats.tx_aborted_errors += 0x10000;
+               dev->stats.collisions += 0x10000;
        }
 
        if (txmac_stat & MAC_TXSTAT_LCE) {
-               gp->net_stats.tx_aborted_errors += 0x10000;
-               gp->net_stats.collisions += 0x10000;
+               dev->stats.tx_aborted_errors += 0x10000;
+               dev->stats.collisions += 0x10000;
        }
 
        /* We do not keep track of MAC_TXSTAT_FCE and
@@ -469,20 +469,20 @@ static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
                u32 smac = readl(gp->regs + MAC_SMACHINE);
 
                netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
-               gp->net_stats.rx_over_errors++;
-               gp->net_stats.rx_fifo_errors++;
+               dev->stats.rx_over_errors++;
+               dev->stats.rx_fifo_errors++;
 
                ret = gem_rxmac_reset(gp);
        }
 
        if (rxmac_stat & MAC_RXSTAT_ACE)
-               gp->net_stats.rx_frame_errors += 0x10000;
+               dev->stats.rx_frame_errors += 0x10000;
 
        if (rxmac_stat & MAC_RXSTAT_CCE)
-               gp->net_stats.rx_crc_errors += 0x10000;
+               dev->stats.rx_crc_errors += 0x10000;
 
        if (rxmac_stat & MAC_RXSTAT_LCE)
-               gp->net_stats.rx_length_errors += 0x10000;
+               dev->stats.rx_length_errors += 0x10000;
 
        /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
         * events.
@@ -594,7 +594,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat
                if (netif_msg_rx_err(gp))
                        printk(KERN_DEBUG "%s: no buffer for rx frame\n",
                                gp->dev->name);
-               gp->net_stats.rx_dropped++;
+               dev->stats.rx_dropped++;
        }
 
        if (gem_status & GREG_STAT_RXTAGERR) {
@@ -602,7 +602,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat
                if (netif_msg_rx_err(gp))
                        printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
                                gp->dev->name);
-               gp->net_stats.rx_errors++;
+               dev->stats.rx_errors++;
 
                goto do_reset;
        }
@@ -684,7 +684,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
                                break;
                }
                gp->tx_skbs[entry] = NULL;
-               gp->net_stats.tx_bytes += skb->len;
+               dev->stats.tx_bytes += skb->len;
 
                for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
                        txd = &gp->init_block->txd[entry];
@@ -696,7 +696,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
                        entry = NEXT_TX(entry);
                }
 
-               gp->net_stats.tx_packets++;
+               dev->stats.tx_packets++;
                dev_kfree_skb_irq(skb);
        }
        gp->tx_old = entry;
@@ -738,6 +738,7 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit)
 
 static int gem_rx(struct gem *gp, int work_to_do)
 {
+       struct net_device *dev = gp->dev;
        int entry, drops, work_done = 0;
        u32 done;
        __sum16 csum;
@@ -782,15 +783,15 @@ static int gem_rx(struct gem *gp, int work_to_do)
 
                len = (status & RXDCTRL_BUFSZ) >> 16;
                if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
-                       gp->net_stats.rx_errors++;
+                       dev->stats.rx_errors++;
                        if (len < ETH_ZLEN)
-                               gp->net_stats.rx_length_errors++;
+                               dev->stats.rx_length_errors++;
                        if (len & RXDCTRL_BAD)
-                               gp->net_stats.rx_crc_errors++;
+                               dev->stats.rx_crc_errors++;
 
                        /* We'll just return it to GEM. */
                drop_it:
-                       gp->net_stats.rx_dropped++;
+                       dev->stats.rx_dropped++;
                        goto next;
                }
 
@@ -843,8 +844,8 @@ static int gem_rx(struct gem *gp, int work_to_do)
 
                netif_receive_skb(skb);
 
-               gp->net_stats.rx_packets++;
-               gp->net_stats.rx_bytes += len;
+               dev->stats.rx_packets++;
+               dev->stats.rx_bytes += len;
 
        next:
                entry = NEXT_RX(entry);
@@ -2472,7 +2473,6 @@ static int gem_resume(struct pci_dev *pdev)
 static struct net_device_stats *gem_get_stats(struct net_device *dev)
 {
        struct gem *gp = netdev_priv(dev);
-       struct net_device_stats *stats = &gp->net_stats;
 
        spin_lock_irq(&gp->lock);
        spin_lock(&gp->tx_lock);
@@ -2481,17 +2481,17 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
         * so we shield against this
         */
        if (gp->running) {
-               stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);
+               dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
                writel(0, gp->regs + MAC_FCSERR);
 
-               stats->rx_frame_errors += readl(gp->regs + MAC_AERR);
+               dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
                writel(0, gp->regs + MAC_AERR);
 
-               stats->rx_length_errors += readl(gp->regs + MAC_LERR);
+               dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
                writel(0, gp->regs + MAC_LERR);
 
-               stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
-               stats->collisions +=
+               dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
+               dev->stats.collisions +=
                        (readl(gp->regs + MAC_ECOLL) +
                         readl(gp->regs + MAC_LCOLL));
                writel(0, gp->regs + MAC_ECOLL);
@@ -2501,7 +2501,7 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
        spin_unlock(&gp->tx_lock);
        spin_unlock_irq(&gp->lock);
 
-       return &gp->net_stats;
+       return &dev->stats;
 }
 
 static int gem_set_mac_address(struct net_device *dev, void *addr)
index 19905460def692c23fd9928d991c2b70728cc282..ede017872367d5853ed8e3177d73b9791bb3e3c4 100644 (file)
@@ -994,7 +994,6 @@ struct gem {
        u32                     status;
 
        struct napi_struct      napi;
-       struct net_device_stats net_stats;
 
        int                     tx_fifo_sz;
        int                     rx_fifo_sz;
index 06c0e50336563eb415165a127dc6ec716cc0431e..ebec88882c3b07fdbfb37e312546b33431bf9b40 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
  * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2010 Broadcom Corporation.
+ * Copyright (C) 2005-2011 Broadcom Corporation.
  *
  * Firmware is:
  *     Derived from proprietary unpublished source code,
 
 #define DRV_MODULE_NAME                "tg3"
 #define TG3_MAJ_NUM                    3
-#define TG3_MIN_NUM                    116
+#define TG3_MIN_NUM                    117
 #define DRV_MODULE_VERSION     \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE     "December 3, 2010"
+#define DRV_MODULE_RELDATE     "January 25, 2011"
 
 #define TG3_DEF_MAC_MODE       0
 #define TG3_DEF_RX_MODE                0
@@ -1776,9 +1776,29 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
                tg3_phy_cl45_read(tp, MDIO_MMD_AN,
                                  TG3_CL45_D7_EEERES_STAT, &val);
 
-               if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
-                   val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
+               switch (val) {
+               case TG3_CL45_D7_EEERES_STAT_LP_1000T:
+                       switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+                       case ASIC_REV_5717:
+                       case ASIC_REV_5719:
+                       case ASIC_REV_57765:
+                               /* Enable SM_DSP clock and tx 6dB coding. */
+                               val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
+                                     MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
+                                     MII_TG3_AUXCTL_ACTL_TX_6DB;
+                               tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+
+                               tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
+
+                               /* Turn off SM_DSP clock. */
+                               val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
+                                     MII_TG3_AUXCTL_ACTL_TX_6DB;
+                               tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+                       }
+                       /* Fallthrough */
+               case TG3_CL45_D7_EEERES_STAT_LP_100TX:
                        tp->setlpicnt = 2;
+               }
        }
 
        if (!tp->setlpicnt) {
@@ -2100,7 +2120,7 @@ out:
 
 static void tg3_frob_aux_power(struct tg3 *tp)
 {
-       struct tg3 *tp_peer = tp;
+       bool need_vaux = false;
 
        /* The GPIOs do something completely different on 57765. */
        if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
@@ -2108,23 +2128,32 @@ static void tg3_frob_aux_power(struct tg3 *tp)
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                return;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
+            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) &&
+           tp->pdev_peer != tp->pdev) {
                struct net_device *dev_peer;
 
                dev_peer = pci_get_drvdata(tp->pdev_peer);
+
                /* remove_one() may have been run on the peer. */
-               if (!dev_peer)
-                       tp_peer = tp;
-               else
-                       tp_peer = netdev_priv(dev_peer);
+               if (dev_peer) {
+                       struct tg3 *tp_peer = netdev_priv(dev_peer);
+
+                       if (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE)
+                               return;
+
+                       if ((tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
+                           (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF))
+                               need_vaux = true;
+               }
        }
 
-       if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
-           (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
-           (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
-           (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
+       if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
+           (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
+               need_vaux = true;
+
+       if (need_vaux) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
@@ -2154,10 +2183,6 @@ static void tg3_frob_aux_power(struct tg3 *tp)
                        u32 no_gpio2;
                        u32 grc_local_ctrl = 0;
 
-                       if (tp_peer != tp &&
-                           (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
-                               return;
-
                        /* Workaround to prevent overdrawing Amps. */
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
@@ -2196,10 +2221,6 @@ static void tg3_frob_aux_power(struct tg3 *tp)
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
-                       if (tp_peer != tp &&
-                           (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
-                               return;
-
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
@@ -2968,11 +2989,19 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
                      MII_TG3_AUXCTL_ACTL_TX_6DB;
                tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
 
-               if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
-                   !tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
-                       tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2,
-                                        val | MII_TG3_DSP_CH34TP2_HIBW01);
+               switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+               case ASIC_REV_5717:
+               case ASIC_REV_57765:
+                       if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
+                               tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
+                                                MII_TG3_DSP_CH34TP2_HIBW01);
+                       /* Fall through */
+               case ASIC_REV_5719:
+                       val = MII_TG3_DSP_TAP26_ALNOKO |
+                             MII_TG3_DSP_TAP26_RMRXSTO |
+                             MII_TG3_DSP_TAP26_OPCSINPT;
+                       tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
+               }
 
                val = 0;
                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
@@ -7801,7 +7830,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                       TG3_CPMU_DBTMR1_LNKIDLE_2047US);
 
                tw32_f(TG3_CPMU_EEE_DBTMR2,
-                      TG3_CPMU_DBTMR1_APE_TX_2047US |
+                      TG3_CPMU_DBTMR2_APE_TX_2047US |
                       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
        }
 
@@ -8075,8 +8104,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        /* Program the jumbo buffer descriptor ring control
         * blocks on those devices that have them.
         */
-       if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
-           !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+           ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
+           !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))) {
                /* Setup replenish threshold. */
                tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
 
@@ -8163,10 +8193,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                              RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
                              RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
 
-       /* If statement applies to 5705 and 5750 PCI devices only */
-       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
-            tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
-           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+           tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
                if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                        rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
@@ -8194,8 +8222,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
            (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
                val = tr32(TG3_RDMA_RSRVCTRL_REG);
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
-                       val &= ~TG3_RDMA_RSRVCTRL_TXMRGN_MASK;
-                       val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B;
+                       val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
+                                TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
+                                TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
+                       val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
+                              TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
+                              TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
                }
                tw32(TG3_RDMA_RSRVCTRL_REG,
                     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
@@ -8317,7 +8349,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
        udelay(100);
 
-       if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
+       if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
+               tp->irq_cnt > 1) {
                val = tr32(MSGINT_MODE);
                val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
                tw32(MSGINT_MODE, val);
@@ -8334,17 +8367,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
               WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
               WDMAC_MODE_LNGREAD_ENAB);
 
-       /* If statement applies to 5705 and 5750 PCI devices only */
-       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
-            tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+           tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
                if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
                    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
                     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
                        /* nothing */
                } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
-                          !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
-                          !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
+                          !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
                        val |= WDMAC_MODE_RX_ACCEL;
                }
        }
@@ -9057,7 +9087,8 @@ static void tg3_ints_init(struct tg3 *tp)
 
        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
                u32 msi_mode = tr32(MSGINT_MODE);
-               if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+               if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
+                   tp->irq_cnt > 1)
                        msi_mode |= MSGINT_MODE_MULTIVEC_EN;
                tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
        }
@@ -10452,16 +10483,53 @@ static int tg3_test_nvram(struct tg3 *tp)
                goto out;
        }
 
+       err = -EIO;
+
        /* Bootstrap checksum at offset 0x10 */
        csum = calc_crc((unsigned char *) buf, 0x10);
-       if (csum != be32_to_cpu(buf[0x10/4]))
+       if (csum != le32_to_cpu(buf[0x10/4]))
                goto out;
 
        /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
        csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
-       if (csum != be32_to_cpu(buf[0xfc/4]))
+       if (csum != le32_to_cpu(buf[0xfc/4]))
                goto out;
 
+       for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
+               /* The data is in little-endian format in NVRAM.
+                * Use the big-endian read routines to preserve
+                * the byte order as it exists in NVRAM.
+                */
+               if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &buf[i/4]))
+                       goto out;
+       }
+
+       i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
+                            PCI_VPD_LRDT_RO_DATA);
+       if (i > 0) {
+               j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
+               if (j < 0)
+                       goto out;
+
+               if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
+                       goto out;
+
+               i += PCI_VPD_LRDT_TAG_SIZE;
+               j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
+                                             PCI_VPD_RO_KEYWORD_CHKSUM);
+               if (j > 0) {
+                       u8 csum8 = 0;
+
+                       j += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+                       for (i = 0; i <= j; i++)
+                               csum8 += ((u8 *)buf)[i];
+
+                       if (csum8)
+                               goto out;
+               }
+       }
+
        err = 0;
 
 out:
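
Editorial note on the tg3_test_nvram() hunk above: the added code reads the VPD image out of NVRAM, locates the read-only section with pci_vpd_find_tag(), and then looks up the checksum ("RV") keyword. The property being verified is the standard PCI VPD one: every byte from the start of the image up to and including the stored checksum byte must sum to zero modulo 256. A small standalone sketch of just that final summation (the vpd_csum_ok name and the caller-supplied offset are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * 'csum_off' is the offset of the checksum byte inside the RV field,
 * i.e. what the driver computes as j + PCI_VPD_INFO_FLD_HDR_SIZE.
 * A valid image makes the running 8-bit sum wrap to exactly zero.
 */
static bool vpd_csum_ok(const uint8_t *vpd, size_t csum_off)
{
	uint8_t csum8 = 0;
	size_t i;

	for (i = 0; i <= csum_off; i++)
		csum8 += vpd[i];

	return csum8 == 0;
}
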
@@ -10833,13 +10901,16 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
        if (loopback_mode == TG3_MAC_LOOPBACK) {
                /* HW errata - mac loopback fails in some cases on 5780.
                 * Normal traffic and PHY loopback are not affected by
-                * errata.
+                * errata.  Also, the MAC loopback test is deprecated for
+                * all newer ASIC revisions.
                 */
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
+                   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
                        return 0;
 
-               mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
-                          MAC_MODE_PORT_INT_LPBACK;
+               mac_mode = tp->mac_mode &
+                          ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
+               mac_mode |= MAC_MODE_PORT_INT_LPBACK;
                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                        mac_mode |= MAC_MODE_LINK_POLARITY;
                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
@@ -10861,7 +10932,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
                tg3_writephy(tp, MII_BMCR, val);
                udelay(40);
 
-               mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
+               mac_mode = tp->mac_mode &
+                          ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
                if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                        tg3_writephy(tp, MII_TG3_FET_PTEST,
                                     MII_TG3_FET_PTEST_FRC_TX_LINK |
@@ -10889,6 +10961,13 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                }
                tw32(MAC_MODE, mac_mode);
+
+               /* Wait for link */
+               for (i = 0; i < 100; i++) {
+                       if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+                               break;
+                       mdelay(1);
+               }
        } else {
                return -EINVAL;
        }
@@ -10995,14 +11074,19 @@ out:
 static int tg3_test_loopback(struct tg3 *tp)
 {
        int err = 0;
-       u32 cpmuctrl = 0;
+       u32 eee_cap, cpmuctrl = 0;
 
        if (!netif_running(tp->dev))
                return TG3_LOOPBACK_FAILED;
 
+       eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
+       tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
+
        err = tg3_reset_hw(tp, 1);
-       if (err)
-               return TG3_LOOPBACK_FAILED;
+       if (err) {
+               err = TG3_LOOPBACK_FAILED;
+               goto done;
+       }
 
        /* Turn off gphy autopowerdown. */
        if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
@@ -11022,8 +11106,10 @@ static int tg3_test_loopback(struct tg3 *tp)
                        udelay(10);
                }
 
-               if (status != CPMU_MUTEX_GNT_DRIVER)
-                       return TG3_LOOPBACK_FAILED;
+               if (status != CPMU_MUTEX_GNT_DRIVER) {
+                       err = TG3_LOOPBACK_FAILED;
+                       goto done;
+               }
 
                /* Turn off link-based power management. */
                cpmuctrl = tr32(TG3_CPMU_CTRL);
@@ -11052,6 +11138,9 @@ static int tg3_test_loopback(struct tg3 *tp)
        if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
                tg3_phy_toggle_apd(tp, true);
 
+done:
+       tp->phy_flags |= eee_cap;
+
        return err;
 }
 
@@ -12407,9 +12496,11 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
                        tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
        }
 done:
-       device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
-       device_set_wakeup_enable(&tp->pdev->dev,
+       if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
+               device_set_wakeup_enable(&tp->pdev->dev,
                                 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
+       else
+               device_set_wakeup_capable(&tp->pdev->dev, false);
 }
 
 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
@@ -12461,12 +12552,45 @@ static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
        return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
 }
 
+static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
+{
+       u32 adv = ADVERTISED_Autoneg |
+                 ADVERTISED_Pause;
+
+       if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
+               adv |= ADVERTISED_1000baseT_Half |
+                      ADVERTISED_1000baseT_Full;
+
+       if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+               adv |= ADVERTISED_100baseT_Half |
+                      ADVERTISED_100baseT_Full |
+                      ADVERTISED_10baseT_Half |
+                      ADVERTISED_10baseT_Full |
+                      ADVERTISED_TP;
+       else
+               adv |= ADVERTISED_FIBRE;
+
+       tp->link_config.advertising = adv;
+       tp->link_config.speed = SPEED_INVALID;
+       tp->link_config.duplex = DUPLEX_INVALID;
+       tp->link_config.autoneg = AUTONEG_ENABLE;
+       tp->link_config.active_speed = SPEED_INVALID;
+       tp->link_config.active_duplex = DUPLEX_INVALID;
+       tp->link_config.orig_speed = SPEED_INVALID;
+       tp->link_config.orig_duplex = DUPLEX_INVALID;
+       tp->link_config.orig_autoneg = AUTONEG_INVALID;
+}
+
 static int __devinit tg3_phy_probe(struct tg3 *tp)
 {
        u32 hw_phy_id_1, hw_phy_id_2;
        u32 hw_phy_id, hw_phy_id_masked;
        int err;
 
+       /* flow control autonegotiation is default behavior */
+       tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
+       tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
+
        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
                return tg3_phy_init(tp);
 
@@ -12528,6 +12652,8 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
              tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
                tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
 
+       tg3_phy_init_link_config(tp);
+
        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
@@ -12583,17 +12709,6 @@ skip_phy_reset:
                err = tg3_init_5401phy_dsp(tp);
        }
 
-       if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
-               tp->link_config.advertising =
-                       (ADVERTISED_1000baseT_Half |
-                        ADVERTISED_1000baseT_Full |
-                        ADVERTISED_Autoneg |
-                        ADVERTISED_FIBRE);
-       if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
-               tp->link_config.advertising &=
-                       ~(ADVERTISED_1000baseT_Half |
-                         ADVERTISED_1000baseT_Full);
-
        return err;
 }
 
@@ -13020,7 +13135,7 @@ static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
                return 512;
 }
 
-DEFINE_PCI_DEVICE_TABLE(write_reorder_chipsets) = {
+static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
@@ -13262,7 +13377,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        }
 
        /* Determine TSO capabilities */
-       if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+               ; /* Do nothing. HW bug. */
+       else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
                tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
        else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
@@ -13313,7 +13430,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
        }
 
-       if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
+       if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
+           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
                tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
 
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
@@ -13331,42 +13449,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
 
                tp->pcie_readrq = 4096;
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
-                       u16 word;
-
-                       pci_read_config_word(tp->pdev,
-                                            tp->pcie_cap + PCI_EXP_LNKSTA,
-                                            &word);
-                       switch (word & PCI_EXP_LNKSTA_CLS) {
-                       case PCI_EXP_LNKSTA_CLS_2_5GB:
-                               word &= PCI_EXP_LNKSTA_NLW;
-                               word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
-                               switch (word) {
-                               case 2:
-                                       tp->pcie_readrq = 2048;
-                                       break;
-                               case 4:
-                                       tp->pcie_readrq = 1024;
-                                       break;
-                               }
-                               break;
-
-                       case PCI_EXP_LNKSTA_CLS_5_0GB:
-                               word &= PCI_EXP_LNKSTA_NLW;
-                               word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
-                               switch (word) {
-                               case 1:
-                                       tp->pcie_readrq = 2048;
-                                       break;
-                               case 2:
-                                       tp->pcie_readrq = 1024;
-                                       break;
-                               case 4:
-                                       tp->pcie_readrq = 512;
-                                       break;
-                               }
-                       }
-               }
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+                       tp->pcie_readrq = 2048;
 
                pcie_set_readrq(tp->pdev, tp->pcie_readrq);
 
@@ -13405,7 +13489,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
         * every mailbox register write to force the writes to be
         * posted to the chip in order.
         */
-       if (pci_dev_present(write_reorder_chipsets) &&
+       if (pci_dev_present(tg3_write_reorder_chipsets) &&
            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
                tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
 
@@ -14161,7 +14245,7 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
 
 #define TEST_BUFFER_SIZE       0x2000
 
-DEFINE_PCI_DEVICE_TABLE(dma_wait_state_chipsets) = {
+static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
        { },
 };
@@ -14340,7 +14424,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
                 * now look for chipsets that are known to expose the
                 * DMA bug without failing the test.
                 */
-               if (pci_dev_present(dma_wait_state_chipsets)) {
+               if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
                        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                        tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                } else {
@@ -14357,23 +14441,6 @@ out_nofree:
        return ret;
 }
 
-static void __devinit tg3_init_link_config(struct tg3 *tp)
-{
-       tp->link_config.advertising =
-               (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
-                ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
-                ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
-                ADVERTISED_Autoneg | ADVERTISED_MII);
-       tp->link_config.speed = SPEED_INVALID;
-       tp->link_config.duplex = DUPLEX_INVALID;
-       tp->link_config.autoneg = AUTONEG_ENABLE;
-       tp->link_config.active_speed = SPEED_INVALID;
-       tp->link_config.active_duplex = DUPLEX_INVALID;
-       tp->link_config.orig_speed = SPEED_INVALID;
-       tp->link_config.orig_duplex = DUPLEX_INVALID;
-       tp->link_config.orig_autoneg = AUTONEG_INVALID;
-}
-
 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
 {
        if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
@@ -14677,8 +14744,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
                goto err_out_free_dev;
        }
 
-       tg3_init_link_config(tp);
-
        tp->rx_pending = TG3_DEF_RX_RING_PENDING;
        tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
 
@@ -14826,10 +14891,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
                goto err_out_apeunmap;
        }
 
-       /* flow control autonegotiation is default behavior */
-       tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
-       tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
-
        intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
        rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
        sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
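
Editorial note on the tg3.c changes above: the new tg3_phy_init_link_config() builds the advertised link-mode mask from the PHY's actual capabilities instead of the removed catch-all tg3_init_link_config(). Gigabit modes are skipped for 10/100-only PHYs, and serdes PHYs advertise FIBRE rather than the copper 10/100/TP modes. A compact standalone model of that composition (the ADV_* defines and build_advertising() are illustrative stand-ins for the ethtool ADVERTISED_* bits):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the ethtool ADVERTISED_* bits. */
#define ADV_AUTONEG	(1u << 0)
#define ADV_PAUSE	(1u << 1)
#define ADV_1000_HALF	(1u << 2)
#define ADV_1000_FULL	(1u << 3)
#define ADV_100_HALF	(1u << 4)
#define ADV_100_FULL	(1u << 5)
#define ADV_10_HALF	(1u << 6)
#define ADV_10_FULL	(1u << 7)
#define ADV_TP		(1u << 8)
#define ADV_FIBRE	(1u << 9)

/* Model of the mask assembled in tg3_phy_init_link_config(). */
static uint32_t build_advertising(bool only_10_100, bool is_serdes)
{
	uint32_t adv = ADV_AUTONEG | ADV_PAUSE;

	if (!only_10_100)
		adv |= ADV_1000_HALF | ADV_1000_FULL;

	if (!is_serdes)
		adv |= ADV_100_HALF | ADV_100_FULL |
		       ADV_10_HALF | ADV_10_FULL | ADV_TP;
	else
		adv |= ADV_FIBRE;

	return adv;
}
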
index f528243e1a4fd73fc289d65ed63f24124aa79c5f..73884b69b7494ed5f19b4900c11914b9c9045d44 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
  * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2010 Broadcom Corporation.
+ * Copyright (C) 2007-2011 Broadcom Corporation.
  */
 
 #ifndef _T3_H
 #define  CHIPREV_ID_57780_A1            0x57780001
 #define  CHIPREV_ID_5717_A0             0x05717000
 #define  CHIPREV_ID_57765_A0            0x57785000
+#define  CHIPREV_ID_5719_A0             0x05719000
 #define  GET_ASIC_REV(CHIP_REV_ID)     ((CHIP_REV_ID) >> 12)
 #define   ASIC_REV_5700                         0x07
 #define   ASIC_REV_5701                         0x00
 #define  TG3_CPMU_DBTMR1_PCIEXIT_2047US         0x07ff0000
 #define  TG3_CPMU_DBTMR1_LNKIDLE_2047US         0x000070ff
 #define TG3_CPMU_EEE_DBTMR2            0x000036b8
-#define  TG3_CPMU_DBTMR1_APE_TX_2047US  0x07ff0000
+#define  TG3_CPMU_DBTMR2_APE_TX_2047US  0x07ff0000
 #define  TG3_CPMU_DBTMR2_TXIDXEQ_2047US         0x000070ff
 #define TG3_CPMU_EEE_LNKIDL_CTRL       0x000036bc
 #define  TG3_CPMU_EEE_LNKIDL_PCIE_NL0   0x01000000
 
 #define TG3_RDMA_RSRVCTRL_REG          0x00004900
 #define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX         0x00000004
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K         0x00000c00
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK         0x00000ff0
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K         0x000c0000
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK         0x000ff000
 #define TG3_RDMA_RSRVCTRL_TXMRGN_320B   0x28000000
 #define TG3_RDMA_RSRVCTRL_TXMRGN_MASK   0xffe00000
 /* 0x4904 --> 0x4910 unused */
 
 #define MII_TG3_DSP_TAP1               0x0001
 #define  MII_TG3_DSP_TAP1_AGCTGT_DFLT  0x0007
+#define MII_TG3_DSP_TAP26              0x001a
+#define  MII_TG3_DSP_TAP26_ALNOKO      0x0001
+#define  MII_TG3_DSP_TAP26_RMRXSTO     0x0002
+#define  MII_TG3_DSP_TAP26_OPCSINPT    0x0004
 #define MII_TG3_DSP_AADJ1CH0           0x001f
 #define MII_TG3_DSP_CH34TP2            0x4022
 #define MII_TG3_DSP_CH34TP2_HIBW01     0x0010
index f8e463cd8eccb7296524c7ec50ad6c3f7728c86d..ace6404e2fac4b886cd64930a24103fd3f4f1efc 100644 (file)
  *             Microchip Technology, 24C01A/02A/04A Data Sheet
  *                     available in PDF format from www.microchip.com
  *
- * Change History
- *
- *     Tigran Aivazian <tigran@sco.com>:       TLan_PciProbe() now uses
- *                                             new PCI BIOS interface.
- *     Alan Cox        <alan@lxorguk.ukuu.org.uk>:
- *                                             Fixed the out of memory
- *                                             handling.
- *
- *     Torben Mathiasen <torben.mathiasen@compaq.com> New Maintainer!
- *
- *     v1.1 Dec 20, 1999    - Removed linux version checking
- *                            Patch from Tigran Aivazian.
- *                          - v1.1 includes Alan's SMP updates.
- *                          - We still have problems on SMP though,
- *                            but I'm looking into that.
- *
- *     v1.2 Jan 02, 2000    - Hopefully fixed the SMP deadlock.
- *                          - Removed dependency of HZ being 100.
- *                          - We now allow higher priority timers to
- *                            overwrite timers like TLAN_TIMER_ACTIVITY
- *                            Patch from John Cagle <john.cagle@compaq.com>.
- *                          - Fixed a few compiler warnings.
- *
- *     v1.3 Feb 04, 2000    - Fixed the remaining HZ issues.
- *                          - Removed call to pci_present().
- *                          - Removed SA_INTERRUPT flag from irq handler.
- *                          - Added __init and __initdata to reduce resisdent
- *                            code size.
- *                          - Driver now uses module_init/module_exit.
- *                          - Rewrote init_module and tlan_probe to
- *                            share a lot more code. We now use tlan_probe
- *                            with builtin and module driver.
- *                          - Driver ported to new net API.
- *                          - tlan.txt has been reworked to reflect current
- *                            driver (almost)
- *                          - Other minor stuff
- *
- *     v1.4 Feb 10, 2000    - Updated with more changes required after Dave's
- *                            network cleanup in 2.3.43pre7 (Tigran & myself)
- *                          - Minor stuff.
- *
- *     v1.5 March 22, 2000  - Fixed another timer bug that would hang the driver
- *                            if no cable/link were present.
- *                          - Cosmetic changes.
- *                          - TODO: Port completely to new PCI/DMA API
- *                                  Auto-Neg fallback.
- *
- *     v1.6 April 04, 2000  - Fixed driver support for kernel-parameters. Haven't
- *                            tested it though, as the kernel support is currently
- *                            broken (2.3.99p4p3).
- *                          - Updated tlan.txt accordingly.
- *                          - Adjusted minimum/maximum frame length.
- *                          - There is now a TLAN website up at
- *                            http://hp.sourceforge.net/ 
- *
- *     v1.7 April 07, 2000  - Started to implement custom ioctls. Driver now
- *                            reports PHY information when used with Donald
- *                            Beckers userspace MII diagnostics utility.
- *
- *     v1.8 April 23, 2000  - Fixed support for forced speed/duplex settings.
- *                          - Added link information to Auto-Neg and forced
- *                            modes. When NIC operates with auto-neg the driver
- *                            will report Link speed & duplex modes as well as
- *                            link partner abilities. When forced link is used,
- *                            the driver will report status of the established
- *                            link.
- *                            Please read tlan.txt for additional information.
- *                          - Removed call to check_region(), and used
- *                            return value of request_region() instead.
- *
- *     v1.8a May 28, 2000   - Minor updates.
- *
- *     v1.9 July 25, 2000   - Fixed a few remaining Full-Duplex issues.
- *                          - Updated with timer fixes from Andrew Morton.
- *                          - Fixed module race in TLan_Open.
- *                          - Added routine to monitor PHY status.
- *                          - Added activity led support for Proliant devices.
- *
- *     v1.10 Aug 30, 2000   - Added support for EISA based tlan controllers
- *                            like the Compaq NetFlex3/E.
- *                          - Rewrote tlan_probe to better handle multiple
- *                            bus probes. Probing and device setup is now
- *                            done through TLan_Probe and TLan_init_one. Actual
- *                            hardware probe is done with kernel API and
- *                            TLan_EisaProbe.
- *                          - Adjusted debug information for probing.
- *                          - Fixed bug that would cause general debug information
- *                            to be printed after driver removal.
- *                          - Added transmit timeout handling.
- *                          - Fixed OOM return values in tlan_probe.
- *                          - Fixed possible mem leak in tlan_exit
- *                            (now tlan_remove_one).
- *                          - Fixed timer bug in TLan_phyMonitor.
- *                          - This driver version is alpha quality, please
- *                            send me any bug issues you may encounter.
- *
- *     v1.11 Aug 31, 2000   - Do not try to register irq 0 if no irq line was
- *                            set for EISA cards.
- *                          - Added support for NetFlex3/E with nibble-rate
- *                            10Base-T PHY. This is untestet as I haven't got
- *                            one of these cards.
- *                          - Fixed timer being added twice.
- *                          - Disabled PhyMonitoring by default as this is
- *                            work in progress. Define MONITOR to enable it.
- *                          - Now we don't display link info with PHYs that
- *                            doesn't support it (level1).
- *                          - Incresed tx_timeout beacuse of auto-neg.
- *                          - Adjusted timers for forced speeds.
- *
- *     v1.12 Oct 12, 2000   - Minor fixes (memleak, init, etc.)
- *
- *     v1.13 Nov 28, 2000   - Stop flooding console with auto-neg issues
- *                            when link can't be established.
- *                          - Added the bbuf option as a kernel parameter.
- *                          - Fixed ioaddr probe bug.
- *                          - Fixed stupid deadlock with MII interrupts.
- *                          - Added support for speed/duplex selection with
- *                            multiple nics.
- *                          - Added partly fix for TX Channel lockup with
- *                            TLAN v1.0 silicon. This needs to be investigated
- *                            further.
- *
- *     v1.14 Dec 16, 2000   - Added support for servicing multiple frames per.
- *                            interrupt. Thanks goes to
- *                            Adam Keys <adam@ti.com>
- *                            Denis Beaudoin <dbeaudoin@ti.com>
- *                            for providing the patch.
- *                          - Fixed auto-neg output when using multiple
- *                            adapters.
- *                          - Converted to use new taskq interface.
- *
- *     v1.14a Jan 6, 2001   - Minor adjustments (spinlocks, etc.)
- *
- *     Samuel Chessman <chessman@tux.org> New Maintainer!
- *
- *     v1.15 Apr 4, 2002    - Correct operation when aui=1 to be
- *                            10T half duplex no loopback
- *                            Thanks to Gunnar Eikman
- *
- *     Sakari Ailus <sakari.ailus@iki.fi>:
- *
- *     v1.15a Dec 15 2008   - Remove bbuf support, it doesn't work anyway.
- *
- *******************************************************************************/
+ ******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/module.h>
 #include <linux/init.h>
 
 #include "tlan.h"
 
-typedef u32 (TLanIntVectorFunc)( struct net_device *, u16 );
-
 
 /* For removing EISA devices */
-static struct net_device       *TLan_Eisa_Devices;
+static struct net_device       *tlan_eisa_devices;
 
-static int             TLanDevicesInstalled;
+static int             tlan_devices_installed;
 
 /* Set speed, duplex and aui settings */
 static  int aui[MAX_TLAN_BOARDS];
@@ -202,8 +59,9 @@ module_param_array(aui, int, NULL, 0);
 module_param_array(duplex, int, NULL, 0);
 module_param_array(speed, int, NULL, 0);
 MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
-MODULE_PARM_DESC(duplex, "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
-MODULE_PARM_DESC(speed, "ThunderLAN port speen setting(s) (0,10,100)");
+MODULE_PARM_DESC(duplex,
+                "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
+MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");
 
 MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
 MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
@@ -218,139 +76,144 @@ static  int              debug;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
 
-static const char TLanSignature[] = "TLAN";
-static  const char tlan_banner[] = "ThunderLAN driver v1.15a\n";
+static const char tlan_signature[] = "TLAN";
+static  const char tlan_banner[] = "ThunderLAN driver v1.17\n";
 static  int tlan_have_pci;
 static  int tlan_have_eisa;
 
-static const char *media[] = {
-       "10BaseT-HD ", "10BaseT-FD ","100baseTx-HD ",
-       "100baseTx-FD", "100baseT4", NULL
+static const char * const media[] = {
+       "10BaseT-HD", "10BaseT-FD", "100baseTx-HD",
+       "100BaseTx-FD", "100BaseT4", NULL
 };
 
 static struct board {
-       const char      *deviceLabel;
-       u32             flags;
-       u16             addrOfs;
+       const char      *device_label;
+       u32             flags;
+       u16             addr_ofs;
 } board_info[] = {
        { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
-       { "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+       { "Compaq Netelligent 10/100 TX PCI UTP",
+         TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
        { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
        { "Compaq NetFlex-3/P",
          TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
        { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
        { "Compaq Netelligent Integrated 10/100 TX UTP",
          TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
-       { "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 },
-       { "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 },
+       { "Compaq Netelligent Dual 10/100 TX PCI UTP",
+         TLAN_ADAPTER_NONE, 0x83 },
+       { "Compaq Netelligent 10/100 TX Embedded UTP",
+         TLAN_ADAPTER_NONE, 0x83 },
        { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
-       { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xF8 },
-       { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 },
+       { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
+       { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
        { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
-       { "Compaq Netelligent 10 T/2 PCI UTP/Coax", TLAN_ADAPTER_NONE, 0x83 },
+       { "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
        { "Compaq NetFlex-3/E",
-         TLAN_ADAPTER_ACTIVITY_LED |   /* EISA card */
+         TLAN_ADAPTER_ACTIVITY_LED |   /* EISA card */
          TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
-       { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
+       { "Compaq NetFlex-3/E",
+         TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
 };
 
 static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
        { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
        { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
        { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
        { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
        { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
        { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
        { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
        { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
        { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
        { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
        { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
        { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
-               PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
        { 0,}
 };
 MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);
 
-static void    TLan_EisaProbe( void );
-static void    TLan_Eisa_Cleanup( void );
-static int      TLan_Init( struct net_device * );
-static int     TLan_Open( struct net_device *dev );
-static netdev_tx_t TLan_StartTx( struct sk_buff *, struct net_device *);
-static irqreturn_t TLan_HandleInterrupt( int, void *);
-static int     TLan_Close( struct net_device *);
-static struct  net_device_stats *TLan_GetStats( struct net_device *);
-static void    TLan_SetMulticastList( struct net_device *);
-static int     TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd);
-static int      TLan_probe1( struct pci_dev *pdev, long ioaddr,
-                            int irq, int rev, const struct pci_device_id *ent);
-static void    TLan_tx_timeout( struct net_device *dev);
-static void    TLan_tx_timeout_work(struct work_struct *work);
-static int     tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent);
-
-static u32     TLan_HandleTxEOF( struct net_device *, u16 );
-static u32     TLan_HandleStatOverflow( struct net_device *, u16 );
-static u32     TLan_HandleRxEOF( struct net_device *, u16 );
-static u32     TLan_HandleDummy( struct net_device *, u16 );
-static u32     TLan_HandleTxEOC( struct net_device *, u16 );
-static u32     TLan_HandleStatusCheck( struct net_device *, u16 );
-static u32     TLan_HandleRxEOC( struct net_device *, u16 );
-
-static void    TLan_Timer( unsigned long );
-
-static void    TLan_ResetLists( struct net_device * );
-static void    TLan_FreeLists( struct net_device * );
-static void    TLan_PrintDio( u16 );
-static void    TLan_PrintList( TLanList *, char *, int );
-static void    TLan_ReadAndClearStats( struct net_device *, int );
-static void    TLan_ResetAdapter( struct net_device * );
-static void    TLan_FinishReset( struct net_device * );
-static void    TLan_SetMac( struct net_device *, int areg, char *mac );
-
-static void    TLan_PhyPrint( struct net_device * );
-static void    TLan_PhyDetect( struct net_device * );
-static void    TLan_PhyPowerDown( struct net_device * );
-static void    TLan_PhyPowerUp( struct net_device * );
-static void    TLan_PhyReset( struct net_device * );
-static void    TLan_PhyStartLink( struct net_device * );
-static void    TLan_PhyFinishAutoNeg( struct net_device * );
+static void    tlan_eisa_probe(void);
+static void    tlan_eisa_cleanup(void);
+static int      tlan_init(struct net_device *);
+static int     tlan_open(struct net_device *dev);
+static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
+static irqreturn_t tlan_handle_interrupt(int, void *);
+static int     tlan_close(struct net_device *);
+static struct  net_device_stats *tlan_get_stats(struct net_device *);
+static void    tlan_set_multicast_list(struct net_device *);
+static int     tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int      tlan_probe1(struct pci_dev *pdev, long ioaddr,
+                           int irq, int rev, const struct pci_device_id *ent);
+static void    tlan_tx_timeout(struct net_device *dev);
+static void    tlan_tx_timeout_work(struct work_struct *work);
+static int     tlan_init_one(struct pci_dev *pdev,
+                             const struct pci_device_id *ent);
+
+static u32     tlan_handle_tx_eof(struct net_device *, u16);
+static u32     tlan_handle_stat_overflow(struct net_device *, u16);
+static u32     tlan_handle_rx_eof(struct net_device *, u16);
+static u32     tlan_handle_dummy(struct net_device *, u16);
+static u32     tlan_handle_tx_eoc(struct net_device *, u16);
+static u32     tlan_handle_status_check(struct net_device *, u16);
+static u32     tlan_handle_rx_eoc(struct net_device *, u16);
+
+static void    tlan_timer(unsigned long);
+
+static void    tlan_reset_lists(struct net_device *);
+static void    tlan_free_lists(struct net_device *);
+static void    tlan_print_dio(u16);
+static void    tlan_print_list(struct tlan_list *, char *, int);
+static void    tlan_read_and_clear_stats(struct net_device *, int);
+static void    tlan_reset_adapter(struct net_device *);
+static void    tlan_finish_reset(struct net_device *);
+static void    tlan_set_mac(struct net_device *, int areg, char *mac);
+
+static void    tlan_phy_print(struct net_device *);
+static void    tlan_phy_detect(struct net_device *);
+static void    tlan_phy_power_down(struct net_device *);
+static void    tlan_phy_power_up(struct net_device *);
+static void    tlan_phy_reset(struct net_device *);
+static void    tlan_phy_start_link(struct net_device *);
+static void    tlan_phy_finish_auto_neg(struct net_device *);
 #ifdef MONITOR
-static void     TLan_PhyMonitor( struct net_device * );
+static void     tlan_phy_monitor(struct net_device *);
 #endif
 
 /*
-static int     TLan_PhyNop( struct net_device * );
-static int     TLan_PhyInternalCheck( struct net_device * );
-static int     TLan_PhyInternalService( struct net_device * );
-static int     TLan_PhyDp83840aCheck( struct net_device * );
+  static int   tlan_phy_nop(struct net_device *);
+  static int   tlan_phy_internal_check(struct net_device *);
+  static int   tlan_phy_internal_service(struct net_device *);
+  static int   tlan_phy_dp83840a_check(struct net_device *);
 */
 
-static bool    TLan_MiiReadReg( struct net_device *, u16, u16, u16 * );
-static void    TLan_MiiSendData( u16, u32, unsigned );
-static void    TLan_MiiSync( u16 );
-static void    TLan_MiiWriteReg( struct net_device *, u16, u16, u16 );
+static bool    tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
+static void    tlan_mii_send_data(u16, u32, unsigned);
+static void    tlan_mii_sync(u16);
+static void    tlan_mii_write_reg(struct net_device *, u16, u16, u16);
 
-static void    TLan_EeSendStart( u16 );
-static int     TLan_EeSendByte( u16, u8, int );
-static void    TLan_EeReceiveByte( u16, u8 *, int );
-static int     TLan_EeReadByte( struct net_device *, u8, u8 * );
+static void    tlan_ee_send_start(u16);
+static int     tlan_ee_send_byte(u16, u8, int);
+static void    tlan_ee_receive_byte(u16, u8 *, int);
+static int     tlan_ee_read_byte(struct net_device *, u8, u8 *);
 
 
 static inline void
-TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
+tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
 {
        unsigned long addr = (unsigned long)skb;
        tag->buffer[9].address = addr;
@@ -358,7 +221,7 @@ TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
 }
 
 static inline struct sk_buff *
-TLan_GetSKB( const struct tlan_list_tag *tag)
+tlan_get_skb(const struct tlan_list *tag)
 {
        unsigned long addr;
 
@@ -367,50 +230,50 @@ TLan_GetSKB( const struct tlan_list_tag *tag)
        return (struct sk_buff *) addr;
 }
 
-
-static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = {
+static u32
+(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
        NULL,
-       TLan_HandleTxEOF,
-       TLan_HandleStatOverflow,
-       TLan_HandleRxEOF,
-       TLan_HandleDummy,
-       TLan_HandleTxEOC,
-       TLan_HandleStatusCheck,
-       TLan_HandleRxEOC
+       tlan_handle_tx_eof,
+       tlan_handle_stat_overflow,
+       tlan_handle_rx_eof,
+       tlan_handle_dummy,
+       tlan_handle_tx_eoc,
+       tlan_handle_status_check,
+       tlan_handle_rx_eoc
 };
 
 static inline void
-TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
+tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        unsigned long flags = 0;
 
        if (!in_irq())
                spin_lock_irqsave(&priv->lock, flags);
-       if ( priv->timer.function != NULL &&
-               priv->timerType != TLAN_TIMER_ACTIVITY ) {
+       if (priv->timer.function != NULL &&
+           priv->timer_type != TLAN_TIMER_ACTIVITY) {
                if (!in_irq())
                        spin_unlock_irqrestore(&priv->lock, flags);
                return;
        }
-       priv->timer.function = TLan_Timer;
+       priv->timer.function = tlan_timer;
        if (!in_irq())
                spin_unlock_irqrestore(&priv->lock, flags);
 
        priv->timer.data = (unsigned long) dev;
-       priv->timerSetAt = jiffies;
-       priv->timerType = type;
+       priv->timer_set_at = jiffies;
+       priv->timer_type = type;
        mod_timer(&priv->timer, jiffies + ticks);
 
-} /* TLan_SetTimer */
+}
 
 
 /*****************************************************************************
 ******************************************************************************
 
-       ThunderLAN Driver Primary Functions
+ThunderLAN driver primary functions
 
-       These functions are more or less common to all Linux network drivers.
+these functions are more or less common to all linux network drivers.
 
 ******************************************************************************
 *****************************************************************************/
@@ -419,56 +282,124 @@ TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
 
 
 
-       /***************************************************************
-       *       tlan_remove_one
-       *
-       *       Returns:
-       *               Nothing
-       *       Parms:
-       *               None
-       *
-       *       Goes through the TLanDevices list and frees the device
-       *       structs and memory associated with each device (lists
-       *       and buffers).  It also ureserves the IO port regions
-       *       associated with this device.
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_remove_one
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             None
+ *
+ *     Goes through the TLanDevices list and frees the device
+ *     structs and memory associated with each device (lists
+ *     and buffers).  It also ureserves the IO port regions
+ *     associated with this device.
+ *
+ **************************************************************/
 
 
-static void __devexit tlan_remove_one( struct pci_dev *pdev)
+static void __devexit tlan_remove_one(struct pci_dev *pdev)
 {
-       struct net_device *dev = pci_get_drvdata( pdev );
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct tlan_priv        *priv = netdev_priv(dev);
 
-       unregister_netdev( dev );
+       unregister_netdev(dev);
 
-       if ( priv->dmaStorage ) {
-               pci_free_consistent(priv->pciDev,
-                                   priv->dmaSize, priv->dmaStorage,
-                                   priv->dmaStorageDMA );
+       if (priv->dma_storage) {
+               pci_free_consistent(priv->pci_dev,
+                                   priv->dma_size, priv->dma_storage,
+                                   priv->dma_storage_dma);
        }
 
 #ifdef CONFIG_PCI
        pci_release_regions(pdev);
 #endif
 
-       free_netdev( dev );
+       free_netdev(dev);
 
-       pci_set_drvdata( pdev, NULL );
+       pci_set_drvdata(pdev, NULL);
 }
 
+static void tlan_start(struct net_device *dev)
+{
+       tlan_reset_lists(dev);
+       /* NOTE: It might not be necessary to read the stats before a
+          reset if you don't care what the values are.
+       */
+       tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+       tlan_reset_adapter(dev);
+       netif_wake_queue(dev);
+}
+
+static void tlan_stop(struct net_device *dev)
+{
+       struct tlan_priv *priv = netdev_priv(dev);
+
+       tlan_read_and_clear_stats(dev, TLAN_RECORD);
+       outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
+       /* Reset and power down phy */
+       tlan_reset_adapter(dev);
+       if (priv->timer.function != NULL) {
+               del_timer_sync(&priv->timer);
+               priv->timer.function = NULL;
+       }
+}
+
+#ifdef CONFIG_PM
+
+static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+
+       if (netif_running(dev))
+               tlan_stop(dev);
+
+       netif_device_detach(dev);
+       pci_save_state(pdev);
+       pci_disable_device(pdev);
+       pci_wake_from_d3(pdev, false);
+       pci_set_power_state(pdev, PCI_D3hot);
+
+       return 0;
+}
+
+static int tlan_resume(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+       pci_enable_wake(pdev, 0, 0);
+       netif_device_attach(dev);
+
+       if (netif_running(dev))
+               tlan_start(dev);
+
+       return 0;
+}
+
+#else /* CONFIG_PM */
+
+#define tlan_suspend   NULL
+#define tlan_resume    NULL
+
+#endif /* CONFIG_PM */
+
+
 static struct pci_driver tlan_driver = {
        .name           = "tlan",
        .id_table       = tlan_pci_tbl,
        .probe          = tlan_init_one,
        .remove         = __devexit_p(tlan_remove_one),
+       .suspend        = tlan_suspend,
+       .resume         = tlan_resume,
 };
 
 static int __init tlan_probe(void)
 {
        int rc = -ENODEV;
 
-       printk(KERN_INFO "%s", tlan_banner);
+       pr_info("%s", tlan_banner);
 
        TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");
 
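
Editorial note on the tlan hunk above: the adapter bring-up and tear-down are factored into tlan_start()/tlan_stop(), and the new suspend/resume callbacks only quiesce or re-program the hardware when the interface is actually up, so the PM paths do not duplicate the reset and queue handling. A minimal standalone model of that reuse (types and names below are illustrative, not the driver's):

#include <stdbool.h>

struct model_nic {
	bool up;		/* interface administratively up	*/
	bool hw_active;		/* rings programmed, queue awake	*/
};

static void model_start(struct model_nic *n) { n->hw_active = true;  }
static void model_stop(struct model_nic *n)  { n->hw_active = false; }

/* Suspend quiesces the hardware only if the interface was up ... */
static int model_suspend(struct model_nic *n)
{
	if (n->up)
		model_stop(n);
	return 0;
}

/* ... and resume re-programs it under the same condition. */
static int model_resume(struct model_nic *n)
{
	if (n->up)
		model_start(n);
	return 0;
}
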
@@ -477,18 +408,18 @@ static int __init tlan_probe(void)
        rc = pci_register_driver(&tlan_driver);
 
        if (rc != 0) {
-               printk(KERN_ERR "TLAN: Could not register pci driver.\n");
+               pr_err("Could not register pci driver\n");
                goto err_out_pci_free;
        }
 
        TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
-       TLan_EisaProbe();
+       tlan_eisa_probe();
 
-       printk(KERN_INFO "TLAN: %d device%s installed, PCI: %d  EISA: %d\n",
-                TLanDevicesInstalled, TLanDevicesInstalled == 1 ? "" : "s",
-                tlan_have_pci, tlan_have_eisa);
+       pr_info("%d device%s installed, PCI: %d  EISA: %d\n",
+               tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
+               tlan_have_pci, tlan_have_eisa);
 
-       if (TLanDevicesInstalled == 0) {
+       if (tlan_devices_installed == 0) {
                rc = -ENODEV;
                goto  err_out_pci_unreg;
        }
@@ -501,39 +432,39 @@ err_out_pci_free:
 }
 
 
-static int __devinit tlan_init_one( struct pci_dev *pdev,
-                                   const struct pci_device_id *ent)
+static int __devinit tlan_init_one(struct pci_dev *pdev,
+                                  const struct pci_device_id *ent)
 {
-       return TLan_probe1( pdev, -1, -1, 0, ent);
+       return tlan_probe1(pdev, -1, -1, 0, ent);
 }
 
 
 /*
-       ***************************************************************
-        *      tlan_probe1
-        *
-        *      Returns:
-        *              0 on success, error code on error
-        *      Parms:
-        *              none
-        *
-        *      The name is lower case to fit in with all the rest of
-        *      the netcard_probe names.  This function looks for
-        *      another TLan based adapter, setting it up with the
-        *      allocated device struct if one is found.
-        *      tlan_probe has been ported to the new net API and
-        *      now allocates its own device structure. This function
-        *      is also used by modules.
-        *
-        **************************************************************/
-
-static int __devinit TLan_probe1(struct pci_dev *pdev,
+***************************************************************
+*      tlan_probe1
+*
+*      Returns:
+*              0 on success, error code on error
+*      Parms:
+*              none
+*
+*      The name is lower case to fit in with all the rest of
+*      the netcard_probe names.  This function looks for
+*      another TLan based adapter, setting it up with the
+*      allocated device struct if one is found.
+*      tlan_probe has been ported to the new net API and
+*      now allocates its own device structure. This function
+*      is also used by modules.
+*
+**************************************************************/
+
+static int __devinit tlan_probe1(struct pci_dev *pdev,
                                 long ioaddr, int irq, int rev,
-                                const struct pci_device_id *ent )
+                                const struct pci_device_id *ent)
 {
 
        struct net_device  *dev;
-       TLanPrivateInfo    *priv;
+       struct tlan_priv  *priv;
        u16                device_id;
        int                reg, rc = -ENODEV;
 
@@ -543,17 +474,17 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
                if (rc)
                        return rc;
 
-               rc = pci_request_regions(pdev, TLanSignature);
+               rc = pci_request_regions(pdev, tlan_signature);
                if (rc) {
-                       printk(KERN_ERR "TLAN: Could not reserve IO regions\n");
+                       pr_err("Could not reserve IO regions\n");
                        goto err_out;
                }
        }
 #endif  /*  CONFIG_PCI  */
 
-       dev = alloc_etherdev(sizeof(TLanPrivateInfo));
+       dev = alloc_etherdev(sizeof(struct tlan_priv));
        if (dev == NULL) {
-               printk(KERN_ERR "TLAN: Could not allocate memory for device.\n");
+               pr_err("Could not allocate memory for device\n");
                rc = -ENOMEM;
                goto err_out_regions;
        }
@@ -561,38 +492,39 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
 
        priv = netdev_priv(dev);
 
-       priv->pciDev = pdev;
+       priv->pci_dev = pdev;
        priv->dev = dev;
 
        /* Is this a PCI device? */
        if (pdev) {
-               u32                pci_io_base = 0;
+               u32                pci_io_base = 0;
 
                priv->adapter = &board_info[ent->driver_data];
 
                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (rc) {
-                       printk(KERN_ERR "TLAN: No suitable PCI mapping available.\n");
+                       pr_err("No suitable PCI mapping available\n");
                        goto err_out_free_dev;
                }
 
-               for ( reg= 0; reg <= 5; reg ++ ) {
+               for (reg = 0; reg <= 5; reg++) {
                        if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
                                pci_io_base = pci_resource_start(pdev, reg);
-                               TLAN_DBG( TLAN_DEBUG_GNRL, "IO mapping is available at %x.\n",
-                                               pci_io_base);
+                               TLAN_DBG(TLAN_DEBUG_GNRL,
+                                        "IO mapping is available at %x.\n",
+                                        pci_io_base);
                                break;
                        }
                }
                if (!pci_io_base) {
-                       printk(KERN_ERR "TLAN: No IO mappings available\n");
+                       pr_err("No IO mappings available\n");
                        rc = -EIO;
                        goto err_out_free_dev;
                }
 
                dev->base_addr = pci_io_base;
                dev->irq = pdev->irq;
-               priv->adapterRev = pdev->revision;
+               priv->adapter_rev = pdev->revision;
                pci_set_master(pdev);
                pci_set_drvdata(pdev, dev);
 
@@ -602,11 +534,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
                device_id = inw(ioaddr + EISA_ID2);
                priv->is_eisa = 1;
                if (device_id == 0x20F1) {
-                       priv->adapter = &board_info[13];        /* NetFlex-3/E */
-                       priv->adapterRev = 23;                  /* TLAN 2.3 */
+                       priv->adapter = &board_info[13]; /* NetFlex-3/E */
+                       priv->adapter_rev = 23;         /* TLAN 2.3 */
                } else {
                        priv->adapter = &board_info[14];
-                       priv->adapterRev = 10;                  /* TLAN 1.0 */
+                       priv->adapter_rev = 10;         /* TLAN 1.0 */
                }
                dev->base_addr = ioaddr;
                dev->irq = irq;
@@ -620,11 +552,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
                priv->speed  = ((dev->mem_start & 0x18) == 0x18) ? 0
                        : (dev->mem_start & 0x18) >> 3;
 
-               if (priv->speed == 0x1) {
+               if (priv->speed == 0x1)
                        priv->speed = TLAN_SPEED_10;
-               } else if (priv->speed == 0x2) {
+               else if (priv->speed == 0x2)
                        priv->speed = TLAN_SPEED_100;
-               }
+
                debug = priv->debug = dev->mem_end;
        } else {
                priv->aui    = aui[boards_found];
@@ -635,46 +567,45 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
 
        /* This will be used when we get an adapter error from
         * within our irq handler */
-       INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work);
+       INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);
 
        spin_lock_init(&priv->lock);
 
-       rc = TLan_Init(dev);
+       rc = tlan_init(dev);
        if (rc) {
-               printk(KERN_ERR "TLAN: Could not set up device.\n");
+               pr_err("Could not set up device\n");
                goto err_out_free_dev;
        }
 
        rc = register_netdev(dev);
        if (rc) {
-               printk(KERN_ERR "TLAN: Could not register device.\n");
+               pr_err("Could not register device\n");
                goto err_out_uninit;
        }
 
 
-       TLanDevicesInstalled++;
+       tlan_devices_installed++;
        boards_found++;
 
        /* pdev is NULL if this is an EISA device */
        if (pdev)
                tlan_have_pci++;
        else {
-               priv->nextDevice = TLan_Eisa_Devices;
-               TLan_Eisa_Devices = dev;
+               priv->next_device = tlan_eisa_devices;
+               tlan_eisa_devices = dev;
                tlan_have_eisa++;
        }
 
-       printk(KERN_INFO "TLAN: %s irq=%2d, io=%04x, %s, Rev. %d\n",
-                       dev->name,
-                       (int) dev->irq,
-                       (int) dev->base_addr,
-                       priv->adapter->deviceLabel,
-                       priv->adapterRev);
+       netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. %d\n",
+                   (int)dev->irq,
+                   (int)dev->base_addr,
+                   priv->adapter->device_label,
+                   priv->adapter_rev);
        return 0;
 
 err_out_uninit:
-       pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage,
-                           priv->dmaStorageDMA );
+       pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
+                           priv->dma_storage_dma);
 err_out_free_dev:
        free_netdev(dev);
 err_out_regions:
@@ -689,22 +620,23 @@ err_out:
 }
 
 
-static void TLan_Eisa_Cleanup(void)
+static void tlan_eisa_cleanup(void)
 {
        struct net_device *dev;
-       TLanPrivateInfo *priv;
+       struct tlan_priv *priv;
 
-       while( tlan_have_eisa ) {
-               dev = TLan_Eisa_Devices;
+       while (tlan_have_eisa) {
+               dev = tlan_eisa_devices;
                priv = netdev_priv(dev);
-               if (priv->dmaStorage) {
-                       pci_free_consistent(priv->pciDev, priv->dmaSize,
-                                           priv->dmaStorage, priv->dmaStorageDMA );
+               if (priv->dma_storage) {
+                       pci_free_consistent(priv->pci_dev, priv->dma_size,
+                                           priv->dma_storage,
+                                           priv->dma_storage_dma);
                }
-               release_region( dev->base_addr, 0x10);
-               unregister_netdev( dev );
-               TLan_Eisa_Devices = priv->nextDevice;
-               free_netdev( dev );
+               release_region(dev->base_addr, 0x10);
+               unregister_netdev(dev);
+               tlan_eisa_devices = priv->next_device;
+               free_netdev(dev);
                tlan_have_eisa--;
        }
 }
@@ -715,7 +647,7 @@ static void __exit tlan_exit(void)
        pci_unregister_driver(&tlan_driver);
 
        if (tlan_have_eisa)
-               TLan_Eisa_Cleanup();
+               tlan_eisa_cleanup();
 
 }
 
@@ -726,24 +658,24 @@ module_exit(tlan_exit);
 
 
 
-       /**************************************************************
-        *      TLan_EisaProbe
-        *
-        *      Returns: 0 on success, 1 otherwise
-        *
-        *      Parms:   None
-        *
-        *
-        *      This functions probes for EISA devices and calls
-        *      TLan_probe1 when one is found.
-        *
-        *************************************************************/
+/**************************************************************
+ *     tlan_eisa_probe
+ *
+ *     Returns: 0 on success, 1 otherwise
+ *
+ *     Parms:   None
+ *
+ *
+ *     This functions probes for EISA devices and calls
+ *     TLan_probe1 when one is found.
+ *
+ *
+ *************************************************************/
 
-static void  __init TLan_EisaProbe (void)
+static void  __init tlan_eisa_probe(void)
 {
-       long    ioaddr;
-       int     rc = -ENODEV;
-       int     irq;
+       long    ioaddr;
+       int     rc = -ENODEV;
+       int     irq;
        u16     device_id;
 
        if (!EISA_bus) {
@@ -754,15 +686,16 @@ static void  __init TLan_EisaProbe (void)
        /* Loop through all slots of the EISA bus */
        for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
 
-       TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
-                (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID));
-       TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
-                (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2));
+               TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+                        (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
+               TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+                        (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));
 
 
-               TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ",
-                                       (int) ioaddr);
-               if (request_region(ioaddr, 0x10, TLanSignature) == NULL)
+               TLAN_DBG(TLAN_DEBUG_PROBE,
+                        "Probing for EISA adapter at IO: 0x%4x : ",
+                        (int) ioaddr);
+               if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
                        goto out;
 
                if (inw(ioaddr + EISA_ID) != 0x110E) {
@@ -772,326 +705,324 @@ static void  __init TLan_EisaProbe (void)
 
                device_id = inw(ioaddr + EISA_ID2);
                if (device_id !=  0x20F1 && device_id != 0x40F1) {
-                       release_region (ioaddr, 0x10);
+                       release_region(ioaddr, 0x10);
                        goto out;
                }
 
-               if (inb(ioaddr + EISA_CR) != 0x1) {     /* Check if adapter is enabled */
-                       release_region (ioaddr, 0x10);
+               /* check if adapter is enabled */
+               if (inb(ioaddr + EISA_CR) != 0x1) {
+                       release_region(ioaddr, 0x10);
                        goto out2;
                }
 
                if (debug == 0x10)
-                       printk("Found one\n");
+                       pr_info("Found one\n");
 
 
                /* Get irq from board */
-               switch (inb(ioaddr + 0xCC0)) {
-                       case(0x10):
-                               irq=5;
-                               break;
-                       case(0x20):
-                               irq=9;
-                               break;
-                       case(0x40):
-                               irq=10;
-                               break;
-                       case(0x80):
-                               irq=11;
-                               break;
-                       default:
-                               goto out;
+               switch (inb(ioaddr + 0xcc0)) {
+               case(0x10):
+                       irq = 5;
+                       break;
+               case(0x20):
+                       irq = 9;
+                       break;
+               case(0x40):
+                       irq = 10;
+                       break;
+               case(0x80):
+                       irq = 11;
+                       break;
+               default:
+                       goto out;
                }
 
 
                /* Setup the newly found eisa adapter */
-               rc = TLan_probe1( NULL, ioaddr, irq,
-                                       12, NULL);
+               rc = tlan_probe1(NULL, ioaddr, irq,
+                                12, NULL);
                continue;
 
-               out:
-                       if (debug == 0x10)
-                               printk("None found\n");
-                       continue;
+out:
+               if (debug == 0x10)
+                       pr_info("None found\n");
+               continue;
 
-               out2:   if (debug == 0x10)
-                               printk("Card found but it is not enabled, skipping\n");
-                       continue;
+out2:
+               if (debug == 0x10)
+                       pr_info("Card found but it is not enabled, skipping\n");
+               continue;
 
        }
 
-} /* TLan_EisaProbe */
+}
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static void TLan_Poll(struct net_device *dev)
+static void tlan_poll(struct net_device *dev)
 {
        disable_irq(dev->irq);
-       TLan_HandleInterrupt(dev->irq, dev);
+       tlan_handle_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
 }
 #endif
 
-static const struct net_device_ops TLan_netdev_ops = {
-       .ndo_open               = TLan_Open,
-       .ndo_stop               = TLan_Close,
-       .ndo_start_xmit         = TLan_StartTx,
-       .ndo_tx_timeout         = TLan_tx_timeout,
-       .ndo_get_stats          = TLan_GetStats,
-       .ndo_set_multicast_list = TLan_SetMulticastList,
-       .ndo_do_ioctl           = TLan_ioctl,
+static const struct net_device_ops tlan_netdev_ops = {
+       .ndo_open               = tlan_open,
+       .ndo_stop               = tlan_close,
+       .ndo_start_xmit         = tlan_start_tx,
+       .ndo_tx_timeout         = tlan_tx_timeout,
+       .ndo_get_stats          = tlan_get_stats,
+       .ndo_set_multicast_list = tlan_set_multicast_list,
+       .ndo_do_ioctl           = tlan_ioctl,
        .ndo_change_mtu         = eth_change_mtu,
-       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
 #ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller     = TLan_Poll,
+       .ndo_poll_controller     = tlan_poll,
 #endif
 };
 
 
 
-       /***************************************************************
-        *      TLan_Init
-        *
-        *      Returns:
-        *              0 on success, error code otherwise.
-        *      Parms:
-        *              dev     The structure of the device to be
-        *                      init'ed.
-        *
-        *      This function completes the initialization of the
-        *      device structure and driver.  It reserves the IO
-        *      addresses, allocates memory for the lists and bounce
-        *      buffers, retrieves the MAC address from the eeprom
-        *      and assignes the device's methods.
-        *
-        **************************************************************/
-
-static int TLan_Init( struct net_device *dev )
+/***************************************************************
+ *     tlan_init
+ *
+ *     Returns:
+ *             0 on success, error code otherwise.
+ *     Parms:
+ *             dev     The structure of the device to be
+ *                     init'ed.
+ *
+ *     This function completes the initialization of the
+ *     device structure and driver.  It reserves the IO
+ *     addresses, allocates memory for the lists and bounce
+ *     buffers, retrieves the MAC address from the eeprom
+ *     and assignes the device's methods.
+ *
+ **************************************************************/
+
+static int tlan_init(struct net_device *dev)
 {
        int             dma_size;
-       int             err;
+       int             err;
        int             i;
-       TLanPrivateInfo *priv;
+       struct tlan_priv        *priv;
 
        priv = netdev_priv(dev);
 
-       dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
-               * ( sizeof(TLanList) );
-       priv->dmaStorage = pci_alloc_consistent(priv->pciDev,
-                                               dma_size, &priv->dmaStorageDMA);
-       priv->dmaSize = dma_size;
+       dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
+               * (sizeof(struct tlan_list));
+       priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
+                                                dma_size,
+                                                &priv->dma_storage_dma);
+       priv->dma_size = dma_size;
 
-       if ( priv->dmaStorage == NULL ) {
-               printk(KERN_ERR "TLAN:  Could not allocate lists and buffers for %s.\n",
-                       dev->name );
+       if (priv->dma_storage == NULL) {
+               pr_err("Could not allocate lists and buffers for %s\n",
+                      dev->name);
                return -ENOMEM;
        }
-       memset( priv->dmaStorage, 0, dma_size );
-       priv->rxList = (TLanList *) ALIGN((unsigned long)priv->dmaStorage, 8);
-       priv->rxListDMA = ALIGN(priv->dmaStorageDMA, 8);
-       priv->txList = priv->rxList + TLAN_NUM_RX_LISTS;
-       priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS;
+       memset(priv->dma_storage, 0, dma_size);
+       priv->rx_list = (struct tlan_list *)
+               ALIGN((unsigned long)priv->dma_storage, 8);
+       priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
+       priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
+       priv->tx_list_dma =
+               priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;
 
        err = 0;
-       for ( i = 0;  i < 6 ; i++ )
-               err |= TLan_EeReadByte( dev,
-                                       (u8) priv->adapter->addrOfs + i,
-                                       (u8 *) &dev->dev_addr[i] );
-       if ( err ) {
-               printk(KERN_ERR "TLAN: %s: Error reading MAC from eeprom: %d\n",
-                       dev->name,
-                       err );
+       for (i = 0;  i < 6 ; i++)
+               err |= tlan_ee_read_byte(dev,
+                                        (u8) priv->adapter->addr_ofs + i,
+                                        (u8 *) &dev->dev_addr[i]);
+       if (err) {
+               pr_err("%s: Error reading MAC from eeprom: %d\n",
+                      dev->name, err);
        }
        dev->addr_len = 6;
 
        netif_carrier_off(dev);
 
        /* Device methods */
-       dev->netdev_ops = &TLan_netdev_ops;
+       dev->netdev_ops = &tlan_netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
 
        return 0;
 
-} /* TLan_Init */
+}
 
 
 
 
-       /***************************************************************
-        *      TLan_Open
-        *
-        *      Returns:
-        *              0 on success, error code otherwise.
-        *      Parms:
-        *              dev     Structure of device to be opened.
-        *
-        *      This routine puts the driver and TLAN adapter in a
-        *      state where it is ready to send and receive packets.
-        *      It allocates the IRQ, resets and brings the adapter
-        *      out of reset, and allows interrupts.  It also delays
-        *      the startup for autonegotiation or sends a Rx GO
-        *      command to the adapter, as appropriate.
-        *
-        **************************************************************/
+/***************************************************************
+ *     tlan_open
+ *
+ *     Returns:
+ *             0 on success, error code otherwise.
+ *     Parms:
+ *             dev     Structure of device to be opened.
+ *
+ *     This routine puts the driver and TLAN adapter in a
+ *     state where it is ready to send and receive packets.
+ *     It allocates the IRQ, resets and brings the adapter
+ *     out of reset, and allows interrupts.  It also delays
+ *     the startup for autonegotiation or sends a Rx GO
+ *     command to the adapter, as appropriate.
+ *
+ **************************************************************/
 
-static int TLan_Open( struct net_device *dev )
+static int tlan_open(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        int             err;
 
-       priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION );
-       err = request_irq( dev->irq, TLan_HandleInterrupt, IRQF_SHARED,
-                          dev->name, dev );
+       priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
+       err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
+                         dev->name, dev);
 
-       if ( err ) {
-               pr_err("TLAN:  Cannot open %s because IRQ %d is already in use.\n",
-                      dev->name, dev->irq );
+       if (err) {
+               netdev_err(dev, "Cannot open because IRQ %d is already in use\n",
+                          dev->irq);
                return err;
        }
 
        init_timer(&priv->timer);
-       netif_start_queue(dev);
 
-       /* NOTE: It might not be necessary to read the stats before a
-                        reset if you don't care what the values are.
-       */
-       TLan_ResetLists( dev );
-       TLan_ReadAndClearStats( dev, TLAN_IGNORE );
-       TLan_ResetAdapter( dev );
+       tlan_start(dev);
 
-       TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened.  TLAN Chip Rev: %x\n",
-                 dev->name, priv->tlanRev );
+       TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened.  TLAN Chip Rev: %x\n",
+                dev->name, priv->tlan_rev);
 
        return 0;
 
-} /* TLan_Open */
+}
 
 
 
-       /**************************************************************
-        *      TLan_ioctl
-        *
-        *      Returns:
-        *              0 on success, error code otherwise
-        *      Params:
-        *              dev     structure of device to receive ioctl.
-        *
-        *              rq      ifreq structure to hold userspace data.
-        *
-        *              cmd     ioctl command.
-        *
-        *
-        *************************************************************/
+/**************************************************************
+ *     tlan_ioctl
+ *
+ *     Returns:
+ *             0 on success, error code otherwise
+ *     Params:
+ *             dev     structure of device to receive ioctl.
+ *
+ *             rq      ifreq structure to hold userspace data.
+ *
+ *             cmd     ioctl command.
+ *
+ *
+ *************************************************************/
 
-static int TLan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        struct mii_ioctl_data *data = if_mii(rq);
-       u32 phy   = priv->phy[priv->phyNum];
+       u32 phy   = priv->phy[priv->phy_num];
 
-       if (!priv->phyOnline)
+       if (!priv->phy_online)
                return -EAGAIN;
 
-       switch(cmd) {
-       case SIOCGMIIPHY:               /* Get address of MII PHY in use. */
-                       data->phy_id = phy;
+       switch (cmd) {
+       case SIOCGMIIPHY:               /* get address of MII PHY in use. */
+               data->phy_id = phy;
 
 
-       case SIOCGMIIREG:               /* Read MII PHY register. */
-                       TLan_MiiReadReg(dev, data->phy_id & 0x1f,
-                                       data->reg_num & 0x1f, &data->val_out);
-                       return 0;
+       case SIOCGMIIREG:               /* read MII PHY register. */
+               tlan_mii_read_reg(dev, data->phy_id & 0x1f,
+                                 data->reg_num & 0x1f, &data->val_out);
+               return 0;
 
 
-       case SIOCSMIIREG:               /* Write MII PHY register. */
-                       TLan_MiiWriteReg(dev, data->phy_id & 0x1f,
-                                        data->reg_num & 0x1f, data->val_in);
-                       return 0;
-               default:
-                       return -EOPNOTSUPP;
+       case SIOCSMIIREG:               /* write MII PHY register. */
+               tlan_mii_write_reg(dev, data->phy_id & 0x1f,
+                                  data->reg_num & 0x1f, data->val_in);
+               return 0;
+       default:
+               return -EOPNOTSUPP;
        }
-} /* tlan_ioctl */
+}
 
 
-       /***************************************************************
-        *      TLan_tx_timeout
-        *
-        *      Returns: nothing
-        *
-        *      Params:
-        *              dev     structure of device which timed out
-        *                      during transmit.
-        *
-        **************************************************************/
+/***************************************************************
+ *     tlan_tx_timeout
+ *
+ *     Returns: nothing
+ *
+ *     Params:
+ *             dev     structure of device which timed out
+ *                     during transmit.
+ *
+ **************************************************************/
 
-static void TLan_tx_timeout(struct net_device *dev)
+static void tlan_tx_timeout(struct net_device *dev)
 {
 
-       TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
+       TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
 
        /* Ok so we timed out, lets see what we can do about it...*/
-       TLan_FreeLists( dev );
-       TLan_ResetLists( dev );
-       TLan_ReadAndClearStats( dev, TLAN_IGNORE );
-       TLan_ResetAdapter( dev );
+       tlan_free_lists(dev);
+       tlan_reset_lists(dev);
+       tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+       tlan_reset_adapter(dev);
        dev->trans_start = jiffies; /* prevent tx timeout */
-       netif_wake_queue( dev );
+       netif_wake_queue(dev);
 
 }
 
 
-       /***************************************************************
-        *      TLan_tx_timeout_work
-        *
-        *      Returns: nothing
-        *
-        *      Params:
-        *              work    work item of device which timed out
-        *
-        **************************************************************/
+/***************************************************************
+ *     tlan_tx_timeout_work
+ *
+ *     Returns: nothing
+ *
+ *     Params:
+ *             work    work item of device which timed out
+ *
+ **************************************************************/
 
-static void TLan_tx_timeout_work(struct work_struct *work)
+static void tlan_tx_timeout_work(struct work_struct *work)
 {
-       TLanPrivateInfo *priv =
-               container_of(work, TLanPrivateInfo, tlan_tqueue);
+       struct tlan_priv        *priv =
+               container_of(work, struct tlan_priv, tlan_tqueue);
 
-       TLan_tx_timeout(priv->dev);
+       tlan_tx_timeout(priv->dev);
 }
 
 
 
-       /***************************************************************
-        *      TLan_StartTx
-        *
-        *      Returns:
-        *              0 on success, non-zero on failure.
-        *      Parms:
-        *              skb     A pointer to the sk_buff containing the
-        *                      frame to be sent.
-        *              dev     The device to send the data on.
-        *
-        *      This function adds a frame to the Tx list to be sent
-        *      ASAP.  First it verifies that the adapter is ready and
-        *      there is room in the queue.  Then it sets up the next
-        *      available list, copies the frame to the corresponding
-        *      buffer.  If the adapter Tx channel is idle, it gives
-        *      the adapter a Tx Go command on the list, otherwise it
-        *      sets the forward address of the previous list to point
-        *      to this one.  Then it frees the sk_buff.
-        *
-        **************************************************************/
-
-static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
+/***************************************************************
+ *     tlan_start_tx
+ *
+ *     Returns:
+ *             0 on success, non-zero on failure.
+ *     Parms:
+ *             skb     A pointer to the sk_buff containing the
+ *                     frame to be sent.
+ *             dev     The device to send the data on.
+ *
+ *     This function adds a frame to the Tx list to be sent
+ *     ASAP.  First it verifies that the adapter is ready and
+ *     there is room in the queue.  Then it sets up the next
+ *     available list, copies the frame to the corresponding
+ *     buffer.  If the adapter Tx channel is idle, it gives
+ *     the adapter a Tx Go command on the list, otherwise it
+ *     sets the forward address of the previous list to point
+ *     to this one.  Then it frees the sk_buff.
+ *
+ **************************************************************/
+
+static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        dma_addr_t      tail_list_phys;
-       TLanList        *tail_list;
+       struct tlan_list        *tail_list;
        unsigned long   flags;
        unsigned int    txlen;
 
-       if ( ! priv->phyOnline ) {
-               TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT:  %s PHY is not ready\n",
-                         dev->name );
+       if (!priv->phy_online) {
+               TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT:  %s PHY is not ready\n",
+                        dev->name);
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
@@ -1100,218 +1031,214 @@ static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
                return NETDEV_TX_OK;
        txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
 
-       tail_list = priv->txList + priv->txTail;
-       tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail;
+       tail_list = priv->tx_list + priv->tx_tail;
+       tail_list_phys =
+               priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;
 
-       if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) {
-               TLAN_DBG( TLAN_DEBUG_TX,
-                         "TRANSMIT:  %s is busy (Head=%d Tail=%d)\n",
-                         dev->name, priv->txHead, priv->txTail );
+       if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
+               TLAN_DBG(TLAN_DEBUG_TX,
+                        "TRANSMIT:  %s is busy (Head=%d Tail=%d)\n",
+                        dev->name, priv->tx_head, priv->tx_tail);
                netif_stop_queue(dev);
-               priv->txBusyCount++;
+               priv->tx_busy_count++;
                return NETDEV_TX_BUSY;
        }
 
        tail_list->forward = 0;
 
-       tail_list->buffer[0].address = pci_map_single(priv->pciDev,
+       tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
                                                      skb->data, txlen,
                                                      PCI_DMA_TODEVICE);
-       TLan_StoreSKB(tail_list, skb);
+       tlan_store_skb(tail_list, skb);
 
-       tail_list->frameSize = (u16) txlen;
+       tail_list->frame_size = (u16) txlen;
        tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
        tail_list->buffer[1].count = 0;
        tail_list->buffer[1].address = 0;
 
        spin_lock_irqsave(&priv->lock, flags);
-       tail_list->cStat = TLAN_CSTAT_READY;
-       if ( ! priv->txInProgress ) {
-               priv->txInProgress = 1;
-               TLAN_DBG( TLAN_DEBUG_TX,
-                         "TRANSMIT:  Starting TX on buffer %d\n", priv->txTail );
-               outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM );
-               outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD );
+       tail_list->c_stat = TLAN_CSTAT_READY;
+       if (!priv->tx_in_progress) {
+               priv->tx_in_progress = 1;
+               TLAN_DBG(TLAN_DEBUG_TX,
+                        "TRANSMIT:  Starting TX on buffer %d\n",
+                        priv->tx_tail);
+               outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
+               outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
        } else {
-               TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT:  Adding buffer %d to TX channel\n",
-                         priv->txTail );
-               if ( priv->txTail == 0 ) {
-                       ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward
+               TLAN_DBG(TLAN_DEBUG_TX,
+                        "TRANSMIT:  Adding buffer %d to TX channel\n",
+                        priv->tx_tail);
+               if (priv->tx_tail == 0) {
+                       (priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
                                = tail_list_phys;
                } else {
-                       ( priv->txList + ( priv->txTail - 1 ) )->forward
+                       (priv->tx_list + (priv->tx_tail - 1))->forward
                                = tail_list_phys;
                }
        }
        spin_unlock_irqrestore(&priv->lock, flags);
 
-       CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS );
+       CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);
 
        return NETDEV_TX_OK;
 
-} /* TLan_StartTx */
+}
 
 
 
 
-       /***************************************************************
-        *      TLan_HandleInterrupt
-        *
-        *      Returns:
-        *              Nothing
-        *      Parms:
-        *              irq     The line on which the interrupt
-        *                      occurred.
-        *              dev_id  A pointer to the device assigned to
-        *                      this irq line.
-        *
-        *      This function handles an interrupt generated by its
-        *      assigned TLAN adapter.  The function deactivates
-        *      interrupts on its adapter, records the type of
-        *      interrupt, executes the appropriate subhandler, and
-        *      acknowdges the interrupt to the adapter (thus
-        *      re-enabling adapter interrupts.
-        *
-        **************************************************************/
+/***************************************************************
+ *     tlan_handle_interrupt
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             irq     The line on which the interrupt
+ *                     occurred.
+ *             dev_id  A pointer to the device assigned to
+ *                     this irq line.
+ *
+ *     This function handles an interrupt generated by its
+ *     assigned TLAN adapter.  The function deactivates
+ *     interrupts on its adapter, records the type of
+ *     interrupt, executes the appropriate subhandler, and
+ *     acknowdges the interrupt to the adapter (thus
+ *     re-enabling adapter interrupts.
+ *
+ **************************************************************/
 
-static irqreturn_t TLan_HandleInterrupt(int irq, void *dev_id)
+static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
 {
        struct net_device       *dev = dev_id;
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        u16             host_int;
        u16             type;
 
        spin_lock(&priv->lock);
 
-       host_int = inw( dev->base_addr + TLAN_HOST_INT );
-       type = ( host_int & TLAN_HI_IT_MASK ) >> 2;
-       if ( type ) {
+       host_int = inw(dev->base_addr + TLAN_HOST_INT);
+       type = (host_int & TLAN_HI_IT_MASK) >> 2;
+       if (type) {
                u32     ack;
                u32     host_cmd;
 
-               outw( host_int, dev->base_addr + TLAN_HOST_INT );
-               ack = TLanIntVector[type]( dev, host_int );
+               outw(host_int, dev->base_addr + TLAN_HOST_INT);
+               ack = tlan_int_vector[type](dev, host_int);
 
-               if ( ack ) {
-                       host_cmd = TLAN_HC_ACK | ack | ( type << 18 );
-                       outl( host_cmd, dev->base_addr + TLAN_HOST_CMD );
+               if (ack) {
+                       host_cmd = TLAN_HC_ACK | ack | (type << 18);
+                       outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
                }
        }
 
        spin_unlock(&priv->lock);
 
        return IRQ_RETVAL(type);
-} /* TLan_HandleInterrupts */
+}
 
 
 
 
-       /***************************************************************
-        *      TLan_Close
-        *
-        *      Returns:
-        *              An error code.
-        *      Parms:
-        *              dev     The device structure of the device to
-        *                      close.
-        *
-        *      This function shuts down the adapter.  It records any
-        *      stats, puts the adapter into reset state, deactivates
-        *      its time as needed, and frees the irq it is using.
-        *
-        **************************************************************/
+/***************************************************************
+ *     tlan_close
+ *
+ *     Returns:
+ *             An error code.
+ *     Parms:
+ *             dev     The device structure of the device to
+ *                     close.
+ *
+ *     This function shuts down the adapter.  It records any
+ *     stats, puts the adapter into reset state, deactivates
+ *     its time as needed, and frees the irq it is using.
+ *
+ **************************************************************/
 
-static int TLan_Close(struct net_device *dev)
+static int tlan_close(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
 
-       netif_stop_queue(dev);
        priv->neg_be_verbose = 0;
+       tlan_stop(dev);
 
-       TLan_ReadAndClearStats( dev, TLAN_RECORD );
-       outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
-       if ( priv->timer.function != NULL ) {
-               del_timer_sync( &priv->timer );
-               priv->timer.function = NULL;
-       }
-
-       free_irq( dev->irq, dev );
-       TLan_FreeLists( dev );
-       TLAN_DBG( TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name );
+       free_irq(dev->irq, dev);
+       tlan_free_lists(dev);
+       TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);
 
        return 0;
 
-} /* TLan_Close */
+}
 
 
 
 
-       /***************************************************************
-        *      TLan_GetStats
-        *
-        *      Returns:
-        *              A pointer to the device's statistics structure.
-        *      Parms:
-        *              dev     The device structure to return the
-        *                      stats for.
-        *
-        *      This function updates the devices statistics by reading
-        *      the TLAN chip's onboard registers.  Then it returns the
-        *      address of the statistics structure.
-        *
-        **************************************************************/
+/***************************************************************
+ *     tlan_get_stats
+ *
+ *     Returns:
+ *             A pointer to the device's statistics structure.
+ *     Parms:
+ *             dev     The device structure to return the
+ *                     stats for.
+ *
+ *     This function updates the devices statistics by reading
+ *     the TLAN chip's onboard registers.  Then it returns the
+ *     address of the statistics structure.
+ *
+ **************************************************************/
 
-static struct net_device_stats *TLan_GetStats( struct net_device *dev )
+static struct net_device_stats *tlan_get_stats(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        int i;
 
        /* Should only read stats if open ? */
-       TLan_ReadAndClearStats( dev, TLAN_RECORD );
+       tlan_read_and_clear_stats(dev, TLAN_RECORD);
 
-       TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE:  %s EOC count = %d\n", dev->name,
-                 priv->rxEocCount );
-       TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT:  %s Busy count = %d\n", dev->name,
-                 priv->txBusyCount );
-       if ( debug & TLAN_DEBUG_GNRL ) {
-               TLan_PrintDio( dev->base_addr );
-               TLan_PhyPrint( dev );
+       TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE:  %s EOC count = %d\n", dev->name,
+                priv->rx_eoc_count);
+       TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT:  %s Busy count = %d\n", dev->name,
+                priv->tx_busy_count);
+       if (debug & TLAN_DEBUG_GNRL) {
+               tlan_print_dio(dev->base_addr);
+               tlan_phy_print(dev);
        }
-       if ( debug & TLAN_DEBUG_LIST ) {
-               for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ )
-                       TLan_PrintList( priv->rxList + i, "RX", i );
-               for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ )
-                       TLan_PrintList( priv->txList + i, "TX", i );
+       if (debug & TLAN_DEBUG_LIST) {
+               for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
+                       tlan_print_list(priv->rx_list + i, "RX", i);
+               for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
+                       tlan_print_list(priv->tx_list + i, "TX", i);
        }
 
        return &dev->stats;
 
-} /* TLan_GetStats */
+}
 
 
 
 
-       /***************************************************************
-        *      TLan_SetMulticastList
-        *
-        *      Returns:
-        *              Nothing
-        *      Parms:
-        *              dev     The device structure to set the
-        *                      multicast list for.
-        *
-        *      This function sets the TLAN adaptor to various receive
-        *      modes.  If the IFF_PROMISC flag is set, promiscuous
-        *      mode is acitviated.  Otherwise, promiscuous mode is
-        *      turned off.  If the IFF_ALLMULTI flag is set, then
-        *      the hash table is set to receive all group addresses.
-        *      Otherwise, the first three multicast addresses are
-        *      stored in AREG_1-3, and the rest are selected via the
-        *      hash table, as necessary.
-        *
-        **************************************************************/
+/***************************************************************
+ *     tlan_set_multicast_list
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             dev     The device structure to set the
+ *                     multicast list for.
+ *
+ *     This function sets the TLAN adaptor to various receive
+ *     modes.  If the IFF_PROMISC flag is set, promiscuous
+ *     mode is acitviated.  Otherwise, promiscuous mode is
+ *     turned off.  If the IFF_ALLMULTI flag is set, then
+ *     the hash table is set to receive all group addresses.
+ *     Otherwise, the first three multicast addresses are
+ *     stored in AREG_1-3, and the rest are selected via the
+ *     hash table, as necessary.
+ *
+ **************************************************************/
 
-static void TLan_SetMulticastList( struct net_device *dev )
+static void tlan_set_multicast_list(struct net_device *dev)
 {
        struct netdev_hw_addr *ha;
        u32                     hash1 = 0;
@@ -1320,53 +1247,56 @@ static void TLan_SetMulticastList( struct net_device *dev )
        u32                     offset;
        u8                      tmp;
 
-       if ( dev->flags & IFF_PROMISC ) {
-               tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
-               TLan_DioWrite8( dev->base_addr,
-                               TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF );
+       if (dev->flags & IFF_PROMISC) {
+               tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+               tlan_dio_write8(dev->base_addr,
+                               TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
        } else {
-               tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
-               TLan_DioWrite8( dev->base_addr,
-                               TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF );
-               if ( dev->flags & IFF_ALLMULTI ) {
-                       for ( i = 0; i < 3; i++ )
-                               TLan_SetMac( dev, i + 1, NULL );
-                       TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF );
-                       TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
+               tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+               tlan_dio_write8(dev->base_addr,
+                               TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
+               if (dev->flags & IFF_ALLMULTI) {
+                       for (i = 0; i < 3; i++)
+                               tlan_set_mac(dev, i + 1, NULL);
+                       tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
+                                        0xffffffff);
+                       tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
+                                        0xffffffff);
                } else {
                        i = 0;
                        netdev_for_each_mc_addr(ha, dev) {
-                               if ( i < 3 ) {
-                                       TLan_SetMac( dev, i + 1,
+                               if (i < 3) {
+                                       tlan_set_mac(dev, i + 1,
                                                     (char *) &ha->addr);
                                } else {
-                                       offset = TLan_HashFunc((u8 *)&ha->addr);
-                                       if ( offset < 32 )
-                                               hash1 |= ( 1 << offset );
+                                       offset =
+                                               tlan_hash_func((u8 *)&ha->addr);
+                                       if (offset < 32)
+                                               hash1 |= (1 << offset);
                                        else
-                                               hash2 |= ( 1 << ( offset - 32 ) );
+                                               hash2 |= (1 << (offset - 32));
                                }
                                i++;
                        }
-                       for ( ; i < 3; i++ )
-                               TLan_SetMac( dev, i + 1, NULL );
-                       TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, hash1 );
-                       TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, hash2 );
+                       for ( ; i < 3; i++)
+                               tlan_set_mac(dev, i + 1, NULL);
+                       tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
+                       tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
                }
        }
 
-} /* TLan_SetMulticastList */
+}
 
 
 
 /*****************************************************************************
 ******************************************************************************
 
-        ThunderLAN Driver Interrupt Vectors and Table
+ThunderLAN driver interrupt vectors and table
 
-       Please see Chap. 4, "Interrupt Handling" of the "ThunderLAN
-       Programmer's Guide" for more informations on handling interrupts
-       generated by TLAN based adapters.
+please see chap. 4, "Interrupt Handling" of the "ThunderLAN
+Programmer's Guide" for more informations on handling interrupts
+generated by TLAN based adapters.
 
 ******************************************************************************
 *****************************************************************************/
@@ -1374,46 +1304,48 @@ static void TLan_SetMulticastList( struct net_device *dev )
 
 
 
-       /***************************************************************
-        *      TLan_HandleTxEOF
-        *
-        *      Returns:
-        *              1
-        *      Parms:
-        *              dev             Device assigned the IRQ that was
-        *                              raised.
-        *              host_int        The contents of the HOST_INT
-        *                              port.
-        *
-        *      This function handles Tx EOF interrupts which are raised
-        *      by the adapter when it has completed sending the
-        *      contents of a buffer.  If detemines which list/buffer
-        *      was completed and resets it.  If the buffer was the last
-        *      in the channel (EOC), then the function checks to see if
-        *      another buffer is ready to send, and if so, sends a Tx
-        *      Go command.  Finally, the driver activates/continues the
-        *      activity LED.
-        *
-        **************************************************************/
-
-static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
+/***************************************************************
+ *     tlan_handle_tx_eof
+ *
+ *     Returns:
+ *             1
+ *     Parms:
+ *             dev             Device assigned the IRQ that was
+ *                             raised.
+ *             host_int        The contents of the HOST_INT
+ *                             port.
+ *
+ *     This function handles Tx EOF interrupts which are raised
+ *     by the adapter when it has completed sending the
+ *     contents of a buffer.  If detemines which list/buffer
+ *     was completed and resets it.  If the buffer was the last
+ *     in the channel (EOC), then the function checks to see if
+ *     another buffer is ready to send, and if so, sends a Tx
+ *     Go command.  Finally, the driver activates/continues the
+ *     activity LED.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        int             eoc = 0;
-       TLanList        *head_list;
+       struct tlan_list        *head_list;
        dma_addr_t      head_list_phys;
        u32             ack = 0;
-       u16             tmpCStat;
+       u16             tmp_c_stat;
 
-       TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT:  Handling TX EOF (Head=%d Tail=%d)\n",
-                 priv->txHead, priv->txTail );
-       head_list = priv->txList + priv->txHead;
+       TLAN_DBG(TLAN_DEBUG_TX,
+                "TRANSMIT:  Handling TX EOF (Head=%d Tail=%d)\n",
+                priv->tx_head, priv->tx_tail);
+       head_list = priv->tx_list + priv->tx_head;
 
-       while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
-               struct sk_buff *skb = TLan_GetSKB(head_list);
+       while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+              && (ack < 255)) {
+               struct sk_buff *skb = tlan_get_skb(head_list);
 
                ack++;
-               pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
+               pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
                                 max(skb->len,
                                     (unsigned int)TLAN_MIN_FRAME_SIZE),
                                 PCI_DMA_TODEVICE);
@@ -1421,304 +1353,313 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
                head_list->buffer[8].address = 0;
                head_list->buffer[9].address = 0;
 
-               if ( tmpCStat & TLAN_CSTAT_EOC )
+               if (tmp_c_stat & TLAN_CSTAT_EOC)
                        eoc = 1;
 
-               dev->stats.tx_bytes += head_list->frameSize;
+               dev->stats.tx_bytes += head_list->frame_size;
 
-               head_list->cStat = TLAN_CSTAT_UNUSED;
+               head_list->c_stat = TLAN_CSTAT_UNUSED;
                netif_start_queue(dev);
-               CIRC_INC( priv->txHead, TLAN_NUM_TX_LISTS );
-               head_list = priv->txList + priv->txHead;
+               CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
+               head_list = priv->tx_list + priv->tx_head;
        }
 
        if (!ack)
-               printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n");
-
-       if ( eoc ) {
-               TLAN_DBG( TLAN_DEBUG_TX,
-                         "TRANSMIT:  Handling TX EOC (Head=%d Tail=%d)\n",
-                         priv->txHead, priv->txTail );
-               head_list = priv->txList + priv->txHead;
-               head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
-               if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
-                       outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
+               netdev_info(dev,
+                           "Received interrupt for uncompleted TX frame\n");
+
+       if (eoc) {
+               TLAN_DBG(TLAN_DEBUG_TX,
+                        "TRANSMIT:  handling TX EOC (Head=%d Tail=%d)\n",
+                        priv->tx_head, priv->tx_tail);
+               head_list = priv->tx_list + priv->tx_head;
+               head_list_phys = priv->tx_list_dma
+                       + sizeof(struct tlan_list)*priv->tx_head;
+               if ((head_list->c_stat & TLAN_CSTAT_READY)
+                   == TLAN_CSTAT_READY) {
+                       outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
                        ack |= TLAN_HC_GO;
                } else {
-                       priv->txInProgress = 0;
+                       priv->tx_in_progress = 0;
                }
        }
 
-       if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
-               TLan_DioWrite8( dev->base_addr,
-                               TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
-               if ( priv->timer.function == NULL ) {
-                        priv->timer.function = TLan_Timer;
-                        priv->timer.data = (unsigned long) dev;
-                        priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
-                        priv->timerSetAt = jiffies;
-                        priv->timerType = TLAN_TIMER_ACTIVITY;
-                        add_timer(&priv->timer);
-               } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
-                       priv->timerSetAt = jiffies;
+       if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+               tlan_dio_write8(dev->base_addr,
+                               TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+               if (priv->timer.function == NULL) {
+                       priv->timer.function = tlan_timer;
+                       priv->timer.data = (unsigned long) dev;
+                       priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
+                       priv->timer_set_at = jiffies;
+                       priv->timer_type = TLAN_TIMER_ACTIVITY;
+                       add_timer(&priv->timer);
+               } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+                       priv->timer_set_at = jiffies;
                }
        }
 
        return ack;
 
-} /* TLan_HandleTxEOF */
+}
 
 
 
 
-       /***************************************************************
-        *      TLan_HandleStatOverflow
-        *
-        *      Returns:
-        *              1
-        *      Parms:
-        *              dev             Device assigned the IRQ that was
-        *                              raised.
-        *              host_int        The contents of the HOST_INT
-        *                              port.
-        *
-        *      This function handles the Statistics Overflow interrupt
-        *      which means that one or more of the TLAN statistics
-        *      registers has reached 1/2 capacity and needs to be read.
-        *
-        **************************************************************/
+/***************************************************************
+ *     TLan_HandleStatOverflow
+ *
+ *     Returns:
+ *             1
+ *     Parms:
+ *             dev             Device assigned the IRQ that was
+ *                             raised.
+ *             host_int        The contents of the HOST_INT
+ *                             port.
+ *
+ *     This function handles the Statistics Overflow interrupt
+ *     which means that one or more of the TLAN statistics
+ *     registers has reached 1/2 capacity and needs to be read.
+ *
+ **************************************************************/
 
-static u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
 {
-       TLan_ReadAndClearStats( dev, TLAN_RECORD );
+       tlan_read_and_clear_stats(dev, TLAN_RECORD);
 
        return 1;
 
-} /* TLan_HandleStatOverflow */
-
-
-
-
-       /***************************************************************
-        *      TLan_HandleRxEOF
-        *
-        *      Returns:
-        *              1
-        *      Parms:
-        *              dev             Device assigned the IRQ that was
-        *                              raised.
-        *              host_int        The contents of the HOST_INT
-        *                              port.
-        *
-        *      This function handles the Rx EOF interrupt which
-        *      indicates a frame has been received by the adapter from
-        *      the net and the frame has been transferred to memory.
-        *      The function determines the bounce buffer the frame has
-        *      been loaded into, creates a new sk_buff big enough to
-        *      hold the frame, and sends it to protocol stack.  It
-        *      then resets the used buffer and appends it to the end
-        *      of the list.  If the frame was the last in the Rx
-        *      channel (EOC), the function restarts the receive channel
-        *      by sending an Rx Go command to the adapter.  Then it
-        *      activates/continues the activity LED.
-        *
-        **************************************************************/
-
-static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
+}
+
+
+
+
+/***************************************************************
+ *     TLan_HandleRxEOF
+ *
+ *     Returns:
+ *             1
+ *     Parms:
+ *             dev             Device assigned the IRQ that was
+ *                             raised.
+ *             host_int        The contents of the HOST_INT
+ *                             port.
+ *
+ *     This function handles the Rx EOF interrupt which
+ *     indicates a frame has been received by the adapter from
+ *     the net and the frame has been transferred to memory.
+ *     The function determines the bounce buffer the frame has
+ *     been loaded into, creates a new sk_buff big enough to
+ *     hold the frame, and sends it to protocol stack.  It
+ *     then resets the used buffer and appends it to the end
+ *     of the list.  If the frame was the last in the Rx
+ *     channel (EOC), the function restarts the receive channel
+ *     by sending an Rx Go command to the adapter.  Then it
+ *     activates/continues the activity LED.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        u32             ack = 0;
        int             eoc = 0;
-       TLanList        *head_list;
+       struct tlan_list        *head_list;
        struct sk_buff  *skb;
-       TLanList        *tail_list;
-       u16             tmpCStat;
+       struct tlan_list        *tail_list;
+       u16             tmp_c_stat;
        dma_addr_t      head_list_phys;
 
-       TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE:  Handling RX EOF (Head=%d Tail=%d)\n",
-                 priv->rxHead, priv->rxTail );
-       head_list = priv->rxList + priv->rxHead;
-       head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
+       TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE:  handling RX EOF (Head=%d Tail=%d)\n",
+                priv->rx_head, priv->rx_tail);
+       head_list = priv->rx_list + priv->rx_head;
+       head_list_phys =
+               priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;
 
-       while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
-               dma_addr_t frameDma = head_list->buffer[0].address;
-               u32 frameSize = head_list->frameSize;
+       while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+              && (ack < 255)) {
+               dma_addr_t frame_dma = head_list->buffer[0].address;
+               u32 frame_size = head_list->frame_size;
                struct sk_buff *new_skb;
 
                ack++;
-               if (tmpCStat & TLAN_CSTAT_EOC)
+               if (tmp_c_stat & TLAN_CSTAT_EOC)
                        eoc = 1;
 
                new_skb = netdev_alloc_skb_ip_align(dev,
                                                    TLAN_MAX_FRAME_SIZE + 5);
-               if ( !new_skb )
+               if (!new_skb)
                        goto drop_and_reuse;
 
-               skb = TLan_GetSKB(head_list);
-               pci_unmap_single(priv->pciDev, frameDma,
+               skb = tlan_get_skb(head_list);
+               pci_unmap_single(priv->pci_dev, frame_dma,
                                 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
-               skb_put( skb, frameSize );
+               skb_put(skb, frame_size);
 
-               dev->stats.rx_bytes += frameSize;
+               dev->stats.rx_bytes += frame_size;
 
-               skb->protocol = eth_type_trans( skb, dev );
-               netif_rx( skb );
+               skb->protocol = eth_type_trans(skb, dev);
+               netif_rx(skb);
 
-               head_list->buffer[0].address = pci_map_single(priv->pciDev,
-                                                             new_skb->data,
-                                                             TLAN_MAX_FRAME_SIZE,
-                                                             PCI_DMA_FROMDEVICE);
+               head_list->buffer[0].address =
+                       pci_map_single(priv->pci_dev, new_skb->data,
+                                      TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
 
-               TLan_StoreSKB(head_list, new_skb);
+               tlan_store_skb(head_list, new_skb);
 drop_and_reuse:
                head_list->forward = 0;
-               head_list->cStat = 0;
-               tail_list = priv->rxList + priv->rxTail;
+               head_list->c_stat = 0;
+               tail_list = priv->rx_list + priv->rx_tail;
                tail_list->forward = head_list_phys;
 
-               CIRC_INC( priv->rxHead, TLAN_NUM_RX_LISTS );
-               CIRC_INC( priv->rxTail, TLAN_NUM_RX_LISTS );
-               head_list = priv->rxList + priv->rxHead;
-               head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
+               CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
+               CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
+               head_list = priv->rx_list + priv->rx_head;
+               head_list_phys = priv->rx_list_dma
+                       + sizeof(struct tlan_list)*priv->rx_head;
        }
 
        if (!ack)
-               printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n");
-
-
-       if ( eoc ) {
-               TLAN_DBG( TLAN_DEBUG_RX,
-                         "RECEIVE:  Handling RX EOC (Head=%d Tail=%d)\n",
-                         priv->rxHead, priv->rxTail );
-               head_list = priv->rxList + priv->rxHead;
-               head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
-               outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
+               netdev_info(dev,
+                           "Received interrupt for uncompleted RX frame\n");
+
+
+       if (eoc) {
+               TLAN_DBG(TLAN_DEBUG_RX,
+                        "RECEIVE:  handling RX EOC (Head=%d Tail=%d)\n",
+                        priv->rx_head, priv->rx_tail);
+               head_list = priv->rx_list + priv->rx_head;
+               head_list_phys = priv->rx_list_dma
+                       + sizeof(struct tlan_list)*priv->rx_head;
+               outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
                ack |= TLAN_HC_GO | TLAN_HC_RT;
-               priv->rxEocCount++;
+               priv->rx_eoc_count++;
        }
 
-       if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
-               TLan_DioWrite8( dev->base_addr,
-                               TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
-               if ( priv->timer.function == NULL )  {
-                       priv->timer.function = TLan_Timer;
+       if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+               tlan_dio_write8(dev->base_addr,
+                               TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+               if (priv->timer.function == NULL)  {
+                       priv->timer.function = tlan_timer;
                        priv->timer.data = (unsigned long) dev;
                        priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
-                       priv->timerSetAt = jiffies;
-                       priv->timerType = TLAN_TIMER_ACTIVITY;
+                       priv->timer_set_at = jiffies;
+                       priv->timer_type = TLAN_TIMER_ACTIVITY;
                        add_timer(&priv->timer);
-               } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
-                       priv->timerSetAt = jiffies;
+               } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+                       priv->timer_set_at = jiffies;
                }
        }
 
        return ack;
 
-} /* TLan_HandleRxEOF */
+}
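
An editorial aside on the RX path above: each completed receive element is
recycled by advancing the head and tail indices modulo TLAN_NUM_RX_LISTS and
re-linking the old head element behind the tail. The stand-alone C sketch
below shows only that circular-increment idiom; circ_inc() and RING_SIZE are
illustrative stand-ins for the driver's CIRC_INC macro and TLAN_NUM_RX_LISTS.

#include <stdio.h>

#define RING_SIZE 32                    /* stand-in for TLAN_NUM_RX_LISTS */

/* stand-in for CIRC_INC(): advance an index and wrap it around the ring */
static void circ_inc(int *idx, int size)
{
	if (++(*idx) >= size)
		*idx = 0;
}

int main(void)
{
	int head = 0;                   /* next element the adapter completes */
	int tail = RING_SIZE - 1;       /* last element handed back to it */
	int frame;

	/* recycle three completed elements, as the EOF handler does per frame */
	for (frame = 0; frame < 3; frame++) {
		/* ...process element 'head', then append it after 'tail'... */
		circ_inc(&head, RING_SIZE);
		circ_inc(&tail, RING_SIZE);
		printf("head=%d tail=%d\n", head, tail);
	}
	return 0;
}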
 
 
 
 
-       /***************************************************************
-       *      TLan_HandleDummy
-       *
-       *      Returns:
-       *              1
-       *      Parms:
-       *              dev             Device assigned the IRQ that was
-       *                              raised.
-       *              host_int        The contents of the HOST_INT
-       *                              port.
-       *
-       *      This function handles the Dummy interrupt, which is
-       *      raised whenever a test interrupt is generated by setting
-       *      the Req_Int bit of HOST_CMD to 1.
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_handle_dummy
+ *
+ *     Returns:
+ *             1
+ *     Parms:
+ *             dev             Device assigned the IRQ that was
+ *                             raised.
+ *             host_int        The contents of the HOST_INT
+ *                             port.
+ *
+ *     This function handles the Dummy interrupt, which is
+ *     raised whenever a test interrupt is generated by setting
+ *     the Req_Int bit of HOST_CMD to 1.
+ *
+ **************************************************************/
 
-static u32 TLan_HandleDummy( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
 {
-       printk( "TLAN:  Test interrupt on %s.\n", dev->name );
+       netdev_info(dev, "Test interrupt\n");
        return 1;
 
-} /* TLan_HandleDummy */
+}
 
 
 
 
-       /***************************************************************
-        *      TLan_HandleTxEOC
-       *
-       *      Returns:
-       *              1
-       *      Parms:
-       *              dev             Device assigned the IRQ that was
-       *                              raised.
-       *              host_int        The contents of the HOST_INT
-       *                              port.
-       *
-       *      This driver is structured to determine EOC occurrences by
-       *      reading the CSTAT member of the list structure.  Tx EOC
-       *      interrupts are disabled via the DIO INTDIS register.
-       *      However, TLAN chips before revision 3.0 didn't have this
-       *      functionality, so process EOC events if this is the
-       *      case.
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_handle_tx_eoc
+ *
+ *     Returns:
+ *             1
+ *     Parms:
+ *             dev             Device assigned the IRQ that was
+ *                             raised.
+ *             host_int        The contents of the HOST_INT
+ *                             port.
+ *
+ *     This driver is structured to determine EOC occurrences by
+ *     reading the CSTAT member of the list structure.  Tx EOC
+ *     interrupts are disabled via the DIO INTDIS register.
+ *     However, TLAN chips before revision 3.0 didn't have this
+ *     functionality, so process EOC events if this is the
+ *     case.
+ *
+ **************************************************************/
 
-static u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
-       TLanList                *head_list;
+       struct tlan_priv        *priv = netdev_priv(dev);
+       struct tlan_list                *head_list;
        dma_addr_t              head_list_phys;
        u32                     ack = 1;
 
        host_int = 0;
-       if ( priv->tlanRev < 0x30 ) {
-               TLAN_DBG( TLAN_DEBUG_TX,
-                         "TRANSMIT:  Handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
-                         priv->txHead, priv->txTail );
-               head_list = priv->txList + priv->txHead;
-               head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
-               if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
+       if (priv->tlan_rev < 0x30) {
+               TLAN_DBG(TLAN_DEBUG_TX,
+                        "TRANSMIT:  handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
+                        priv->tx_head, priv->tx_tail);
+               head_list = priv->tx_list + priv->tx_head;
+               head_list_phys = priv->tx_list_dma
+                       + sizeof(struct tlan_list)*priv->tx_head;
+               if ((head_list->c_stat & TLAN_CSTAT_READY)
+                   == TLAN_CSTAT_READY) {
                        netif_stop_queue(dev);
-                       outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
+                       outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
                        ack |= TLAN_HC_GO;
                } else {
-                       priv->txInProgress = 0;
+                       priv->tx_in_progress = 0;
                }
        }
 
        return ack;
 
-} /* TLan_HandleTxEOC */
+}
 
 
 
 
-       /***************************************************************
-       *      TLan_HandleStatusCheck
-       *
-       *      Returns:
-       *              0 if Adapter check, 1 if Network Status check.
-       *      Parms:
-       *              dev             Device assigned the IRQ that was
-       *                              raised.
-       *              host_int        The contents of the HOST_INT
-       *                              port.
-       *
-       *      This function handles Adapter Check/Network Status
-       *      interrupts generated by the adapter.  It checks the
-       *      vector in the HOST_INT register to determine if it is
-       *      an Adapter Check interrupt.  If so, it resets the
-       *      adapter.  Otherwise it clears the status registers
-       *      and services the PHY.
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_handle_status_check
+ *
+ *     Returns:
+ *             0 if Adapter check, 1 if Network Status check.
+ *     Parms:
+ *             dev             Device assigned the IRQ that was
+ *                             raised.
+ *             host_int        The contents of the HOST_INT
+ *                             port.
+ *
+ *     This function handles Adapter Check/Network Status
+ *     interrupts generated by the adapter.  It checks the
+ *     vector in the HOST_INT register to determine if it is
+ *     an Adapter Check interrupt.  If so, it resets the
+ *     adapter.  Otherwise it clears the status registers
+ *     and services the PHY.
+ *
+ **************************************************************/
 
-static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        u32             ack;
        u32             error;
        u8              net_sts;
@@ -1727,92 +1668,94 @@ static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
        u16             tlphy_sts;
 
        ack = 1;
-       if ( host_int & TLAN_HI_IV_MASK ) {
-               netif_stop_queue( dev );
-               error = inl( dev->base_addr + TLAN_CH_PARM );
-               printk( "TLAN:  %s: Adaptor Error = 0x%x\n", dev->name, error );
-               TLan_ReadAndClearStats( dev, TLAN_RECORD );
-               outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
+       if (host_int & TLAN_HI_IV_MASK) {
+               netif_stop_queue(dev);
+               error = inl(dev->base_addr + TLAN_CH_PARM);
+               netdev_info(dev, "Adaptor Error = 0x%x\n", error);
+               tlan_read_and_clear_stats(dev, TLAN_RECORD);
+               outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
 
                schedule_work(&priv->tlan_tqueue);
 
                netif_wake_queue(dev);
                ack = 0;
        } else {
-               TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name );
-               phy = priv->phy[priv->phyNum];
-
-               net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS );
-               if ( net_sts ) {
-                       TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts );
-                       TLAN_DBG( TLAN_DEBUG_GNRL, "%s:    Net_Sts = %x\n",
-                                 dev->name, (unsigned) net_sts );
+               TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
+               phy = priv->phy[priv->phy_num];
+
+               net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
+               if (net_sts) {
+                       tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
+                       TLAN_DBG(TLAN_DEBUG_GNRL, "%s:    Net_Sts = %x\n",
+                                dev->name, (unsigned) net_sts);
                }
-               if ( ( net_sts & TLAN_NET_STS_MIRQ ) &&  ( priv->phyNum == 0 ) ) {
-                       TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts );
-                       TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
-                       if ( ! ( tlphy_sts & TLAN_TS_POLOK ) &&
-                            ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
-                               tlphy_ctl |= TLAN_TC_SWAPOL;
-                               TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
-                       } else if ( ( tlphy_sts & TLAN_TS_POLOK ) &&
-                                   ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
-                               tlphy_ctl &= ~TLAN_TC_SWAPOL;
-                               TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
-                       }
-
-                       if (debug) {
-                               TLan_PhyPrint( dev );
+               if ((net_sts & TLAN_NET_STS_MIRQ) &&  (priv->phy_num == 0)) {
+                       tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
+                       tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+                       if (!(tlphy_sts & TLAN_TS_POLOK) &&
+                           !(tlphy_ctl & TLAN_TC_SWAPOL)) {
+                               tlphy_ctl |= TLAN_TC_SWAPOL;
+                               tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+                                                  tlphy_ctl);
+                       } else if ((tlphy_sts & TLAN_TS_POLOK) &&
+                                  (tlphy_ctl & TLAN_TC_SWAPOL)) {
+                               tlphy_ctl &= ~TLAN_TC_SWAPOL;
+                               tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+                                                  tlphy_ctl);
                        }
+
+                       if (debug)
+                               tlan_phy_print(dev);
                }
        }
 
        return ack;
 
-} /* TLan_HandleStatusCheck */
+}
 
 
 
 
-       /***************************************************************
-        *      TLan_HandleRxEOC
-       *
-       *      Returns:
-       *              1
-       *      Parms:
-       *              dev             Device assigned the IRQ that was
-       *                              raised.
-       *              host_int        The contents of the HOST_INT
-       *                              port.
-       *
-       *      This driver is structured to determine EOC occurrences by
-       *      reading the CSTAT member of the list structure.  Rx EOC
-       *      interrupts are disabled via the DIO INTDIS register.
-       *      However, TLAN chips before revision 3.0 didn't have this
-       *      CSTAT member or a INTDIS register, so if this chip is
-       *      pre-3.0, process EOC interrupts normally.
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_handle_rx_eoc
+ *
+ *     Returns:
+ *             1
+ *     Parms:
+ *             dev             Device assigned the IRQ that was
+ *                             raised.
+ *             host_int        The contents of the HOST_INT
+ *                             port.
+ *
+ *     This driver is structured to determine EOC occurrences by
+ *     reading the CSTAT member of the list structure.  Rx EOC
+ *     interrupts are disabled via the DIO INTDIS register.
+ *     However, TLAN chips before revision 3.0 didn't have this
+ *     CSTAT member or an INTDIS register, so if this chip is
+ *     pre-3.0, process EOC interrupts normally.
+ *
+ **************************************************************/
 
-static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        dma_addr_t      head_list_phys;
        u32             ack = 1;
 
-       if (  priv->tlanRev < 0x30 ) {
-               TLAN_DBG( TLAN_DEBUG_RX,
-                         "RECEIVE:  Handling RX EOC (Head=%d Tail=%d) -- IRQ\n",
-                         priv->rxHead, priv->rxTail );
-               head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
-               outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
+       if (priv->tlan_rev < 0x30) {
+               TLAN_DBG(TLAN_DEBUG_RX,
+                        "RECEIVE:  Handling RX EOC (head=%d tail=%d) -- IRQ\n",
+                        priv->rx_head, priv->rx_tail);
+               head_list_phys = priv->rx_list_dma
+                       + sizeof(struct tlan_list)*priv->rx_head;
+               outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
                ack |= TLAN_HC_GO | TLAN_HC_RT;
-               priv->rxEocCount++;
+               priv->rx_eoc_count++;
        }
 
        return ack;
 
-} /* TLan_HandleRxEOC */
+}
 
 
 
@@ -1820,98 +1763,98 @@ static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
 /*****************************************************************************
 ******************************************************************************
 
-       ThunderLAN Driver Timer Function
+ThunderLAN driver timer function
 
 ******************************************************************************
 *****************************************************************************/
 
 
-       /***************************************************************
-       *      TLan_Timer
-       *
-       *      Returns:
-       *              Nothing
-       *      Parms:
-       *              data    A value given to add timer when
-       *                      add_timer was called.
-       *
-       *      This function handles timed functionality for the
-       *      TLAN driver.  The two current timer uses are for
-       *      delaying for autonegotionation and driving the ACT LED.
-       *      -       Autonegotiation requires being allowed about
-       *              2 1/2 seconds before attempting to transmit a
-       *              packet.  It would be a very bad thing to hang
-       *              the kernel this long, so the driver doesn't
-       *              allow transmission 'til after this time, for
-       *              certain PHYs.  It would be much nicer if all
-       *              PHYs were interrupt-capable like the internal
-       *              PHY.
-       *      -       The ACT LED, which shows adapter activity, is
-       *              driven by the driver, and so must be left on
-       *              for a short period to power up the LED so it
-       *              can be seen.  This delay can be changed by
-       *              changing the TLAN_TIMER_ACT_DELAY in tlan.h,
-       *              if desired.  100 ms  produces a slightly
-       *              sluggish response.
-       *
-       **************************************************************/
-
-static void TLan_Timer( unsigned long data )
+/***************************************************************
+ *     tlan_timer
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             data    The value given when
+ *                     add_timer was called.
+ *
+ *     This function handles timed functionality for the
+ *     TLAN driver.  The two current timer uses are for
+ *     delaying for autonegotiation and driving the ACT LED.
+ *     -       Autonegotiation requires being allowed about
+ *             2 1/2 seconds before attempting to transmit a
+ *             packet.  It would be a very bad thing to hang
+ *             the kernel this long, so the driver doesn't
+ *             allow transmission 'til after this time, for
+ *             certain PHYs.  It would be much nicer if all
+ *             PHYs were interrupt-capable like the internal
+ *             PHY.
+ *     -       The ACT LED, which shows adapter activity, is
+ *             driven by the driver, and so must be left on
+ *             for a short period to power up the LED so it
+ *             can be seen.  This delay can be changed by
+ *             changing the TLAN_TIMER_ACT_DELAY in tlan.h,
+ *             if desired.  100 ms produces a slightly
+ *             sluggish response.
+ *
+ **************************************************************/
+
+static void tlan_timer(unsigned long data)
 {
        struct net_device       *dev = (struct net_device *) data;
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        u32             elapsed;
        unsigned long   flags = 0;
 
        priv->timer.function = NULL;
 
-       switch ( priv->timerType ) {
+       switch (priv->timer_type) {
 #ifdef MONITOR
-               case TLAN_TIMER_LINK_BEAT:
-                       TLan_PhyMonitor( dev );
-                       break;
+       case TLAN_TIMER_LINK_BEAT:
+               tlan_phy_monitor(dev);
+               break;
 #endif
-               case TLAN_TIMER_PHY_PDOWN:
-                       TLan_PhyPowerDown( dev );
-                       break;
-               case TLAN_TIMER_PHY_PUP:
-                       TLan_PhyPowerUp( dev );
-                       break;
-               case TLAN_TIMER_PHY_RESET:
-                       TLan_PhyReset( dev );
-                       break;
-               case TLAN_TIMER_PHY_START_LINK:
-                       TLan_PhyStartLink( dev );
-                       break;
-               case TLAN_TIMER_PHY_FINISH_AN:
-                       TLan_PhyFinishAutoNeg( dev );
-                       break;
-               case TLAN_TIMER_FINISH_RESET:
-                       TLan_FinishReset( dev );
-                       break;
-               case TLAN_TIMER_ACTIVITY:
-                       spin_lock_irqsave(&priv->lock, flags);
-                       if ( priv->timer.function == NULL ) {
-                               elapsed = jiffies - priv->timerSetAt;
-                               if ( elapsed >= TLAN_TIMER_ACT_DELAY ) {
-                                       TLan_DioWrite8( dev->base_addr,
-                                                       TLAN_LED_REG, TLAN_LED_LINK );
-                               } else  {
-                                       priv->timer.function = TLan_Timer;
-                                       priv->timer.expires = priv->timerSetAt
-                                               + TLAN_TIMER_ACT_DELAY;
-                                       spin_unlock_irqrestore(&priv->lock, flags);
-                                       add_timer( &priv->timer );
-                                       break;
-                               }
+       case TLAN_TIMER_PHY_PDOWN:
+               tlan_phy_power_down(dev);
+               break;
+       case TLAN_TIMER_PHY_PUP:
+               tlan_phy_power_up(dev);
+               break;
+       case TLAN_TIMER_PHY_RESET:
+               tlan_phy_reset(dev);
+               break;
+       case TLAN_TIMER_PHY_START_LINK:
+               tlan_phy_start_link(dev);
+               break;
+       case TLAN_TIMER_PHY_FINISH_AN:
+               tlan_phy_finish_auto_neg(dev);
+               break;
+       case TLAN_TIMER_FINISH_RESET:
+               tlan_finish_reset(dev);
+               break;
+       case TLAN_TIMER_ACTIVITY:
+               spin_lock_irqsave(&priv->lock, flags);
+               if (priv->timer.function == NULL) {
+                       elapsed = jiffies - priv->timer_set_at;
+                       if (elapsed >= TLAN_TIMER_ACT_DELAY) {
+                               tlan_dio_write8(dev->base_addr,
+                                               TLAN_LED_REG, TLAN_LED_LINK);
+                       } else  {
+                               priv->timer.function = tlan_timer;
+                               priv->timer.expires = priv->timer_set_at
+                                       + TLAN_TIMER_ACT_DELAY;
+                               spin_unlock_irqrestore(&priv->lock, flags);
+                               add_timer(&priv->timer);
+                               break;
                        }
-                       spin_unlock_irqrestore(&priv->lock, flags);
-                       break;
-               default:
-                       break;
+               }
+               spin_unlock_irqrestore(&priv->lock, flags);
+               break;
+       default:
+               break;
        }
 
-} /* TLan_Timer */
+}
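
An editorial aside on the timer handling above: the driver keeps one
struct timer_list per adapter and treats a NULL timer.function as "idle",
so activity events either arm the timer or merely refresh timer_set_at.
The sketch below mirrors that arm-or-refresh pattern against the 2.6-era
timer API; led_state, ACT_DELAY and the helper names are illustrative
stand-ins rather than the driver's own symbols, and locking is omitted.

#include <linux/types.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

#define ACT_DELAY (HZ / 10)             /* stand-in for TLAN_TIMER_ACT_DELAY */

struct led_state {                      /* illustrative subset of struct tlan_priv */
	struct timer_list       timer;
	unsigned long           timer_set_at;
};

static void led_timer_fn(unsigned long data)
{
	struct led_state *st = (struct led_state *)data;

	/* in the driver this is tlan_timer(); marking the timer idle here
	 * is what allows the next kick to re-arm it */
	st->timer.function = NULL;
}

static void activity_led_init(struct led_state *st)
{
	init_timer(&st->timer);         /* the driver does this at probe time */
	st->timer.function = NULL;      /* idle until the first kick */
}

/* Arm the one-shot timer if it is idle; otherwise only refresh the
 * timestamp so the running timer extends itself instead of stacking
 * a second timer on top of it. */
static void activity_led_kick(struct led_state *st)
{
	if (st->timer.function == NULL) {
		st->timer.function = led_timer_fn;
		st->timer.data     = (unsigned long)st;
		st->timer.expires  = jiffies + ACT_DELAY;
		st->timer_set_at   = jiffies;
		add_timer(&st->timer);
	} else {
		st->timer_set_at = jiffies;
	}
}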
 
 
 
@@ -1919,39 +1862,39 @@ static void TLan_Timer( unsigned long data )
 /*****************************************************************************
 ******************************************************************************
 
-       ThunderLAN Driver Adapter Related Routines
+ThunderLAN driver adapter related routines
 
 ******************************************************************************
 *****************************************************************************/
 
 
-       /***************************************************************
-       *      TLan_ResetLists
-       *
-       *      Returns:
-       *              Nothing
-       *      Parms:
-       *              dev     The device structure with the list
-       *                      stuctures to be reset.
-       *
-       *      This routine sets the variables associated with managing
-       *      the TLAN lists to their initial values.
-       *
-       **************************************************************/
-
-static void TLan_ResetLists( struct net_device *dev )
+/***************************************************************
+ *     tlan_reset_lists
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             dev     The device structure with the list
+ *                     structures to be reset.
+ *
+ *     This routine sets the variables associated with managing
+ *     the TLAN lists to their initial values.
+ *
+ **************************************************************/
+
+static void tlan_reset_lists(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        int             i;
-       TLanList        *list;
+       struct tlan_list        *list;
        dma_addr_t      list_phys;
        struct sk_buff  *skb;
 
-       priv->txHead = 0;
-       priv->txTail = 0;
-       for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
-               list = priv->txList + i;
-               list->cStat = TLAN_CSTAT_UNUSED;
+       priv->tx_head = 0;
+       priv->tx_tail = 0;
+       for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+               list = priv->tx_list + i;
+               list->c_stat = TLAN_CSTAT_UNUSED;
                list->buffer[0].address = 0;
                list->buffer[2].count = 0;
                list->buffer[2].address = 0;
@@ -1959,169 +1902,169 @@ static void TLan_ResetLists( struct net_device *dev )
                list->buffer[9].address = 0;
        }
 
-       priv->rxHead = 0;
-       priv->rxTail = TLAN_NUM_RX_LISTS - 1;
-       for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
-               list = priv->rxList + i;
-               list_phys = priv->rxListDMA + sizeof(TLanList) * i;
-               list->cStat = TLAN_CSTAT_READY;
-               list->frameSize = TLAN_MAX_FRAME_SIZE;
+       priv->rx_head = 0;
+       priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
+       for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+               list = priv->rx_list + i;
+               list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
+               list->c_stat = TLAN_CSTAT_READY;
+               list->frame_size = TLAN_MAX_FRAME_SIZE;
                list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
                skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
-               if ( !skb ) {
-                       pr_err("TLAN: out of memory for received data.\n" );
+               if (!skb) {
+                       netdev_err(dev, "Out of memory for received data\n");
                        break;
                }
 
-               list->buffer[0].address = pci_map_single(priv->pciDev,
+               list->buffer[0].address = pci_map_single(priv->pci_dev,
                                                         skb->data,
                                                         TLAN_MAX_FRAME_SIZE,
                                                         PCI_DMA_FROMDEVICE);
-               TLan_StoreSKB(list, skb);
+               tlan_store_skb(list, skb);
                list->buffer[1].count = 0;
                list->buffer[1].address = 0;
-               list->forward = list_phys + sizeof(TLanList);
+               list->forward = list_phys + sizeof(struct tlan_list);
        }
 
        /* in case ran out of memory early, clear bits */
        while (i < TLAN_NUM_RX_LISTS) {
-               TLan_StoreSKB(priv->rxList + i, NULL);
+               tlan_store_skb(priv->rx_list + i, NULL);
                ++i;
        }
        list->forward = 0;
 
-} /* TLan_ResetLists */
+}
 
 
-static void TLan_FreeLists( struct net_device *dev )
+static void tlan_free_lists(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        int             i;
-       TLanList        *list;
+       struct tlan_list        *list;
        struct sk_buff  *skb;
 
-       for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
-               list = priv->txList + i;
-               skb = TLan_GetSKB(list);
-               if ( skb ) {
+       for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+               list = priv->tx_list + i;
+               skb = tlan_get_skb(list);
+               if (skb) {
                        pci_unmap_single(
-                               priv->pciDev,
+                               priv->pci_dev,
                                list->buffer[0].address,
                                max(skb->len,
                                    (unsigned int)TLAN_MIN_FRAME_SIZE),
                                PCI_DMA_TODEVICE);
-                       dev_kfree_skb_any( skb );
+                       dev_kfree_skb_any(skb);
                        list->buffer[8].address = 0;
                        list->buffer[9].address = 0;
                }
        }
 
-       for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
-               list = priv->rxList + i;
-               skb = TLan_GetSKB(list);
-               if ( skb ) {
-                       pci_unmap_single(priv->pciDev,
+       for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+               list = priv->rx_list + i;
+               skb = tlan_get_skb(list);
+               if (skb) {
+                       pci_unmap_single(priv->pci_dev,
                                         list->buffer[0].address,
                                         TLAN_MAX_FRAME_SIZE,
                                         PCI_DMA_FROMDEVICE);
-                       dev_kfree_skb_any( skb );
+                       dev_kfree_skb_any(skb);
                        list->buffer[8].address = 0;
                        list->buffer[9].address = 0;
                }
        }
-} /* TLan_FreeLists */
+}
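
An editorial aside on the list management above: tlan_reset_lists() chains
the RX elements through their forward members, each element holding the bus
address of the next and the final element holding 0 so the adapter stops
there until recycled elements are appended behind the tail. The host-side
model below illustrates just that chaining; struct demo_list and the fake
DMA base address are illustrative, not the driver's real descriptor layout.

#include <stdio.h>
#include <stdint.h>

#define NUM_RX 4                        /* stand-in for TLAN_NUM_RX_LISTS */

struct demo_list {                      /* illustrative stand-in for struct tlan_list */
	uint32_t forward;               /* bus address of the next element */
	/* c_stat, frame_size and buffer[] omitted for brevity */
};

int main(void)
{
	struct demo_list rx[NUM_RX];
	uint32_t rx_dma = 0x10000;      /* pretend DMA base of the array */
	int i;

	/* chain every element to its successor, as tlan_reset_lists() does */
	for (i = 0; i < NUM_RX; i++)
		rx[i].forward = rx_dma + sizeof(struct demo_list) * (i + 1);
	rx[NUM_RX - 1].forward = 0;     /* adapter parks on the last element */

	for (i = 0; i < NUM_RX; i++)
		printf("rx[%d].forward = 0x%x\n", i, (unsigned)rx[i].forward);
	return 0;
}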
 
 
 
 
-       /***************************************************************
-       *      TLan_PrintDio
-       *
-       *      Returns:
-       *              Nothing
-       *      Parms:
-       *              io_base         Base IO port of the device of
-       *                              which to print DIO registers.
-       *
-       *      This function prints out all the internal (DIO)
-       *      registers of a TLAN chip.
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_print_dio
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             io_base         Base IO port of the device of
+ *                             which to print DIO registers.
+ *
+ *     This function prints out all the internal (DIO)
+ *     registers of a TLAN chip.
+ *
+ **************************************************************/
 
-static void TLan_PrintDio( u16 io_base )
+static void tlan_print_dio(u16 io_base)
 {
        u32 data0, data1;
        int     i;
 
-       printk( "TLAN:   Contents of internal registers for io base 0x%04hx.\n",
-               io_base );
-       printk( "TLAN:      Off.  +0         +4\n" );
-       for ( i = 0; i < 0x4C; i+= 8 ) {
-               data0 = TLan_DioRead32( io_base, i );
-               data1 = TLan_DioRead32( io_base, i + 0x4 );
-               printk( "TLAN:      0x%02x  0x%08x 0x%08x\n", i, data0, data1 );
+       pr_info("Contents of internal registers for io base 0x%04hx\n",
+               io_base);
+       pr_info("Off.  +0        +4\n");
+       for (i = 0; i < 0x4C; i += 8) {
+               data0 = tlan_dio_read32(io_base, i);
+               data1 = tlan_dio_read32(io_base, i + 0x4);
+               pr_info("0x%02x  0x%08x 0x%08x\n", i, data0, data1);
        }
 
-} /* TLan_PrintDio */
+}
 
 
 
 
-       /***************************************************************
-       *      TLan_PrintList
-       *
-       *      Returns:
-       *              Nothing
-       *      Parms:
-       *              list    A pointer to the TLanList structure to
-       *                      be printed.
-       *              type    A string to designate type of list,
-       *                      "Rx" or "Tx".
-       *              num     The index of the list.
-       *
-       *      This function prints out the contents of the list
-       *      pointed to by the list parameter.
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_print_list
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             list    A pointer to the struct tlan_list to
+ *                     be printed.
+ *             type    A string to designate type of list,
+ *                     "Rx" or "Tx".
+ *             num     The index of the list.
+ *
+ *     This function prints out the contents of the list
+ *     pointed to by the list parameter.
+ *
+ **************************************************************/
 
-static void TLan_PrintList( TLanList *list, char *type, int num)
+static void tlan_print_list(struct tlan_list *list, char *type, int num)
 {
        int i;
 
-       printk( "TLAN:   %s List %d at %p\n", type, num, list );
-       printk( "TLAN:      Forward    = 0x%08x\n",  list->forward );
-       printk( "TLAN:      CSTAT      = 0x%04hx\n", list->cStat );
-       printk( "TLAN:      Frame Size = 0x%04hx\n", list->frameSize );
-       /* for ( i = 0; i < 10; i++ ) { */
-       for ( i = 0; i < 2; i++ ) {
-               printk( "TLAN:      Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
-                       i, list->buffer[i].count, list->buffer[i].address );
+       pr_info("%s List %d at %p\n", type, num, list);
+       pr_info("   Forward    = 0x%08x\n",  list->forward);
+       pr_info("   CSTAT      = 0x%04hx\n", list->c_stat);
+       pr_info("   Frame Size = 0x%04hx\n", list->frame_size);
+       /* for (i = 0; i < 10; i++) { */
+       for (i = 0; i < 2; i++) {
+               pr_info("   Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
+                       i, list->buffer[i].count, list->buffer[i].address);
        }
 
-} /* TLan_PrintList */
+}
 
 
 
 
-       /***************************************************************
-       *      TLan_ReadAndClearStats
-       *
-       *      Returns:
-       *              Nothing
-       *      Parms:
-       *              dev     Pointer to device structure of adapter
-       *                      to which to read stats.
-       *              record  Flag indicating whether to add
-       *
-       *      This functions reads all the internal status registers
-       *      of the TLAN chip, which clears them as a side effect.
-       *      It then either adds the values to the device's status
-       *      struct, or discards them, depending on whether record
-       *      is TLAN_RECORD (!=0)  or TLAN_IGNORE (==0).
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_read_and_clear_stats
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             dev     Pointer to device structure of adapter
+ *                     from which to read stats.
+ *             record  Flag indicating whether to add the read
+ *                     values to the device's statistics.
+ *
+ *     This function reads all the internal status registers
+ *     of the TLAN chip, which clears them as a side effect.
+ *     It then either adds the values to the device's status
+ *     struct, or discards them, depending on whether record
+ *     is TLAN_RECORD (!=0) or TLAN_IGNORE (==0).
+ *
+ **************************************************************/
 
-static void TLan_ReadAndClearStats( struct net_device *dev, int record )
+static void tlan_read_and_clear_stats(struct net_device *dev, int record)
 {
        u32             tx_good, tx_under;
        u32             rx_good, rx_over;
@@ -2129,41 +2072,42 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
        u32             multi_col, single_col;
        u32             excess_col, late_col, loss;
 
-       outw( TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR );
-       tx_good  = inb( dev->base_addr + TLAN_DIO_DATA );
-       tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
-       tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
-       tx_under = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
-
-       outw( TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR );
-       rx_good  = inb( dev->base_addr + TLAN_DIO_DATA );
-       rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
-       rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
-       rx_over  = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
-
-       outw( TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR );
-       def_tx  = inb( dev->base_addr + TLAN_DIO_DATA );
-       def_tx += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
-       crc     = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
-       code    = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
-
-       outw( TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
-       multi_col   = inb( dev->base_addr + TLAN_DIO_DATA );
-       multi_col  += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
-       single_col  = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
-       single_col += inb( dev->base_addr + TLAN_DIO_DATA + 3 ) << 8;
-
-       outw( TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
-       excess_col = inb( dev->base_addr + TLAN_DIO_DATA );
-       late_col   = inb( dev->base_addr + TLAN_DIO_DATA + 1 );
-       loss       = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
-
-       if ( record ) {
+       outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+       tx_good  = inb(dev->base_addr + TLAN_DIO_DATA);
+       tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+       tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+       tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+       outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+       rx_good  = inb(dev->base_addr + TLAN_DIO_DATA);
+       rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+       rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+       rx_over  = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+       outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
+       def_tx  = inb(dev->base_addr + TLAN_DIO_DATA);
+       def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+       crc     = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+       code    = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+       outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+       multi_col   = inb(dev->base_addr + TLAN_DIO_DATA);
+       multi_col  += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+       single_col  = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+       single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;
+
+       outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+       excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
+       late_col   = inb(dev->base_addr + TLAN_DIO_DATA + 1);
+       loss       = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+
+       if (record) {
                dev->stats.rx_packets += rx_good;
                dev->stats.rx_errors  += rx_over + crc + code;
                dev->stats.tx_packets += tx_good;
                dev->stats.tx_errors  += tx_under + loss;
-               dev->stats.collisions += multi_col + single_col + excess_col + late_col;
+               dev->stats.collisions += multi_col
+                       + single_col + excess_col + late_col;
 
                dev->stats.rx_over_errors    += rx_over;
                dev->stats.rx_crc_errors     += crc;
@@ -2173,39 +2117,39 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
                dev->stats.tx_carrier_errors += loss;
        }
 
-} /* TLan_ReadAndClearStats */
+}
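
An editorial aside on the statistics path above: each DIO statistics register
is read one byte at a time through the data window, the low three bytes
forming a 24-bit good-frame count and the top byte an 8-bit error counter
(Tx underruns in the case of TLAN_GOOD_TX_FRMS). The stand-alone sketch below
reproduces that reassembly from a captured register value; dio_byte() and the
sample number are illustrative stand-ins for the inb() reads in the driver.

#include <stdio.h>
#include <stdint.h>

/* illustrative stand-in for inb(dev->base_addr + TLAN_DIO_DATA + n):
 * return byte n of a captured 32-bit DIO statistics register */
static uint8_t dio_byte(uint32_t reg, int n)
{
	return (reg >> (8 * n)) & 0xff;
}

int main(void)
{
	uint32_t good_tx_frms = 0x05123456;     /* sample register contents */
	uint32_t tx_good, tx_under;

	/* bytes 0..2 form the 24-bit good-frame count... */
	tx_good  = dio_byte(good_tx_frms, 0);
	tx_good += dio_byte(good_tx_frms, 1) << 8;
	tx_good += dio_byte(good_tx_frms, 2) << 16;
	/* ...and byte 3 is the 8-bit underrun count */
	tx_under = dio_byte(good_tx_frms, 3);

	printf("tx_good=%u tx_under=%u\n", (unsigned)tx_good, (unsigned)tx_under);
	return 0;
}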
 
 
 
 
-       /***************************************************************
-       *      TLan_Reset
-       *
-       *      Returns:
-       *              0
-       *      Parms:
-       *              dev     Pointer to device structure of adapter
-       *                      to be reset.
-       *
-       *      This function resets the adapter and it's physical
-       *      device.  See Chap. 3, pp. 9-10 of the "ThunderLAN
-       *      Programmer's Guide" for details.  The routine tries to
-       *      implement what is detailed there, though adjustments
-       *      have been made.
-       *
-       **************************************************************/
+/***************************************************************
+ *     tlan_reset_adapter
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             dev     Pointer to device structure of adapter
+ *                     to be reset.
+ *
+ *     This function resets the adapter and its physical
+ *     device.  See Chap. 3, pp. 9-10 of the "ThunderLAN
+ *     Programmer's Guide" for details.  The routine tries to
+ *     implement what is detailed there, though adjustments
+ *     have been made.
+ *
+ **************************************************************/
 
 static void
-TLan_ResetAdapter( struct net_device *dev )
+tlan_reset_adapter(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        int             i;
        u32             addr;
        u32             data;
        u8              data8;
 
-       priv->tlanFullDuplex = false;
-       priv->phyOnline=0;
+       priv->tlan_full_duplex = false;
+       priv->phy_online = 0;
        netif_carrier_off(dev);
 
 /*  1. Assert reset bit. */
@@ -2216,7 +2160,7 @@ TLan_ResetAdapter( struct net_device *dev )
 
        udelay(1000);
 
-/*  2. Turn off interrupts. ( Probably isn't necessary ) */
+/*  2. Turn off interrupts. (Probably isn't necessary) */
 
        data = inl(dev->base_addr + TLAN_HOST_CMD);
        data |= TLAN_HC_INT_OFF;
@@ -2224,207 +2168,204 @@ TLan_ResetAdapter( struct net_device *dev )
 
 /*  3. Clear AREGs and HASHs. */
 
-       for ( i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4 ) {
-               TLan_DioWrite32( dev->base_addr, (u16) i, 0 );
-       }
+       for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
+               tlan_dio_write32(dev->base_addr, (u16) i, 0);
 
 /*  4. Setup NetConfig register. */
 
        data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
-       TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
+       tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
 
 /*  5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */
 
-       outl( TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD );
-       outl( TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD );
+       outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
+       outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);
 
 /*  6. Unreset the MII by setting NMRST (in NetSio) to 1. */
 
-       outw( TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR );
+       outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
        addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
-       TLan_SetBit( TLAN_NET_SIO_NMRST, addr );
+       tlan_set_bit(TLAN_NET_SIO_NMRST, addr);
 
 /*  7. Setup the remaining registers. */
 
-       if ( priv->tlanRev >= 0x30 ) {
+       if (priv->tlan_rev >= 0x30) {
                data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
-               TLan_DioWrite8( dev->base_addr, TLAN_INT_DIS, data8 );
+               tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
        }
-       TLan_PhyDetect( dev );
+       tlan_phy_detect(dev);
        data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;
 
-       if ( priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY ) {
+       if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
                data |= TLAN_NET_CFG_BIT;
-               if ( priv->aui == 1 ) {
-                       TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a );
-               } else if ( priv->duplex == TLAN_DUPLEX_FULL ) {
-                       TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 );
-                       priv->tlanFullDuplex = true;
+               if (priv->aui == 1) {
+                       tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
+               } else if (priv->duplex == TLAN_DUPLEX_FULL) {
+                       tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
+                       priv->tlan_full_duplex = true;
                } else {
-                       TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 );
+                       tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
                }
        }
 
-       if ( priv->phyNum == 0 ) {
+       if (priv->phy_num == 0)
                data |= TLAN_NET_CFG_PHY_EN;
-       }
-       TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
+       tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
 
-       if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
-               TLan_FinishReset( dev );
-       } else {
-               TLan_PhyPowerDown( dev );
-       }
+       if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
+               tlan_finish_reset(dev);
+       else
+               tlan_phy_power_down(dev);
 
-} /* TLan_ResetAdapter */
+}
 
 
 
 
 static void
-TLan_FinishReset( struct net_device *dev )
+tlan_finish_reset(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        u8              data;
        u32             phy;
        u8              sio;
        u16             status;
        u16             partner;
        u16             tlphy_ctl;
-       u16             tlphy_par;
+       u16             tlphy_par;
        u16             tlphy_id1, tlphy_id2;
-       int             i;
+       int             i;
 
-       phy = priv->phy[priv->phyNum];
+       phy = priv->phy[priv->phy_num];
 
        data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
-       if ( priv->tlanFullDuplex ) {
+       if (priv->tlan_full_duplex)
                data |= TLAN_NET_CMD_DUPLEX;
-       }
-       TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, data );
+       tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
        data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
-       if ( priv->phyNum == 0 ) {
+       if (priv->phy_num == 0)
                data |= TLAN_NET_MASK_MASK7;
-       }
-       TLan_DioWrite8( dev->base_addr, TLAN_NET_MASK, data );
-       TLan_DioWrite16( dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7 );
-       TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &tlphy_id1 );
-       TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &tlphy_id2 );
+       tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
+       tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
+       tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
+       tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);
 
-       if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) ||
-            ( priv->aui ) ) {
+       if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
+           (priv->aui)) {
                status = MII_GS_LINK;
-               printk( "TLAN:  %s: Link forced.\n", dev->name );
+               netdev_info(dev, "Link forced\n");
        } else {
-               TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
-               udelay( 1000 );
-               TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
-               if ( (status & MII_GS_LINK) &&
-                    /* We only support link info on Nat.Sem. PHY's */
-                       (tlphy_id1 == NAT_SEM_ID1) &&
-                       (tlphy_id2 == NAT_SEM_ID2) ) {
-                       TLan_MiiReadReg( dev, phy, MII_AN_LPA, &partner );
-                       TLan_MiiReadReg( dev, phy, TLAN_TLPHY_PAR, &tlphy_par );
-
-                       printk( "TLAN: %s: Link active with ", dev->name );
-                       if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) {
-                                printk( "forced 10%sMbps %s-Duplex\n",
-                                        tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
-                                        tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
-                       } else {
-                               printk( "AutoNegotiation enabled, at 10%sMbps %s-Duplex\n",
-                                       tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
-                                       tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
-                               printk("TLAN: Partner capability: ");
-                                       for (i = 5; i <= 10; i++)
-                                               if (partner & (1<<i))
-                                                       printk("%s",media[i-5]);
-                               printk("\n");
+               tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+               udelay(1000);
+               tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+               if ((status & MII_GS_LINK) &&
+                   /* We only support link info on Nat.Sem. PHY's */
+                   (tlphy_id1 == NAT_SEM_ID1) &&
+                   (tlphy_id2 == NAT_SEM_ID2)) {
+                       tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner);
+                       tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par);
+
+                       netdev_info(dev,
+                                   "Link active with %s %uMbps %s-Duplex\n",
+                                   !(tlphy_par & TLAN_PHY_AN_EN_STAT)
+                                   ? "forced" : "Autonegotiation enabled,",
+                                   tlphy_par & TLAN_PHY_SPEED_100
+                                   ? 100 : 10,
+                                   tlphy_par & TLAN_PHY_DUPLEX_FULL
+                                   ? "Full" : "Half");
+
+                       if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
+                               netdev_info(dev, "Partner capability:");
+                               for (i = 5; i < 10; i++)
+                                       if (partner & (1 << i))
+                                               pr_cont(" %s", media[i-5]);
+                               pr_cont("\n");
                        }
 
-                       TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
+                       tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
+                                       TLAN_LED_LINK);
 #ifdef MONITOR
                        /* We have link beat..for now anyway */
-                       priv->link = 1;
-                       /*Enabling link beat monitoring */
-                       TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_LINK_BEAT );
+                       priv->link = 1;
+                       /*Enabling link beat monitoring */
+                       tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT);
 #endif
                } else if (status & MII_GS_LINK)  {
-                       printk( "TLAN: %s: Link active\n", dev->name );
-                       TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
+                       netdev_info(dev, "Link active\n");
+                       tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
+                                       TLAN_LED_LINK);
                }
        }
 
-       if ( priv->phyNum == 0 ) {
-               TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
-               tlphy_ctl |= TLAN_TC_INTEN;
-               TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl );
-               sio = TLan_DioRead8( dev->base_addr, TLAN_NET_SIO );
-               sio |= TLAN_NET_SIO_MINTEN;
-               TLan_DioWrite8( dev->base_addr, TLAN_NET_SIO, sio );
-       }
-
-       if ( status & MII_GS_LINK ) {
-               TLan_SetMac( dev, 0, dev->dev_addr );
-               priv->phyOnline = 1;
-               outb( ( TLAN_HC_INT_ON >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
-               if ( debug >= 1 && debug != TLAN_DEBUG_PROBE ) {
-                       outb( ( TLAN_HC_REQ_INT >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
-               }
-               outl( priv->rxListDMA, dev->base_addr + TLAN_CH_PARM );
-               outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD );
+       if (priv->phy_num == 0) {
+               tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+               tlphy_ctl |= TLAN_TC_INTEN;
+               tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
+               sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
+               sio |= TLAN_NET_SIO_MINTEN;
+               tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
+       }
+
+       if (status & MII_GS_LINK) {
+               tlan_set_mac(dev, 0, dev->dev_addr);
+               priv->phy_online = 1;
+               outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
+               if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
+                       outb((TLAN_HC_REQ_INT >> 8),
+                            dev->base_addr + TLAN_HOST_CMD + 1);
+               outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
+               outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
                netif_carrier_on(dev);
        } else {
-               printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n",
-                       dev->name );
-               TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET );
+               netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
+               tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
                return;
        }
-       TLan_SetMulticastList(dev);
+       tlan_set_multicast_list(dev);
 
-} /* TLan_FinishReset */
+}
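
/*
 * For reference: the partner-capability loop in tlan_finish_reset() walks
 * MII_AN_LPA bits 5-9.  A standalone sketch of that decode, assuming the
 * standard IEEE 802.3 link-partner-ability bit layout and illustrative
 * media strings (the driver's own media[] table is defined elsewhere in
 * tlan.c and may word these differently):
 */
#include <stdint.h>
#include <stdio.h>

static void decode_lpa_sketch(uint16_t lpa)
{
	static const char *const media[] = {
		"10BaseT-HD", "10BaseT-FD",
		"100baseTx-HD", "100baseTx-FD", "100baseT4"
	};
	int bit;

	for (bit = 5; bit < 10; bit++)	/* same range as the loop above */
		if (lpa & (1 << bit))
			printf(" %s", media[bit - 5]);
	printf("\n");
}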
 
 
 
 
-       /***************************************************************
-        *      TLan_SetMac
-        *
-        *      Returns:
-        *              Nothing
-        *      Parms:
-        *              dev     Pointer to device structure of adapter
-        *                      on which to change the AREG.
-        *              areg    The AREG to set the address in (0 - 3).
-        *              mac     A pointer to an array of chars.  Each
-        *                      element stores one byte of the address.
-        *                      IE, it isn't in ascii.
-        *
-        *      This function transfers a MAC address to one of the
-        *      TLAN AREGs (address registers).  The TLAN chip locks
-        *      the register on writing to offset 0 and unlocks the
-        *      register after writing to offset 5.  If NULL is passed
-        *      in mac, then the AREG is filled with 0's.
-        *
-        **************************************************************/
+/***************************************************************
+ *     tlan_set_mac
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             dev     Pointer to device structure of adapter
+ *                     on which to change the AREG.
+ *             areg    The AREG to set the address in (0 - 3).
+ *             mac     A pointer to an array of chars.  Each
+ *                     element stores one byte of the address.
+ *                     IE, it isn't in ascii.
+ *
+ *     This function transfers a MAC address to one of the
+ *     TLAN AREGs (address registers).  The TLAN chip locks
+ *     the register on writing to offset 0 and unlocks the
+ *     register after writing to offset 5.  If NULL is passed
+ *     in mac, then the AREG is filled with 0's.
+ *
+ **************************************************************/
 
-static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
+static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
 {
        int i;
 
        areg *= 6;
 
-       if ( mac != NULL ) {
-               for ( i = 0; i < 6; i++ )
-                       TLan_DioWrite8( dev->base_addr,
-                                       TLAN_AREG_0 + areg + i, mac[i] );
+       if (mac != NULL) {
+               for (i = 0; i < 6; i++)
+                       tlan_dio_write8(dev->base_addr,
+                                       TLAN_AREG_0 + areg + i, mac[i]);
        } else {
-               for ( i = 0; i < 6; i++ )
-                       TLan_DioWrite8( dev->base_addr,
-                                       TLAN_AREG_0 + areg + i, 0 );
+               for (i = 0; i < 6; i++)
+                       tlan_dio_write8(dev->base_addr,
+                                       TLAN_AREG_0 + areg + i, 0);
        }
 
-} /* TLan_SetMac */
+}
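
/*
 * Because a NULL mac pointer zeroes an AREG, all four address registers can
 * be wiped with a loop like the hypothetical helper below (illustrative
 * only, not part of tlan.c):
 */
static void tlan_clear_all_aregs(struct net_device *dev)
{
	int areg;

	for (areg = 0; areg < 4; areg++)	/* AREGs 0 - 3 */
		tlan_set_mac(dev, areg, NULL);
}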
 
 
 
@@ -2432,205 +2373,199 @@ static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
 /*****************************************************************************
 ******************************************************************************
 
-       ThunderLAN Driver PHY Layer Routines
+ThunderLAN driver PHY layer routines
 
 ******************************************************************************
 *****************************************************************************/
 
 
 
-       /*********************************************************************
-        *      TLan_PhyPrint
-        *
-        *      Returns:
-        *              Nothing
-        *      Parms:
-        *              dev     A pointer to the device structure of the
-        *                      TLAN device having the PHYs to be detailed.
-        *
-        *      This function prints the registers a PHY (aka transceiver).
-        *
-        ********************************************************************/
+/*********************************************************************
+ *     tlan_phy_print
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             dev     A pointer to the device structure of the
+ *                     TLAN device having the PHYs to be detailed.
+ *
+ *     This function prints the registers a PHY (aka transceiver).
+ *
+ ********************************************************************/
 
-static void TLan_PhyPrint( struct net_device *dev )
+static void tlan_phy_print(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        u16 i, data0, data1, data2, data3, phy;
 
-       phy = priv->phy[priv->phyNum];
-
-       if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
-               printk( "TLAN:   Device %s, Unmanaged PHY.\n", dev->name );
-       } else if ( phy <= TLAN_PHY_MAX_ADDR ) {
-               printk( "TLAN:   Device %s, PHY 0x%02x.\n", dev->name, phy );
-               printk( "TLAN:      Off.  +0     +1     +2     +3\n" );
-                for ( i = 0; i < 0x20; i+= 4 ) {
-                       printk( "TLAN:      0x%02x", i );
-                       TLan_MiiReadReg( dev, phy, i, &data0 );
-                       printk( " 0x%04hx", data0 );
-                       TLan_MiiReadReg( dev, phy, i + 1, &data1 );
-                       printk( " 0x%04hx", data1 );
-                       TLan_MiiReadReg( dev, phy, i + 2, &data2 );
-                       printk( " 0x%04hx", data2 );
-                       TLan_MiiReadReg( dev, phy, i + 3, &data3 );
-                       printk( " 0x%04hx\n", data3 );
+       phy = priv->phy[priv->phy_num];
+
+       if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+               netdev_info(dev, "Unmanaged PHY\n");
+       } else if (phy <= TLAN_PHY_MAX_ADDR) {
+               netdev_info(dev, "PHY 0x%02x\n", phy);
+               pr_info("   Off.  +0     +1     +2     +3\n");
+               for (i = 0; i < 0x20; i += 4) {
+                       tlan_mii_read_reg(dev, phy, i, &data0);
+                       tlan_mii_read_reg(dev, phy, i + 1, &data1);
+                       tlan_mii_read_reg(dev, phy, i + 2, &data2);
+                       tlan_mii_read_reg(dev, phy, i + 3, &data3);
+                       pr_info("   0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n",
+                               i, data0, data1, data2, data3);
                }
        } else {
-               printk( "TLAN:   Device %s, Invalid PHY.\n", dev->name );
+               netdev_info(dev, "Invalid PHY\n");
        }
 
-} /* TLan_PhyPrint */
+}
 
 
 
 
-       /*********************************************************************
-        *      TLan_PhyDetect
-        *
-        *      Returns:
-        *              Nothing
-        *      Parms:
-        *              dev     A pointer to the device structure of the adapter
-        *                      for which the PHY needs determined.
-        *
-        *      So far I've found that adapters which have external PHYs
-        *      may also use the internal PHY for part of the functionality.
-        *      (eg, AUI/Thinnet).  This function finds out if this TLAN
-        *      chip has an internal PHY, and then finds the first external
-        *      PHY (starting from address 0) if it exists).
-        *
-        ********************************************************************/
+/*********************************************************************
+ *     tlan_phy_detect
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             dev     A pointer to the device structure of the adapter
+ *                     for which the PHY needs determined.
+ *
+ *     So far I've found that adapters which have external PHYs
+ *     may also use the internal PHY for part of the functionality.
+ *     (eg, AUI/Thinnet).  This function finds out if this TLAN
+ *     chip has an internal PHY, and then finds the first external
+ *     PHY (starting from address 0) if it exists).
+ *
+ ********************************************************************/
 
-static void TLan_PhyDetect( struct net_device *dev )
+static void tlan_phy_detect(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        u16             control;
        u16             hi;
        u16             lo;
        u32             phy;
 
-       if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
-               priv->phyNum = 0xFFFF;
+       if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+               priv->phy_num = 0xffff;
                return;
        }
 
-       TLan_MiiReadReg( dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi );
+       tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);
 
-       if ( hi != 0xFFFF ) {
+       if (hi != 0xffff)
                priv->phy[0] = TLAN_PHY_MAX_ADDR;
-       } else {
+       else
                priv->phy[0] = TLAN_PHY_NONE;
-       }
 
        priv->phy[1] = TLAN_PHY_NONE;
-       for ( phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++ ) {
-               TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control );
-               TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi );
-               TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &lo );
-               if ( ( control != 0xFFFF ) ||
-                    ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) {
-                       TLAN_DBG( TLAN_DEBUG_GNRL,
-                                 "PHY found at %02x %04x %04x %04x\n",
-                                 phy, control, hi, lo );
-                       if ( ( priv->phy[1] == TLAN_PHY_NONE ) &&
-                            ( phy != TLAN_PHY_MAX_ADDR ) ) {
+       for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
+               tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
+               tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
+               tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
+               if ((control != 0xffff) ||
+                   (hi != 0xffff) || (lo != 0xffff)) {
+                       TLAN_DBG(TLAN_DEBUG_GNRL,
+                                "PHY found at %02x %04x %04x %04x\n",
+                                phy, control, hi, lo);
+                       if ((priv->phy[1] == TLAN_PHY_NONE) &&
+                           (phy != TLAN_PHY_MAX_ADDR)) {
                                priv->phy[1] = phy;
                        }
                }
        }
 
-       if ( priv->phy[1] != TLAN_PHY_NONE ) {
-               priv->phyNum = 1;
-       } else if ( priv->phy[0] != TLAN_PHY_NONE ) {
-               priv->phyNum = 0;
-       } else {
-               printk( "TLAN:  Cannot initialize device, no PHY was found!\n" );
-       }
+       if (priv->phy[1] != TLAN_PHY_NONE)
+               priv->phy_num = 1;
+       else if (priv->phy[0] != TLAN_PHY_NONE)
+               priv->phy_num = 0;
+       else
+               netdev_info(dev, "Cannot initialize device, no PHY was found!\n");
 
-} /* TLan_PhyDetect */
+}
 
 
 
 
-static void TLan_PhyPowerDown( struct net_device *dev )
+static void tlan_phy_power_down(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        u16             value;
 
-       TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name );
+       TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
        value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
-       TLan_MiiSync( dev->base_addr );
-       TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
-       if ( ( priv->phyNum == 0 ) &&
-            ( priv->phy[1] != TLAN_PHY_NONE ) &&
-            ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) {
-               TLan_MiiSync( dev->base_addr );
-               TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value );
+       tlan_mii_sync(dev->base_addr);
+       tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+       if ((priv->phy_num == 0) &&
+           (priv->phy[1] != TLAN_PHY_NONE) &&
+           (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) {
+               tlan_mii_sync(dev->base_addr);
+               tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
        }
 
        /* Wait for 50 ms and powerup
         * This is abitrary.  It is intended to make sure the
         * transceiver settles.
         */
-       TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_PUP );
+       tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP);
 
-} /* TLan_PhyPowerDown */
+}
 
 
 
 
-static void TLan_PhyPowerUp( struct net_device *dev )
+static void tlan_phy_power_up(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        u16             value;
 
-       TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name );
-       TLan_MiiSync( dev->base_addr );
+       TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
+       tlan_mii_sync(dev->base_addr);
        value = MII_GC_LOOPBK;
-       TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
-       TLan_MiiSync(dev->base_addr);
+       tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+       tlan_mii_sync(dev->base_addr);
        /* Wait for 500 ms and reset the
         * transceiver.  The TLAN docs say both 50 ms and
         * 500 ms, so do the longer, just in case.
         */
-       TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_RESET );
+       tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET);
 
-} /* TLan_PhyPowerUp */
+}
 
 
 
 
-static void TLan_PhyReset( struct net_device *dev )
+static void tlan_phy_reset(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        u16             phy;
        u16             value;
 
-       phy = priv->phy[priv->phyNum];
+       phy = priv->phy[priv->phy_num];
 
-       TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name );
-       TLan_MiiSync( dev->base_addr );
+       TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name);
+       tlan_mii_sync(dev->base_addr);
        value = MII_GC_LOOPBK | MII_GC_RESET;
-       TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, value );
-       TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
-       while ( value & MII_GC_RESET ) {
-               TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
-       }
+       tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
+       tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
+       while (value & MII_GC_RESET)
+               tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
 
        /* Wait for 500 ms and initialize.
         * I don't remember why I wait this long.
         * I've changed this to 50ms, as it seems long enough.
         */
-       TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_START_LINK );
+       tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK);
 
-} /* TLan_PhyReset */
+}
 
 
 
 
-static void TLan_PhyStartLink( struct net_device *dev )
+static void tlan_phy_start_link(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        u16             ability;
        u16             control;
        u16             data;
@@ -2638,86 +2573,87 @@ static void TLan_PhyStartLink( struct net_device *dev )
        u16             status;
        u16             tctl;
 
-       phy = priv->phy[priv->phyNum];
-       TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name );
-       TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
-       TLan_MiiReadReg( dev, phy, MII_GEN_STS, &ability );
+       phy = priv->phy[priv->phy_num];
+       TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
+       tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+       tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);
 
-       if ( ( status & MII_GS_AUTONEG ) &&
-            ( ! priv->aui ) ) {
+       if ((status & MII_GS_AUTONEG) &&
+           (!priv->aui)) {
                ability = status >> 11;
-               if ( priv->speed  == TLAN_SPEED_10 &&
-                    priv->duplex == TLAN_DUPLEX_HALF) {
-                       TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000);
-               } else if ( priv->speed == TLAN_SPEED_10 &&
-                           priv->duplex == TLAN_DUPLEX_FULL) {
-                       priv->tlanFullDuplex = true;
-                       TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100);
-               } else if ( priv->speed == TLAN_SPEED_100 &&
-                           priv->duplex == TLAN_DUPLEX_HALF) {
-                       TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000);
-               } else if ( priv->speed == TLAN_SPEED_100 &&
-                           priv->duplex == TLAN_DUPLEX_FULL) {
-                       priv->tlanFullDuplex = true;
-                       TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100);
+               if (priv->speed  == TLAN_SPEED_10 &&
+                   priv->duplex == TLAN_DUPLEX_HALF) {
+                       tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
+               } else if (priv->speed == TLAN_SPEED_10 &&
+                          priv->duplex == TLAN_DUPLEX_FULL) {
+                       priv->tlan_full_duplex = true;
+                       tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
+               } else if (priv->speed == TLAN_SPEED_100 &&
+                          priv->duplex == TLAN_DUPLEX_HALF) {
+                       tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
+               } else if (priv->speed == TLAN_SPEED_100 &&
+                          priv->duplex == TLAN_DUPLEX_FULL) {
+                       priv->tlan_full_duplex = true;
+                       tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
                } else {
 
                        /* Set Auto-Neg advertisement */
-                       TLan_MiiWriteReg( dev, phy, MII_AN_ADV, (ability << 5) | 1);
+                       tlan_mii_write_reg(dev, phy, MII_AN_ADV,
+                                          (ability << 5) | 1);
                        /* Enablee Auto-Neg */
-                       TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1000 );
+                       tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
                        /* Restart Auto-Neg */
-                       TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1200 );
+                       tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);
                        /* Wait for 4 sec for autonegotiation
-                       * to complete.  The max spec time is less than this
-                       * but the card need additional time to start AN.
-                       * .5 sec should be plenty extra.
-                       */
-                       printk( "TLAN: %s: Starting autonegotiation.\n", dev->name );
-                       TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN );
+                        * to complete.  The max spec time is less than this
+                        * but the card need additional time to start AN.
+                        * .5 sec should be plenty extra.
+                        */
+                       netdev_info(dev, "Starting autonegotiation\n");
+                       tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
                        return;
                }
 
        }
 
-       if ( ( priv->aui ) && ( priv->phyNum != 0 ) ) {
-               priv->phyNum = 0;
-               data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
-               TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
-               TLan_SetTimer( dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN );
+       if ((priv->aui) && (priv->phy_num != 0)) {
+               priv->phy_num = 0;
+               data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
+                       | TLAN_NET_CFG_PHY_EN;
+               tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
+               tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN);
                return;
-       }  else if ( priv->phyNum == 0 ) {
+       } else if (priv->phy_num == 0) {
                control = 0;
-               TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tctl );
-               if ( priv->aui ) {
-                       tctl |= TLAN_TC_AUISEL;
+               tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
+               if (priv->aui) {
+                       tctl |= TLAN_TC_AUISEL;
                } else {
-                       tctl &= ~TLAN_TC_AUISEL;
-                       if ( priv->duplex == TLAN_DUPLEX_FULL ) {
+                       tctl &= ~TLAN_TC_AUISEL;
+                       if (priv->duplex == TLAN_DUPLEX_FULL) {
                                control |= MII_GC_DUPLEX;
-                               priv->tlanFullDuplex = true;
+                               priv->tlan_full_duplex = true;
                        }
-                       if ( priv->speed == TLAN_SPEED_100 ) {
+                       if (priv->speed == TLAN_SPEED_100)
                                control |= MII_GC_SPEEDSEL;
-                       }
                }
-               TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, control );
-               TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tctl );
+               tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
+               tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
        }
 
        /* Wait for 2 sec to give the transceiver time
         * to establish link.
         */
-       TLan_SetTimer( dev, (4*HZ), TLAN_TIMER_FINISH_RESET );
+       tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);
 
-} /* TLan_PhyStartLink */
+}
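
/*
 * The raw values written to MII_GEN_CTL in tlan_phy_start_link() map onto
 * the generic IEEE 802.3 control-register (BMCR) bits.  A standalone sketch
 * that reproduces those magic numbers (bit names mirror BMCR_* from
 * <linux/mii.h>; tlan.c itself spells the values out in hex):
 */
#include <stdint.h>

#define SK_BMCR_FULLDPLX	0x0100	/* full duplex             */
#define SK_BMCR_ANRESTART	0x0200	/* restart autonegotiation */
#define SK_BMCR_ANENABLE	0x1000	/* enable autonegotiation  */
#define SK_BMCR_SPEED100	0x2000	/* select 100 Mbps         */

static uint16_t bmcr_value_sketch(int speed100, int fullduplex, int autoneg)
{
	if (autoneg)				/* 0x1000, then 0x1200 above */
		return SK_BMCR_ANENABLE | SK_BMCR_ANRESTART;
	return (speed100 ? SK_BMCR_SPEED100 : 0) |	/* 0x2000 */
	       (fullduplex ? SK_BMCR_FULLDPLX : 0);	/* 0x0100 */
}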
 
 
 
 
-static void TLan_PhyFinishAutoNeg( struct net_device *dev )
+static void tlan_phy_finish_auto_neg(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv        *priv = netdev_priv(dev);
        u16             an_adv;
        u16             an_lpa;
        u16             data;
@@ -2725,115 +2661,118 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
        u16             phy;
        u16             status;
 
-       phy = priv->phy[priv->phyNum];
+       phy = priv->phy[priv->phy_num];
 
-       TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
-       udelay( 1000 );
-       TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
+       tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+       udelay(1000);
+       tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
 
-       if ( ! ( status & MII_GS_AUTOCMPLT ) ) {
+       if (!(status & MII_GS_AUTOCMPLT)) {
                /* Wait for 8 sec to give the process
                 * more time.  Perhaps we should fail after a while.
                 */
-                if (!priv->neg_be_verbose++) {
-                        pr_info("TLAN:  Giving autonegotiation more time.\n");
-                        pr_info("TLAN:  Please check that your adapter has\n");
-                        pr_info("TLAN:  been properly connected to a HUB or Switch.\n");
-                        pr_info("TLAN:  Trying to establish link in the background...\n");
-                }
-               TLan_SetTimer( dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN );
+               if (!priv->neg_be_verbose++) {
+                       pr_info("Giving autonegotiation more time.\n");
+                       pr_info("Please check that your adapter has\n");
+                       pr_info("been properly connected to a HUB or Switch.\n");
+                       pr_info("Trying to establish link in the background...\n");
+               }
+               tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN);
                return;
        }
 
-       printk( "TLAN: %s: Autonegotiation complete.\n", dev->name );
-       TLan_MiiReadReg( dev, phy, MII_AN_ADV, &an_adv );
-       TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa );
+       netdev_info(dev, "Autonegotiation complete\n");
+       tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
+       tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
        mode = an_adv & an_lpa & 0x03E0;
-       if ( mode & 0x0100 ) {
-               priv->tlanFullDuplex = true;
-       } else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) {
-               priv->tlanFullDuplex = true;
-       }
-
-       if ( ( ! ( mode & 0x0180 ) ) &&
-            ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) &&
-            ( priv->phyNum != 0 ) ) {
-               priv->phyNum = 0;
-               data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
-               TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
-               TLan_SetTimer( dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN );
+       if (mode & 0x0100)
+               priv->tlan_full_duplex = true;
+       else if (!(mode & 0x0080) && (mode & 0x0040))
+               priv->tlan_full_duplex = true;
+
+       if ((!(mode & 0x0180)) &&
+           (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
+           (priv->phy_num != 0)) {
+               priv->phy_num = 0;
+               data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
+                       | TLAN_NET_CFG_PHY_EN;
+               tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
+               tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
                return;
        }
 
-       if ( priv->phyNum == 0 ) {
-               if ( ( priv->duplex == TLAN_DUPLEX_FULL ) ||
-                    ( an_adv & an_lpa & 0x0040 ) ) {
-                       TLan_MiiWriteReg( dev, phy, MII_GEN_CTL,
-                                         MII_GC_AUTOENB | MII_GC_DUPLEX );
-                       pr_info("TLAN:  Starting internal PHY with FULL-DUPLEX\n" );
+       if (priv->phy_num == 0) {
+               if ((priv->duplex == TLAN_DUPLEX_FULL) ||
+                   (an_adv & an_lpa & 0x0040)) {
+                       tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+                                          MII_GC_AUTOENB | MII_GC_DUPLEX);
+                       netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n");
                } else {
-                       TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB );
-                       pr_info( "TLAN:  Starting internal PHY with HALF-DUPLEX\n" );
+                       tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+                                          MII_GC_AUTOENB);
+                       netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n");
                }
        }
 
        /* Wait for 100 ms.  No reason in partiticular.
         */
-       TLan_SetTimer( dev, (HZ/10), TLAN_TIMER_FINISH_RESET );
+       tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET);
 
-} /* TLan_PhyFinishAutoNeg */
+}
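
/*
 * The 0x03e0 mask in tlan_phy_finish_auto_neg() keeps the five technology
 * bits that both ends advertised.  A standalone sketch of the duplex
 * decision taken above, assuming the standard 802.3 bit positions:
 */
#include <stdbool.h>
#include <stdint.h>

#define SK_TECH_10_FD	0x0040	/* 10BASE-T full duplex   */
#define SK_TECH_100_HD	0x0080	/* 100BASE-TX half duplex */
#define SK_TECH_100_FD	0x0100	/* 100BASE-TX full duplex */

static bool negotiated_full_duplex(uint16_t mode)
{
	if (mode & SK_TECH_100_FD)
		return true;
	if (!(mode & SK_TECH_100_HD) && (mode & SK_TECH_10_FD))
		return true;	/* only 10BASE-T left, but full duplex */
	return false;
}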
 
 #ifdef MONITOR
 
-        /*********************************************************************
-        *
-        *      TLan_phyMonitor
-        *
-        *      Returns:
-        *              None
-        *
-        *      Params:
-        *              dev             The device structure of this device.
-        *
-        *
-        *      This function monitors PHY condition by reading the status
-        *      register via the MII bus. This can be used to give info
-        *      about link changes (up/down), and possible switch to alternate
-        *      media.
-        *
-        * ******************************************************************/
-
-void TLan_PhyMonitor( struct net_device *dev )
+/*********************************************************************
+ *
+ *     tlan_phy_monitor
+ *
+ *     Returns:
+ *           None
+ *
+ *     Params:
+ *           dev            The device structure of this device.
+ *
+ *
+ *     This function monitors PHY condition by reading the status
+ *     register via the MII bus. This can be used to give info
+ *     about link changes (up/down), and possible switch to alternate
+ *     media.
+ *
+ *******************************************************************/
+
+void tlan_phy_monitor(struct net_device *dev)
 {
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        u16     phy;
        u16     phy_status;
 
-       phy = priv->phy[priv->phyNum];
+       phy = priv->phy[priv->phy_num];
 
-        /* Get PHY status register */
-        TLan_MiiReadReg( dev, phy, MII_GEN_STS, &phy_status );
+       /* Get PHY status register */
+       tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);
 
-        /* Check if link has been lost */
-        if (!(phy_status & MII_GS_LINK)) {
-              if (priv->link) {
-                     priv->link = 0;
-                     printk(KERN_DEBUG "TLAN: %s has lost link\n", dev->name);
-                     netif_carrier_off(dev);
-                     TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT );
-                     return;
+       /* Check if link has been lost */
+       if (!(phy_status & MII_GS_LINK)) {
+               if (priv->link) {
+                       priv->link = 0;
+                       printk(KERN_DEBUG "TLAN: %s has lost link\n",
+                              dev->name);
+                       netif_carrier_off(dev);
+                       tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
+                       return;
                }
        }
 
-        /* Link restablished? */
-        if ((phy_status & MII_GS_LINK) && !priv->link) {
-               priv->link = 1;
-               printk(KERN_DEBUG "TLAN: %s has reestablished link\n", dev->name);
+       /* Link restablished? */
+       if ((phy_status & MII_GS_LINK) && !priv->link) {
+               priv->link = 1;
+               printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
+                      dev->name);
                netif_carrier_on(dev);
-        }
+       }
 
        /* Setup a new monitor */
-       TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT );
+       tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
 }
 
 #endif /* MONITOR */
@@ -2842,47 +2781,48 @@ void TLan_PhyMonitor( struct net_device *dev )
 /*****************************************************************************
 ******************************************************************************
 
-       ThunderLAN Driver MII Routines
+ThunderLAN driver MII routines
 
-       These routines are based on the information in Chap. 2 of the
-       "ThunderLAN Programmer's Guide", pp. 15-24.
+these routines are based on the information in chap. 2 of the
+"ThunderLAN Programmer's Guide", pp. 15-24.
 
 ******************************************************************************
 *****************************************************************************/
 
 
-       /***************************************************************
-        *      TLan_MiiReadReg
-        *
-        *      Returns:
-        *              false   if ack received ok
-        *              true    if no ack received or other error
-        *
-        *      Parms:
-        *              dev             The device structure containing
-        *                              The io address and interrupt count
-        *                              for this device.
-        *              phy             The address of the PHY to be queried.
-        *              reg             The register whose contents are to be
-        *                              retrieved.
-        *              val             A pointer to a variable to store the
-        *                              retrieved value.
-        *
-        *      This function uses the TLAN's MII bus to retrieve the contents
-        *      of a given register on a PHY.  It sends the appropriate info
-        *      and then reads the 16-bit register value from the MII bus via
-        *      the TLAN SIO register.
-        *
-        **************************************************************/
-
-static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
+/***************************************************************
+ *     tlan_mii_read_reg
+ *
+ *     Returns:
+ *             false   if ack received ok
+ *             true    if no ack received or other error
+ *
+ *     Parms:
+ *             dev             The device structure containing
+ *                             The io address and interrupt count
+ *                             for this device.
+ *             phy             The address of the PHY to be queried.
+ *             reg             The register whose contents are to be
+ *                             retrieved.
+ *             val             A pointer to a variable to store the
+ *                             retrieved value.
+ *
+ *     This function uses the TLAN's MII bus to retrieve the contents
+ *     of a given register on a PHY.  It sends the appropriate info
+ *     and then reads the 16-bit register value from the MII bus via
+ *     the TLAN SIO register.
+ *
+ **************************************************************/
+
+static bool
+tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
 {
        u8      nack;
        u16     sio, tmp;
-       u32     i;
+       u32     i;
        bool    err;
        int     minten;
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        unsigned long flags = 0;
 
        err = false;
@@ -2892,48 +2832,48 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val
        if (!in_irq())
                spin_lock_irqsave(&priv->lock, flags);
 
-       TLan_MiiSync(dev->base_addr);
+       tlan_mii_sync(dev->base_addr);
 
-       minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
-       if ( minten )
-               TLan_ClearBit(TLAN_NET_SIO_MINTEN, sio);
+       minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+       if (minten)
+               tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
 
-       TLan_MiiSendData( dev->base_addr, 0x1, 2 );     /* Start ( 01b ) */
-       TLan_MiiSendData( dev->base_addr, 0x2, 2 );     /* Read  ( 10b ) */
-       TLan_MiiSendData( dev->base_addr, phy, 5 );     /* Device #      */
-       TLan_MiiSendData( dev->base_addr, reg, 5 );     /* Register #    */
+       tlan_mii_send_data(dev->base_addr, 0x1, 2);     /* start (01b) */
+       tlan_mii_send_data(dev->base_addr, 0x2, 2);     /* read  (10b) */
+       tlan_mii_send_data(dev->base_addr, phy, 5);     /* device #      */
+       tlan_mii_send_data(dev->base_addr, reg, 5);     /* register #    */
 
 
-       TLan_ClearBit(TLAN_NET_SIO_MTXEN, sio);         /* Change direction */
+       tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);        /* change direction */
 
-       TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);          /* Clock Idle bit */
-       TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
-       TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);          /* Wait 300ns */
+       tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);         /* clock idle bit */
+       tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+       tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);         /* wait 300ns */
 
-       nack = TLan_GetBit(TLAN_NET_SIO_MDATA, sio);    /* Check for ACK */
-       TLan_SetBit(TLAN_NET_SIO_MCLK, sio);            /* Finish ACK */
-       if (nack) {                                     /* No ACK, so fake it */
+       nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio);   /* check for ACK */
+       tlan_set_bit(TLAN_NET_SIO_MCLK, sio);           /* finish ACK */
+       if (nack) {                                     /* no ACK, so fake it */
                for (i = 0; i < 16; i++) {
-                       TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
-                       TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+                       tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+                       tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
                }
                tmp = 0xffff;
                err = true;
        } else {                                        /* ACK, so read data */
                for (tmp = 0, i = 0x8000; i; i >>= 1) {
-                       TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
-                       if (TLan_GetBit(TLAN_NET_SIO_MDATA, sio))
+                       tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+                       if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
                                tmp |= i;
-                       TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+                       tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
                }
        }
 
 
-       TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);          /* Idle cycle */
-       TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+       tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);         /* idle cycle */
+       tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
 
-       if ( minten )
-               TLan_SetBit(TLAN_NET_SIO_MINTEN, sio);
+       if (minten)
+               tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
 
        *val = tmp;
 
@@ -2942,116 +2882,117 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val
 
        return err;
 
-} /* TLan_MiiReadReg */
+}
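
/*
 * The management frame that tlan_mii_read_reg() clocks out before turning
 * the bus around is: start (01b), read opcode (10b), 5-bit PHY address,
 * 5-bit register address, MSB first.  A sketch of that 14-bit header packed
 * into an integer (illustrative only; the driver shifts it out bit by bit
 * through tlan_mii_send_data()):
 */
#include <stdint.h>

static uint16_t mii_read_header_sketch(uint8_t phy, uint8_t reg)
{
	return (0x1 << 12) |		/* start    (01b) */
	       (0x2 << 10) |		/* read     (10b) */
	       ((phy & 0x1f) << 5) |	/* device #       */
	       (reg & 0x1f);		/* register #     */
}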
 
 
 
 
-       /***************************************************************
-        *      TLan_MiiSendData
-        *
-        *      Returns:
-        *              Nothing
-        *      Parms:
-        *              base_port       The base IO port of the adapter in
-        *                              question.
-        *              dev             The address of the PHY to be queried.
-        *              data            The value to be placed on the MII bus.
-        *              num_bits        The number of bits in data that are to
-        *                              be placed on the MII bus.
-        *
-        *      This function sends on sequence of bits on the MII
-        *      configuration bus.
-        *
-        **************************************************************/
+/***************************************************************
+ *     tlan_mii_send_data
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             base_port       The base IO port of the adapter in
+ *                             question.
+ *             dev             The address of the PHY to be queried.
+ *             data            The value to be placed on the MII bus.
+ *             num_bits        The number of bits in data that are to
+ *                             be placed on the MII bus.
+ *
+ *     This function sends on sequence of bits on the MII
+ *     configuration bus.
+ *
+ **************************************************************/
 
-static void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits )
+static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
 {
        u16 sio;
        u32 i;
 
-       if ( num_bits == 0 )
+       if (num_bits == 0)
                return;
 
-       outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
+       outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
        sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
-       TLan_SetBit( TLAN_NET_SIO_MTXEN, sio );
+       tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);
 
-       for ( i = ( 0x1 << ( num_bits - 1 ) ); i; i >>= 1 ) {
-               TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
-               (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
-               if ( data & i )
-                       TLan_SetBit( TLAN_NET_SIO_MDATA, sio );
+       for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
+               tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+               (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
+               if (data & i)
+                       tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
                else
-                       TLan_ClearBit( TLAN_NET_SIO_MDATA, sio );
-               TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
-               (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
+                       tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
+               tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+               (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
        }
 
-} /* TLan_MiiSendData */
+}
 
 
 
 
-       /***************************************************************
-        *      TLan_MiiSync
-        *
-        *      Returns:
-        *              Nothing
-        *      Parms:
-        *              base_port       The base IO port of the adapter in
-        *                              question.
-        *
-        *      This functions syncs all PHYs in terms of the MII configuration
-        *      bus.
-        *
-        **************************************************************/
+/***************************************************************
+ *     TLan_MiiSync
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             base_port       The base IO port of the adapter in
+ *                             question.
+ *
+ *     This functions syncs all PHYs in terms of the MII configuration
+ *     bus.
+ *
+ **************************************************************/
 
-static void TLan_MiiSync( u16 base_port )
+static void tlan_mii_sync(u16 base_port)
 {
        int i;
        u16 sio;
 
-       outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
+       outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
        sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
 
-       TLan_ClearBit( TLAN_NET_SIO_MTXEN, sio );
-       for ( i = 0; i < 32; i++ ) {
-               TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
-               TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+       tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
+       for (i = 0; i < 32; i++) {
+               tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+               tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
        }
 
-} /* TLan_MiiSync */
+}
 
 
 
 
-       /***************************************************************
-        *      TLan_MiiWriteReg
-        *
-        *      Returns:
-        *              Nothing
-        *      Parms:
-        *              dev             The device structure for the device
-        *                              to write to.
-        *              phy             The address of the PHY to be written to.
-        *              reg             The register whose contents are to be
-        *                              written.
-        *              val             The value to be written to the register.
-        *
-        *      This function uses the TLAN's MII bus to write the contents of a
-        *      given register on a PHY.  It sends the appropriate info and then
-        *      writes the 16-bit register value from the MII configuration bus
-        *      via the TLAN SIO register.
-        *
-        **************************************************************/
+/***************************************************************
+ *     tlan_mii_write_reg
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             dev             The device structure for the device
+ *                             to write to.
+ *             phy             The address of the PHY to be written to.
+ *             reg             The register whose contents are to be
+ *                             written.
+ *             val             The value to be written to the register.
+ *
+ *     This function uses the TLAN's MII bus to write the contents of a
+ *     given register on a PHY.  It sends the appropriate info and then
+ *     writes the 16-bit register value from the MII configuration bus
+ *     via the TLAN SIO register.
+ *
+ **************************************************************/
 
-static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val )
+static void
+tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
 {
        u16     sio;
        int     minten;
        unsigned long flags = 0;
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
 
        outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
        sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
@@ -3059,30 +3000,30 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val
        if (!in_irq())
                spin_lock_irqsave(&priv->lock, flags);
 
-       TLan_MiiSync( dev->base_addr );
+       tlan_mii_sync(dev->base_addr);
 
-       minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
-       if ( minten )
-               TLan_ClearBit( TLAN_NET_SIO_MINTEN, sio );
+       minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+       if (minten)
+               tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
 
-       TLan_MiiSendData( dev->base_addr, 0x1, 2 );     /* Start ( 01b ) */
-       TLan_MiiSendData( dev->base_addr, 0x1, 2 );     /* Write ( 01b ) */
-       TLan_MiiSendData( dev->base_addr, phy, 5 );     /* Device #      */
-       TLan_MiiSendData( dev->base_addr, reg, 5 );     /* Register #    */
+       tlan_mii_send_data(dev->base_addr, 0x1, 2);     /* start (01b) */
+       tlan_mii_send_data(dev->base_addr, 0x1, 2);     /* write (01b) */
+       tlan_mii_send_data(dev->base_addr, phy, 5);     /* device #      */
+       tlan_mii_send_data(dev->base_addr, reg, 5);     /* register #    */
 
-       TLan_MiiSendData( dev->base_addr, 0x2, 2 );     /* Send ACK */
-       TLan_MiiSendData( dev->base_addr, val, 16 );    /* Send Data */
+       tlan_mii_send_data(dev->base_addr, 0x2, 2);     /* send ACK */
+       tlan_mii_send_data(dev->base_addr, val, 16);    /* send data */
 
-       TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );        /* Idle cycle */
-       TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+       tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
+       tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
 
-       if ( minten )
-               TLan_SetBit( TLAN_NET_SIO_MINTEN, sio );
+       if (minten)
+               tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
 
        if (!in_irq())
                spin_unlock_irqrestore(&priv->lock, flags);
 
-} /* TLan_MiiWriteReg */
+}
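
/*
 * A hypothetical convenience wrapper (not in tlan.c) showing how the two
 * helpers above combine for a read-modify-write of a PHY register; the
 * return convention (true on error) follows tlan_mii_read_reg():
 */
static bool tlan_mii_update_reg(struct net_device *dev, u16 phy, u16 reg,
				u16 clear, u16 set)
{
	u16 val;

	if (tlan_mii_read_reg(dev, phy, reg, &val))
		return true;			/* no ACK from the PHY */
	val = (val & ~clear) | set;
	tlan_mii_write_reg(dev, phy, reg, val);
	return false;
}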
 
 
 
@@ -3090,229 +3031,226 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val
 /*****************************************************************************
 ******************************************************************************
 
-       ThunderLAN Driver Eeprom routines
+ThunderLAN driver eeprom routines
 
-       The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A
-       EEPROM.  These functions are based on information in Microchip's
-       data sheet.  I don't know how well this functions will work with
-       other EEPROMs.
+the Compaq netelligent 10 and 10/100 cards use a microchip 24C02A
+EEPROM.  these functions are based on information in microchip's
+data sheet.  I don't know how well this functions will work with
+other Eeproms.
 
 ******************************************************************************
 *****************************************************************************/
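
/*
 * A minimal sketch of a 24C02-style random read built from the helpers
 * below (send_start / send_byte / receive_byte); the driver's own EEPROM
 * read presumably follows a similar sequence.  The 0xa0/0xa1 control bytes
 * are the standard 24C02 device address with the R/W bit; the numeric
 * return values here are illustrative, not the driver's:
 */
static int tlan_ee_random_read_sketch(u16 io_base, u8 ee_addr, u8 *data)
{
	tlan_ee_send_start(io_base);
	if (tlan_ee_send_byte(io_base, 0xa0, 0))	/* select EEPROM, write */
		return 1;
	if (tlan_ee_send_byte(io_base, ee_addr, 0))	/* byte address */
		return 2;
	tlan_ee_send_start(io_base);			/* repeated start */
	if (tlan_ee_send_byte(io_base, 0xa1, 0))	/* select EEPROM, read */
		return 3;
	tlan_ee_receive_byte(io_base, data, TLAN_EEPROM_STOP);
	return 0;
}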
 
 
-       /***************************************************************
-        *      TLan_EeSendStart
-        *
-        *      Returns:
-        *              Nothing
-        *      Parms:
-        *              io_base         The IO port base address for the
-        *                              TLAN device with the EEPROM to
-        *                              use.
-        *
-        *      This function sends a start cycle to an EEPROM attached
-        *      to a TLAN chip.
-        *
-        **************************************************************/
-
-static void TLan_EeSendStart( u16 io_base )
+/***************************************************************
+ *     tlan_ee_send_start
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             io_base         The IO port base address for the
+ *                             TLAN device with the EEPROM to
+ *                             use.
+ *
+ *     This function sends a start cycle to an EEPROM attached
+ *     to a TLAN chip.
+ *
+ **************************************************************/
+
+static void tlan_ee_send_start(u16 io_base)
 {
        u16     sio;
 
-       outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+       outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
        sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
 
-       TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-       TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
-       TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
-       TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
-       TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
-
-} /* TLan_EeSendStart */
-
-
-
-
-       /***************************************************************
-        *      TLan_EeSendByte
-        *
-        *      Returns:
-        *              If the correct ack was received, 0, otherwise 1
-        *      Parms:  io_base         The IO port base address for the
-        *                              TLAN device with the EEPROM to
-        *                              use.
-        *              data            The 8 bits of information to
-        *                              send to the EEPROM.
-        *              stop            If TLAN_EEPROM_STOP is passed, a
-        *                              stop cycle is sent after the
-        *                              byte is sent after the ack is
-        *                              read.
-        *
-        *      This function sends a byte on the serial EEPROM line,
-        *      driving the clock to send each bit. The function then
-        *      reverses transmission direction and reads an acknowledge
-        *      bit.
-        *
-        **************************************************************/
-
-static int TLan_EeSendByte( u16 io_base, u8 data, int stop )
+       tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+       tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+       tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+       tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+       tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+
+}
+
+
+
+
+/***************************************************************
+ *     tlan_ee_send_byte
+ *
+ *     Returns:
+ *             If the correct ack was received, 0, otherwise 1
+ *     Parms:  io_base         The IO port base address for the
+ *                             TLAN device with the EEPROM to
+ *                             use.
+ *             data            The 8 bits of information to
+ *                             send to the EEPROM.
+ *             stop            If TLAN_EEPROM_STOP is passed, a
+ *                             stop cycle is sent after the
+ *                             byte is sent after the ack is
+ *                             read.
+ *
+ *     This function sends a byte on the serial EEPROM line,
+ *     driving the clock to send each bit. The function then
+ *     reverses transmission direction and reads an acknowledge
+ *     bit.
+ *
+ **************************************************************/
+
+static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
 {
        int     err;
        u8      place;
        u16     sio;
 
-       outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+       outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
        sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
 
        /* Assume clock is low, tx is enabled; */
-       for ( place = 0x80; place != 0; place >>= 1 ) {
-               if ( place & data )
-                       TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+       for (place = 0x80; place != 0; place >>= 1) {
+               if (place & data)
+                       tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
                else
-                       TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
-               TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-               TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+                       tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+               tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+               tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
        }
-       TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
-       TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-       err = TLan_GetBit( TLAN_NET_SIO_EDATA, sio );
-       TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
-       TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
+       tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+       tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+       err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
+       tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+       tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
 
-       if ( ( ! err ) && stop ) {
+       if ((!err) && stop) {
                /* STOP, raise data while clock is high */
-               TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
-               TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-               TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+               tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+               tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+               tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
        }
 
        return err;
 
-} /* TLan_EeSendByte */
-
-
-
-
-       /***************************************************************
-        *      TLan_EeReceiveByte
-        *
-        *      Returns:
-        *              Nothing
-        *      Parms:
-        *              io_base         The IO port base address for the
-        *                              TLAN device with the EEPROM to
-        *                              use.
-        *              data            An address to a char to hold the
-        *                              data sent from the EEPROM.
-        *              stop            If TLAN_EEPROM_STOP is passed, a
-        *                              stop cycle is sent after the
-        *                              byte is received, and no ack is
-        *                              sent.
-        *
-        *      This function receives 8 bits of data from the EEPROM
-        *      over the serial link.  It then sends and ack bit, or no
-        *      ack and a stop bit.  This function is used to retrieve
-        *      data after the address of a byte in the EEPROM has been
-        *      sent.
-        *
-        **************************************************************/
-
-static void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop )
+}
+
+
+
+
+/***************************************************************
+ *     tlan_ee_receive_byte
+ *
+ *     Returns:
+ *             Nothing
+ *     Parms:
+ *             io_base         The IO port base address for the
+ *                             TLAN device with the EEPROM to
+ *                             use.
+ *             data            An address to a char to hold the
+ *                             data sent from the EEPROM.
+ *             stop            If TLAN_EEPROM_STOP is passed, a
+ *                             stop cycle is sent after the
+ *                             byte is received, and no ack is
+ *                             sent.
+ *
+ *     This function receives 8 bits of data from the EEPROM
+ *     over the serial link.  It then sends and ack bit, or no
+ *     ack and a stop bit.  This function is used to retrieve
+ *     data after the address of a byte in the EEPROM has been
+ *     sent.
+ *
+ **************************************************************/
+
+static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
 {
        u8  place;
        u16 sio;
 
-       outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+       outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
        sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
        *data = 0;
 
        /* Assume clock is low, tx is enabled; */
-       TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
-       for ( place = 0x80; place; place >>= 1 ) {
-               TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-               if ( TLan_GetBit( TLAN_NET_SIO_EDATA, sio ) )
+       tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+       for (place = 0x80; place; place >>= 1) {
+               tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+               if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
                        *data |= place;
-               TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+               tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
        }
 
-       TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
-       if ( ! stop ) {
-               TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );       /* Ack = 0 */
-               TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-               TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+       tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+       if (!stop) {
+               tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); /* ack = 0 */
+               tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+               tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
        } else {
-               TLan_SetBit( TLAN_NET_SIO_EDATA, sio );         /* No ack = 1 (?) */
-               TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-               TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+               tlan_set_bit(TLAN_NET_SIO_EDATA, sio);  /* no ack = 1 (?) */
+               tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+               tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
                /* STOP, raise data while clock is high */
-               TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
-               TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-               TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
-       }
-
-} /* TLan_EeReceiveByte */
-
-
-
-
-       /***************************************************************
-       *      TLan_EeReadByte
-       *
-       *      Returns:
-       *              No error = 0, else, the stage at which the error
-       *              occurred.
-       *      Parms:
-       *              io_base         The IO port base address for the
-       *                              TLAN device with the EEPROM to
-       *                              use.
-       *              ee_addr         The address of the byte in the
-       *                              EEPROM whose contents are to be
-       *                              retrieved.
-       *              data            An address to a char to hold the
-       *                              data obtained from the EEPROM.
-       *
-       *      This function reads a byte of information from an byte
-       *      cell in the EEPROM.
-       *
-       **************************************************************/
-
-static int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data )
+               tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+               tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+               tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+       }
+
+}
+
+
+
+
+/***************************************************************
+ *     tlan_ee_read_byte
+ *
+ *     Returns:
+ *             No error = 0, else, the stage at which the error
+ *             occurred.
+ *     Parms:
+ *             io_base         The IO port base address for the
+ *                             TLAN device with the EEPROM to
+ *                             use.
+ *             ee_addr         The address of the byte in the
+ *                             EEPROM whose contents are to be
+ *                             retrieved.
+ *             data            An address to a char to hold the
+ *                             data obtained from the EEPROM.
+ *
+ *     This function reads a byte of information from a byte
+ *     cell in the EEPROM.
+ *
+ **************************************************************/
+
+static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
 {
        int err;
-       TLanPrivateInfo *priv = netdev_priv(dev);
+       struct tlan_priv *priv = netdev_priv(dev);
        unsigned long flags = 0;
-       int ret=0;
+       int ret = 0;
 
        spin_lock_irqsave(&priv->lock, flags);
 
-       TLan_EeSendStart( dev->base_addr );
-       err = TLan_EeSendByte( dev->base_addr, 0xA0, TLAN_EEPROM_ACK );
-       if (err)
-       {
-               ret=1;
+       tlan_ee_send_start(dev->base_addr);
+       err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
+       if (err) {
+               ret = 1;
                goto fail;
        }
-       err = TLan_EeSendByte( dev->base_addr, ee_addr, TLAN_EEPROM_ACK );
-       if (err)
-       {
-               ret=2;
+       err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
+       if (err) {
+               ret = 2;
                goto fail;
        }
-       TLan_EeSendStart( dev->base_addr );
-       err = TLan_EeSendByte( dev->base_addr, 0xA1, TLAN_EEPROM_ACK );
-       if (err)
-       {
-               ret=3;
+       tlan_ee_send_start(dev->base_addr);
+       err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
+       if (err) {
+               ret = 3;
                goto fail;
        }
-       TLan_EeReceiveByte( dev->base_addr, data, TLAN_EEPROM_STOP );
+       tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
 fail:
        spin_unlock_irqrestore(&priv->lock, flags);
 
        return ret;
 
-} /* TLan_EeReadByte */
+}
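
        As context for the EEPROM helpers above: tlan_ee_read_byte() returns 0 on
        success or the number of the failing stage (1-3), so a caller can chain byte
        reads and bail out on the first error. A minimal, hedged sketch of such a
        caller follows; the addr_ofs parameter and the six-byte loop are illustrative
        assumptions, not lines from this patch.

                /* Illustrative sketch only: pull a six-byte station address out
                 * of the serial EEPROM one byte at a time.  addr_ofs is assumed
                 * to be the EEPROM offset of the address for this adapter.
                 */
                static int tlan_read_mac_sketch(struct net_device *dev, u8 addr_ofs)
                {
                        u8 addr[6];
                        int i, err;

                        for (i = 0; i < 6; i++) {
                                err = tlan_ee_read_byte(dev, addr_ofs + i, &addr[i]);
                                if (err)
                                        return err;     /* 1, 2 or 3: failing stage */
                        }
                        memcpy(dev->dev_addr, addr, sizeof(addr));
                        return 0;
                }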
 
 
 
index 3315ced774e2ff9078b972462b50295abbdb8f63..5fc98a8e488900c07757b59c7464b5e0a54b7855 100644 (file)
@@ -20,8 +20,8 @@
  ********************************************************************/
 
 
-#include <asm/io.h>
-#include <asm/types.h>
+#include <linux/io.h>
+#include <linux/types.h>
 #include <linux/netdevice.h>
 
 
 #define TLAN_IGNORE            0
 #define TLAN_RECORD            1
 
-#define TLAN_DBG(lvl, format, args...) \
-       do { if (debug&lvl) printk(KERN_DEBUG "TLAN: " format, ##args ); } while(0)
+#define TLAN_DBG(lvl, format, args...)                                 \
+       do {                                                            \
+               if (debug&lvl)                                          \
+                       printk(KERN_DEBUG "TLAN: " format, ##args);     \
+       } while (0)
 
 #define TLAN_DEBUG_GNRL                0x0001
 #define TLAN_DEBUG_TX          0x0002
@@ -50,7 +53,8 @@
 #define TLAN_DEBUG_PROBE       0x0010
 
 #define TX_TIMEOUT             (10*HZ)  /* We need time for auto-neg */
-#define MAX_TLAN_BOARDS                8        /* Max number of boards installed at a time */
+#define MAX_TLAN_BOARDS                8        /* Max number of boards installed
+                                           at a time */
 
 
        /*****************************************************************
 #define PCI_DEVICE_ID_OLICOM_OC2326                    0x0014
 #endif
 
-typedef struct tlan_adapter_entry {
-       u16     vendorId;
-       u16     deviceId;
-       char    *deviceLabel;
+struct tlan_adapter_entry {
+       u16     vendor_id;
+       u16     device_id;
+       char    *device_label;
        u32     flags;
-       u16     addrOfs;
-} TLanAdapterEntry;
+       u16     addr_ofs;
+};
 
 #define TLAN_ADAPTER_NONE              0x00000000
 #define TLAN_ADAPTER_UNMANAGED_PHY     0x00000001
@@ -129,18 +133,18 @@ typedef struct tlan_adapter_entry {
 #define TLAN_CSTAT_DP_PR       0x0100
 
 
-typedef struct tlan_buffer_ref_tag {
+struct tlan_buffer {
        u32     count;
        u32     address;
-} TLanBufferRef;
+};
 
 
-typedef struct tlan_list_tag {
+struct tlan_list {
        u32             forward;
-       u16             cStat;
-       u16             frameSize;
-       TLanBufferRef   buffer[TLAN_BUFFERS_PER_LIST];
-} TLanList;
+       u16             c_stat;
+       u16             frame_size;
+       struct tlan_buffer buffer[TLAN_BUFFERS_PER_LIST];
+};
 
 
 typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
@@ -164,49 +168,49 @@ typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
         *
         ****************************************************************/
 
-typedef struct tlan_private_tag {
-       struct net_device       *nextDevice;
-       struct pci_dev          *pciDev;
+struct tlan_priv {
+       struct net_device       *next_device;
+       struct pci_dev          *pci_dev;
        struct net_device       *dev;
-       void                    *dmaStorage;
-       dma_addr_t              dmaStorageDMA;
-       unsigned int            dmaSize;
-       u8                      *padBuffer;
-       TLanList                *rxList;
-       dma_addr_t              rxListDMA;
-       u8                      *rxBuffer;
-       dma_addr_t              rxBufferDMA;
-       u32                     rxHead;
-       u32                     rxTail;
-       u32                     rxEocCount;
-       TLanList                *txList;
-       dma_addr_t              txListDMA;
-       u8                      *txBuffer;
-       dma_addr_t              txBufferDMA;
-       u32                     txHead;
-       u32                     txInProgress;
-       u32                     txTail;
-       u32                     txBusyCount;
-       u32                     phyOnline;
-       u32                     timerSetAt;
-       u32                     timerType;
+       void                    *dma_storage;
+       dma_addr_t              dma_storage_dma;
+       unsigned int            dma_size;
+       u8                      *pad_buffer;
+       struct tlan_list        *rx_list;
+       dma_addr_t              rx_list_dma;
+       u8                      *rx_buffer;
+       dma_addr_t              rx_buffer_dma;
+       u32                     rx_head;
+       u32                     rx_tail;
+       u32                     rx_eoc_count;
+       struct tlan_list        *tx_list;
+       dma_addr_t              tx_list_dma;
+       u8                      *tx_buffer;
+       dma_addr_t              tx_buffer_dma;
+       u32                     tx_head;
+       u32                     tx_in_progress;
+       u32                     tx_tail;
+       u32                     tx_busy_count;
+       u32                     phy_online;
+       u32                     timer_set_at;
+       u32                     timer_type;
        struct timer_list       timer;
        struct board            *adapter;
-       u32                     adapterRev;
+       u32                     adapter_rev;
        u32                     aui;
        u32                     debug;
        u32                     duplex;
        u32                     phy[2];
-       u32                     phyNum;
+       u32                     phy_num;
        u32                     speed;
-       u8                      tlanRev;
-       u8                      tlanFullDuplex;
+       u8                      tlan_rev;
+       u8                      tlan_full_duplex;
        spinlock_t              lock;
        u8                      link;
        u8                      is_eisa;
        struct work_struct                      tlan_tqueue;
        u8                      neg_be_verbose;
-} TLanPrivateInfo;
+};
 
 
 
@@ -247,7 +251,7 @@ typedef struct tlan_private_tag {
         ****************************************************************/
 
 #define TLAN_HOST_CMD                  0x00
-#define        TLAN_HC_GO              0x80000000
+#define        TLAN_HC_GO              0x80000000
 #define                TLAN_HC_STOP            0x40000000
 #define                TLAN_HC_ACK             0x20000000
 #define                TLAN_HC_CS_MASK         0x1FE00000
@@ -283,7 +287,7 @@ typedef struct tlan_private_tag {
 #define                TLAN_NET_CMD_TRFRAM     0x02
 #define                TLAN_NET_CMD_TXPACE     0x01
 #define TLAN_NET_SIO                   0x01
-#define        TLAN_NET_SIO_MINTEN     0x80
+#define        TLAN_NET_SIO_MINTEN     0x80
 #define                TLAN_NET_SIO_ECLOK      0x40
 #define                TLAN_NET_SIO_ETXEN      0x20
 #define                TLAN_NET_SIO_EDATA      0x10
@@ -304,7 +308,7 @@ typedef struct tlan_private_tag {
 #define                TLAN_NET_MASK_MASK4     0x10
 #define                TLAN_NET_MASK_RSRVD     0x0F
 #define TLAN_NET_CONFIG                        0x04
-#define        TLAN_NET_CFG_RCLK       0x8000
+#define        TLAN_NET_CFG_RCLK       0x8000
 #define                TLAN_NET_CFG_TCLK       0x4000
 #define                TLAN_NET_CFG_BIT        0x2000
 #define                TLAN_NET_CFG_RXCRC      0x1000
@@ -372,7 +376,7 @@ typedef struct tlan_private_tag {
 /* Generic MII/PHY Registers */
 
 #define MII_GEN_CTL                    0x00
-#define        MII_GC_RESET            0x8000
+#define        MII_GC_RESET            0x8000
 #define                MII_GC_LOOPBK           0x4000
 #define                MII_GC_SPEEDSEL         0x2000
 #define                MII_GC_AUTOENB          0x1000
@@ -397,9 +401,9 @@ typedef struct tlan_private_tag {
 #define                MII_GS_EXTCAP           0x0001
 #define MII_GEN_ID_HI                  0x02
 #define MII_GEN_ID_LO                  0x03
-#define        MII_GIL_OUI             0xFC00
-#define        MII_GIL_MODEL           0x03F0
-#define        MII_GIL_REVISION        0x000F
+#define        MII_GIL_OUI             0xFC00
+#define        MII_GIL_MODEL           0x03F0
+#define        MII_GIL_REVISION        0x000F
 #define MII_AN_ADV                     0x04
 #define MII_AN_LPA                     0x05
 #define MII_AN_EXP                     0x06
@@ -408,7 +412,7 @@ typedef struct tlan_private_tag {
 
 #define TLAN_TLPHY_ID                  0x10
 #define TLAN_TLPHY_CTL                 0x11
-#define        TLAN_TC_IGLINK          0x8000
+#define        TLAN_TC_IGLINK          0x8000
 #define                TLAN_TC_SWAPOL          0x4000
 #define                TLAN_TC_AUISEL          0x2000
 #define                TLAN_TC_SQEEN           0x1000
@@ -435,41 +439,41 @@ typedef struct tlan_private_tag {
 #define LEVEL1_ID1                     0x7810
 #define LEVEL1_ID2                     0x0000
 
-#define CIRC_INC( a, b ) if ( ++a >= b ) a = 0
+#define CIRC_INC(a, b) if (++a >= b) a = 0
 
 /* Routines to access internal registers. */
 
-static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr)
+static inline u8 tlan_dio_read8(u16 base_addr, u16 internal_addr)
 {
        outw(internal_addr, base_addr + TLAN_DIO_ADR);
        return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3));
 
-} /* TLan_DioRead8 */
+}
 
 
 
 
-static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr)
+static inline u16 tlan_dio_read16(u16 base_addr, u16 internal_addr)
 {
        outw(internal_addr, base_addr + TLAN_DIO_ADR);
        return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2));
 
-} /* TLan_DioRead16 */
+}
 
 
 
 
-static inline u32 TLan_DioRead32(u16 base_addr, u16 internal_addr)
+static inline u32 tlan_dio_read32(u16 base_addr, u16 internal_addr)
 {
        outw(internal_addr, base_addr + TLAN_DIO_ADR);
        return inl(base_addr + TLAN_DIO_DATA);
 
-} /* TLan_DioRead32 */
+}
 
 
 
 
-static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data)
+static inline void tlan_dio_write8(u16 base_addr, u16 internal_addr, u8 data)
 {
        outw(internal_addr, base_addr + TLAN_DIO_ADR);
        outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3));
@@ -479,7 +483,7 @@ static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data)
 
 
 
-static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data)
+static inline void tlan_dio_write16(u16 base_addr, u16 internal_addr, u16 data)
 {
        outw(internal_addr, base_addr + TLAN_DIO_ADR);
        outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
@@ -489,16 +493,16 @@ static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data)
 
 
 
-static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
+static inline void tlan_dio_write32(u16 base_addr, u16 internal_addr, u32 data)
 {
        outw(internal_addr, base_addr + TLAN_DIO_ADR);
        outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
 
 }
 
-#define TLan_ClearBit( bit, port )     outb_p(inb_p(port) & ~bit, port)
-#define TLan_GetBit( bit, port )       ((int) (inb_p(port) & bit))
-#define TLan_SetBit( bit, port )       outb_p(inb_p(port) | bit, port)
+#define tlan_clear_bit(bit, port)      outb_p(inb_p(port) & ~bit, port)
+#define tlan_get_bit(bit, port)        ((int) (inb_p(port) & bit))
+#define tlan_set_bit(bit, port)        outb_p(inb_p(port) | bit, port)
 
 /*
  * given 6 bytes, view them as 8 6-bit numbers and return the XOR of those
@@ -506,37 +510,37 @@ static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
  *
  * The original code was:
  *
- * u32 xor( u32 a, u32 b ) {   return ( ( a && ! b ) || ( ! a && b ) ); }
+ * u32 xor(u32 a, u32 b) {     return ((a && !b ) || (! a && b )); }
  *
- * #define XOR8( a, b, c, d, e, f, g, h )      \
- *     xor( a, xor( b, xor( c, xor( d, xor( e, xor( f, xor( g, h ) ) ) ) ) ) )
- * #define DA( a, bit )                ( ( (u8) a[bit/8] ) & ( (u8) ( 1 << bit%8 ) ) )
+ * #define XOR8(a, b, c, d, e, f, g, h)        \
+ *     xor(a, xor(b, xor(c, xor(d, xor(e, xor(f, xor(g, h)) ) ) ) ) )
+ * #define DA(a, bit)          (( (u8) a[bit/8] ) & ( (u8) (1 << bit%8)) )
  *
- *     hash  = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
- *                   DA(a,30), DA(a,36), DA(a,42) );
- *     hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
- *                   DA(a,31), DA(a,37), DA(a,43) ) << 1;
- *     hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
- *                   DA(a,32), DA(a,38), DA(a,44) ) << 2;
- *     hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
- *                   DA(a,33), DA(a,39), DA(a,45) ) << 3;
- *     hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
- *                   DA(a,34), DA(a,40), DA(a,46) ) << 4;
- *     hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
- *                   DA(a,35), DA(a,41), DA(a,47) ) << 5;
+ *     hash  = XOR8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
+ *                   DA(a,30), DA(a,36), DA(a,42));
+ *     hash |= XOR8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
+ *                   DA(a,31), DA(a,37), DA(a,43)) << 1;
+ *     hash |= XOR8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
+ *                   DA(a,32), DA(a,38), DA(a,44)) << 2;
+ *     hash |= XOR8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
+ *                   DA(a,33), DA(a,39), DA(a,45)) << 3;
+ *     hash |= XOR8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
+ *                   DA(a,34), DA(a,40), DA(a,46)) << 4;
+ *     hash |= XOR8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
+ *                   DA(a,35), DA(a,41), DA(a,47)) << 5;
  *
  */
-static inline u32 TLan_HashFunc( const u8 *a )
+static inline u32 tlan_hash_func(const u8 *a)
 {
-        u8     hash;
+       u8     hash;
 
-        hash = (a[0]^a[3]);             /* & 077 */
-        hash ^= ((a[0]^a[3])>>6);       /* & 003 */
-        hash ^= ((a[1]^a[4])<<2);       /* & 074 */
-        hash ^= ((a[1]^a[4])>>4);       /* & 017 */
-        hash ^= ((a[2]^a[5])<<4);       /* & 060 */
-        hash ^= ((a[2]^a[5])>>2);       /* & 077 */
+       hash = (a[0]^a[3]);             /* & 077 */
+       hash ^= ((a[0]^a[3])>>6);       /* & 003 */
+       hash ^= ((a[1]^a[4])<<2);       /* & 074 */
+       hash ^= ((a[1]^a[4])>>4);       /* & 017 */
+       hash ^= ((a[2]^a[5])<<4);       /* & 060 */
+       hash ^= ((a[2]^a[5])>>2);       /* & 077 */
 
-        return hash & 077;
+       return hash & 077;
 }
 #endif
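
        A note on how the hash above is consumed: the return value is a bucket number
        from 0 to 63 that selects one bit of the adapter's 64-bit multicast filter,
        split across two 32-bit hash registers. The sketch below is hedged: ha, hash1,
        hash2 and dev are assumed locals in the style of the driver's set-multicast
        path, and TLAN_HASH_1/TLAN_HASH_2 are the chip's hash-table register offsets
        from this header.

                /* Sketch: set the filter bit for one multicast address.  hash1
                 * and hash2 accumulate the 64-bit table before it is written
                 * back through the DIO helpers defined above.
                 */
                u32 offset = tlan_hash_func(ha->addr);          /* 0..63 */

                if (offset < 32)
                        hash1 |= (1u << offset);
                else
                        hash2 |= (1u << (offset - 32));

                tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
                tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);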
index b100bd50a0d7f27e0bda4144ffb3288a5bfc4d40..f5e9ac00a07bdd95040d9e8f47903f55c46c708e 100644 (file)
@@ -34,6 +34,8 @@
  *    Modifications for 2.3.99-pre5 kernel.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #define DRV_NAME       "tun"
 #define DRV_VERSION    "1.6"
 #define DRV_DESCRIPTION        "Universal TUN/TAP device driver"
 #ifdef TUN_DEBUG
 static int debug;
 
-#define DBG  if(tun->debug)printk
-#define DBG1 if(debug==2)printk
+#define tun_debug(level, tun, fmt, args...)                    \
+do {                                                           \
+       if (tun->debug)                                         \
+               netdev_printk(level, tun->dev, fmt, ##args);    \
+} while (0)
+#define DBG1(level, fmt, args...)                              \
+do {                                                           \
+       if (debug == 2)                                         \
+               printk(level fmt, ##args);                      \
+} while (0)
 #else
-#define DBG( a... )
-#define DBG1( a... )
+#define tun_debug(level, tun, fmt, args...)                    \
+do {                                                           \
+       if (0)                                                  \
+               netdev_printk(level, tun->dev, fmt, ##args);    \
+} while (0)
+#define DBG1(level, fmt, args...)                              \
+do {                                                           \
+       if (0)                                                  \
+               printk(level fmt, ##args);                      \
+} while (0)
 #endif
 
 #define FLT_EXACT_COUNT 8
@@ -205,7 +223,7 @@ static void tun_put(struct tun_struct *tun)
                tun_detach(tfile->tun);
 }
 
-/* TAP filterting */
+/* TAP filtering */
 static void addr_hash_set(u32 *mask, const u8 *addr)
 {
        int n = ether_crc(ETH_ALEN, addr) >> 26;
@@ -360,7 +378,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct tun_struct *tun = netdev_priv(dev);
 
-       DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len);
+       tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
 
        /* Drop packet if interface is not attached */
        if (!tun->tfile)
@@ -499,7 +517,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
 
        sk = tun->socket.sk;
 
-       DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
+       tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
 
        poll_wait(file, &tun->wq.wait, wait);
 
@@ -690,7 +708,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
        if (!tun)
                return -EBADFD;
 
-       DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count);
+       tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
 
        result = tun_get_user(tun, iv, iov_length(iv, count),
                              file->f_flags & O_NONBLOCK);
@@ -739,7 +757,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
                        else if (sinfo->gso_type & SKB_GSO_UDP)
                                gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                        else {
-                               printk(KERN_ERR "tun: unexpected GSO type: "
+                               pr_err("unexpected GSO type: "
                                       "0x%x, gso_size %d, hdr_len %d\n",
                                       sinfo->gso_type, gso.gso_size,
                                       gso.hdr_len);
@@ -786,7 +804,7 @@ static ssize_t tun_do_read(struct tun_struct *tun,
        struct sk_buff *skb;
        ssize_t ret = 0;
 
-       DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
+       tun_debug(KERN_INFO, tun, "tun_chr_read\n");
 
        add_wait_queue(&tun->wq.wait, &wait);
        while (len) {
@@ -1083,7 +1101,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
                    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
                    device_create_file(&tun->dev->dev, &dev_attr_group))
-                       printk(KERN_ERR "Failed to create tun sysfs files\n");
+                       pr_err("Failed to create tun sysfs files\n");
 
                sk->sk_destruct = tun_sock_destruct;
 
@@ -1092,7 +1110,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                        goto failed;
        }
 
-       DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name);
+       tun_debug(KERN_INFO, tun, "tun_set_iff\n");
 
        if (ifr->ifr_flags & IFF_NO_PI)
                tun->flags |= TUN_NO_PI;
@@ -1129,7 +1147,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 static int tun_get_iff(struct net *net, struct tun_struct *tun,
                       struct ifreq *ifr)
 {
-       DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name);
+       tun_debug(KERN_INFO, tun, "tun_get_iff\n");
 
        strcpy(ifr->ifr_name, tun->dev->name);
 
@@ -1142,7 +1160,7 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun,
  * privs required. */
 static int set_offload(struct net_device *dev, unsigned long arg)
 {
-       unsigned int old_features, features;
+       u32 old_features, features;
 
        old_features = dev->features;
        /* Unset features, set them as we chew on the arg. */
@@ -1229,7 +1247,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
        if (!tun)
                goto unlock;
 
-       DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd);
+       tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %d\n", cmd);
 
        ret = 0;
        switch (cmd) {
@@ -1249,8 +1267,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                else
                        tun->flags &= ~TUN_NOCHECKSUM;
 
-               DBG(KERN_INFO "%s: checksum %s\n",
-                   tun->dev->name, arg ? "disabled" : "enabled");
+               tun_debug(KERN_INFO, tun, "checksum %s\n",
+                         arg ? "disabled" : "enabled");
                break;
 
        case TUNSETPERSIST:
@@ -1260,33 +1278,34 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                else
                        tun->flags &= ~TUN_PERSIST;
 
-               DBG(KERN_INFO "%s: persist %s\n",
-                   tun->dev->name, arg ? "enabled" : "disabled");
+               tun_debug(KERN_INFO, tun, "persist %s\n",
+                         arg ? "enabled" : "disabled");
                break;
 
        case TUNSETOWNER:
                /* Set owner of the device */
                tun->owner = (uid_t) arg;
 
-               DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner);
+               tun_debug(KERN_INFO, tun, "owner set to %d\n", tun->owner);
                break;
 
        case TUNSETGROUP:
                /* Set group of the device */
                tun->group= (gid_t) arg;
 
-               DBG(KERN_INFO "%s: group set to %d\n", tun->dev->name, tun->group);
+               tun_debug(KERN_INFO, tun, "group set to %d\n", tun->group);
                break;
 
        case TUNSETLINK:
                /* Only allow setting the type when the interface is down */
                if (tun->dev->flags & IFF_UP) {
-                       DBG(KERN_INFO "%s: Linktype set failed because interface is up\n",
-                               tun->dev->name);
+                       tun_debug(KERN_INFO, tun,
+                                 "Linktype set failed because interface is up\n");
                        ret = -EBUSY;
                } else {
                        tun->dev->type = (int) arg;
-                       DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type);
+                       tun_debug(KERN_INFO, tun, "linktype set to %d\n",
+                                 tun->dev->type);
                        ret = 0;
                }
                break;
@@ -1318,8 +1337,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 
        case SIOCSIFHWADDR:
                /* Set hw address */
-               DBG(KERN_DEBUG "%s: set hw address: %pM\n",
-                       tun->dev->name, ifr.ifr_hwaddr.sa_data);
+               tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
+                         ifr.ifr_hwaddr.sa_data);
 
                ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
                break;
@@ -1433,7 +1452,7 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
        if (!tun)
                return -EBADFD;
 
-       DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->dev->name, on);
+       tun_debug(KERN_INFO, tun, "tun_chr_fasync %d\n", on);
 
        if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
                goto out;
@@ -1455,7 +1474,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
 {
        struct tun_file *tfile;
 
-       DBG1(KERN_INFO "tunX: tun_chr_open\n");
+       DBG1(KERN_INFO, "tunX: tun_chr_open\n");
 
        tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
        if (!tfile)
@@ -1476,7 +1495,7 @@ static int tun_chr_close(struct inode *inode, struct file *file)
        if (tun) {
                struct net_device *dev = tun->dev;
 
-               DBG(KERN_INFO "%s: tun_chr_close\n", dev->name);
+               tun_debug(KERN_INFO, tun, "tun_chr_close\n");
 
                __tun_detach(tun);
 
@@ -1607,18 +1626,18 @@ static int __init tun_init(void)
 {
        int ret = 0;
 
-       printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
-       printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT);
+       pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
+       pr_info("%s\n", DRV_COPYRIGHT);
 
        ret = rtnl_link_register(&tun_link_ops);
        if (ret) {
-               printk(KERN_ERR "tun: Can't register link_ops\n");
+               pr_err("Can't register link_ops\n");
                goto err_linkops;
        }
 
        ret = misc_register(&tun_miscdev);
        if (ret) {
-               printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR);
+               pr_err("Can't register misc device %d\n", TUN_MINOR);
                goto err_misc;
        }
        return  0;
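
        For reference, the pr_fmt()/pr_err() conversion above and the new tun_debug()
        macro combine so that every message carries the module prefix without
        repeating it at each call site. A small, hedged usage sketch; the message
        strings are illustrative, and tun and skb are assumed to be in scope.

                /* With pr_fmt defined as KBUILD_MODNAME ": " fmt, this expands
                 * to printk(KERN_ERR "tun: something went wrong: -12\n").
                 */
                pr_err("something went wrong: %d\n", -ENOMEM);

                /* tun_debug() emits through netdev_printk() only when TUN_DEBUG
                 * is compiled in and tun->debug is set; otherwise the arguments
                 * are still type-checked but nothing is printed.
                 */
                tun_debug(KERN_DEBUG, tun, "queued %d bytes\n", skb->len);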
index a3c46f6a15e7c5b02a91542406208928cdb23a6d..7fa5ec2de942cc4f45dfc0f43374614c885925a9 100644 (file)
@@ -123,12 +123,11 @@ static const int multicast_filter_limit = 32;
 #include <linux/in6.h>
 #include <linux/dma-mapping.h>
 #include <linux/firmware.h>
-#include <generated/utsrelease.h>
 
 #include "typhoon.h"
 
 MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
-MODULE_VERSION(UTS_RELEASE);
+MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
 MODULE_FIRMWARE(FIRMWARE_NAME);
 MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
index cc83fa71c3ffb970b5107aa812e1e1956c9c7c82..105d7f0630ccb9c0a14b12395b04b322c5362e99 100644 (file)
@@ -403,17 +403,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
        if (tb[IFLA_ADDRESS] == NULL)
                random_ether_addr(dev->dev_addr);
 
-       if (tb[IFLA_IFNAME])
-               nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
-       else
-               snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
-
-       if (strchr(dev->name, '%')) {
-               err = dev_alloc_name(dev, dev->name);
-               if (err < 0)
-                       goto err_alloc_name;
-       }
-
        err = register_netdevice(dev);
        if (err < 0)
                goto err_register_dev;
@@ -433,7 +422,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
 
 err_register_dev:
        /* nothing to do */
-err_alloc_name:
 err_configure_peer:
        unregister_netdevice(peer);
        return err;
index 09cac704fdd74879ed75bf7af5b82ba7d45551dc..0d6fec6b7d93d8b22475bcd5774cf83409b6aaf0 100644 (file)
@@ -2923,6 +2923,7 @@ static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
 static int velocity_set_wol(struct velocity_info *vptr)
 {
        struct mac_regs __iomem *regs = vptr->mac_regs;
+       enum speed_opt spd_dpx = vptr->options.spd_dpx;
        static u8 buf[256];
        int i;
 
@@ -2968,6 +2969,12 @@ static int velocity_set_wol(struct velocity_info *vptr)
 
        writew(0x0FFF, &regs->WOLSRClr);
 
+       if (spd_dpx == SPD_DPX_1000_FULL)
+               goto mac_done;
+
+       if (spd_dpx != SPD_DPX_AUTO)
+               goto advertise_done;
+
        if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
                if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
                        MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
@@ -2978,6 +2985,7 @@ static int velocity_set_wol(struct velocity_info *vptr)
        if (vptr->mii_status & VELOCITY_SPEED_1000)
                MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
 
+advertise_done:
        BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
 
        {
@@ -2987,6 +2995,7 @@ static int velocity_set_wol(struct velocity_info *vptr)
                writeb(GCR, &regs->CHIPGCR);
        }
 
+mac_done:
        BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
        /* Turn on SWPTAG just before entering power mode */
        BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
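
        The two new labels above encode a simple precedence for the WOL setup: a
        forced gigabit link leaves both the PHY advertisement fixups and the CHIPGCR
        forced-mode block untouched, while any other forced speed/duplex skips only
        the advertisement fixups. A condensed, hedged outline of that flow, not the
        literal function body:

                if (spd_dpx == SPD_DPX_1000_FULL)
                        goto mac_done;          /* leave PHY and CHIPGCR alone */
                if (spd_dpx != SPD_DPX_AUTO)
                        goto advertise_done;    /* skip autoneg advertisement tweaks */
                /* ... autoneg-only advertisement adjustments ... */
        advertise_done:
                /* ... CHIPGCR forced-mode programming ... */
        mac_done:
                ;                               /* common WOL tail continues here */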
index aa2e69b9ff61301569cf1a8e1b3d62259ace2894..d7227539484e3d5dae53b4077109d38844d32b00 100644 (file)
@@ -361,7 +361,7 @@ enum  velocity_owner {
 #define MAC_REG_CHIPGSR     0x9C
 #define MAC_REG_TESTCFG     0x9D
 #define MAC_REG_DEBUG       0x9E
-#define MAC_REG_CHIPGCR     0x9F
+#define MAC_REG_CHIPGCR     0x9F       /* Chip Operation and Diagnostic Control */
 #define MAC_REG_WOLCR0_SET  0xA0
 #define MAC_REG_WOLCR1_SET  0xA1
 #define MAC_REG_PWCFG_SET   0xA2
@@ -848,10 +848,10 @@ enum  velocity_owner {
  *     Bits in CHIPGCR register
  */
 
-#define CHIPGCR_FCGMII      0x80       /* enable GMII mode */
-#define CHIPGCR_FCFDX       0x40
+#define CHIPGCR_FCGMII      0x80       /* force GMII (else MII only) */
+#define CHIPGCR_FCFDX       0x40       /* force full duplex */
 #define CHIPGCR_FCRESV      0x20
-#define CHIPGCR_FCMODE      0x10
+#define CHIPGCR_FCMODE      0x10       /* enable MAC forced mode */
 #define CHIPGCR_LPSOPT      0x08
 #define CHIPGCR_TM1US       0x04
 #define CHIPGCR_TM0US       0x02
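
        Given the clarified CHIPGCR bit comments above, programming the MAC's forced
        mode is a read-modify-write of that one register. A hedged, generic sketch
        using plain readb()/writeb(); the driver itself goes through its
        BYTE_REG_BITS_ON()/BYTE_REG_BITS_OFF() helpers, and the exact bit choices
        depend on the link being forced.

                /* Illustrative only: force MII (not GMII), full duplex, with
                 * the MAC in forced mode.  regs is the mapped mac_regs block.
                 */
                u8 gcr = readb(&regs->CHIPGCR);

                gcr |= CHIPGCR_FCMODE;          /* enable MAC forced mode */
                gcr &= ~CHIPGCR_FCGMII;         /* MII only, no GMII */
                gcr |= CHIPGCR_FCFDX;           /* force full duplex */
                writeb(gcr, &regs->CHIPGCR);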
index 228d4f7a58afdd4f1a3dbfb8d1ce62898237aaf5..e74e4b42592dbd2873b4d68c416a50a610d2b738 100644 (file)
@@ -387,8 +387,8 @@ vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
                data1 = steer_ctrl = 0;
 
                status = vxge_hw_vpath_fw_api(vpath,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
                        VXGE_HW_FW_API_GET_EPROM_REV,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
                        0, &data0, &data1, &steer_ctrl);
                if (status != VXGE_HW_OK)
                        break;
@@ -2868,6 +2868,8 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
        ring->rxd_init = attr->rxd_init;
        ring->rxd_term = attr->rxd_term;
        ring->buffer_mode = config->buffer_mode;
+       ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
+       ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
        ring->rxds_limit = config->rxds_limit;
 
        ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
@@ -3511,6 +3513,8 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
 
        /* apply "interrupts per txdl" attribute */
        fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
+       fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
+       fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
 
        if (fifo->config->intr)
                fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
@@ -4377,6 +4381,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
                }
 
                writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+               vpath->tim_tti_cfg1_saved = val64;
+
                val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
 
                if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4433,6 +4439,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
                }
 
                writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
+               vpath->tim_tti_cfg3_saved = val64;
        }
 
        if (config->ring.enable == VXGE_HW_RING_ENABLE) {
@@ -4481,6 +4488,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
                }
 
                writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
+               vpath->tim_rti_cfg1_saved = val64;
+
                val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
 
                if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4537,6 +4546,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
                }
 
                writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
+               vpath->tim_rti_cfg3_saved = val64;
        }
 
        val64 = 0;
@@ -4555,26 +4565,6 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
        return status;
 }
 
-void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
-{
-       struct __vxge_hw_virtualpath *vpath;
-       struct vxge_hw_vpath_reg __iomem *vp_reg;
-       struct vxge_hw_vp_config *config;
-       u64 val64;
-
-       vpath = &hldev->virtual_paths[vp_id];
-       vp_reg = vpath->vp_reg;
-       config = vpath->vp_config;
-
-       if (config->fifo.enable == VXGE_HW_FIFO_ENABLE &&
-           config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
-               config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
-               val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
-               val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
-               writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
-       }
-}
-
 /*
  * __vxge_hw_vpath_initialize
  * This routine is the final phase of init which initializes the
index e249e288d160af59634a0c60c7b55319ab8b7469..3c53aa732c9d3c27d7349c82fd83f03209e07fee 100644 (file)
@@ -682,6 +682,10 @@ struct __vxge_hw_virtualpath {
        u32                             vsport_number;
        u32                             max_kdfc_db;
        u32                             max_nofl_db;
+       u64                             tim_tti_cfg1_saved;
+       u64                             tim_tti_cfg3_saved;
+       u64                             tim_rti_cfg1_saved;
+       u64                             tim_rti_cfg3_saved;
 
        struct __vxge_hw_ring *____cacheline_aligned ringh;
        struct __vxge_hw_fifo *____cacheline_aligned fifoh;
@@ -921,6 +925,9 @@ struct __vxge_hw_ring {
        u32                                     doorbell_cnt;
        u32                                     total_db_cnt;
        u64                                     rxds_limit;
+       u32                                     rtimer;
+       u64                                     tim_rti_cfg1_saved;
+       u64                                     tim_rti_cfg3_saved;
 
        enum vxge_hw_status (*callback)(
                        struct __vxge_hw_ring *ringh,
@@ -1000,6 +1007,9 @@ struct __vxge_hw_fifo {
        u32                                     per_txdl_space;
        u32                                     vp_id;
        u32                                     tx_intr_num;
+       u32                                     rtimer;
+       u64                                     tim_tti_cfg1_saved;
+       u64                                     tim_tti_cfg3_saved;
 
        enum vxge_hw_status (*callback)(
                        struct __vxge_hw_fifo *fifo_handle,
index c81a6512c683d4dee03d0b515895ce2846ebf4ec..395423aeec0039b7b00512d2d24c77881afecef0 100644 (file)
@@ -371,9 +371,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
        struct vxge_hw_ring_rxd_info ext_info;
        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
                ring->ndev->name, __func__, __LINE__);
-       ring->pkts_processed = 0;
-
-       vxge_hw_ring_replenish(ringh);
 
        do {
                prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -1588,6 +1585,36 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
        return ret;
 }
 
+/* Configure CI */
+static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
+{
+       int i = 0;
+
+       /* Enable CI for RTI */
+       if (vdev->config.intr_type == MSI_X) {
+               for (i = 0; i < vdev->no_of_vpath; i++) {
+                       struct __vxge_hw_ring *hw_ring;
+
+                       hw_ring = vdev->vpaths[i].ring.handle;
+                       vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
+               }
+       }
+
+       /* Enable CI for TTI */
+       for (i = 0; i < vdev->no_of_vpath; i++) {
+               struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
+               vxge_hw_vpath_tti_ci_set(hw_fifo);
+               /*
+                * For INTA (with or without NAPI), set CI on only one
+                * vpath (there is only one free-running timer).
+                */
+               if ((vdev->config.intr_type == INTA) && (i == 0))
+                       break;
+       }
+
+       return;
+}
+
 static int do_vxge_reset(struct vxgedev *vdev, int event)
 {
        enum vxge_hw_status status;
@@ -1753,6 +1780,9 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
                netif_tx_wake_all_queues(vdev->ndev);
        }
 
+       /* configure CI */
+       vxge_config_ci_for_tti_rti(vdev);
+
 out:
        vxge_debug_entryexit(VXGE_TRACE,
                "%s:%d  Exiting...", __func__, __LINE__);
@@ -1793,22 +1823,29 @@ static void vxge_reset(struct work_struct *work)
  */
 static int vxge_poll_msix(struct napi_struct *napi, int budget)
 {
-       struct vxge_ring *ring =
-               container_of(napi, struct vxge_ring, napi);
+       struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
+       int pkts_processed;
        int budget_org = budget;
-       ring->budget = budget;
 
+       ring->budget = budget;
+       ring->pkts_processed = 0;
        vxge_hw_vpath_poll_rx(ring->handle);
+       pkts_processed = ring->pkts_processed;
 
        if (ring->pkts_processed < budget_org) {
                napi_complete(napi);
+
                /* Re enable the Rx interrupts for the vpath */
                vxge_hw_channel_msix_unmask(
                                (struct __vxge_hw_channel *)ring->handle,
                                ring->rx_vector_no);
+               mmiowb();
        }
 
-       return ring->pkts_processed;
+       /* Copy and return the local variable: if the interrupt fires right
+        * after the MSI-X vector is unmasked above, it can preempt this
+        * NAPI thread and change ring->pkts_processed under us. */
+       return pkts_processed;
 }
 
 static int vxge_poll_inta(struct napi_struct *napi, int budget)
@@ -1824,6 +1861,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
        for (i = 0; i < vdev->no_of_vpath; i++) {
                ring = &vdev->vpaths[i].ring;
                ring->budget = budget;
+               ring->pkts_processed = 0;
                vxge_hw_vpath_poll_rx(ring->handle);
                pkts_processed += ring->pkts_processed;
                budget -= ring->pkts_processed;
@@ -2054,6 +2092,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
                                        netdev_get_tx_queue(vdev->ndev, 0);
                        vpath->fifo.indicate_max_pkts =
                                vdev->config.fifo_indicate_max_pkts;
+                       vpath->fifo.tx_vector_no = 0;
                        vpath->ring.rx_vector_no = 0;
                        vpath->ring.rx_csum = vdev->rx_csum;
                        vpath->ring.rx_hwts = vdev->rx_hwts;
@@ -2079,6 +2118,61 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
        return VXGE_HW_OK;
 }
 
+/**
+ *  adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
+ *  if the interrupts are not within a range
+ *  @fifo: pointer to transmit fifo structure
+ *  Description: The function changes the boundary timer and restriction timer
+ *  values depending on the traffic
+ *  Return Value: None
+ */
+static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
+{
+       fifo->interrupt_count++;
+       if (jiffies > fifo->jiffies + HZ / 100) {
+               struct __vxge_hw_fifo *hw_fifo = fifo->handle;
+
+               fifo->jiffies = jiffies;
+               if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
+                   hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
+                       hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
+                       vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+               } else if (hw_fifo->rtimer != 0) {
+                       hw_fifo->rtimer = 0;
+                       vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+               }
+               fifo->interrupt_count = 0;
+       }
+}
+
+/**
+ *  adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
+ *  if the interrupts are not within a range
+ *  @ring: pointer to receive ring structure
+ *  Description: The function increases or decreases the packet counts within
+ *  the ranges of traffic utilization, if the interrupts due to this ring are
+ *  not within a fixed range.
+ *  Return Value: Nothing
+ */
+static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
+{
+       ring->interrupt_count++;
+       if (jiffies > ring->jiffies + HZ / 100) {
+               struct __vxge_hw_ring *hw_ring = ring->handle;
+
+               ring->jiffies = jiffies;
+               if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
+                   hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
+                       hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
+                       vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+               } else if (hw_ring->rtimer != 0) {
+                       hw_ring->rtimer = 0;
+                       vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+               }
+               ring->interrupt_count = 0;
+       }
+}
+
 /*
  *  vxge_isr_napi
  *  @irq: the irq of the device.
@@ -2139,24 +2233,39 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
 
 #ifdef CONFIG_PCI_MSI
 
-static irqreturn_t
-vxge_tx_msix_handle(int irq, void *dev_id)
+static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
 {
        struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
 
+       adaptive_coalesce_tx_interrupts(fifo);
+
+       vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
+                                 fifo->tx_vector_no);
+
+       vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
+                                  fifo->tx_vector_no);
+
        VXGE_COMPLETE_VPATH_TX(fifo);
 
+       vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
+                                   fifo->tx_vector_no);
+
+       mmiowb();
+
        return IRQ_HANDLED;
 }
 
-static irqreturn_t
-vxge_rx_msix_napi_handle(int irq, void *dev_id)
+static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
 {
        struct vxge_ring *ring = (struct vxge_ring *)dev_id;
 
-       /* MSIX_IDX for Rx is 1 */
+       adaptive_coalesce_rx_interrupts(ring);
+
        vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
-                                       ring->rx_vector_no);
+                                 ring->rx_vector_no);
+
+       vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
+                                  ring->rx_vector_no);
 
        napi_schedule(&ring->napi);
        return IRQ_HANDLED;
@@ -2173,14 +2282,20 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
                VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
 
        for (i = 0; i < vdev->no_of_vpath; i++) {
+               /* Reduce the chance of losing alarm interrupts by masking
+                * the vector. A pending bit will be set if an alarm is
+                * generated and on unmask the interrupt will be fired.
+                */
                vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
+               vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
+               mmiowb();
 
                status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
                        vdev->exec_mode);
                if (status == VXGE_HW_OK) {
-
                        vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
-                                       msix_id);
+                                                 msix_id);
+                       mmiowb();
                        continue;
                }
                vxge_debug_intr(VXGE_ERR,
@@ -2299,6 +2414,9 @@ static int vxge_enable_msix(struct vxgedev *vdev)
                        vpath->ring.rx_vector_no = (vpath->device_id *
                                                VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
 
+                       vpath->fifo.tx_vector_no = (vpath->device_id *
+                                               VXGE_HW_VPATH_MSIX_ACTIVE);
+
                        vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
                                               VXGE_ALARM_MSIX_ID);
                }
@@ -2474,8 +2592,9 @@ INTA_MODE:
                        "%s:vxge:INTA", vdev->ndev->name);
                vxge_hw_device_set_intr_type(vdev->devh,
                        VXGE_HW_INTR_MODE_IRQLINE);
-               vxge_hw_vpath_tti_ci_set(vdev->devh,
-                       vdev->vpaths[0].device_id);
+
+               vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
+
                ret = request_irq((int) vdev->pdev->irq,
                        vxge_isr_napi,
                        IRQF_SHARED, vdev->desc[0], vdev);
@@ -2745,6 +2864,10 @@ static int vxge_open(struct net_device *dev)
        }
 
        netif_tx_start_all_queues(vdev->ndev);
+
+       /* configure CI */
+       vxge_config_ci_for_tti_rti(vdev);
+
        goto out0;
 
 out2:
@@ -3264,19 +3387,6 @@ static const struct net_device_ops vxge_netdev_ops = {
 #endif
 };
 
-static int __devinit vxge_device_revision(struct vxgedev *vdev)
-{
-       int ret;
-       u8 revision;
-
-       ret = pci_read_config_byte(vdev->pdev, PCI_REVISION_ID, &revision);
-       if (ret)
-               return -EIO;
-
-       vdev->titan1 = (revision == VXGE_HW_TITAN1_PCI_REVISION);
-       return 0;
-}
-
 static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
                                          struct vxge_config *config,
                                          int high_dma, int no_of_vpath,
@@ -3316,10 +3426,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
        memcpy(&vdev->config, config, sizeof(struct vxge_config));
        vdev->rx_csum = 1;      /* Enable Rx CSUM by default. */
        vdev->rx_hwts = 0;
-
-       ret = vxge_device_revision(vdev);
-       if (ret < 0)
-               goto _out1;
+       vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
 
        SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
 
@@ -3348,7 +3455,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
                vxge_debug_init(VXGE_ERR,
                        "%s: vpath memory allocation failed",
                        vdev->ndev->name);
-               ret = -ENODEV;
+               ret = -ENOMEM;
                goto _out1;
        }
 
@@ -3369,11 +3476,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
        if (vdev->config.gro_enable)
                ndev->features |= NETIF_F_GRO;
 
-       if (register_netdev(ndev)) {
+       ret = register_netdev(ndev);
+       if (ret) {
                vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
                        "%s: %s : device registration failed!",
                        ndev->name, __func__);
-               ret = -ENODEV;
                goto _out2;
        }
 
@@ -3444,6 +3551,11 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)
        /* in 2.6 will call stop() if device is up */
        unregister_netdev(dev);
 
+       kfree(vdev->vpaths);
+
+       /* we are safe to free it now */
+       free_netdev(dev);
+
        vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
                        buf);
        vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d  Exiting...", buf,
@@ -3799,7 +3911,7 @@ static void __devinit vxge_device_config_init(
                break;
 
        case MSI_X:
-               device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
+               device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
                break;
        }
 
@@ -4335,10 +4447,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                goto _exit1;
        }
 
-       if (pci_request_region(pdev, 0, VXGE_DRIVER_NAME)) {
+       ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
+       if (ret) {
                vxge_debug_init(VXGE_ERR,
                        "%s : request regions failed", __func__);
-               ret = -ENODEV;
                goto _exit1;
        }
 
@@ -4446,7 +4558,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                        if (!img[i].is_valid)
                                break;
                        vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
-                                       "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i,
+                                       "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
                                        VXGE_EPROM_IMG_MAJOR(img[i].version),
                                        VXGE_EPROM_IMG_MINOR(img[i].version),
                                        VXGE_EPROM_IMG_FIX(img[i].version),
@@ -4643,8 +4755,9 @@ _exit6:
 _exit5:
        vxge_device_unregister(hldev);
 _exit4:
-       pci_disable_sriov(pdev);
+       pci_set_drvdata(pdev, NULL);
        vxge_hw_device_terminate(hldev);
+       pci_disable_sriov(pdev);
 _exit3:
        iounmap(attr.bar0);
 _exit2:
@@ -4655,7 +4768,7 @@ _exit0:
        kfree(ll_config);
        kfree(device_config);
        driver_config->config_dev_cnt--;
-       pci_set_drvdata(pdev, NULL);
+       driver_config->total_dev_cnt--;
        return ret;
 }
 
@@ -4668,45 +4781,34 @@ _exit0:
 static void __devexit vxge_remove(struct pci_dev *pdev)
 {
        struct __vxge_hw_device *hldev;
-       struct vxgedev *vdev = NULL;
-       struct net_device *dev;
-       int i = 0;
+       struct vxgedev *vdev;
+       int i;
 
        hldev = pci_get_drvdata(pdev);
-
        if (hldev == NULL)
                return;
 
-       dev = hldev->ndev;
-       vdev = netdev_priv(dev);
+       vdev = netdev_priv(hldev->ndev);
 
        vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
-
        vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
                        __func__);
-       vxge_device_unregister(hldev);
 
-       for (i = 0; i < vdev->no_of_vpath; i++) {
+       for (i = 0; i < vdev->no_of_vpath; i++)
                vxge_free_mac_add_list(&vdev->vpaths[i]);
-               vdev->vpaths[i].mcast_addr_cnt = 0;
-               vdev->vpaths[i].mac_addr_cnt = 0;
-       }
-
-       kfree(vdev->vpaths);
 
+       vxge_device_unregister(hldev);
+       pci_set_drvdata(pdev, NULL);
+       /* Do not call pci_disable_sriov here, as it will break child devices */
+       vxge_hw_device_terminate(hldev);
        iounmap(vdev->bar0);
-
-       /* we are safe to free it now */
-       free_netdev(dev);
+       pci_release_region(pdev, 0);
+       pci_disable_device(pdev);
+       driver_config->config_dev_cnt--;
+       driver_config->total_dev_cnt--;
 
        vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
                        __func__, __LINE__);
-
-       vxge_hw_device_terminate(hldev);
-
-       pci_disable_device(pdev);
-       pci_release_region(pdev, 0);
-       pci_set_drvdata(pdev, NULL);
        vxge_debug_entryexit(vdev->level_trace, "%s:%d  Exiting...", __func__,
                             __LINE__);
 }
index 5746fedc356fb3691a4771f860d88145fb6dd6eb..40474f0da5766c52f6acdc89ef31abc62efe43fa 100644 (file)
 #define VXGE_TTI_LTIMER_VAL    1000
 #define VXGE_T1A_TTI_LTIMER_VAL        80
 #define VXGE_TTI_RTIMER_VAL    0
+#define VXGE_TTI_RTIMER_ADAPT_VAL      10
 #define VXGE_T1A_TTI_RTIMER_VAL        400
 #define VXGE_RTI_BTIMER_VAL    250
 #define VXGE_RTI_LTIMER_VAL    100
 #define VXGE_RTI_RTIMER_VAL    0
-#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
+#define VXGE_RTI_RTIMER_ADAPT_VAL      15
+#define VXGE_FIFO_INDICATE_MAX_PKTS    VXGE_DEF_FIFO_LENGTH
 #define VXGE_ISR_POLLING_CNT   8
 #define VXGE_MAX_CONFIG_DEV    0xFF
 #define VXGE_EXEC_MODE_DISABLE 0
 #define RTI_T1A_RX_UFC_C       50
 #define RTI_T1A_RX_UFC_D       60
 
+/*
+ * With the default moderation parameters the interrupt rate stays at
+ * roughly 3k per second for most, but not all, traffic. The values below
+ * are the maximum interrupt counts allowed per function (INTA) or per
+ * vector (MSI-X) within a 10 millisecond window before the adaptive
+ * scheme steps in. Enabled only for Titan 1A.
+ */
+#define VXGE_T1A_MAX_INTERRUPT_COUNT   100
+#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT        200
 
 /* Milli secs timer period */
 #define VXGE_TIMER_DELAY               10000
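The interrupt_count and jiffies fields added to struct vxge_fifo and struct vxge_ring in the hunks below feed this adaptive scheme. A minimal sketch of how the transmit side might apply it, assuming the caller has both the driver fifo and its hardware fifo handle; the helper name and the on/off policy are illustrative, not taken from this patch. VXGE_T1A_MAX_TX_INTERRUPT_COUNT, VXGE_TTI_RTIMER_ADAPT_VAL, the struct fields and vxge_hw_vpath_dynamic_tti_rtimer_set() are from the patch itself.

/* Illustrative only: count interrupts over a 10 ms window and engage the
 * restriction timer when the Titan 1A budget is exceeded, otherwise
 * switch it back off. */
static void vxge_adaptive_tx_moderation_sketch(struct vxge_fifo *fifo,
                                               struct __vxge_hw_fifo *hw_fifo)
{
        fifo->interrupt_count++;
        if (time_after(jiffies, fifo->jiffies + HZ / 100)) {
                if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
                    hw_fifo->rtimer == 0) {
                        hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
                        vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
                } else if (hw_fifo->rtimer != 0) {
                        hw_fifo->rtimer = 0;
                        vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
                }
                fifo->interrupt_count = 0;
                fifo->jiffies = jiffies;
        }
}

Resetting the count each window keeps the decision based only on the most recent 10 ms of traffic.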
@@ -247,6 +257,11 @@ struct vxge_fifo {
        int tx_steering_type;
        int indicate_max_pkts;
 
+       /* Adaptive interrupt moderation parameters used in T1A */
+       unsigned long interrupt_count;
+       unsigned long jiffies;
+
+       u32 tx_vector_no;
        /* Tx stats */
        struct vxge_fifo_stats stats;
 } ____cacheline_aligned;
@@ -271,6 +286,10 @@ struct vxge_ring {
         */
        int driver_id;
 
+       /* Adaptive interrupt moderation parameters used in T1A */
+       unsigned long interrupt_count;
+       unsigned long jiffies;
+
        /* copy of the flag indicating whether rx_csum is to be used */
        u32 rx_csum:1,
            rx_hwts:1;
@@ -286,7 +305,7 @@ struct vxge_ring {
 
        int vlan_tag_strip;
        struct vlan_group *vlgrp;
-       int rx_vector_no;
+       u32 rx_vector_no;
        enum vxge_hw_status last_status;
 
        /* Rx stats */
index 4c10d6c4075fdd8092a6f9dc599ae81073d2cb85..8674f331311c111afbbf6fd87f055ed731b4a681 100644 (file)
@@ -218,6 +218,68 @@ exit:
        return status;
 }
 
+void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
+{
+       struct vxge_hw_vpath_reg __iomem *vp_reg;
+       struct vxge_hw_vp_config *config;
+       u64 val64;
+
+       if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
+               return;
+
+       vp_reg = fifo->vp_reg;
+       config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
+
+       if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
+               config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
+               val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+               val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+               fifo->tim_tti_cfg1_saved = val64;
+               writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+       }
+}
+
+void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
+{
+       u64 val64 = ring->tim_rti_cfg1_saved;
+
+       val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+       ring->tim_rti_cfg1_saved = val64;
+       writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
+}
+
+void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
+{
+       u64 val64 = fifo->tim_tti_cfg3_saved;
+       u64 timer = (fifo->rtimer * 1000) / 272;
+
+       val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
+       if (timer)
+               val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
+                       VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
+
+       writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
+       /* tti_cfg3_saved is not updated again because it is
+        * initialized at one place only - init time.
+        */
+}
+
+void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
+{
+       u64 val64 = ring->tim_rti_cfg3_saved;
+       u64 timer = (ring->rtimer * 1000) / 272;
+
+       val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
+       if (timer)
+               val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
+                       VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
+
+       writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
+       /* rti_cfg3_saved is not updated again because it is
+        * initialized at one place only - init time.
+        */
+}
+
 /**
  * vxge_hw_channel_msix_mask - Mask MSIX Vector.
  * @channel: Channel for rx or tx handle
@@ -253,6 +315,23 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
                &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
 }
 
+/**
+ * vxge_hw_channel_msix_clear - Clear the MSIX Vector.
+ * @channel: Channel for rx or tx handle
+ * @msix_id:  MSI ID
+ *
+ * The function clears the msix interrupt for the given msix_id; when the
+ * device is configured in MSI-X one-shot mode this re-arms the vector.
+ */
+void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
+{
+       __vxge_hw_pio_mem_write32_upper(
+               (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
+               &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
+}
+
 /**
  * vxge_hw_device_set_intr_type - Updates the configuration
  *             with new interrupt type.
@@ -2190,20 +2269,15 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
 
        if (vpath->hldev->config.intr_mode ==
                                        VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
+               __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
+                               VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
+                               0, 32), &vp_reg->one_shot_vect0_en);
                __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
                                VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
                                0, 32), &vp_reg->one_shot_vect1_en);
-       }
-
-       if (vpath->hldev->config.intr_mode ==
-               VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
                __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
                                VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
                                0, 32), &vp_reg->one_shot_vect2_en);
-
-               __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
-                               VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
-                               0, 32), &vp_reg->one_shot_vect3_en);
        }
 }
 
@@ -2228,6 +2302,32 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
                &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
 }
 
+/**
+ * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
+ * @vp: Virtual Path handle.
+ * @msix_id:  MSI ID
+ *
+ * The function clears the msix interrupt for the given msix_id. In MSI-X
+ * one-shot mode the one-shot vector is cleared; otherwise the regular
+ * mask vector is cleared.
+ */
+void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
+{
+       struct __vxge_hw_device *hldev = vp->vpath->hldev;
+
+       if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT))
+               __vxge_hw_pio_mem_write32_upper(
+                       (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
+                       &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
+       else
+               __vxge_hw_pio_mem_write32_upper(
+                       (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
+                       &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
+}
+
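For reference, a sketch of the intended ordering in one-shot mode using only the helper added above; the surrounding interrupt handler and the way the vpath handle reaches it are assumptions, not part of this patch.

/* Illustrative only: in MSIX_ONE_SHOT mode a fired vector is left masked
 * until the driver clears it, so the clear comes after the event has been
 * serviced (for example, after scheduling NAPI). */
static void vxge_service_and_rearm_sketch(struct __vxge_hw_vpath_handle *vp,
                                          int msix_id)
{
        /* ... process completions or schedule polling here ... */

        vxge_hw_vpath_msix_clear(vp, msix_id);
}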
 /**
  * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
  * @vp: Virtual Path handle.
index d48486d6afa1ccb0b955df802edc03cd25fe1e58..9d9dfda4c7abd66937f7cbdd646cce4d813c7e2d 100644 (file)
@@ -2142,6 +2142,10 @@ void vxge_hw_device_clear_tx_rx(
  *  Virtual Paths
  */
 
+void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring);
+
+void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo);
+
 u32 vxge_hw_vpath_id(
        struct __vxge_hw_vpath_handle *vpath_handle);
 
@@ -2245,6 +2249,8 @@ void
 vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
                        int msix_id);
 
+void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id);
+
 void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
 
 void
@@ -2269,6 +2275,9 @@ vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
 void
 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
 
+void
+vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id);
+
 void
 vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
                                 void **dtrh);
@@ -2282,7 +2291,8 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
 int
 vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
 
-void
-vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
+void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo);
+
+void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring);
 
 #endif
index ad2f99b9bcf30731f81a97de0eef012e3c52d512..581e21525e85cbc9a8da097f0a7e3f0f71d3fe85 100644 (file)
@@ -16,8 +16,8 @@
 
 #define VXGE_VERSION_MAJOR     "2"
 #define VXGE_VERSION_MINOR     "5"
-#define VXGE_VERSION_FIX       "1"
-#define VXGE_VERSION_BUILD     "22082"
+#define VXGE_VERSION_FIX       "2"
+#define VXGE_VERSION_BUILD     "22259"
 #define VXGE_VERSION_FOR       "k"
 
 #define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
index b4338f3893948bdf52e97ab4df4cbc012ab1efa8..7aeb113cbb908e78ce9e0d2afc875850b7b324e4 100644 (file)
@@ -274,6 +274,7 @@ source "drivers/net/wireless/b43legacy/Kconfig"
 source "drivers/net/wireless/hostap/Kconfig"
 source "drivers/net/wireless/ipw2x00/Kconfig"
 source "drivers/net/wireless/iwlwifi/Kconfig"
+source "drivers/net/wireless/iwlegacy/Kconfig"
 source "drivers/net/wireless/iwmc3200wifi/Kconfig"
 source "drivers/net/wireless/libertas/Kconfig"
 source "drivers/net/wireless/orinoco/Kconfig"
index 9760561a27a50b411f004e6ed50dee66bf95428f..ddd3fb6ba1d3ded37344bee55ad4dbd8501bb8fc 100644 (file)
@@ -24,7 +24,7 @@ obj-$(CONFIG_B43LEGACY)               += b43legacy/
 obj-$(CONFIG_ZD1211RW)         += zd1211rw/
 obj-$(CONFIG_RTL8180)          += rtl818x/
 obj-$(CONFIG_RTL8187)          += rtl818x/
-obj-$(CONFIG_RTL8192CE)                += rtlwifi/
+obj-$(CONFIG_RTLWIFI)          += rtlwifi/
 
 # 16-bit wireless PCMCIA client drivers
 obj-$(CONFIG_PCMCIA_RAYCS)     += ray_cs.o
@@ -41,7 +41,8 @@ obj-$(CONFIG_ADM8211) += adm8211.o
 
 obj-$(CONFIG_MWL8K)    += mwl8k.o
 
-obj-$(CONFIG_IWLWIFI)  += iwlwifi/
+obj-$(CONFIG_IWLAGN)   += iwlwifi/
+obj-$(CONFIG_IWLWIFI_LEGACY)   += iwlegacy/
 obj-$(CONFIG_RT2X00)   += rt2x00/
 
 obj-$(CONFIG_P54_COMMON)       += p54/
index f9aa1bc0a94756465bbd4d8ac7660cb3b95718c7..afe2cbc6cb24c26b77a0d309bcc167584daab9de 100644 (file)
@@ -1658,7 +1658,7 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
 }
 
 /* Put adm8211_tx_hdr on skb and transmit */
-static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
 {
        struct adm8211_tx_hdr *txhdr;
        size_t payload_len, hdrlen;
@@ -1707,8 +1707,6 @@ static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
        txhdr->retry_limit = info->control.rates[0].count;
 
        adm8211_tx_raw(dev, skb, plcp_signal, hdrlen);
-
-       return NETDEV_TX_OK;
 }
 
 static int adm8211_alloc_rings(struct ieee80211_hw *dev)
index 1476314afa8a5dfb2811d4805d36c3eef2602d5b..298601436ee2631e2eaa8afc9b3f3989ea459e0b 100644 (file)
@@ -1728,7 +1728,7 @@ static void at76_mac80211_tx_callback(struct urb *urb)
        ieee80211_wake_queues(priv->hw);
 }
 
-static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct at76_priv *priv = hw->priv;
        struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer;
@@ -1741,7 +1741,8 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        if (priv->tx_urb->status == -EINPROGRESS) {
                wiphy_err(priv->hw->wiphy,
                          "%s called while tx urb is pending\n", __func__);
-               return NETDEV_TX_BUSY;
+               dev_kfree_skb_any(skb);
+               return;
        }
 
        /* The following code lines are important when the device is going to
@@ -1755,7 +1756,8 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                if (compare_ether_addr(priv->bssid, mgmt->bssid)) {
                        memcpy(priv->bssid, mgmt->bssid, ETH_ALEN);
                        ieee80211_queue_work(hw, &priv->work_join_bssid);
-                       return NETDEV_TX_BUSY;
+                       dev_kfree_skb_any(skb);
+                       return;
                }
        }
 
@@ -1795,8 +1797,6 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                                  priv->tx_urb,
                                  priv->tx_urb->hcpriv, priv->tx_urb->complete);
        }
-
-       return 0;
 }
 
 static int at76_mac80211_start(struct ieee80211_hw *hw)
index d7a4799d20fbd7c779588cf154651c01d8cc1217..7b9672b0d090384fcf6e9eb430cf28f64faa445d 100644 (file)
@@ -1,8 +1,10 @@
 config AR9170_USB
-       tristate "Atheros AR9170 802.11n USB support"
+       tristate "Atheros AR9170 802.11n USB support (OBSOLETE)"
        depends on USB && MAC80211
        select FW_LOADER
        help
+         This driver is going to get replaced by carl9170.
+         This driver will be replaced by carl9170.
          This is a driver for the Atheros "otus" 802.11n USB devices.
 
          These devices require additional firmware (2 files).
index 4f845f80c0984ea054481979d9e928dc5ce0f290..371e4ce495287f373f2955ab876c9d6ce0b6b2da 100644 (file)
@@ -224,7 +224,7 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
 int ar9170_nag_limiter(struct ar9170 *ar);
 
 /* MAC */
-int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
 int ar9170_init_mac(struct ar9170 *ar);
 int ar9170_set_qos(struct ar9170 *ar);
 int ar9170_update_multicast(struct ar9170 *ar, const u64 mc_hast);
index 32bf79e6a320ff2b04722b8a17d9cd7b38088303..b761fec0d72174ce45965dcf3e1d6d633abf354f 100644 (file)
@@ -1475,7 +1475,7 @@ static void ar9170_tx(struct ar9170 *ar)
                                     msecs_to_jiffies(AR9170_JANITOR_DELAY));
 }
 
-int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct ar9170 *ar = hw->priv;
        struct ieee80211_tx_info *info;
@@ -1493,11 +1493,10 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        skb_queue_tail(&ar->tx_pending[queue], skb);
 
        ar9170_tx(ar);
-       return NETDEV_TX_OK;
+       return;
 
 err_free:
        dev_kfree_skb_any(skb);
-       return NETDEV_TX_OK;
 }
 
 static int ar9170_op_add_interface(struct ieee80211_hw *hw,
@@ -1945,7 +1944,8 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
 static int ar9170_ampdu_action(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif,
                               enum ieee80211_ampdu_mlme_action action,
-                              struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+                              struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                              u8 buf_size)
 {
        switch (action) {
        case IEEE80211_AMPDU_RX_START:
index e43210c8585c20e14e8ec91d1923e2081d0a9769..a6c6a466000f737ab8ca0c1f8737031faaba55ec 100644 (file)
@@ -108,12 +108,14 @@ enum ath_cipher {
  * struct ath_ops - Register read/write operations
  *
  * @read: Register read
+ * @multi_read: Multiple register read
  * @write: Register write
  * @enable_write_buffer: Enable multiple register writes
  * @write_flush: flush buffered register writes and disable buffering
  */
 struct ath_ops {
        unsigned int (*read)(void *, u32 reg_offset);
+       void (*multi_read)(void *, u32 *addr, u32 *val, u16 count);
        void (*write)(void *, u32 val, u32 reg_offset);
        void (*enable_write_buffer)(void *);
        void (*write_flush) (void *);
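A sketch of how a caller might go through the new hook, falling back to repeated single reads when the bus does not provide a burst read; the helper name is made up for illustration and only the struct ath_ops members shown above are assumed.

/* Illustrative dispatch through struct ath_ops. */
static void ath_reg_multi_read_sketch(const struct ath_ops *ops, void *priv,
                                      u32 *addr, u32 *val, u16 count)
{
        u16 i;

        if (ops->multi_read) {
                ops->multi_read(priv, addr, val, count);
                return;
        }

        for (i = 0; i < count; i++)
                val[i] = ops->read(priv, addr[i]);
}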
index e0793319389d19a7aa27b52a3346f30bb68366cc..e18a9aa7b6ca3d2c854c92a2715d51823a915c99 100644 (file)
@@ -40,6 +40,17 @@ config ATH5K_DEBUG
 
          modprobe ath5k debug=0x00000400
 
+config ATH5K_TRACER
+       bool "Atheros 5xxx tracer"
+       depends on ATH5K
+       depends on EVENT_TRACING
+       ---help---
+         Say Y here to enable tracepoints for the ath5k driver
+         using the kernel tracing infrastructure.  Select this
+         option if you are interested in debugging the driver.
+
+         If unsure, say N.
+
 config ATH5K_AHB
        bool "Atheros 5xxx AHB bus support"
        depends on (ATHEROS_AR231X && !PCI)
index 707cde149248a0570cd847fb60a002065bf94886..ae84b86c3bf2e1c78b6e593935a1184d6d5a0acf 100644 (file)
@@ -31,7 +31,8 @@ static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz)
        *csz = L1_CACHE_BYTES >> 2;
 }
 
-bool ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
+static bool
+ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
 {
        struct ath5k_softc *sc = common->priv;
        struct platform_device *pdev = to_platform_device(sc->dev);
@@ -46,10 +47,10 @@ bool ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
 
        eeprom += off;
        if (eeprom > eeprom_end)
-               return -EINVAL;
+               return false;
 
        *data = *eeprom;
-       return 0;
+       return true;
 }
 
 int ath5k_hw_read_srev(struct ath5k_hw *ah)
index 407e39c2b10bae1de88352360b12a792584d10ff..0ee54eb333de08303438df1dd9e699b636276e0d 100644 (file)
 /* Initial values */
 #define        AR5K_INIT_CYCRSSI_THR1                  2
 
-/* Tx retry limits */
-#define AR5K_INIT_SH_RETRY                     10
-#define AR5K_INIT_LG_RETRY                     AR5K_INIT_SH_RETRY
-/* For station mode */
-#define AR5K_INIT_SSH_RETRY                    32
-#define AR5K_INIT_SLG_RETRY                    AR5K_INIT_SSH_RETRY
-#define AR5K_INIT_TX_RETRY                     10
-
+/* Tx retry limit defaults from standard */
+#define AR5K_INIT_RETRY_SHORT                  7
+#define AR5K_INIT_RETRY_LONG                   4
 
 /* Slot time */
 #define AR5K_INIT_SLOT_TIME_TURBO              6
@@ -1057,7 +1052,9 @@ struct ath5k_hw {
 #define ah_modes               ah_capabilities.cap_mode
 #define ah_ee_version          ah_capabilities.cap_eeprom.ee_version
 
-       u32                     ah_limit_tx_retries;
+       u8                      ah_retry_long;
+       u8                      ah_retry_short;
+
        u8                      ah_coverage_class;
        bool                    ah_ack_bitrate_high;
        u8                      ah_bwmode;
@@ -1067,7 +1064,6 @@ struct ath5k_hw {
        u8                      ah_ant_mode;
        u8                      ah_tx_ant;
        u8                      ah_def_ant;
-       bool                    ah_software_retry;
 
        struct ath5k_capabilities ah_capabilities;
 
@@ -1162,6 +1158,26 @@ void ath5k_hw_deinit(struct ath5k_hw *ah);
 int ath5k_sysfs_register(struct ath5k_softc *sc);
 void ath5k_sysfs_unregister(struct ath5k_softc *sc);
 
+/* base.c */
+struct ath5k_buf;
+struct ath5k_txq;
+
+void set_beacon_filter(struct ieee80211_hw *hw, bool enable);
+bool ath_any_vif_assoc(struct ath5k_softc *sc);
+void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
+                   struct ath5k_txq *txq);
+int ath5k_init_hw(struct ath5k_softc *sc);
+int ath5k_stop_hw(struct ath5k_softc *sc);
+void ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif);
+void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
+                                       struct ieee80211_vif *vif);
+int ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan);
+void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
+int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void ath5k_beacon_config(struct ath5k_softc *sc);
+void ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
+void ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
+
 /*Chip id helper functions */
 const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val);
 int ath5k_hw_read_srev(struct ath5k_hw *ah);
@@ -1250,6 +1266,8 @@ int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
 int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
                            enum ath5k_tx_queue queue_type,
                            struct ath5k_txq_info *queue_info);
+void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
+                                 unsigned int queue);
 u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
 void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
 int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
index cdac5cff017768c3f780cb41e2a9264486482e68..bc82405604884dadfb293c60817b4f2a93d3a40b 100644 (file)
@@ -118,8 +118,8 @@ int ath5k_hw_init(struct ath5k_softc *sc)
        ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
        ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
        ah->ah_imr = 0;
-       ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
-       ah->ah_software_retry = false;
+       ah->ah_retry_short = AR5K_INIT_RETRY_SHORT;
+       ah->ah_retry_long = AR5K_INIT_RETRY_LONG;
        ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
        ah->ah_noise_floor = -95;       /* until first NF calibration is run */
        sc->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
@@ -220,7 +220,8 @@ int ath5k_hw_init(struct ath5k_softc *sc)
                        ah->ah_radio = AR5K_RF5112;
                        ah->ah_single_chip = false;
                        ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5112B;
-               } else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4)) {
+               } else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4) ||
+                       ah->ah_mac_version == (AR5K_SREV_AR2315_R6 >> 4)) {
                        ah->ah_radio = AR5K_RF2316;
                        ah->ah_single_chip = true;
                        ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2316;
index 09ae4ef0fd51fbf2dda13c6b138d76f75007577c..91411e9b4b6815c12cfcfa306be1aaf97e53164c 100644 (file)
@@ -61,6 +61,9 @@
 #include "debug.h"
 #include "ani.h"
 
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
 int ath5k_modparam_nohwcrypt;
 module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -241,74 +244,69 @@ static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *re
 * Channel/mode setup *
 \********************/
 
-/*
- * Convert IEEE channel number to MHz frequency.
- */
-static inline short
-ath5k_ieee2mhz(short chan)
-{
-       if (chan <= 14 || chan >= 27)
-               return ieee80211chan2mhz(chan);
-       else
-               return 2212 + chan * 20;
-}
-
 /*
  * Returns true for the channel numbers used without all_channels modparam.
  */
-static bool ath5k_is_standard_channel(short chan)
+static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
 {
-       return ((chan <= 14) ||
-               /* UNII 1,2 */
-               ((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
+       if (band == IEEE80211_BAND_2GHZ && chan <= 14)
+               return true;
+
+       return  /* UNII 1,2 */
+               (((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
                /* midband */
                ((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
                /* UNII-3 */
-               ((chan & 3) == 1 && chan >= 149 && chan <= 165));
+               ((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
+               /* 802.11j 5.030-5.080 GHz (20MHz) */
+               (chan == 8 || chan == 12 || chan == 16) ||
+               /* 802.11j 4.9GHz (20MHz) */
+               (chan == 184 || chan == 188 || chan == 192 || chan == 196));
 }
 
 static unsigned int
-ath5k_copy_channels(struct ath5k_hw *ah,
-               struct ieee80211_channel *channels,
-               unsigned int mode,
-               unsigned int max)
+ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
+               unsigned int mode, unsigned int max)
 {
-       unsigned int i, count, size, chfreq, freq, ch;
-
-       if (!test_bit(mode, ah->ah_modes))
-               return 0;
+       unsigned int count, size, chfreq, freq, ch;
+       enum ieee80211_band band;
 
        switch (mode) {
        case AR5K_MODE_11A:
                /* 1..220, but 2GHz frequencies are filtered by check_channel */
-               size = 220 ;
+               size = 220;
                chfreq = CHANNEL_5GHZ;
+               band = IEEE80211_BAND_5GHZ;
                break;
        case AR5K_MODE_11B:
        case AR5K_MODE_11G:
                size = 26;
                chfreq = CHANNEL_2GHZ;
+               band = IEEE80211_BAND_2GHZ;
                break;
        default:
                ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n");
                return 0;
        }
 
-       for (i = 0, count = 0; i < size && max > 0; i++) {
-               ch = i + 1 ;
-               freq = ath5k_ieee2mhz(ch);
+       count = 0;
+       for (ch = 1; ch <= size && count < max; ch++) {
+               freq = ieee80211_channel_to_frequency(ch, band);
+
+               if (freq == 0) /* mapping failed - not a standard channel */
+                       continue;
 
                /* Check if channel is supported by the chipset */
                if (!ath5k_channel_ok(ah, freq, chfreq))
                        continue;
 
-               if (!modparam_all_channels && !ath5k_is_standard_channel(ch))
+               if (!modparam_all_channels &&
+                   !ath5k_is_standard_channel(ch, band))
                        continue;
 
                /* Write channel info and increment counter */
                channels[count].center_freq = freq;
-               channels[count].band = (chfreq == CHANNEL_2GHZ) ?
-                       IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+               channels[count].band = band;
                switch (mode) {
                case AR5K_MODE_11A:
                case AR5K_MODE_11G:
@@ -319,7 +317,6 @@ ath5k_copy_channels(struct ath5k_hw *ah,
                }
 
                count++;
-               max--;
        }
 
        return count;
@@ -364,7 +361,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
                sband->n_bitrates = 12;
 
                sband->channels = sc->channels;
-               sband->n_channels = ath5k_copy_channels(ah, sband->channels,
+               sband->n_channels = ath5k_setup_channels(ah, sband->channels,
                                        AR5K_MODE_11G, max_c);
 
                hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
@@ -390,7 +387,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
                }
 
                sband->channels = sc->channels;
-               sband->n_channels = ath5k_copy_channels(ah, sband->channels,
+               sband->n_channels = ath5k_setup_channels(ah, sband->channels,
                                        AR5K_MODE_11B, max_c);
 
                hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
@@ -410,7 +407,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
                sband->n_bitrates = 8;
 
                sband->channels = &sc->channels[count_c];
-               sband->n_channels = ath5k_copy_channels(ah, sband->channels,
+               sband->n_channels = ath5k_setup_channels(ah, sband->channels,
                                        AR5K_MODE_11A, max_c);
 
                hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
@@ -445,18 +442,6 @@ ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
        return ath5k_reset(sc, chan, true);
 }
 
-static void
-ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
-{
-       sc->curmode = mode;
-
-       if (mode == AR5K_MODE_11A) {
-               sc->curband = &sc->sbands[IEEE80211_BAND_5GHZ];
-       } else {
-               sc->curband = &sc->sbands[IEEE80211_BAND_2GHZ];
-       }
-}
-
 struct ath_vif_iter_data {
        const u8        *hw_macaddr;
        u8              mask[ETH_ALEN];
@@ -569,7 +554,7 @@ ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
                        "hw_rix out of bounds: %x\n", hw_rix))
                return 0;
 
-       rix = sc->rate_idx[sc->curband->band][hw_rix];
+       rix = sc->rate_idx[sc->curchan->band][hw_rix];
        if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
                rix = 0;
 
@@ -1376,10 +1361,10 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
         * right now, so it's not too bad...
         */
        rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp);
-       rxs->flag |= RX_FLAG_TSFT;
+       rxs->flag |= RX_FLAG_MACTIME_MPDU;
 
        rxs->freq = sc->curchan->center_freq;
-       rxs->band = sc->curband->band;
+       rxs->band = sc->curchan->band;
 
        rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi;
 
@@ -1394,10 +1379,10 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
        rxs->flag |= ath5k_rx_decrypted(sc, skb, rs);
 
        if (rxs->rate_idx >= 0 && rs->rs_rate ==
-           sc->curband->bitrates[rxs->rate_idx].hw_value_short)
+           sc->sbands[sc->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
                rxs->flag |= RX_FLAG_SHORTPRE;
 
-       ath5k_debug_dump_skb(sc, skb, "RX  ", 0);
+       trace_ath5k_rx(sc, skb);
 
        ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi);
 
@@ -1533,7 +1518,7 @@ unlock:
 * TX Handling *
 \*************/
 
-int
+void
 ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
               struct ath5k_txq *txq)
 {
@@ -1542,7 +1527,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
        unsigned long flags;
        int padsize;
 
-       ath5k_debug_dump_skb(sc, skb, "TX  ", 1);
+       trace_ath5k_tx(sc, skb, txq);
 
        /*
         * The hardware expects the header padded to 4 byte boundaries.
@@ -1582,16 +1567,15 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
                spin_unlock_irqrestore(&sc->txbuflock, flags);
                goto drop_packet;
        }
-       return NETDEV_TX_OK;
+       return;
 
 drop_packet:
        dev_kfree_skb_any(skb);
-       return NETDEV_TX_OK;
 }
 
 static void
 ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
-                        struct ath5k_tx_status *ts)
+                        struct ath5k_txq *txq, struct ath5k_tx_status *ts)
 {
        struct ieee80211_tx_info *info;
        int i;
@@ -1643,6 +1627,7 @@ ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
        else
                sc->stats.antenna_tx[0]++; /* invalid */
 
+       trace_ath5k_tx_complete(sc, skb, txq, ts);
        ieee80211_tx_status(sc->hw, skb);
 }
 
@@ -1679,7 +1664,7 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
 
                        dma_unmap_single(sc->dev, bf->skbaddr, skb->len,
                                        DMA_TO_DEVICE);
-                       ath5k_tx_frame_completed(sc, skb, &ts);
+                       ath5k_tx_frame_completed(sc, skb, txq, &ts);
                }
 
                /*
@@ -1821,8 +1806,6 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
                goto out;
        }
 
-       ath5k_debug_dump_skb(sc, skb, "BC  ", 1);
-
        ath5k_txbuf_free_skb(sc, avf->bbuf);
        avf->bbuf->skb = skb;
        ret = ath5k_beacon_setup(sc, avf->bbuf);
@@ -1917,6 +1900,8 @@ ath5k_beacon_send(struct ath5k_softc *sc)
                        sc->opmode == NL80211_IFTYPE_MESH_POINT)
                ath5k_beacon_update(sc->hw, vif);
 
+       trace_ath5k_tx(sc, bf->skb, &sc->txqs[sc->bhalq]);
+
        ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
        ath5k_hw_start_tx_dma(ah, sc->bhalq);
        ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
@@ -2417,7 +2402,8 @@ ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops)
        /* set up multi-rate retry capabilities */
        if (sc->ah->ah_version == AR5K_AR5212) {
                hw->max_rates = 4;
-               hw->max_rate_tries = 11;
+               hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
+                                        AR5K_INIT_RETRY_LONG);
        }
 
        hw->vif_data_size = sizeof(struct ath5k_vif);
@@ -2554,7 +2540,6 @@ ath5k_init_hw(struct ath5k_softc *sc)
         * and then setup of the interrupt mask.
         */
        sc->curchan = sc->hw->conf.channel;
-       sc->curband = &sc->sbands[sc->curchan->band];
        sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
                AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
                AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
@@ -2681,10 +2666,8 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
         * so we should also free any remaining
         * tx buffers */
        ath5k_drain_tx_buffs(sc);
-       if (chan) {
+       if (chan)
                sc->curchan = chan;
-               sc->curband = &sc->sbands[chan->band];
-       }
        ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL,
                                                                skip_pcu);
        if (ret) {
@@ -2782,12 +2765,6 @@ ath5k_init(struct ieee80211_hw *hw)
                goto err;
        }
 
-       /* NB: setup here so ath5k_rate_update is happy */
-       if (test_bit(AR5K_MODE_11A, ah->ah_modes))
-               ath5k_setcurmode(sc, AR5K_MODE_11A);
-       else
-               ath5k_setcurmode(sc, AR5K_MODE_11B);
-
        /*
         * Allocate tx+rx descriptors and populate the lists.
         */
index 6d511476e4d2695c1b0a8edb1eac552620cd06eb..8f919dca95f1bd64a8494c337d9b2ebb52c4e24a 100644 (file)
@@ -183,8 +183,6 @@ struct ath5k_softc {
        enum nl80211_iftype     opmode;
        struct ath5k_hw         *ah;            /* Atheros HW */
 
-       struct ieee80211_supported_band         *curband;
-
 #ifdef CONFIG_ATH5K_DEBUG
        struct ath5k_dbg_info   debug;          /* debug info */
 #endif /* CONFIG_ATH5K_DEBUG */
@@ -202,7 +200,6 @@ struct ath5k_softc {
 #define ATH_STAT_STARTED       4               /* opened & irqs enabled */
 
        unsigned int            filter_flags;   /* HW flags, AR5K_RX_FILTER_* */
-       unsigned int            curmode;        /* current phy mode */
        struct ieee80211_channel *curchan;      /* current h/w channel */
 
        u16                     nvifs;
index 31cad80e9b01f84c17e536371ce25f6887ca74e4..f77e8a703c5cbcd7212751509828db9efe46f74f 100644 (file)
  */
 int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
 {
+       struct ath5k_capabilities *caps = &ah->ah_capabilities;
        u16 ee_header;
 
        /* Capabilities stored in the EEPROM */
-       ee_header = ah->ah_capabilities.cap_eeprom.ee_header;
+       ee_header = caps->cap_eeprom.ee_header;
 
        if (ah->ah_version == AR5K_AR5210) {
                /*
                 * Set radio capabilities
                 * (The AR5110 only supports the middle 5GHz band)
                 */
-               ah->ah_capabilities.cap_range.range_5ghz_min = 5120;
-               ah->ah_capabilities.cap_range.range_5ghz_max = 5430;
-               ah->ah_capabilities.cap_range.range_2ghz_min = 0;
-               ah->ah_capabilities.cap_range.range_2ghz_max = 0;
+               caps->cap_range.range_5ghz_min = 5120;
+               caps->cap_range.range_5ghz_max = 5430;
+               caps->cap_range.range_2ghz_min = 0;
+               caps->cap_range.range_2ghz_max = 0;
 
                /* Set supported modes */
-               __set_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode);
+               __set_bit(AR5K_MODE_11A, caps->cap_mode);
        } else {
                /*
                 * XXX The transceiver supports frequencies from 4920 to 6100 MHz
@@ -56,9 +57,8 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
                 * XXX current ieee80211 implementation because the IEEE
                 * XXX channel mapping does not support negative channel
                 * XXX numbers (2312MHz is channel -19). Of course, this
-                * XXX doesn't matter because these channels are out of range
-                * XXX but some regulation domains like MKK (Japan) will
-                * XXX support frequencies somewhere around 4.8GHz.
+                * XXX doesn't matter because these channels are out of the
+                * XXX legal range.
                 */
 
                /*
@@ -66,13 +66,14 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
                 */
 
                if (AR5K_EEPROM_HDR_11A(ee_header)) {
-                       /* 4920 */
-                       ah->ah_capabilities.cap_range.range_5ghz_min = 5005;
-                       ah->ah_capabilities.cap_range.range_5ghz_max = 6100;
+                       if (ath_is_49ghz_allowed(caps->cap_eeprom.ee_regdomain))
+                               caps->cap_range.range_5ghz_min = 4920;
+                       else
+                               caps->cap_range.range_5ghz_min = 5005;
+                       caps->cap_range.range_5ghz_max = 6100;
 
                        /* Set supported modes */
-                       __set_bit(AR5K_MODE_11A,
-                                       ah->ah_capabilities.cap_mode);
+                       __set_bit(AR5K_MODE_11A, caps->cap_mode);
                }
 
                /* Enable  802.11b if a 2GHz capable radio (2111/5112) is
@@ -81,32 +82,29 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
                    (AR5K_EEPROM_HDR_11G(ee_header) &&
                     ah->ah_version != AR5K_AR5211)) {
                        /* 2312 */
-                       ah->ah_capabilities.cap_range.range_2ghz_min = 2412;
-                       ah->ah_capabilities.cap_range.range_2ghz_max = 2732;
+                       caps->cap_range.range_2ghz_min = 2412;
+                       caps->cap_range.range_2ghz_max = 2732;
 
                        if (AR5K_EEPROM_HDR_11B(ee_header))
-                               __set_bit(AR5K_MODE_11B,
-                                               ah->ah_capabilities.cap_mode);
+                               __set_bit(AR5K_MODE_11B, caps->cap_mode);
 
                        if (AR5K_EEPROM_HDR_11G(ee_header) &&
                            ah->ah_version != AR5K_AR5211)
-                               __set_bit(AR5K_MODE_11G,
-                                               ah->ah_capabilities.cap_mode);
+                               __set_bit(AR5K_MODE_11G, caps->cap_mode);
                }
        }
 
        /* Set number of supported TX queues */
        if (ah->ah_version == AR5K_AR5210)
-               ah->ah_capabilities.cap_queues.q_tx_num =
-                       AR5K_NUM_TX_QUEUES_NOQCU;
+               caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES_NOQCU;
        else
-               ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
+               caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
 
        /* newer hardware has PHY error counters */
        if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
-               ah->ah_capabilities.cap_has_phyerr_counters = true;
+               caps->cap_has_phyerr_counters = true;
        else
-               ah->ah_capabilities.cap_has_phyerr_counters = false;
+               caps->cap_has_phyerr_counters = false;
 
        return 0;
 }
index d2f84d76bb070d9f60d68d7dad02b1536de2ccd6..0230f30e9e9aac1fa59d8d1ddb4faa7012db3d89 100644 (file)
@@ -308,8 +308,6 @@ static const struct {
        { ATH5K_DEBUG_CALIBRATE, "calib",       "periodic calibration" },
        { ATH5K_DEBUG_TXPOWER,  "txpower",      "transmit power setting" },
        { ATH5K_DEBUG_LED,      "led",          "LED management" },
-       { ATH5K_DEBUG_DUMP_RX,  "dumprx",       "print received skb content" },
-       { ATH5K_DEBUG_DUMP_TX,  "dumptx",       "print transmit skb content" },
        { ATH5K_DEBUG_DUMPBANDS, "dumpbands",   "dump bands" },
        { ATH5K_DEBUG_DMA,      "dma",          "dma start/stop" },
        { ATH5K_DEBUG_ANI,      "ani",          "adaptive noise immunity" },
@@ -1035,24 +1033,6 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah)
        spin_unlock_bh(&sc->rxbuflock);
 }
 
-void
-ath5k_debug_dump_skb(struct ath5k_softc *sc,
-                       struct sk_buff *skb, const char *prefix, int tx)
-{
-       char buf[16];
-
-       if (likely(!((tx && (sc->debug.level & ATH5K_DEBUG_DUMP_TX)) ||
-                    (!tx && (sc->debug.level & ATH5K_DEBUG_DUMP_RX)))))
-               return;
-
-       snprintf(buf, sizeof(buf), "%s %s", wiphy_name(sc->hw->wiphy), prefix);
-
-       print_hex_dump_bytes(buf, DUMP_PREFIX_NONE, skb->data,
-               min(200U, skb->len));
-
-       printk(KERN_DEBUG "\n");
-}
-
 void
 ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf)
 {
index 3e34428d5126146b1cab147ef5a82f0858069e04..b0355aef68d3f196394db308f4392d84273245dd 100644 (file)
@@ -116,8 +116,6 @@ enum ath5k_debug_level {
        ATH5K_DEBUG_CALIBRATE   = 0x00000020,
        ATH5K_DEBUG_TXPOWER     = 0x00000040,
        ATH5K_DEBUG_LED         = 0x00000080,
-       ATH5K_DEBUG_DUMP_RX     = 0x00000100,
-       ATH5K_DEBUG_DUMP_TX     = 0x00000200,
        ATH5K_DEBUG_DUMPBANDS   = 0x00000400,
        ATH5K_DEBUG_DMA         = 0x00000800,
        ATH5K_DEBUG_ANI         = 0x00002000,
@@ -151,10 +149,6 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah);
 void
 ath5k_debug_dump_bands(struct ath5k_softc *sc);
 
-void
-ath5k_debug_dump_skb(struct ath5k_softc *sc,
-                       struct sk_buff *skb, const char *prefix, int tx);
-
 void
 ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf);
 
@@ -181,10 +175,6 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah) {}
 static inline void
 ath5k_debug_dump_bands(struct ath5k_softc *sc) {}
 
-static inline void
-ath5k_debug_dump_skb(struct ath5k_softc *sc,
-                       struct sk_buff *skb, const char *prefix, int tx) {}
-
 static inline void
 ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf) {}
 
index 80e625608bac69665b29ff32915d3e8f0d13c92a..b6561f785c6eb1e87886892760f6d15edd772fd8 100644 (file)
@@ -72,7 +72,6 @@ static int
 ath5k_eeprom_init_header(struct ath5k_hw *ah)
 {
        struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
-       int ret;
        u16 val;
        u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX;
 
@@ -192,7 +191,7 @@ static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
        struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
        u32 o = *offset;
        u16 val;
-       int ret, i = 0;
+       int i = 0;
 
        AR5K_EEPROM_READ(o++, val);
        ee->ee_switch_settling[mode]    = (val >> 8) & 0x7f;
@@ -252,7 +251,6 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
        struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
        u32 o = *offset;
        u16 val;
-       int ret;
 
        ee->ee_n_piers[mode] = 0;
        AR5K_EEPROM_READ(o++, val);
@@ -515,7 +513,6 @@ ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
        int o = *offset;
        int i = 0;
        u8 freq1, freq2;
-       int ret;
        u16 val;
 
        ee->ee_n_piers[mode] = 0;
@@ -551,7 +548,7 @@ ath5k_eeprom_init_11a_pcal_freq(struct ath5k_hw *ah, int offset)
 {
        struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
        struct ath5k_chan_pcal_info *pcal = ee->ee_pwr_cal_a;
-       int i, ret;
+       int i;
        u16 val;
        u8 mask;
 
@@ -970,7 +967,6 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
        u32 offset;
        u8 i, c;
        u16 val;
-       int ret;
        u8 pd_gains = 0;
 
        /* Count how many curves we have and
@@ -1228,7 +1224,7 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
        struct ath5k_chan_pcal_info *chinfo;
        u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
        u32 offset;
-       int idx, i, ret;
+       int idx, i;
        u16 val;
        u8 pd_gains = 0;
 
@@ -1419,7 +1415,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
        u8 *rate_target_pwr_num;
        u32 offset;
        u16 val;
-       int ret, i;
+       int i;
 
        offset = AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1);
        rate_target_pwr_num = &ee->ee_rate_target_pwr_num[mode];
@@ -1593,7 +1589,7 @@ ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah)
        struct ath5k_edge_power *rep;
        unsigned int fmask, pmask;
        unsigned int ctl_mode;
-       int ret, i, j;
+       int i, j;
        u32 offset;
        u16 val;
 
@@ -1733,16 +1729,12 @@ int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
        u8 mac_d[ETH_ALEN] = {};
        u32 total, offset;
        u16 data;
-       int octet, ret;
+       int octet;
 
-       ret = ath5k_hw_nvram_read(ah, 0x20, &data);
-       if (ret)
-               return ret;
+       AR5K_EEPROM_READ(0x20, data);
 
        for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
-               ret = ath5k_hw_nvram_read(ah, offset, &data);
-               if (ret)
-                       return ret;
+               AR5K_EEPROM_READ(offset, data);
 
                total += data;
                mac_d[octet + 1] = data & 0xff;
index 7c09e150dbdc640555bb3f7717de1e4a1182b8cc..6511c27d938ef54d81266aaf91f0ffe0a2078bc9 100644 (file)
@@ -241,9 +241,8 @@ enum ath5k_eeprom_freq_bands{
 #define        AR5K_SPUR_SYMBOL_WIDTH_TURBO_100Hz      6250
 
 #define AR5K_EEPROM_READ(_o, _v) do {                  \
-       ret = ath5k_hw_nvram_read(ah, (_o), &(_v));     \
-       if (ret)                                        \
-               return ret;                             \
+       if (!ath5k_hw_nvram_read(ah, (_o), &(_v)))      \
+               return -EIO;                            \
 } while (0)
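Because the macro now returns -EIO on its own when the NVRAM read fails, callers no longer need the local ret variables that the eeprom.c hunks above remove. A minimal, hypothetical caller for illustration; the function name is made up.

/* Illustrative only: AR5K_EEPROM_READ() expands to an early 'return -EIO'
 * on failure, so the enclosing function only has to return int itself. */
static int ath5k_eeprom_read_one_sketch(struct ath5k_hw *ah, u32 offset,
                                        u16 *out)
{
        u16 val;

        AR5K_EEPROM_READ(offset, val);
        *out = val;
        return 0;
}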
 
 #define AR5K_EEPROM_READ_HDR(_o, _v)                                   \
@@ -269,29 +268,6 @@ enum ath5k_ctl_mode {
        AR5K_CTL_MODE_M = 15,
 };
 
-/* Default CTL ids for the 3 main reg domains.
- * Atheros only uses these by default but vendors
- * can have up to 32 different CTLs for different
- * scenarios. Note that theese values are ORed with
- * the mode id (above) so we can have up to 24 CTL
- * datasets out of these 3 main regdomains. That leaves
- * 8 ids that can be used by vendors and since 0x20 is
- * missing from HAL sources i guess this is the set of
- * custom CTLs vendors can use. */
-#define        AR5K_CTL_FCC    0x10
-#define        AR5K_CTL_CUSTOM 0x20
-#define        AR5K_CTL_ETSI   0x30
-#define        AR5K_CTL_MKK    0x40
-
-/* Indicates a CTL with only mode set and
- * no reg domain mapping, such CTLs are used
- * for world roaming domains or simply when
- * a reg domain is not set */
-#define        AR5K_CTL_NO_REGDOMAIN   0xf0
-
-/* Indicates an empty (invalid) CTL */
-#define AR5K_CTL_NO_CTL                0xff
-
 /* Per channel calibration data, used for power table setup */
 struct ath5k_chan_pcal_info_rf5111 {
        /* Power levels in half dbm units
index d76d68c99f7204b1daae1f1fb8f8bcc68547d61e..1fbe3c0b9f0864f50f87803e84af2630fe8072fc 100644 (file)
 
 extern int ath5k_modparam_nohwcrypt;
 
-/* functions used from base.c */
-void set_beacon_filter(struct ieee80211_hw *hw, bool enable);
-bool ath_any_vif_assoc(struct ath5k_softc *sc);
-int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
-                  struct ath5k_txq *txq);
-int ath5k_init_hw(struct ath5k_softc *sc);
-int ath5k_stop_hw(struct ath5k_softc *sc);
-void ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif);
-void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
-                                       struct ieee80211_vif *vif);
-int ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan);
-void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
-int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
-void ath5k_beacon_config(struct ath5k_softc *sc);
-void ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
-void ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
-
 /********************\
 * Mac80211 functions *
 \********************/
 
-static int
+static void
 ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct ath5k_softc *sc = hw->priv;
@@ -77,10 +60,10 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 
        if (WARN_ON(qnum >= sc->ah->ah_capabilities.cap_queues.q_tx_num)) {
                dev_kfree_skb_any(skb);
-               return 0;
+               return;
        }
 
-       return ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
+       ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
 }
 
 
@@ -226,6 +209,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
        struct ath5k_hw *ah = sc->ah;
        struct ieee80211_conf *conf = &hw->conf;
        int ret = 0;
+       int i;
 
        mutex_lock(&sc->lock);
 
@@ -243,6 +227,14 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
                ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
        }
 
+       if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+               ah->ah_retry_long = conf->long_frame_max_tx_count;
+               ah->ah_retry_short = conf->short_frame_max_tx_count;
+
+               for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++)
+                       ath5k_hw_set_tx_retry_limits(ah, i);
+       }
+
        /* TODO:
         * 1) Move this on config_interface and handle each case
         * separately eg. when we have only one STA vif, use
index 7f8c5b0e9d2a78e011ecee22701363c7db8fb66a..66598a0d1df05ad8251f1624acfda7dc72f54574 100644 (file)
@@ -69,7 +69,8 @@ static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz)
 /*
  * Read from eeprom
  */
-bool ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
+static bool
+ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
 {
        struct ath5k_hw *ah = (struct ath5k_hw *) common->ah;
        u32 status, timeout;
@@ -90,15 +91,15 @@ bool ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
                status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
                if (status & AR5K_EEPROM_STAT_RDDONE) {
                        if (status & AR5K_EEPROM_STAT_RDERR)
-                               return -EIO;
+                               return false;
                        *data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
                                        0xffff);
-                       return 0;
+                       return true;
                }
                udelay(15);
        }
 
-       return -ETIMEDOUT;
+       return false;
 }
 
 int ath5k_hw_read_srev(struct ath5k_hw *ah)
index 2c9c9e793d4ef23b2ac1a66df7355d091d0ce777..3343fb9e4940f24012ed07020d7c99dc3dbad0e6 100644 (file)
@@ -228,24 +228,9 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
 /*
  * Set tx retry limits on DCU
  */
-static void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
-                                       unsigned int queue)
+void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
+                                 unsigned int queue)
 {
-       u32 retry_lg, retry_sh;
-
-       /*
-        * Calculate and set retry limits
-        */
-       if (ah->ah_software_retry) {
-               /* XXX Need to test this */
-               retry_lg = ah->ah_limit_tx_retries;
-               retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
-                       AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
-       } else {
-               retry_lg = AR5K_INIT_LG_RETRY;
-               retry_sh = AR5K_INIT_SH_RETRY;
-       }
-
        /* Single data queue on AR5210 */
        if (ah->ah_version == AR5K_AR5210) {
                struct ath5k_txq_info *tq = &ah->ah_txq[queue];
@@ -255,25 +240,26 @@ static void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
 
                ath5k_hw_reg_write(ah,
                        (tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
-                       | AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
-                               AR5K_NODCU_RETRY_LMT_SLG_RETRY)
-                       | AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
-                               AR5K_NODCU_RETRY_LMT_SSH_RETRY)
-                       | AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY)
-                       | AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY),
+                       | AR5K_REG_SM(ah->ah_retry_long,
+                                     AR5K_NODCU_RETRY_LMT_SLG_RETRY)
+                       | AR5K_REG_SM(ah->ah_retry_short,
+                                     AR5K_NODCU_RETRY_LMT_SSH_RETRY)
+                       | AR5K_REG_SM(ah->ah_retry_long,
+                                     AR5K_NODCU_RETRY_LMT_LG_RETRY)
+                       | AR5K_REG_SM(ah->ah_retry_short,
+                                     AR5K_NODCU_RETRY_LMT_SH_RETRY),
                        AR5K_NODCU_RETRY_LMT);
        /* DCU on AR5211+ */
        } else {
                ath5k_hw_reg_write(ah,
-                       AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
-                               AR5K_DCU_RETRY_LMT_SLG_RETRY) |
-                       AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
-                               AR5K_DCU_RETRY_LMT_SSH_RETRY) |
-                       AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) |
-                       AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY),
+                       AR5K_REG_SM(ah->ah_retry_long,
+                                   AR5K_DCU_RETRY_LMT_RTS)
+                       | AR5K_REG_SM(ah->ah_retry_long,
+                                     AR5K_DCU_RETRY_LMT_STA_RTS)
+                       | AR5K_REG_SM(max(ah->ah_retry_long, ah->ah_retry_short),
+                                     AR5K_DCU_RETRY_LMT_STA_DATA),
                        AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
        }
-       return;
 }
 
 /**
index fd14b9103951710853d5f1b9057a3bfed854ae81..e1c9abd8c879cf6375cc4de53304c79eaf390102 100644 (file)
 
 /*
  * DCU retry limit registers
+ * None of these fields allow a zero value.
  */
 #define AR5K_DCU_RETRY_LMT_BASE                0x1080                  /* Register Address -Queue0 DCU_RETRY_LMT */
-#define AR5K_DCU_RETRY_LMT_SH_RETRY    0x0000000f      /* Short retry limit mask */
-#define AR5K_DCU_RETRY_LMT_SH_RETRY_S  0
-#define AR5K_DCU_RETRY_LMT_LG_RETRY    0x000000f0      /* Long retry limit mask */
-#define AR5K_DCU_RETRY_LMT_LG_RETRY_S  4
-#define AR5K_DCU_RETRY_LMT_SSH_RETRY   0x00003f00      /* Station short retry limit mask (?) */
-#define AR5K_DCU_RETRY_LMT_SSH_RETRY_S 8
-#define AR5K_DCU_RETRY_LMT_SLG_RETRY   0x000fc000      /* Station long retry limit mask (?) */
-#define AR5K_DCU_RETRY_LMT_SLG_RETRY_S 14
+#define AR5K_DCU_RETRY_LMT_RTS         0x0000000f      /* RTS failure limit. Transmission fails if no CTS is received after this many RTS attempts */
+#define AR5K_DCU_RETRY_LMT_RTS_S       0
+#define AR5K_DCU_RETRY_LMT_STA_RTS     0x00003f00      /* STA RTS failure limit. If exceeded CW reset */
+#define AR5K_DCU_RETRY_LMT_STA_RTS_S   8
+#define AR5K_DCU_RETRY_LMT_STA_DATA    0x000fc000      /* STA data failure limit. If exceeded CW reset. */
+#define AR5K_DCU_RETRY_LMT_STA_DATA_S  14
 #define        AR5K_QUEUE_DFS_RETRY_LIMIT(_q)  AR5K_QUEUE_REG(AR5K_DCU_RETRY_LMT_BASE, _q)
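The DCU retry-limit word is built by shifting each limit into its field, mirroring the ath5k_hw_set_tx_retry_limits() hunk above. A minimal standalone sketch of that packing, assuming AR5K_REG_SM simply shifts a value into a masked field; the masks and shifts are the ones defined above, and the limit values are example numbers only:

/* Standalone sketch: reg_sm() is a local stand-in for what AR5K_REG_SM is
 * assumed to do (shift a value into its masked field). */
#include <stdint.h>
#include <stdio.h>

#define DCU_RETRY_LMT_RTS          0x0000000f
#define DCU_RETRY_LMT_RTS_S        0
#define DCU_RETRY_LMT_STA_RTS      0x00003f00
#define DCU_RETRY_LMT_STA_RTS_S    8
#define DCU_RETRY_LMT_STA_DATA     0x000fc000
#define DCU_RETRY_LMT_STA_DATA_S   14

#define reg_sm(v, f)   ((((uint32_t)(v)) << f##_S) & (f))

int main(void)
{
	uint32_t retry_long = 4, retry_short = 7;   /* example limits */

	/* long retry feeds RTS and STA_RTS, max(long, short) feeds STA_DATA,
	 * as in the set_tx_retry_limits() hunk above */
	uint32_t val = reg_sm(retry_long, DCU_RETRY_LMT_RTS)
		     | reg_sm(retry_long, DCU_RETRY_LMT_STA_RTS)
		     | reg_sm(retry_long > retry_short ? retry_long : retry_short,
			      DCU_RETRY_LMT_STA_DATA);

	printf("DCU_RETRY_LMT word: 0x%08x\n", val);   /* prints 0x0001c404 */
	return 0;
}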
 
 /*
diff --git a/drivers/net/wireless/ath/ath5k/trace.h b/drivers/net/wireless/ath/ath5k/trace.h
new file mode 100644 (file)
index 0000000..2de68ad
--- /dev/null
@@ -0,0 +1,107 @@
+#if !defined(__TRACE_ATH5K_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __TRACE_ATH5K_H
+
+#include <linux/tracepoint.h>
+#include "base.h"
+
+#ifndef CONFIG_ATH5K_TRACER
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif
+
+struct sk_buff;
+
+#define PRIV_ENTRY  __field(struct ath5k_softc *, priv)
+#define PRIV_ASSIGN __entry->priv = priv
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath5k
+
+TRACE_EVENT(ath5k_rx,
+       TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb),
+       TP_ARGS(priv, skb),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+               __field(unsigned long, skbaddr)
+               __dynamic_array(u8, frame, skb->len)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->skbaddr = (unsigned long) skb;
+               memcpy(__get_dynamic_array(frame), skb->data, skb->len);
+       ),
+       TP_printk(
+               "[%p] RX skb=%lx", __entry->priv, __entry->skbaddr
+       )
+);
+
+TRACE_EVENT(ath5k_tx,
+       TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb,
+                struct ath5k_txq *q),
+
+       TP_ARGS(priv, skb, q),
+
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+               __field(unsigned long, skbaddr)
+               __field(u8, qnum)
+               __dynamic_array(u8, frame, skb->len)
+       ),
+
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->skbaddr = (unsigned long) skb;
+               __entry->qnum = (u8) q->qnum;
+               memcpy(__get_dynamic_array(frame), skb->data, skb->len);
+       ),
+
+       TP_printk(
+               "[%p] TX skb=%lx q=%d", __entry->priv, __entry->skbaddr,
+               __entry->qnum
+       )
+);
+
+TRACE_EVENT(ath5k_tx_complete,
+       TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb,
+                struct ath5k_txq *q, struct ath5k_tx_status *ts),
+
+       TP_ARGS(priv, skb, q, ts),
+
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+               __field(unsigned long, skbaddr)
+               __field(u8, qnum)
+               __field(u8, ts_status)
+               __field(s8, ts_rssi)
+               __field(u8, ts_antenna)
+       ),
+
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->skbaddr = (unsigned long) skb;
+               __entry->qnum = (u8) q->qnum;
+               __entry->ts_status = ts->ts_status;
+               __entry->ts_rssi =  ts->ts_rssi;
+               __entry->ts_antenna = ts->ts_antenna;
+       ),
+
+       TP_printk(
+               "[%p] TX end skb=%lx q=%d stat=%x rssi=%d ant=%x",
+               __entry->priv, __entry->skbaddr, __entry->qnum,
+               __entry->ts_status, __entry->ts_rssi, __entry->ts_antenna
+       )
+);
+
+#endif /* __TRACE_ATH5K_H */
+
+#ifdef CONFIG_ATH5K_TRACER
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/net/wireless/ath/ath5k
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
+
+#endif
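Each TRACE_EVENT() above generates a trace_<name>() helper, and the stub at the top of the file turns those helpers into empty inlines when CONFIG_ATH5K_TRACER is not set. A sketch of the driver side, assuming the usual tracepoint wiring; the function names and call sites below are illustrative, not the actual lines in base.c:

/* Sketch only: assumed call sites, not the driver's real code. */

/* in exactly one .c file, so the tracepoint bodies are emitted once */
#define CREATE_TRACE_POINTS
#include "trace.h"

static void ath5k_rx_path_example(struct ath5k_softc *priv, struct sk_buff *skb)
{
	trace_ath5k_rx(priv, skb);      /* records the full frame via __dynamic_array */
}

static void ath5k_tx_path_example(struct ath5k_softc *priv, struct sk_buff *skb,
				  struct ath5k_txq *txq)
{
	trace_ath5k_tx(priv, skb, txq); /* records the frame plus the queue number */
}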
index aca01621c205213a1b6503047f68c6d7da6dc25e..4d66ca8042ebcd3bd242b7fe6c0dd8edaf684ce6 100644 (file)
@@ -4,7 +4,6 @@ ath9k-y +=      beacon.o \
                main.o \
                recv.o \
                xmit.o \
-               virtual.o \
 
 ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
 ath9k-$(CONFIG_PCI) += pci.o
index 25a6e4417cdb14877e14d87b5a07999fbb0eb2b8..99367210596309c6b1fdfaa0ea6d01db5a00cbed 100644 (file)
@@ -54,7 +54,6 @@ static struct ath_bus_ops ath_ahb_bus_ops  = {
 static int ath_ahb_probe(struct platform_device *pdev)
 {
        void __iomem *mem;
-       struct ath_wiphy *aphy;
        struct ath_softc *sc;
        struct ieee80211_hw *hw;
        struct resource *res;
@@ -92,8 +91,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
 
        irq = res->start;
 
-       hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) +
-                               sizeof(struct ath_softc), &ath9k_ops);
+       hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
        if (hw == NULL) {
                dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
                ret = -ENOMEM;
@@ -103,11 +101,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
        SET_IEEE80211_DEV(hw, &pdev->dev);
        platform_set_drvdata(pdev, hw);
 
-       aphy = hw->priv;
-       sc = (struct ath_softc *) (aphy + 1);
-       aphy->sc = sc;
-       aphy->hw = hw;
-       sc->pri_wiphy = aphy;
+       sc = hw->priv;
        sc->hw = hw;
        sc->dev = &pdev->dev;
        sc->mem = mem;
@@ -151,8 +145,7 @@ static int ath_ahb_remove(struct platform_device *pdev)
        struct ieee80211_hw *hw = platform_get_drvdata(pdev);
 
        if (hw) {
-               struct ath_wiphy *aphy = hw->priv;
-               struct ath_softc *sc = aphy->sc;
+               struct ath_softc *sc = hw->priv;
                void __iomem *mem = sc->mem;
 
                ath9k_deinit_device(sc);
index 5e300bd3d26457fe999cc41185c6cce42b07eb22..76388c6d66923a0f6e455e1d7c597ebe5087ae5f 100644 (file)
@@ -805,7 +805,10 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
 {
        struct ath_common *common = ath9k_hw_common(ah);
 
-       if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
+       if (AR_SREV_9271(ah)) {
+               if (!ar9285_hw_cl_cal(ah, chan))
+                       return false;
+       } else if (AR_SREV_9285_12_OR_LATER(ah)) {
                if (!ar9285_hw_clc(ah, chan))
                        return false;
        } else {
index 4819747fa4c3a1d13a518a51e069dac4604982ef..4a9271802991bdf696d28f9fe042b0d82eb5336e 100644 (file)
@@ -3673,7 +3673,7 @@ static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
                                return;
 
                        reg_pmu_set = (5 << 1) | (7 << 4) | (1 << 8) |
-                                     (7 << 14) | (6 << 17) | (1 << 20) |
+                                     (2 << 14) | (6 << 17) | (1 << 20) |
                                      (3 << 24) | (1 << 28);
 
                        REG_WRITE(ah, AR_PHY_PMU1, reg_pmu_set);
@@ -3959,19 +3959,19 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
 {
 #define POW_SM(_r, _s)     (((_r) & 0x3f) << (_s))
        /* make sure forced gain is not set */
-       REG_WRITE(ah, 0xa458, 0);
+       REG_WRITE(ah, AR_PHY_TX_FORCED_GAIN, 0);
 
        /* Write the OFDM power per rate set */
 
        /* 6 (LSB), 9, 12, 18 (MSB) */
-       REG_WRITE(ah, 0xa3c0,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(0),
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 16) |
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));
 
        /* 24 (LSB), 36, 48, 54 (MSB) */
-       REG_WRITE(ah, 0xa3c4,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(1),
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_54], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_48], 16) |
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_36], 8) |
@@ -3980,14 +3980,14 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
        /* Write the CCK power per rate set */
 
        /* 1L (LSB), reserved, 2L, 2S (MSB) */
-       REG_WRITE(ah, 0xa3c8,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(2),
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
                  /* POW_SM(txPowerTimes2,  8) | this is reserved for AR9003 */
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0));
 
        /* 5.5L (LSB), 5.5S, 11L, 11S (MSB) */
-       REG_WRITE(ah, 0xa3cc,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(3),
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_11S], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_11L], 16) |
                  POW_SM(pPwrArray[ALL_TARGET_LEGACY_5S], 8) |
@@ -3997,7 +3997,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
        /* Write the HT20 power per rate set */
 
        /* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */
-       REG_WRITE(ah, 0xa3d0,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(4),
                  POW_SM(pPwrArray[ALL_TARGET_HT20_5], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_HT20_4], 16) |
                  POW_SM(pPwrArray[ALL_TARGET_HT20_1_3_9_11_17_19], 8) |
@@ -4005,7 +4005,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
            );
 
        /* 6 (LSB), 7, 12, 13 (MSB) */
-       REG_WRITE(ah, 0xa3d4,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(5),
                  POW_SM(pPwrArray[ALL_TARGET_HT20_13], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_HT20_12], 16) |
                  POW_SM(pPwrArray[ALL_TARGET_HT20_7], 8) |
@@ -4013,7 +4013,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
            );
 
        /* 14 (LSB), 15, 20, 21 */
-       REG_WRITE(ah, 0xa3e4,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(9),
                  POW_SM(pPwrArray[ALL_TARGET_HT20_21], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_HT20_20], 16) |
                  POW_SM(pPwrArray[ALL_TARGET_HT20_15], 8) |
@@ -4023,7 +4023,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
        /* Mixed HT20 and HT40 rates */
 
        /* HT20 22 (LSB), HT20 23, HT40 22, HT40 23 (MSB) */
-       REG_WRITE(ah, 0xa3e8,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(10),
                  POW_SM(pPwrArray[ALL_TARGET_HT40_23], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_HT40_22], 16) |
                  POW_SM(pPwrArray[ALL_TARGET_HT20_23], 8) |
@@ -4035,7 +4035,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
         * correct PAR difference between HT40 and HT20/LEGACY
         * 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB)
         */
-       REG_WRITE(ah, 0xa3d8,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(6),
                  POW_SM(pPwrArray[ALL_TARGET_HT40_5], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_HT40_4], 16) |
                  POW_SM(pPwrArray[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
@@ -4043,7 +4043,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
            );
 
        /* 6 (LSB), 7, 12, 13 (MSB) */
-       REG_WRITE(ah, 0xa3dc,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(7),
                  POW_SM(pPwrArray[ALL_TARGET_HT40_13], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_HT40_12], 16) |
                  POW_SM(pPwrArray[ALL_TARGET_HT40_7], 8) |
@@ -4051,7 +4051,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
            );
 
        /* 14 (LSB), 15, 20, 21 */
-       REG_WRITE(ah, 0xa3ec,
+       REG_WRITE(ah, AR_PHY_POWER_TX_RATE(11),
                  POW_SM(pPwrArray[ALL_TARGET_HT40_21], 24) |
                  POW_SM(pPwrArray[ALL_TARGET_HT40_20], 16) |
                  POW_SM(pPwrArray[ALL_TARGET_HT40_15], 8) |
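POW_SM() packs one 6-bit target power into a byte lane, so each REG_WRITE() above carries four per-rate powers in a single 32-bit word. A standalone sketch with made-up power values (not real calibration data):

#include <stdint.h>
#include <stdio.h>

/* same packing as the POW_SM() macro defined in the hunk above */
#define POW_SM(_r, _s)   ((((uint32_t)(_r)) & 0x3f) << (_s))

int main(void)
{
	uint8_t pwr_24 = 0x20, pwr_36 = 0x1e, pwr_48 = 0x1c, pwr_54 = 0x1a;

	/* 24 Mb/s (LSB), 36, 48, 54 Mb/s (MSB) -> one AR_PHY_POWER_TX_RATE word */
	uint32_t word = POW_SM(pwr_54, 24) |
			POW_SM(pwr_48, 16) |
			POW_SM(pwr_36, 8)  |
			POW_SM(pwr_24, 0);

	printf("legacy 24-54 power word: 0x%08x\n", word);  /* prints 0x1a1c1e20 */
	return 0;
}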
index 06fb2c850535f3dc2f84db86a5988bd110c1c160..7f5de6e4448bf40ac2e8af6e439361f442a4d56c 100644 (file)
  */
 static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
 {
-       if (AR_SREV_9485(ah)) {
+       if (AR_SREV_9485_11(ah)) {
+               /* mac */
+               INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
+               INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+                               ar9485_1_1_mac_core,
+                               ARRAY_SIZE(ar9485_1_1_mac_core), 2);
+               INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+                               ar9485_1_1_mac_postamble,
+                               ARRAY_SIZE(ar9485_1_1_mac_postamble), 5);
+
+               /* bb */
+               INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], ar9485_1_1,
+                               ARRAY_SIZE(ar9485_1_1), 2);
+               INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+                               ar9485_1_1_baseband_core,
+                               ARRAY_SIZE(ar9485_1_1_baseband_core), 2);
+               INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+                               ar9485_1_1_baseband_postamble,
+                               ARRAY_SIZE(ar9485_1_1_baseband_postamble), 5);
+
+               /* radio */
+               INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
+               INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+                               ar9485_1_1_radio_core,
+                               ARRAY_SIZE(ar9485_1_1_radio_core), 2);
+               INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+                               ar9485_1_1_radio_postamble,
+                               ARRAY_SIZE(ar9485_1_1_radio_postamble), 2);
+
+               /* soc */
+               INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+                               ar9485_1_1_soc_preamble,
+                               ARRAY_SIZE(ar9485_1_1_soc_preamble), 2);
+               INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+               INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST], NULL, 0, 0);
+
+               /* rx/tx gain */
+               INIT_INI_ARRAY(&ah->iniModesRxGain,
+                               ar9485_common_rx_gain_1_1,
+                               ARRAY_SIZE(ar9485_common_rx_gain_1_1), 2);
+               INIT_INI_ARRAY(&ah->iniModesTxGain,
+                               ar9485_modes_lowest_ob_db_tx_gain_1_1,
+                               ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
+                               5);
+
+               /* Load PCIE SERDES settings from INI */
+
+               /* Awake Setting */
+
+               INIT_INI_ARRAY(&ah->iniPcieSerdes,
+                               ar9485_1_1_pcie_phy_clkreq_disable_L1,
+                               ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
+                               2);
+
+               /* Sleep Setting */
+
+               INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+                               ar9485_1_1_pcie_phy_clkreq_disable_L1,
+                               ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
+                               2);
+       } else if (AR_SREV_9485(ah)) {
                /* mac */
                INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
                INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -85,8 +145,8 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
                /* Sleep Setting */
 
                INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
-                               ar9485_1_0_pcie_phy_pll_on_clkreq_enable_L1,
-                               ARRAY_SIZE(ar9485_1_0_pcie_phy_pll_on_clkreq_enable_L1),
+                               ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1,
+                               ARRAY_SIZE(ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1),
                                2);
        } else {
                /* mac */
@@ -163,7 +223,12 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
        switch (ar9003_hw_get_tx_gain_idx(ah)) {
        case 0:
        default:
-               if (AR_SREV_9485(ah))
+               if (AR_SREV_9485_11(ah))
+                       INIT_INI_ARRAY(&ah->iniModesTxGain,
+                                      ar9485_modes_lowest_ob_db_tx_gain_1_1,
+                                      ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
+                                      5);
+               else if (AR_SREV_9485(ah))
                        INIT_INI_ARRAY(&ah->iniModesTxGain,
                                       ar9485Modes_lowest_ob_db_tx_gain_1_0,
                                       ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0),
@@ -175,10 +240,15 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
                                       5);
                break;
        case 1:
-               if (AR_SREV_9485(ah))
+               if (AR_SREV_9485_11(ah))
+                       INIT_INI_ARRAY(&ah->iniModesTxGain,
+                                      ar9485Modes_high_ob_db_tx_gain_1_1,
+                                      ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_1),
+                                      5);
+               else if (AR_SREV_9485(ah))
                        INIT_INI_ARRAY(&ah->iniModesTxGain,
                                       ar9485Modes_high_ob_db_tx_gain_1_0,
-                                      ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0),
+                                      ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_0),
                                       5);
                else
                        INIT_INI_ARRAY(&ah->iniModesTxGain,
@@ -187,10 +257,15 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
                                       5);
                break;
        case 2:
-               if (AR_SREV_9485(ah))
+               if (AR_SREV_9485_11(ah))
+                       INIT_INI_ARRAY(&ah->iniModesTxGain,
+                                      ar9485Modes_low_ob_db_tx_gain_1_1,
+                                      ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_1),
+                                      5);
+               else if (AR_SREV_9485(ah))
                        INIT_INI_ARRAY(&ah->iniModesTxGain,
                                       ar9485Modes_low_ob_db_tx_gain_1_0,
-                                      ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0),
+                                      ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_0),
                                       5);
                else
                        INIT_INI_ARRAY(&ah->iniModesTxGain,
@@ -199,7 +274,12 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
                                       5);
                break;
        case 3:
-               if (AR_SREV_9485(ah))
+               if (AR_SREV_9485_11(ah))
+                       INIT_INI_ARRAY(&ah->iniModesTxGain,
+                                      ar9485Modes_high_power_tx_gain_1_1,
+                                      ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_1),
+                                      5);
+               else if (AR_SREV_9485(ah))
                        INIT_INI_ARRAY(&ah->iniModesTxGain,
                                       ar9485Modes_high_power_tx_gain_1_0,
                                       ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_0),
@@ -218,7 +298,12 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
        switch (ar9003_hw_get_rx_gain_idx(ah)) {
        case 0:
        default:
-               if (AR_SREV_9485(ah))
+               if (AR_SREV_9485_11(ah))
+                       INIT_INI_ARRAY(&ah->iniModesRxGain,
+                                      ar9485_common_rx_gain_1_1,
+                                      ARRAY_SIZE(ar9485_common_rx_gain_1_1),
+                                      2);
+               else if (AR_SREV_9485(ah))
                        INIT_INI_ARRAY(&ah->iniModesRxGain,
                                       ar9485Common_rx_gain_1_0,
                                       ARRAY_SIZE(ar9485Common_rx_gain_1_0),
@@ -230,7 +315,12 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
                                       2);
                break;
        case 1:
-               if (AR_SREV_9485(ah))
+               if (AR_SREV_9485_11(ah))
+                       INIT_INI_ARRAY(&ah->iniModesRxGain,
+                                      ar9485Common_wo_xlna_rx_gain_1_1,
+                                      ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
+                                      2);
+               else if (AR_SREV_9485(ah))
                        INIT_INI_ARRAY(&ah->iniModesRxGain,
                                       ar9485Common_wo_xlna_rx_gain_1_0,
                                       ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_0),
index 4ceddbbdfcee6d0b24917e6c9bd82e46032dca84..038a0cbfc6e7c9cae862838b51580b419c2571cd 100644 (file)
@@ -615,7 +615,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
                 */
                if (rxsp->status11 & AR_CRCErr)
                        rxs->rs_status |= ATH9K_RXERR_CRC;
-               if (rxsp->status11 & AR_PHYErr) {
+               else if (rxsp->status11 & AR_PHYErr) {
                        phyerr = MS(rxsp->status11, AR_PHYErrCode);
                        /*
                         * If we reach a point here where AR_PostDelimCRCErr is
@@ -638,11 +638,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
                                rxs->rs_phyerr = phyerr;
                        }
 
-               }
-               if (rxsp->status11 & AR_DecryptCRCErr)
+               } else if (rxsp->status11 & AR_DecryptCRCErr)
                        rxs->rs_status |= ATH9K_RXERR_DECRYPT;
-               if (rxsp->status11 & AR_MichaelErr)
+               else if (rxsp->status11 & AR_MichaelErr)
                        rxs->rs_status |= ATH9K_RXERR_MIC;
+
                if (rxsp->status11 & AR_KeyMiss)
                        rxs->rs_status |= ATH9K_RXERR_DECRYPT;
        }
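The hunk above turns independent status checks into an else-if chain, so a frame reports only the highest-priority error (CRC, then PHY, then decrypt-CRC, then Michael MIC), while a key miss is still flagged on its own. A standalone sketch of that precedence, using local stand-in flag names rather than the driver's definitions:

#include <stdint.h>
#include <stdio.h>

#define ERR_CRC      (1u << 0)
#define ERR_PHY      (1u << 1)
#define ERR_DECRYPT  (1u << 2)
#define ERR_MIC      (1u << 3)
#define ERR_KEYMISS  (1u << 4)

static uint32_t classify_rx_errors(uint32_t status)
{
	uint32_t rs_status = 0;

	/* only the first matching error in the chain is reported */
	if (status & ERR_CRC)
		rs_status |= ERR_CRC;
	else if (status & ERR_PHY)
		rs_status |= ERR_PHY;
	else if (status & ERR_DECRYPT)
		rs_status |= ERR_DECRYPT;
	else if (status & ERR_MIC)
		rs_status |= ERR_MIC;

	/* key miss stays outside the chain, reported as a decrypt error */
	if (status & ERR_KEYMISS)
		rs_status |= ERR_DECRYPT;

	return rs_status;
}

int main(void)
{
	/* CRC and PHY set together: only the CRC error is reported (0x1) */
	printf("0x%x\n", classify_rx_errors(ERR_CRC | ERR_PHY));
	return 0;
}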
index 59bab6bd8a743b73d09becce5390ff18ede9ad6f..8bdda2cf9dd75e7ba7fbef00b43579333f553273 100644 (file)
 #define AR_PHY_HEAVYCLIP_40      (AR_SM_BASE + 0x1ac)
 #define AR_PHY_ILLEGAL_TXRATE    (AR_SM_BASE + 0x1b0)
 
+#define AR_PHY_POWER_TX_RATE(_d) (AR_SM_BASE + 0x1c0 + ((_d) << 2))
+
 #define AR_PHY_PWRTX_MAX         (AR_SM_BASE + 0x1f0)
 #define AR_PHY_POWER_TX_SUB      (AR_SM_BASE + 0x1f4)
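The new helper replaces the hard-coded 0xa3c0..0xa3ec writes in ar9003_hw_tx_power_regwrite(); each index steps the address by 4 bytes. A quick standalone check, assuming AR_SM_BASE is 0xa200 (the value implied by the literal addresses being replaced):

#include <stdio.h>

#define AR_SM_BASE                 0xa200  /* assumed; yields 0xa3c0 for index 0 */
#define AR_PHY_POWER_TX_RATE(_d)   (AR_SM_BASE + 0x1c0 + ((_d) << 2))

int main(void)
{
	int d;

	for (d = 0; d <= 11; d++)          /* covers 0xa3c0 .. 0xa3ec */
		printf("AR_PHY_POWER_TX_RATE(%2d) = 0x%04x\n",
		       d, AR_PHY_POWER_TX_RATE(d));
	return 0;
}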
 
index 70de3d89a7b5f1e862fe2f80f883d31ea4b1b06a..eac4d8526fc107ce3a9872735df394b6623f056f 100644 (file)
@@ -940,4 +940,1145 @@ static const u32 ar9485_1_0_mac_core[][2] = {
        {0x000083cc, 0x00000200},
        {0x000083d0, 0x000301ff},
 };
+
+static const u32 ar9485_1_1_mac_core[][2] = {
+       /*  Addr       allmodes */
+       {0x00000008, 0x00000000},
+       {0x00000030, 0x00020085},
+       {0x00000034, 0x00000005},
+       {0x00000040, 0x00000000},
+       {0x00000044, 0x00000000},
+       {0x00000048, 0x00000008},
+       {0x0000004c, 0x00000010},
+       {0x00000050, 0x00000000},
+       {0x00001040, 0x002ffc0f},
+       {0x00001044, 0x002ffc0f},
+       {0x00001048, 0x002ffc0f},
+       {0x0000104c, 0x002ffc0f},
+       {0x00001050, 0x002ffc0f},
+       {0x00001054, 0x002ffc0f},
+       {0x00001058, 0x002ffc0f},
+       {0x0000105c, 0x002ffc0f},
+       {0x00001060, 0x002ffc0f},
+       {0x00001064, 0x002ffc0f},
+       {0x000010f0, 0x00000100},
+       {0x00001270, 0x00000000},
+       {0x000012b0, 0x00000000},
+       {0x000012f0, 0x00000000},
+       {0x0000143c, 0x00000000},
+       {0x0000147c, 0x00000000},
+       {0x00008000, 0x00000000},
+       {0x00008004, 0x00000000},
+       {0x00008008, 0x00000000},
+       {0x0000800c, 0x00000000},
+       {0x00008018, 0x00000000},
+       {0x00008020, 0x00000000},
+       {0x00008038, 0x00000000},
+       {0x0000803c, 0x00000000},
+       {0x00008040, 0x00000000},
+       {0x00008044, 0x00000000},
+       {0x00008048, 0x00000000},
+       {0x0000804c, 0xffffffff},
+       {0x00008054, 0x00000000},
+       {0x00008058, 0x00000000},
+       {0x0000805c, 0x000fc78f},
+       {0x00008060, 0x0000000f},
+       {0x00008064, 0x00000000},
+       {0x00008070, 0x00000310},
+       {0x00008074, 0x00000020},
+       {0x00008078, 0x00000000},
+       {0x0000809c, 0x0000000f},
+       {0x000080a0, 0x00000000},
+       {0x000080a4, 0x02ff0000},
+       {0x000080a8, 0x0e070605},
+       {0x000080ac, 0x0000000d},
+       {0x000080b0, 0x00000000},
+       {0x000080b4, 0x00000000},
+       {0x000080b8, 0x00000000},
+       {0x000080bc, 0x00000000},
+       {0x000080c0, 0x2a800000},
+       {0x000080c4, 0x06900168},
+       {0x000080c8, 0x13881c22},
+       {0x000080cc, 0x01f40000},
+       {0x000080d0, 0x00252500},
+       {0x000080d4, 0x00a00000},
+       {0x000080d8, 0x00400000},
+       {0x000080dc, 0x00000000},
+       {0x000080e0, 0xffffffff},
+       {0x000080e4, 0x0000ffff},
+       {0x000080e8, 0x3f3f3f3f},
+       {0x000080ec, 0x00000000},
+       {0x000080f0, 0x00000000},
+       {0x000080f4, 0x00000000},
+       {0x000080fc, 0x00020000},
+       {0x00008100, 0x00000000},
+       {0x00008108, 0x00000052},
+       {0x0000810c, 0x00000000},
+       {0x00008110, 0x00000000},
+       {0x00008114, 0x000007ff},
+       {0x00008118, 0x000000aa},
+       {0x0000811c, 0x00003210},
+       {0x00008124, 0x00000000},
+       {0x00008128, 0x00000000},
+       {0x0000812c, 0x00000000},
+       {0x00008130, 0x00000000},
+       {0x00008134, 0x00000000},
+       {0x00008138, 0x00000000},
+       {0x0000813c, 0x0000ffff},
+       {0x00008144, 0xffffffff},
+       {0x00008168, 0x00000000},
+       {0x0000816c, 0x00000000},
+       {0x00008170, 0x18486200},
+       {0x00008174, 0x33332210},
+       {0x00008178, 0x00000000},
+       {0x0000817c, 0x00020000},
+       {0x000081c0, 0x00000000},
+       {0x000081c4, 0x33332210},
+       {0x000081d4, 0x00000000},
+       {0x000081ec, 0x00000000},
+       {0x000081f0, 0x00000000},
+       {0x000081f4, 0x00000000},
+       {0x000081f8, 0x00000000},
+       {0x000081fc, 0x00000000},
+       {0x00008240, 0x00100000},
+       {0x00008244, 0x0010f400},
+       {0x00008248, 0x00000800},
+       {0x0000824c, 0x0001e800},
+       {0x00008250, 0x00000000},
+       {0x00008254, 0x00000000},
+       {0x00008258, 0x00000000},
+       {0x0000825c, 0x40000000},
+       {0x00008260, 0x00080922},
+       {0x00008264, 0x9ca00010},
+       {0x00008268, 0xffffffff},
+       {0x0000826c, 0x0000ffff},
+       {0x00008270, 0x00000000},
+       {0x00008274, 0x40000000},
+       {0x00008278, 0x003e4180},
+       {0x0000827c, 0x00000004},
+       {0x00008284, 0x0000002c},
+       {0x00008288, 0x0000002c},
+       {0x0000828c, 0x000000ff},
+       {0x00008294, 0x00000000},
+       {0x00008298, 0x00000000},
+       {0x0000829c, 0x00000000},
+       {0x00008300, 0x00000140},
+       {0x00008314, 0x00000000},
+       {0x0000831c, 0x0000010d},
+       {0x00008328, 0x00000000},
+       {0x0000832c, 0x00000007},
+       {0x00008330, 0x00000302},
+       {0x00008334, 0x00000700},
+       {0x00008338, 0x00ff0000},
+       {0x0000833c, 0x02400000},
+       {0x00008340, 0x000107ff},
+       {0x00008344, 0xa248105b},
+       {0x00008348, 0x008f0000},
+       {0x0000835c, 0x00000000},
+       {0x00008360, 0xffffffff},
+       {0x00008364, 0xffffffff},
+       {0x00008368, 0x00000000},
+       {0x00008370, 0x00000000},
+       {0x00008374, 0x000000ff},
+       {0x00008378, 0x00000000},
+       {0x0000837c, 0x00000000},
+       {0x00008380, 0xffffffff},
+       {0x00008384, 0xffffffff},
+       {0x00008390, 0xffffffff},
+       {0x00008394, 0xffffffff},
+       {0x00008398, 0x00000000},
+       {0x0000839c, 0x00000000},
+       {0x000083a0, 0x00000000},
+       {0x000083a4, 0x0000fa14},
+       {0x000083a8, 0x000f0c00},
+       {0x000083ac, 0x33332210},
+       {0x000083b0, 0x33332210},
+       {0x000083b4, 0x33332210},
+       {0x000083b8, 0x33332210},
+       {0x000083bc, 0x00000000},
+       {0x000083c0, 0x00000000},
+       {0x000083c4, 0x00000000},
+       {0x000083c8, 0x00000000},
+       {0x000083cc, 0x00000200},
+       {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9485_1_1_baseband_core[][2] = {
+       /* Addr       allmodes */
+       {0x00009800, 0xafe68e30},
+       {0x00009804, 0xfd14e000},
+       {0x00009808, 0x9c0a8f6b},
+       {0x0000980c, 0x04800000},
+       {0x00009814, 0x9280c00a},
+       {0x00009818, 0x00000000},
+       {0x0000981c, 0x00020028},
+       {0x00009834, 0x5f3ca3de},
+       {0x00009838, 0x0108ecff},
+       {0x0000983c, 0x14750600},
+       {0x00009880, 0x201fff00},
+       {0x00009884, 0x00001042},
+       {0x000098a4, 0x00200400},
+       {0x000098b0, 0x52440bbe},
+       {0x000098d0, 0x004b6a8e},
+       {0x000098d4, 0x00000820},
+       {0x000098dc, 0x00000000},
+       {0x000098f0, 0x00000000},
+       {0x000098f4, 0x00000000},
+       {0x00009c04, 0x00000000},
+       {0x00009c08, 0x03200000},
+       {0x00009c0c, 0x00000000},
+       {0x00009c10, 0x00000000},
+       {0x00009c14, 0x00046384},
+       {0x00009c18, 0x05b6b440},
+       {0x00009c1c, 0x00b6b440},
+       {0x00009d00, 0xc080a333},
+       {0x00009d04, 0x40206c10},
+       {0x00009d08, 0x009c4060},
+       {0x00009d0c, 0x1883800a},
+       {0x00009d10, 0x01834061},
+       {0x00009d14, 0x00c00400},
+       {0x00009d18, 0x00000000},
+       {0x00009d1c, 0x00000000},
+       {0x00009e08, 0x0038233c},
+       {0x00009e24, 0x9927b515},
+       {0x00009e28, 0x12ef0200},
+       {0x00009e30, 0x06336f77},
+       {0x00009e34, 0x6af6532f},
+       {0x00009e38, 0x0cc80c00},
+       {0x00009e40, 0x0d261820},
+       {0x00009e4c, 0x00001004},
+       {0x00009e50, 0x00ff03f1},
+       {0x00009fc0, 0x80be4788},
+       {0x00009fc4, 0x0001efb5},
+       {0x00009fcc, 0x40000014},
+       {0x0000a20c, 0x00000000},
+       {0x0000a210, 0x00000000},
+       {0x0000a220, 0x00000000},
+       {0x0000a224, 0x00000000},
+       {0x0000a228, 0x10002310},
+       {0x0000a23c, 0x00000000},
+       {0x0000a244, 0x0c000000},
+       {0x0000a2a0, 0x00000001},
+       {0x0000a2c0, 0x00000001},
+       {0x0000a2c8, 0x00000000},
+       {0x0000a2cc, 0x18c43433},
+       {0x0000a2d4, 0x00000000},
+       {0x0000a2dc, 0x00000000},
+       {0x0000a2e0, 0x00000000},
+       {0x0000a2e4, 0x00000000},
+       {0x0000a2e8, 0x00000000},
+       {0x0000a2ec, 0x00000000},
+       {0x0000a2f0, 0x00000000},
+       {0x0000a2f4, 0x00000000},
+       {0x0000a2f8, 0x00000000},
+       {0x0000a344, 0x00000000},
+       {0x0000a34c, 0x00000000},
+       {0x0000a350, 0x0000a000},
+       {0x0000a364, 0x00000000},
+       {0x0000a370, 0x00000000},
+       {0x0000a390, 0x00000001},
+       {0x0000a394, 0x00000444},
+       {0x0000a398, 0x001f0e0f},
+       {0x0000a39c, 0x0075393f},
+       {0x0000a3a0, 0xb79f6427},
+       {0x0000a3a4, 0x000000ff},
+       {0x0000a3a8, 0x3b3b3b3b},
+       {0x0000a3ac, 0x2f2f2f2f},
+       {0x0000a3c0, 0x20202020},
+       {0x0000a3c4, 0x22222220},
+       {0x0000a3c8, 0x20200020},
+       {0x0000a3cc, 0x20202020},
+       {0x0000a3d0, 0x20202020},
+       {0x0000a3d4, 0x20202020},
+       {0x0000a3d8, 0x20202020},
+       {0x0000a3dc, 0x20202020},
+       {0x0000a3e0, 0x20202020},
+       {0x0000a3e4, 0x20202020},
+       {0x0000a3e8, 0x20202020},
+       {0x0000a3ec, 0x20202020},
+       {0x0000a3f0, 0x00000000},
+       {0x0000a3f4, 0x00000006},
+       {0x0000a3f8, 0x0cdbd380},
+       {0x0000a3fc, 0x000f0f01},
+       {0x0000a400, 0x8fa91f01},
+       {0x0000a404, 0x00000000},
+       {0x0000a408, 0x0e79e5c6},
+       {0x0000a40c, 0x00820820},
+       {0x0000a414, 0x1ce739cf},
+       {0x0000a418, 0x2d0019ce},
+       {0x0000a41c, 0x1ce739ce},
+       {0x0000a420, 0x000001ce},
+       {0x0000a424, 0x1ce739ce},
+       {0x0000a428, 0x000001ce},
+       {0x0000a42c, 0x1ce739ce},
+       {0x0000a430, 0x1ce739ce},
+       {0x0000a434, 0x00000000},
+       {0x0000a438, 0x00001801},
+       {0x0000a43c, 0x00000000},
+       {0x0000a440, 0x00000000},
+       {0x0000a444, 0x00000000},
+       {0x0000a448, 0x04000000},
+       {0x0000a44c, 0x00000001},
+       {0x0000a450, 0x00010000},
+       {0x0000a5c4, 0xbfad9d74},
+       {0x0000a5c8, 0x0048060a},
+       {0x0000a5cc, 0x00000637},
+       {0x0000a760, 0x03020100},
+       {0x0000a764, 0x09080504},
+       {0x0000a768, 0x0d0c0b0a},
+       {0x0000a76c, 0x13121110},
+       {0x0000a770, 0x31301514},
+       {0x0000a774, 0x35343332},
+       {0x0000a778, 0x00000036},
+       {0x0000a780, 0x00000838},
+       {0x0000a7c0, 0x00000000},
+       {0x0000a7c4, 0xfffffffc},
+       {0x0000a7c8, 0x00000000},
+       {0x0000a7cc, 0x00000000},
+       {0x0000a7d0, 0x00000000},
+       {0x0000a7d4, 0x00000004},
+       {0x0000a7dc, 0x00000000},
+};
+
+static const u32 ar9485Common_1_1[][2] = {
+       /*  Addr      allmodes */
+       {0x00007010, 0x00000022},
+       {0x00007020, 0x00000000},
+       {0x00007034, 0x00000002},
+       {0x00007038, 0x000004c2},
+};
+
+static const u32 ar9485_1_1_baseband_postamble[][5] = {
+       /* Addr       5G_HT20        5G_HT40       2G_HT40       2G_HT20 */
+       {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
+       {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
+       {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+       {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+       {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+       {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+       {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
+       {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
+       {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+       {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+       {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
+       {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+       {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+       {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+       {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+       {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
+       {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
+       {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+       {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
+       {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+       {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
+       {0x0000a234, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff},
+       {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+       {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+       {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+       {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+       {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+       {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
+       {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+       {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+       {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
+       {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+       {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
+       {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+       {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000be04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+       {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
+       /* Addr        5G_HT20       5G_HT40       2G_HT40       2G_HT20 */
+       {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+       {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+       {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+       {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+       {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+       {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+       {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+       {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+       {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+       {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+       {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+       {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+       {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+       {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+       {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+       {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+       {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+       {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+       {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+       {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+       {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+       {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+       {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+       {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
+       {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+       {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = {
+       /* Addr        5G_HT20       5G_HT40       2G_HT40       2G_HT20  */
+       {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+       {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+       {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+       {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+       {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+       {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+       {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+       {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+       {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+       {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+       {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+       {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+       {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+       {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+       {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+       {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+       {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+       {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+       {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+       {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+       {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+       {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+       {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+       {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
+       {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+       {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485_1_1_radio_postamble[][2] = {
+       /* Addr        allmodes */
+       {0x0001609c, 0x0b283f31},
+       {0x000160ac, 0x24611800},
+       {0x000160b0, 0x03284f3e},
+       {0x0001610c, 0x00170000},
+       {0x00016140, 0x10804008},
+};
+
+static const u32 ar9485_1_1_mac_postamble[][5] = {
+       /* Addr        5G_HT20       5G_HT40       2G_HT40       2G_HT20 */
+       {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+       {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+       {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+       {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+       {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+       {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+       {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+       {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9485_1_1_radio_core[][2] = {
+       /* Addr        allmodes */
+       {0x00016000, 0x36db6db6},
+       {0x00016004, 0x6db6db40},
+       {0x00016008, 0x73800000},
+       {0x0001600c, 0x00000000},
+       {0x00016040, 0x7f80fff8},
+       {0x0001604c, 0x000f0278},
+       {0x00016050, 0x4db6db8c},
+       {0x00016054, 0x6db60000},
+       {0x00016080, 0x00080000},
+       {0x00016084, 0x0e48048c},
+       {0x00016088, 0x14214514},
+       {0x0001608c, 0x119f081e},
+       {0x00016090, 0x24926490},
+       {0x00016098, 0xd28b3330},
+       {0x000160a0, 0xc2108ffe},
+       {0x000160a4, 0x812fc370},
+       {0x000160a8, 0x423c8000},
+       {0x000160b4, 0x92480040},
+       {0x000160c0, 0x006db6db},
+       {0x000160c4, 0x0186db60},
+       {0x000160c8, 0x6db6db6c},
+       {0x000160cc, 0x6de6fbe0},
+       {0x000160d0, 0xf7dfcf3c},
+       {0x00016100, 0x04cb0001},
+       {0x00016104, 0xfff80015},
+       {0x00016108, 0x00080010},
+       {0x00016144, 0x01884080},
+       {0x00016148, 0x00008040},
+       {0x00016240, 0x08400000},
+       {0x00016244, 0x1bf90f00},
+       {0x00016248, 0x00000000},
+       {0x0001624c, 0x00000000},
+       {0x00016280, 0x01000015},
+       {0x00016284, 0x00d30000},
+       {0x00016288, 0x00318000},
+       {0x0001628c, 0x50000000},
+       {0x00016290, 0x4b96210f},
+       {0x00016380, 0x00000000},
+       {0x00016384, 0x00000000},
+       {0x00016388, 0x00800700},
+       {0x0001638c, 0x00800700},
+       {0x00016390, 0x00800700},
+       {0x00016394, 0x00000000},
+       {0x00016398, 0x00000000},
+       {0x0001639c, 0x00000000},
+       {0x000163a0, 0x00000001},
+       {0x000163a4, 0x00000001},
+       {0x000163a8, 0x00000000},
+       {0x000163ac, 0x00000000},
+       {0x000163b0, 0x00000000},
+       {0x000163b4, 0x00000000},
+       {0x000163b8, 0x00000000},
+       {0x000163bc, 0x00000000},
+       {0x000163c0, 0x000000a0},
+       {0x000163c4, 0x000c0000},
+       {0x000163c8, 0x14021402},
+       {0x000163cc, 0x00001402},
+       {0x000163d0, 0x00000000},
+       {0x000163d4, 0x00000000},
+       {0x00016c40, 0x13188278},
+       {0x00016c44, 0x12000000},
+};
+
+static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = {
+       /* Addr        allmodes */
+       {0x00018c00, 0x10052e5e},
+       {0x00018c04, 0x000801d8},
+       {0x00018c08, 0x0000080c},
+};
+
+static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
+       /* Addr        5G_HT20       5G_HT40       2G_HT40       2G_HT20 */
+       {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+       {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+       {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+       {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+       {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+       {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+       {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+       {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+       {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+       {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+       {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+       {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+       {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+       {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+       {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+       {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+       {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+       {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+       {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+       {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+       {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+       {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+       {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+       {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
+       {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+       {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485_1_1[][2] = {
+       /* Addr        allmodes */
+       {0x0000a580, 0x00000000},
+       {0x0000a584, 0x00000000},
+       {0x0000a588, 0x00000000},
+       {0x0000a58c, 0x00000000},
+       {0x0000a590, 0x00000000},
+       {0x0000a594, 0x00000000},
+       {0x0000a598, 0x00000000},
+       {0x0000a59c, 0x00000000},
+       {0x0000a5a0, 0x00000000},
+       {0x0000a5a4, 0x00000000},
+       {0x0000a5a8, 0x00000000},
+       {0x0000a5ac, 0x00000000},
+       {0x0000a5b0, 0x00000000},
+       {0x0000a5b4, 0x00000000},
+       {0x0000a5b8, 0x00000000},
+       {0x0000a5bc, 0x00000000},
+};
+
+static const u32 ar9485_modes_green_ob_db_tx_gain_1_1[][5] = {
+       /* Addr        5G_HT20       5G_HT40       2G_HT40       2G_HT20 */
+       {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+       {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
+       {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
+       {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
+       {0x0000a508, 0x0c002e00, 0x0c002e00, 0x06000203, 0x06000203},
+       {0x0000a50c, 0x11062202, 0x11062202, 0x0a000401, 0x0a000401},
+       {0x0000a510, 0x17022e00, 0x17022e00, 0x0e000403, 0x0e000403},
+       {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x12000405, 0x12000405},
+       {0x0000a518, 0x25020ec0, 0x25020ec0, 0x15000604, 0x15000604},
+       {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x18000605, 0x18000605},
+       {0x0000a520, 0x2f001f04, 0x2f001f04, 0x1c000a04, 0x1c000a04},
+       {0x0000a524, 0x35001fc4, 0x35001fc4, 0x21000a06, 0x21000a06},
+       {0x0000a528, 0x3c022f04, 0x3c022f04, 0x29000a24, 0x29000a24},
+       {0x0000a52c, 0x41023e85, 0x41023e85, 0x2f000e21, 0x2f000e21},
+       {0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000e20, 0x31000e20},
+       {0x0000a534, 0x4d023f01, 0x4d023f01, 0x33000e20, 0x33000e20},
+       {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+       {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+       {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+       {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+       {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+       {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+       {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+       {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+       {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
+       {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b50c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b510, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b514, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b518, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b51c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b520, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b524, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b528, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+       {0x0000b52c, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a},
+       {0x0000b530, 0x0000003a, 0x0000003a, 0x0000003a, 0x0000003a},
+       {0x0000b534, 0x0000004a, 0x0000004a, 0x0000004a, 0x0000004a},
+       {0x0000b538, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b53c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b540, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b544, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b548, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b54c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b550, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b554, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b558, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b55c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b560, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b564, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b568, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b56c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b570, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b574, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b578, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x0000b57c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+       {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+       {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
+       /* Addr        allmodes */
+       {0x00018c00, 0x10013e5e},
+       {0x00018c04, 0x000801d8},
+       {0x00018c08, 0x0000080c},
+};
+
+static const u32 ar9485_1_1_soc_preamble[][2] = {
+       /* Addr        allmodes */
+       {0x00004014, 0xba280400},
+       {0x000040a4, 0x00a0c9c9},
+       {0x00007010, 0x00000022},
+       {0x00007020, 0x00000000},
+       {0x00007034, 0x00000002},
+       {0x00007038, 0x000004c2},
+       {0x00007048, 0x00000002},
+};
+
+static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
+       /* Addr        allmodes */
+       {0x0000a398, 0x00000000},
+       {0x0000a39c, 0x6f7f0301},
+       {0x0000a3a0, 0xca9228ee},
+};
+
+static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
+       /* Addr        5G_HT20       5G_HT40       2G_HT40       2G_HT20  */
+       {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+       {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+       {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+       {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+       {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+       {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+       {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+       {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+       {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+       {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+       {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+       {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+       {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+       {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+       {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+       {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+       {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+       {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+       {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+       {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+       {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+       {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+       {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+       {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
+       {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+       {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485_fast_clock_1_1_baseband_postamble[][3] = {
+       /* Addr        5G_HT20       5G_HT40  */
+       {0x00009e00, 0x03721821, 0x03721821},
+       {0x0000a230, 0x0000400b, 0x00004016},
+       {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
+       /* Addr        allmodes  */
+       {0x00018c00, 0x10012e5e},
+       {0x00018c04, 0x000801d8},
+       {0x00018c08, 0x0000080c},
+};
+
+static const u32 ar9485_common_rx_gain_1_1[][2] = {
+       /* Addr        allmodes */
+       {0x0000a000, 0x00010000},
+       {0x0000a004, 0x00030002},
+       {0x0000a008, 0x00050004},
+       {0x0000a00c, 0x00810080},
+       {0x0000a010, 0x01800082},
+       {0x0000a014, 0x01820181},
+       {0x0000a018, 0x01840183},
+       {0x0000a01c, 0x01880185},
+       {0x0000a020, 0x018a0189},
+       {0x0000a024, 0x02850284},
+       {0x0000a028, 0x02890288},
+       {0x0000a02c, 0x03850384},
+       {0x0000a030, 0x03890388},
+       {0x0000a034, 0x038b038a},
+       {0x0000a038, 0x038d038c},
+       {0x0000a03c, 0x03910390},
+       {0x0000a040, 0x03930392},
+       {0x0000a044, 0x03950394},
+       {0x0000a048, 0x00000396},
+       {0x0000a04c, 0x00000000},
+       {0x0000a050, 0x00000000},
+       {0x0000a054, 0x00000000},
+       {0x0000a058, 0x00000000},
+       {0x0000a05c, 0x00000000},
+       {0x0000a060, 0x00000000},
+       {0x0000a064, 0x00000000},
+       {0x0000a068, 0x00000000},
+       {0x0000a06c, 0x00000000},
+       {0x0000a070, 0x00000000},
+       {0x0000a074, 0x00000000},
+       {0x0000a078, 0x00000000},
+       {0x0000a07c, 0x00000000},
+       {0x0000a080, 0x28282828},
+       {0x0000a084, 0x28282828},
+       {0x0000a088, 0x28282828},
+       {0x0000a08c, 0x28282828},
+       {0x0000a090, 0x28282828},
+       {0x0000a094, 0x21212128},
+       {0x0000a098, 0x171c1c1c},
+       {0x0000a09c, 0x02020212},
+       {0x0000a0a0, 0x00000202},
+       {0x0000a0a4, 0x00000000},
+       {0x0000a0a8, 0x00000000},
+       {0x0000a0ac, 0x00000000},
+       {0x0000a0b0, 0x00000000},
+       {0x0000a0b4, 0x00000000},
+       {0x0000a0b8, 0x00000000},
+       {0x0000a0bc, 0x00000000},
+       {0x0000a0c0, 0x001f0000},
+       {0x0000a0c4, 0x111f1100},
+       {0x0000a0c8, 0x111d111e},
+       {0x0000a0cc, 0x111b111c},
+       {0x0000a0d0, 0x22032204},
+       {0x0000a0d4, 0x22012202},
+       {0x0000a0d8, 0x221f2200},
+       {0x0000a0dc, 0x221d221e},
+       {0x0000a0e0, 0x33013302},
+       {0x0000a0e4, 0x331f3300},
+       {0x0000a0e8, 0x4402331e},
+       {0x0000a0ec, 0x44004401},
+       {0x0000a0f0, 0x441e441f},
+       {0x0000a0f4, 0x55015502},
+       {0x0000a0f8, 0x551f5500},
+       {0x0000a0fc, 0x6602551e},
+       {0x0000a100, 0x66006601},
+       {0x0000a104, 0x661e661f},
+       {0x0000a108, 0x7703661d},
+       {0x0000a10c, 0x77017702},
+       {0x0000a110, 0x00007700},
+       {0x0000a114, 0x00000000},
+       {0x0000a118, 0x00000000},
+       {0x0000a11c, 0x00000000},
+       {0x0000a120, 0x00000000},
+       {0x0000a124, 0x00000000},
+       {0x0000a128, 0x00000000},
+       {0x0000a12c, 0x00000000},
+       {0x0000a130, 0x00000000},
+       {0x0000a134, 0x00000000},
+       {0x0000a138, 0x00000000},
+       {0x0000a13c, 0x00000000},
+       {0x0000a140, 0x001f0000},
+       {0x0000a144, 0x111f1100},
+       {0x0000a148, 0x111d111e},
+       {0x0000a14c, 0x111b111c},
+       {0x0000a150, 0x22032204},
+       {0x0000a154, 0x22012202},
+       {0x0000a158, 0x221f2200},
+       {0x0000a15c, 0x221d221e},
+       {0x0000a160, 0x33013302},
+       {0x0000a164, 0x331f3300},
+       {0x0000a168, 0x4402331e},
+       {0x0000a16c, 0x44004401},
+       {0x0000a170, 0x441e441f},
+       {0x0000a174, 0x55015502},
+       {0x0000a178, 0x551f5500},
+       {0x0000a17c, 0x6602551e},
+       {0x0000a180, 0x66006601},
+       {0x0000a184, 0x661e661f},
+       {0x0000a188, 0x7703661d},
+       {0x0000a18c, 0x77017702},
+       {0x0000a190, 0x00007700},
+       {0x0000a194, 0x00000000},
+       {0x0000a198, 0x00000000},
+       {0x0000a19c, 0x00000000},
+       {0x0000a1a0, 0x00000000},
+       {0x0000a1a4, 0x00000000},
+       {0x0000a1a8, 0x00000000},
+       {0x0000a1ac, 0x00000000},
+       {0x0000a1b0, 0x00000000},
+       {0x0000a1b4, 0x00000000},
+       {0x0000a1b8, 0x00000000},
+       {0x0000a1bc, 0x00000000},
+       {0x0000a1c0, 0x00000000},
+       {0x0000a1c4, 0x00000000},
+       {0x0000a1c8, 0x00000000},
+       {0x0000a1cc, 0x00000000},
+       {0x0000a1d0, 0x00000000},
+       {0x0000a1d4, 0x00000000},
+       {0x0000a1d8, 0x00000000},
+       {0x0000a1dc, 0x00000000},
+       {0x0000a1e0, 0x00000000},
+       {0x0000a1e4, 0x00000000},
+       {0x0000a1e8, 0x00000000},
+       {0x0000a1ec, 0x00000000},
+       {0x0000a1f0, 0x00000396},
+       {0x0000a1f4, 0x00000396},
+       {0x0000a1f8, 0x00000396},
+       {0x0000a1fc, 0x00000296},
+};
+
+static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = {
+       /* Addr        allmodes */
+       {0x00018c00, 0x10053e5e},
+       {0x00018c04, 0x000801d8},
+       {0x00018c08, 0x0000080c},
+};
+
+static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
+       /* Addr        allmodes */
+       {0x0000a000, 0x00060005},
+       {0x0000a004, 0x00810080},
+       {0x0000a008, 0x00830082},
+       {0x0000a00c, 0x00850084},
+       {0x0000a010, 0x01820181},
+       {0x0000a014, 0x01840183},
+       {0x0000a018, 0x01880185},
+       {0x0000a01c, 0x018a0189},
+       {0x0000a020, 0x02850284},
+       {0x0000a024, 0x02890288},
+       {0x0000a028, 0x028b028a},
+       {0x0000a02c, 0x03850384},
+       {0x0000a030, 0x03890388},
+       {0x0000a034, 0x038b038a},
+       {0x0000a038, 0x038d038c},
+       {0x0000a03c, 0x03910390},
+       {0x0000a040, 0x03930392},
+       {0x0000a044, 0x03950394},
+       {0x0000a048, 0x00000396},
+       {0x0000a04c, 0x00000000},
+       {0x0000a050, 0x00000000},
+       {0x0000a054, 0x00000000},
+       {0x0000a058, 0x00000000},
+       {0x0000a05c, 0x00000000},
+       {0x0000a060, 0x00000000},
+       {0x0000a064, 0x00000000},
+       {0x0000a068, 0x00000000},
+       {0x0000a06c, 0x00000000},
+       {0x0000a070, 0x00000000},
+       {0x0000a074, 0x00000000},
+       {0x0000a078, 0x00000000},
+       {0x0000a07c, 0x00000000},
+       {0x0000a080, 0x28282828},
+       {0x0000a084, 0x28282828},
+       {0x0000a088, 0x28282828},
+       {0x0000a08c, 0x28282828},
+       {0x0000a090, 0x28282828},
+       {0x0000a094, 0x24242428},
+       {0x0000a098, 0x171e1e1e},
+       {0x0000a09c, 0x02020b0b},
+       {0x0000a0a0, 0x02020202},
+       {0x0000a0a4, 0x00000000},
+       {0x0000a0a8, 0x00000000},
+       {0x0000a0ac, 0x00000000},
+       {0x0000a0b0, 0x00000000},
+       {0x0000a0b4, 0x00000000},
+       {0x0000a0b8, 0x00000000},
+       {0x0000a0bc, 0x00000000},
+       {0x0000a0c0, 0x22072208},
+       {0x0000a0c4, 0x22052206},
+       {0x0000a0c8, 0x22032204},
+       {0x0000a0cc, 0x22012202},
+       {0x0000a0d0, 0x221f2200},
+       {0x0000a0d4, 0x221d221e},
+       {0x0000a0d8, 0x33023303},
+       {0x0000a0dc, 0x33003301},
+       {0x0000a0e0, 0x331e331f},
+       {0x0000a0e4, 0x4402331d},
+       {0x0000a0e8, 0x44004401},
+       {0x0000a0ec, 0x441e441f},
+       {0x0000a0f0, 0x55025503},
+       {0x0000a0f4, 0x55005501},
+       {0x0000a0f8, 0x551e551f},
+       {0x0000a0fc, 0x6602551d},
+       {0x0000a100, 0x66006601},
+       {0x0000a104, 0x661e661f},
+       {0x0000a108, 0x7703661d},
+       {0x0000a10c, 0x77017702},
+       {0x0000a110, 0x00007700},
+       {0x0000a114, 0x00000000},
+       {0x0000a118, 0x00000000},
+       {0x0000a11c, 0x00000000},
+       {0x0000a120, 0x00000000},
+       {0x0000a124, 0x00000000},
+       {0x0000a128, 0x00000000},
+       {0x0000a12c, 0x00000000},
+       {0x0000a130, 0x00000000},
+       {0x0000a134, 0x00000000},
+       {0x0000a138, 0x00000000},
+       {0x0000a13c, 0x00000000},
+       {0x0000a140, 0x001f0000},
+       {0x0000a144, 0x111f1100},
+       {0x0000a148, 0x111d111e},
+       {0x0000a14c, 0x111b111c},
+       {0x0000a150, 0x22032204},
+       {0x0000a154, 0x22012202},
+       {0x0000a158, 0x221f2200},
+       {0x0000a15c, 0x221d221e},
+       {0x0000a160, 0x33013302},
+       {0x0000a164, 0x331f3300},
+       {0x0000a168, 0x4402331e},
+       {0x0000a16c, 0x44004401},
+       {0x0000a170, 0x441e441f},
+       {0x0000a174, 0x55015502},
+       {0x0000a178, 0x551f5500},
+       {0x0000a17c, 0x6602551e},
+       {0x0000a180, 0x66006601},
+       {0x0000a184, 0x661e661f},
+       {0x0000a188, 0x7703661d},
+       {0x0000a18c, 0x77017702},
+       {0x0000a190, 0x00007700},
+       {0x0000a194, 0x00000000},
+       {0x0000a198, 0x00000000},
+       {0x0000a19c, 0x00000000},
+       {0x0000a1a0, 0x00000000},
+       {0x0000a1a4, 0x00000000},
+       {0x0000a1a8, 0x00000000},
+       {0x0000a1ac, 0x00000000},
+       {0x0000a1b0, 0x00000000},
+       {0x0000a1b4, 0x00000000},
+       {0x0000a1b8, 0x00000000},
+       {0x0000a1bc, 0x00000000},
+       {0x0000a1c0, 0x00000000},
+       {0x0000a1c4, 0x00000000},
+       {0x0000a1c8, 0x00000000},
+       {0x0000a1cc, 0x00000000},
+       {0x0000a1d0, 0x00000000},
+       {0x0000a1d4, 0x00000000},
+       {0x0000a1d8, 0x00000000},
+       {0x0000a1dc, 0x00000000},
+       {0x0000a1e0, 0x00000000},
+       {0x0000a1e4, 0x00000000},
+       {0x0000a1e8, 0x00000000},
+       {0x0000a1ec, 0x00000000},
+       {0x0000a1f0, 0x00000396},
+       {0x0000a1f4, 0x00000396},
+       {0x0000a1f8, 0x00000396},
+       {0x0000a1fc, 0x00000296},
+};
+
 #endif
index 1a7fa6ea4cf57fedabac3a673d7c2f69be0d8cc7..c718ab512a979e15eafd4111c83dfc04898edcff 100644 (file)
@@ -92,9 +92,9 @@ struct ath_config {
  * @BUF_XRETRY: To denote excessive retries of the buffer
  */
 enum buffer_type {
-       BUF_AMPDU               = BIT(2),
-       BUF_AGGR                = BIT(3),
-       BUF_XRETRY              = BIT(5),
+       BUF_AMPDU               = BIT(0),
+       BUF_AGGR                = BIT(1),
+       BUF_XRETRY              = BIT(2),
 };
 
 #define bf_isampdu(bf)         (bf->bf_state.bf_type & BUF_AMPDU)
@@ -134,7 +134,6 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
         (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
         WME_AC_VO)
 
-#define ADDBA_EXCHANGE_ATTEMPTS    10
 #define ATH_AGGR_DELIM_SZ          4
 #define ATH_AGGR_MINPLEN           256 /* in bytes, minimum packet length */
 /* number of delimiters for encryption padding */
@@ -181,7 +180,8 @@ enum ATH_AGGR_STATUS {
 
 #define ATH_TXFIFO_DEPTH 8
 struct ath_txq {
-       u32 axq_qnum;
+       int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
+       u32 axq_qnum; /* ath9k hardware queue number */
        u32 *axq_link;
        struct list_head axq_q;
        spinlock_t axq_lock;
@@ -189,6 +189,7 @@ struct ath_txq {
        u32 axq_ampdu_depth;
        bool stopped;
        bool axq_tx_inprogress;
+       bool txq_flush_inprogress;
        struct list_head axq_acq;
        struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
        struct list_head txq_fifo_pending;
@@ -231,7 +232,6 @@ struct ath_buf {
        bool bf_stale;
        u16 bf_flags;
        struct ath_buf_state bf_state;
-       struct ath_wiphy *aphy;
 };
 
 struct ath_atx_tid {
@@ -252,7 +252,10 @@ struct ath_atx_tid {
 };
 
 struct ath_node {
-       struct ath_common *common;
+#ifdef CONFIG_ATH9K_DEBUGFS
+       struct list_head list; /* for sc->nodes */
+       struct ieee80211_sta *sta; /* station struct we're part of */
+#endif
        struct ath_atx_tid tid[WME_NUM_TID];
        struct ath_atx_ac ac[WME_NUM_AC];
        u16 maxampdu;
@@ -275,6 +278,11 @@ struct ath_tx_control {
 #define ATH_TX_XRETRY       0x02
 #define ATH_TX_BAR          0x04
 
+/**
+ * @txq_map:  Index is mac80211 queue number.  This is
+ *  not necessarily the same as the hardware queue number
+ *  (axq_qnum).
+ */
 struct ath_tx {
        u16 seq_no;
        u32 txqsetup;
@@ -301,6 +309,8 @@ struct ath_rx {
        struct ath_descdma rxdma;
        struct ath_buf *rx_bufptr;
        struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
+
+       struct sk_buff *frag;
 };
 
 int ath_startrecv(struct ath_softc *sc);
@@ -337,10 +347,10 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
 
 struct ath_vif {
        int av_bslot;
+       bool is_bslot_active;
        __le64 tsf_adjust; /* TSF adjustment for staggered beacons */
        enum nl80211_iftype av_opmode;
        struct ath_buf *av_bcbuf;
-       struct ath_tx_control av_btxctl;
        u8 bssid[ETH_ALEN]; /* current BSSID from config_interface */
 };
 
@@ -360,7 +370,7 @@ struct ath_vif {
 #define IEEE80211_MS_TO_TU(x)           (((x) * 1000) / 1024)
 
 struct ath_beacon_config {
-       u16 beacon_interval;
+       int beacon_interval;
        u16 listen_interval;
        u16 dtim_period;
        u16 bmiss_timeout;
@@ -379,7 +389,6 @@ struct ath_beacon {
        u32 ast_be_xmit;
        u64 bc_tstamp;
        struct ieee80211_vif *bslot[ATH_BCBUF];
-       struct ath_wiphy *bslot_aphy[ATH_BCBUF];
        int slottime;
        int slotupdate;
        struct ath9k_tx_queue_info beacon_qi;
@@ -390,9 +399,10 @@ struct ath_beacon {
 
 void ath_beacon_tasklet(unsigned long data);
 void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif);
-int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif);
+int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif);
 void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp);
 int ath_beaconq_config(struct ath_softc *sc);
+void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
 
 /*******/
 /* ANI */
@@ -439,26 +449,21 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc);
 
 #define ATH_LED_PIN_DEF                1
 #define ATH_LED_PIN_9287               8
-#define ATH_LED_ON_DURATION_IDLE       350     /* in msecs */
-#define ATH_LED_OFF_DURATION_IDLE      250     /* in msecs */
-
-enum ath_led_type {
-       ATH_LED_RADIO,
-       ATH_LED_ASSOC,
-       ATH_LED_TX,
-       ATH_LED_RX
-};
-
-struct ath_led {
-       struct ath_softc *sc;
-       struct led_classdev led_cdev;
-       enum ath_led_type led_type;
-       char name[32];
-       bool registered;
-};
+#define ATH_LED_PIN_9485               6
 
+#ifdef CONFIG_MAC80211_LEDS
 void ath_init_leds(struct ath_softc *sc);
 void ath_deinit_leds(struct ath_softc *sc);
+#else
+static inline void ath_init_leds(struct ath_softc *sc)
+{
+}
+
+static inline void ath_deinit_leds(struct ath_softc *sc)
+{
+}
+#endif
+
 
 /* Antenna diversity/combining */
 #define ATH_ANT_RX_CURRENT_SHIFT 4
@@ -527,7 +532,6 @@ struct ath_ant_comb {
 #define ATH_CABQ_READY_TIME     80      /* % of beacon interval */
 #define ATH_MAX_SW_RETRIES      10
 #define ATH_CHAN_MAX            255
-#define IEEE80211_WEP_NKID      4       /* number of key ids */
 
 #define ATH_TXPOWER_MAX         100     /* .5 dBm units */
 #define ATH_RATE_DUMMY_MARKER   0
@@ -555,27 +559,28 @@ struct ath_ant_comb {
 #define PS_WAIT_FOR_TX_ACK        BIT(3)
 #define PS_BEACON_SYNC            BIT(4)
 
-struct ath_wiphy;
 struct ath_rate_table;
 
+struct ath9k_vif_iter_data {
+       const u8 *hw_macaddr; /* phy's hardware address, set
+                              * before starting iteration for
+                              * valid bssid mask.
+                              */
+       u8 mask[ETH_ALEN]; /* bssid mask */
+       int naps;      /* number of AP vifs */
+       int nmeshes;   /* number of mesh vifs */
+       int nstations; /* number of station vifs */
+       int nwds;      /* number of WDS vifs */
+       int nadhocs;   /* number of adhoc vifs */
+       int nothers;   /* number of vifs not specified above. */
+};
+
 struct ath_softc {
        struct ieee80211_hw *hw;
        struct device *dev;
 
-       spinlock_t wiphy_lock; /* spinlock to protect ath_wiphy data */
-       struct ath_wiphy *pri_wiphy;
-       struct ath_wiphy **sec_wiphy; /* secondary wiphys (virtual radios); may
-                                      * have NULL entries */
-       int num_sec_wiphy; /* number of sec_wiphy pointers in the array */
        int chan_idx;
        int chan_is_ht;
-       struct ath_wiphy *next_wiphy;
-       struct work_struct chan_work;
-       int wiphy_select_failures;
-       unsigned long wiphy_select_first_fail;
-       struct delayed_work wiphy_work;
-       unsigned long wiphy_scheduler_int;
-       int wiphy_scheduler_index;
        struct survey_info *cur_survey;
        struct survey_info survey[ATH9K_NUM_CHANNELS];
 
@@ -592,14 +597,16 @@ struct ath_softc {
        struct work_struct hw_check_work;
        struct completion paprd_complete;
 
+       unsigned int hw_busy_count;
+
        u32 intrstatus;
        u32 sc_flags; /* SC_OP_* */
        u16 ps_flags; /* PS_* */
        u16 curtxpow;
-       u8 nbcnvifs;
-       u16 nvifs;
        bool ps_enabled;
        bool ps_idle;
+       short nbcnvifs;
+       short nvifs;
        unsigned long ps_usecount;
 
        struct ath_config config;
@@ -608,23 +615,24 @@ struct ath_softc {
        struct ath_beacon beacon;
        struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
 
-       struct ath_led radio_led;
-       struct ath_led assoc_led;
-       struct ath_led tx_led;
-       struct ath_led rx_led;
-       struct delayed_work ath_led_blink_work;
-       int led_on_duration;
-       int led_off_duration;
-       int led_on_cnt;
-       int led_off_cnt;
+#ifdef CONFIG_MAC80211_LEDS
+       bool led_registered;
+       char led_name[32];
+       struct led_classdev led_cdev;
+#endif
 
-       int beacon_interval;
+       struct ath9k_hw_cal_data caldata;
+       int last_rssi;
 
 #ifdef CONFIG_ATH9K_DEBUGFS
        struct ath9k_debug debug;
+       spinlock_t nodes_lock;
+       struct list_head nodes; /* basically, stations */
+       unsigned int tx_complete_poll_work_seen;
 #endif
        struct ath_beacon_config cur_beacon_conf;
        struct delayed_work tx_complete_work;
+       struct delayed_work hw_pll_work;
        struct ath_btcoex btcoex;
 
        struct ath_descdma txsdma;
@@ -632,23 +640,6 @@ struct ath_softc {
        struct ath_ant_comb ant_comb;
 };
 
-struct ath_wiphy {
-       struct ath_softc *sc; /* shared for all virtual wiphys */
-       struct ieee80211_hw *hw;
-       struct ath9k_hw_cal_data caldata;
-       enum ath_wiphy_state {
-               ATH_WIPHY_INACTIVE,
-               ATH_WIPHY_ACTIVE,
-               ATH_WIPHY_PAUSING,
-               ATH_WIPHY_PAUSED,
-               ATH_WIPHY_SCAN,
-       } state;
-       bool idle;
-       int chan_idx;
-       int chan_is_ht;
-       int last_rssi;
-};
-
 void ath9k_tasklet(unsigned long data);
 int ath_reset(struct ath_softc *sc, bool retry_tx);
 int ath_cabq_update(struct ath_softc *);
@@ -669,14 +660,13 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
                    const struct ath_bus_ops *bus_ops);
 void ath9k_deinit_device(struct ath_softc *sc);
 void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
-void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
-                          struct ath9k_channel *ichan);
 int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
                    struct ath9k_channel *hchan);
 
 void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw);
 void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
 bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode);
+bool ath9k_uses_beacons(int type);
 
 #ifdef CONFIG_PCI
 int ath_pci_init(void);
@@ -700,26 +690,12 @@ void ath9k_ps_restore(struct ath_softc *sc);
 u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
 
 void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
-int ath9k_wiphy_add(struct ath_softc *sc);
-int ath9k_wiphy_del(struct ath_wiphy *aphy);
-void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype);
-int ath9k_wiphy_pause(struct ath_wiphy *aphy);
-int ath9k_wiphy_unpause(struct ath_wiphy *aphy);
-int ath9k_wiphy_select(struct ath_wiphy *aphy);
-void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int);
-void ath9k_wiphy_chan_work(struct work_struct *work);
-bool ath9k_wiphy_started(struct ath_softc *sc);
-void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
-                                 struct ath_wiphy *selected);
-bool ath9k_wiphy_scanning(struct ath_softc *sc);
-void ath9k_wiphy_work(struct work_struct *work);
-bool ath9k_all_wiphys_idle(struct ath_softc *sc);
-void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle);
-
-void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
-bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
 
 void ath_start_rfkill_poll(struct ath_softc *sc);
 extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
+                              struct ieee80211_vif *vif,
+                              struct ath9k_vif_iter_data *iter_data);
+
 
 #endif /* ATH9K_H */
index 385ba03134ba1b100ae2c3419f7690d898c74f25..a4bdfdb043ef2da62acaaa06366381c226a3577c 100644 (file)
@@ -112,8 +112,7 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
 
 static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_tx_control txctl;
 
@@ -132,8 +131,7 @@ static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
 static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
                                           struct ieee80211_vif *vif)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_buf *bf;
        struct ath_vif *avp;
@@ -142,13 +140,10 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
        struct ieee80211_tx_info *info;
        int cabq_depth;
 
-       if (aphy->state != ATH_WIPHY_ACTIVE)
-               return NULL;
-
        avp = (void *)vif->drv_priv;
        cabq = sc->beacon.cabq;
 
-       if (avp->av_bcbuf == NULL)
+       if ((avp->av_bcbuf == NULL) || !avp->is_bslot_active)
                return NULL;
 
        /* Release the old beacon first */
@@ -225,13 +220,13 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
        return bf;
 }
 
-int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
+int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif)
 {
-       struct ath_softc *sc = aphy->sc;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_vif *avp;
        struct ath_buf *bf;
        struct sk_buff *skb;
+       struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
        __le64 tstamp;
 
        avp = (void *)vif->drv_priv;
@@ -244,9 +239,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
                                                 struct ath_buf, list);
                list_del(&avp->av_bcbuf->list);
 
-               if (sc->sc_ah->opmode == NL80211_IFTYPE_AP ||
-                   sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC ||
-                   sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) {
+               if (ath9k_uses_beacons(vif->type)) {
                        int slot;
                        /*
                         * Assign the vif to a beacon xmit slot. As
@@ -256,6 +249,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
                        for (slot = 0; slot < ATH_BCBUF; slot++)
                                if (sc->beacon.bslot[slot] == NULL) {
                                        avp->av_bslot = slot;
+                                       avp->is_bslot_active = false;
 
                                        /* NB: keep looking for a double slot */
                                        if (slot == 0 || !sc->beacon.bslot[slot-1])
@@ -263,7 +257,6 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
                                }
                        BUG_ON(sc->beacon.bslot[avp->av_bslot] != NULL);
                        sc->beacon.bslot[avp->av_bslot] = vif;
-                       sc->beacon.bslot_aphy[avp->av_bslot] = aphy;
                        sc->nbcnvifs++;
                }
        }
@@ -281,10 +274,8 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
 
        /* NB: the beacon data buffer must be 32-bit aligned. */
        skb = ieee80211_beacon_get(sc->hw, vif);
-       if (skb == NULL) {
-               ath_dbg(common, ATH_DBG_BEACON, "cannot get skb\n");
+       if (skb == NULL)
                return -ENOMEM;
-       }
 
        tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
        sc->beacon.bc_tstamp = le64_to_cpu(tstamp);
@@ -293,7 +284,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
                u64 tsfadjust;
                int intval;
 
-               intval = sc->beacon_interval ? : ATH_DEFAULT_BINTVAL;
+               intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
 
                /*
                 * Calculate the TSF offset for this beacon slot, i.e., the
@@ -325,6 +316,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
                ath_err(common, "dma_mapping_error on beacon alloc\n");
                return -ENOMEM;
        }
+       avp->is_bslot_active = true;
 
        return 0;
 }
@@ -336,7 +328,6 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp)
 
                if (avp->av_bslot != -1) {
                        sc->beacon.bslot[avp->av_bslot] = NULL;
-                       sc->beacon.bslot_aphy[avp->av_bslot] = NULL;
                        sc->nbcnvifs--;
                }
 
@@ -358,11 +349,11 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp)
 void ath_beacon_tasklet(unsigned long data)
 {
        struct ath_softc *sc = (struct ath_softc *)data;
+       struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_buf *bf = NULL;
        struct ieee80211_vif *vif;
-       struct ath_wiphy *aphy;
        int slot;
        u32 bfaddr, bc = 0, tsftu;
        u64 tsf;
@@ -406,7 +397,7 @@ void ath_beacon_tasklet(unsigned long data)
         * on the tsf to safeguard against missing an swba.
         */
 
-       intval = sc->beacon_interval ? : ATH_DEFAULT_BINTVAL;
+       intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
 
        tsf = ath9k_hw_gettsf64(ah);
        tsftu = TSF_TO_TU(tsf>>32, tsf);
@@ -420,7 +411,6 @@ void ath_beacon_tasklet(unsigned long data)
         */
        slot = ATH_BCBUF - slot - 1;
        vif = sc->beacon.bslot[slot];
-       aphy = sc->beacon.bslot_aphy[slot];
 
        ath_dbg(common, ATH_DBG_BEACON,
                "slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
@@ -428,7 +418,7 @@ void ath_beacon_tasklet(unsigned long data)
 
        bfaddr = 0;
        if (vif) {
-               bf = ath_beacon_generate(aphy->hw, vif);
+               bf = ath_beacon_generate(sc->hw, vif);
                if (bf != NULL) {
                        bfaddr = bf->bf_daddr;
                        bc = 1;
@@ -720,10 +710,10 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
                iftype = sc->sc_ah->opmode;
        }
 
-               cur_conf->listen_interval = 1;
-               cur_conf->dtim_count = 1;
-               cur_conf->bmiss_timeout =
-                       ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
+       cur_conf->listen_interval = 1;
+       cur_conf->dtim_count = 1;
+       cur_conf->bmiss_timeout =
+               ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
 
        /*
         * It looks like mac80211 may end up using beacon interval of zero in
@@ -735,8 +725,9 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
                cur_conf->beacon_interval = 100;
 
        /*
-        * Some times we dont parse dtim period from mac80211, in that case
-        * use a default value
+        * We don't parse the dtim period from mac80211 during driver
+        * initialization, as doing so breaks association with hidden-SSID
+        * APs and causes latency when roaming.
         */
        if (cur_conf->dtim_period == 0)
                cur_conf->dtim_period = 1;
@@ -760,3 +751,36 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
 
        sc->sc_flags |= SC_OP_BEACONS;
 }
+
+void ath9k_set_beaconing_status(struct ath_softc *sc, bool status)
+{
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_vif *avp;
+       int slot;
+       bool found = false;
+
+       ath9k_ps_wakeup(sc);
+       if (status) {
+               for (slot = 0; slot < ATH_BCBUF; slot++) {
+                       if (sc->beacon.bslot[slot]) {
+                               avp = (void *)sc->beacon.bslot[slot]->drv_priv;
+                               if (avp->is_bslot_active) {
+                                       found = true;
+                                       break;
+                               }
+                       }
+               }
+               if (found) {
+                       /* Re-enable beaconing */
+                       ah->imask |= ATH9K_INT_SWBA;
+                       ath9k_hw_set_interrupts(ah, ah->imask);
+               }
+       } else {
+               /* Disable SWBA interrupt */
+               ah->imask &= ~ATH9K_INT_SWBA;
+               ath9k_hw_set_interrupts(ah, ah->imask);
+               tasklet_kill(&sc->bcon_tasklet);
+               ath9k_hw_stoptxdma(ah, sc->beacon.beaconq);
+       }
+       ath9k_ps_restore(sc);
+}
index b68a1acbddd0164c707f8f0daf57dda34e702efe..b4a92a4313f6f8fdda2a6afc91057c0201a2909d 100644 (file)
@@ -382,9 +382,8 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
        s16 default_nf;
        int i, j;
 
-       if (!ah->caldata)
-               return;
-
+       ah->caldata->channel = chan->channel;
+       ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT;
        h = ah->caldata->nfCalHist;
        default_nf = ath9k_hw_get_default_nf(ah, chan);
        for (i = 0; i < NUM_NF_READINGS; i++) {
index df1998d4825303d6d0612612a99e576be7928aa9..615e68276e72638b0e3f05205e62713210a0595b 100644 (file)
@@ -189,6 +189,17 @@ void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
 }
 EXPORT_SYMBOL(ath9k_cmn_btcoex_bt_stomp);
 
+void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
+                           u16 new_txpow, u16 *txpower)
+{
+       if (cur_txpow != new_txpow) {
+               ath9k_hw_set_txpowerlimit(ah, new_txpow, false);
+               /* read back in case value is clamped */
+               *txpower = ath9k_hw_regulatory(ah)->power_limit;
+       }
+}
+EXPORT_SYMBOL(ath9k_cmn_update_txpow);
+
 static int __init ath9k_cmn_init(void)
 {
        return 0;
index a126bddebb0a124f62cc27ad78c0cb5ceca76c72..b2f7b5f89097422fb6a1ef696b5201d993a26544 100644 (file)
@@ -23,8 +23,6 @@
 
 /* Common header for Atheros 802.11n base driver cores */
 
-#define IEEE80211_WEP_NKID 4
-
 #define WME_NUM_TID             16
 #define WME_BA_BMP_SIZE         64
 #define WME_MAX_BA              WME_BA_BMP_SIZE
@@ -70,3 +68,5 @@ struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
 int ath9k_cmn_count_streams(unsigned int chainmask, int max);
 void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
                                  enum ath_stomp_type stomp_type);
+void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
+                           u16 new_txpow, u16 *txpower);
index 3586c43077a72801a342480c8a93447d5f92f268..5cfcf8c235a4429d6778ecf9afdeb5f18f91f448 100644 (file)
@@ -381,41 +381,40 @@ static const struct file_operations fops_interrupt = {
        .llseek = default_llseek,
 };
 
-static const char * ath_wiphy_state_str(enum ath_wiphy_state state)
+static const char *channel_type_str(enum nl80211_channel_type t)
 {
-       switch (state) {
-       case ATH_WIPHY_INACTIVE:
-               return "INACTIVE";
-       case ATH_WIPHY_ACTIVE:
-               return "ACTIVE";
-       case ATH_WIPHY_PAUSING:
-               return "PAUSING";
-       case ATH_WIPHY_PAUSED:
-               return "PAUSED";
-       case ATH_WIPHY_SCAN:
-               return "SCAN";
+       switch (t) {
+       case NL80211_CHAN_NO_HT:
+               return "no ht";
+       case NL80211_CHAN_HT20:
+               return "ht20";
+       case NL80211_CHAN_HT40MINUS:
+               return "ht40-";
+       case NL80211_CHAN_HT40PLUS:
+               return "ht40+";
+       default:
+               return "???";
        }
-       return "?";
 }
 
 static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
                               size_t count, loff_t *ppos)
 {
        struct ath_softc *sc = file->private_data;
-       struct ath_wiphy *aphy = sc->pri_wiphy;
-       struct ieee80211_channel *chan = aphy->hw->conf.channel;
+       struct ieee80211_channel *chan = sc->hw->conf.channel;
+       struct ieee80211_conf *conf = &(sc->hw->conf);
        char buf[512];
        unsigned int len = 0;
-       int i;
        u8 addr[ETH_ALEN];
        u32 tmp;
 
        len += snprintf(buf + len, sizeof(buf) - len,
-                       "primary: %s (%s chan=%d ht=%d)\n",
-                       wiphy_name(sc->pri_wiphy->hw->wiphy),
-                       ath_wiphy_state_str(sc->pri_wiphy->state),
+                       "%s (chan=%d  center-freq: %d MHz  channel-type: %d (%s))\n",
+                       wiphy_name(sc->hw->wiphy),
                        ieee80211_frequency_to_channel(chan->center_freq),
-                       aphy->chan_is_ht);
+                       chan->center_freq,
+                       conf->channel_type,
+                       channel_type_str(conf->channel_type));
 
        put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr);
        put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4);
@@ -457,156 +456,82 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
        else
                len += snprintf(buf + len, sizeof(buf) - len, "\n");
 
-       /* Put variable-length stuff down here, and check for overflows. */
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               struct ath_wiphy *aphy_tmp = sc->sec_wiphy[i];
-               if (aphy_tmp == NULL)
-                       continue;
-               chan = aphy_tmp->hw->conf.channel;
-               len += snprintf(buf + len, sizeof(buf) - len,
-                       "secondary: %s (%s chan=%d ht=%d)\n",
-                       wiphy_name(aphy_tmp->hw->wiphy),
-                       ath_wiphy_state_str(aphy_tmp->state),
-                       ieee80211_frequency_to_channel(chan->center_freq),
-                                                      aphy_tmp->chan_is_ht);
-       }
        if (len > sizeof(buf))
                len = sizeof(buf);
 
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
-static struct ath_wiphy * get_wiphy(struct ath_softc *sc, const char *name)
-{
-       int i;
-       if (strcmp(name, wiphy_name(sc->pri_wiphy->hw->wiphy)) == 0)
-               return sc->pri_wiphy;
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               struct ath_wiphy *aphy = sc->sec_wiphy[i];
-               if (aphy && strcmp(name, wiphy_name(aphy->hw->wiphy)) == 0)
-                       return aphy;
-       }
-       return NULL;
-}
-
-static int del_wiphy(struct ath_softc *sc, const char *name)
-{
-       struct ath_wiphy *aphy = get_wiphy(sc, name);
-       if (!aphy)
-               return -ENOENT;
-       return ath9k_wiphy_del(aphy);
-}
-
-static int pause_wiphy(struct ath_softc *sc, const char *name)
-{
-       struct ath_wiphy *aphy = get_wiphy(sc, name);
-       if (!aphy)
-               return -ENOENT;
-       return ath9k_wiphy_pause(aphy);
-}
-
-static int unpause_wiphy(struct ath_softc *sc, const char *name)
-{
-       struct ath_wiphy *aphy = get_wiphy(sc, name);
-       if (!aphy)
-               return -ENOENT;
-       return ath9k_wiphy_unpause(aphy);
-}
-
-static int select_wiphy(struct ath_softc *sc, const char *name)
-{
-       struct ath_wiphy *aphy = get_wiphy(sc, name);
-       if (!aphy)
-               return -ENOENT;
-       return ath9k_wiphy_select(aphy);
-}
-
-static int schedule_wiphy(struct ath_softc *sc, const char *msec)
-{
-       ath9k_wiphy_set_scheduler(sc, simple_strtoul(msec, NULL, 0));
-       return 0;
-}
-
-static ssize_t write_file_wiphy(struct file *file, const char __user *user_buf,
-                               size_t count, loff_t *ppos)
-{
-       struct ath_softc *sc = file->private_data;
-       char buf[50];
-       size_t len;
-
-       len = min(count, sizeof(buf) - 1);
-       if (copy_from_user(buf, user_buf, len))
-               return -EFAULT;
-       buf[len] = '\0';
-       if (len > 0 && buf[len - 1] == '\n')
-               buf[len - 1] = '\0';
-
-       if (strncmp(buf, "add", 3) == 0) {
-               int res = ath9k_wiphy_add(sc);
-               if (res < 0)
-                       return res;
-       } else if (strncmp(buf, "del=", 4) == 0) {
-               int res = del_wiphy(sc, buf + 4);
-               if (res < 0)
-                       return res;
-       } else if (strncmp(buf, "pause=", 6) == 0) {
-               int res = pause_wiphy(sc, buf + 6);
-               if (res < 0)
-                       return res;
-       } else if (strncmp(buf, "unpause=", 8) == 0) {
-               int res = unpause_wiphy(sc, buf + 8);
-               if (res < 0)
-                       return res;
-       } else if (strncmp(buf, "select=", 7) == 0) {
-               int res = select_wiphy(sc, buf + 7);
-               if (res < 0)
-                       return res;
-       } else if (strncmp(buf, "schedule=", 9) == 0) {
-               int res = schedule_wiphy(sc, buf + 9);
-               if (res < 0)
-                       return res;
-       } else
-               return -EOPNOTSUPP;
-
-       return count;
-}
-
 static const struct file_operations fops_wiphy = {
        .read = read_file_wiphy,
-       .write = write_file_wiphy,
        .open = ath9k_debugfs_open,
        .owner = THIS_MODULE,
        .llseek = default_llseek,
 };
 
+#define PR_QNUM(_n) sc->tx.txq_map[_n]->axq_qnum
 #define PR(str, elem)                                                  \
        do {                                                            \
                len += snprintf(buf + len, size - len,                  \
                                "%s%13u%11u%10u%10u\n", str,            \
-               sc->debug.stats.txstats[WME_AC_BE].elem, \
-               sc->debug.stats.txstats[WME_AC_BK].elem, \
-               sc->debug.stats.txstats[WME_AC_VI].elem, \
-               sc->debug.stats.txstats[WME_AC_VO].elem); \
+               sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].elem, \
+               sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].elem, \
+               sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].elem, \
+               sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].elem); \
+               if (len >= size)                          \
+                       goto done;                        \
+} while(0)
+
+#define PRX(str, elem)                                                 \
+do {                                                                   \
+       len += snprintf(buf + len, size - len,                          \
+                       "%s%13u%11u%10u%10u\n", str,                    \
+                       (unsigned int)(sc->tx.txq_map[WME_AC_BE]->elem),        \
+                       (unsigned int)(sc->tx.txq_map[WME_AC_BK]->elem),        \
+                       (unsigned int)(sc->tx.txq_map[WME_AC_VI]->elem),        \
+                       (unsigned int)(sc->tx.txq_map[WME_AC_VO]->elem));       \
+       if (len >= size)                                                \
+               goto done;                                              \
 } while(0)
 
+#define PRQLE(str, elem)                                               \
+do {                                                                   \
+       len += snprintf(buf + len, size - len,                          \
+                       "%s%13i%11i%10i%10i\n", str,                    \
+                       list_empty(&sc->tx.txq_map[WME_AC_BE]->elem),   \
+                       list_empty(&sc->tx.txq_map[WME_AC_BK]->elem),   \
+                       list_empty(&sc->tx.txq_map[WME_AC_VI]->elem),   \
+                       list_empty(&sc->tx.txq_map[WME_AC_VO]->elem));  \
+       if (len >= size)                                                \
+               goto done;                                              \
+} while (0)
+
 static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
                              size_t count, loff_t *ppos)
 {
        struct ath_softc *sc = file->private_data;
        char *buf;
-       unsigned int len = 0, size = 2048;
+       unsigned int len = 0, size = 8000;
+       int i;
        ssize_t retval = 0;
+       char tmp[32];
 
        buf = kzalloc(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;
 
-       len += sprintf(buf, "%30s %10s%10s%10s\n\n", "BE", "BK", "VI", "VO");
+       len += sprintf(buf, "Num-Tx-Queues: %i  tx-queues-setup: 0x%x"
+                      " poll-work-seen: %u\n"
+                      "%30s %10s%10s%10s\n\n",
+                      ATH9K_NUM_TX_QUEUES, sc->tx.txqsetup,
+                      sc->tx_complete_poll_work_seen,
+                      "BE", "BK", "VI", "VO");
 
        PR("MPDUs Queued:    ", queued);
        PR("MPDUs Completed: ", completed);
        PR("Aggregates:      ", a_aggr);
-       PR("AMPDUs Queued:   ", a_queued);
+       PR("AMPDUs Queued HW:", a_queued_hw);
+       PR("AMPDUs Queued SW:", a_queued_sw);
        PR("AMPDUs Completed:", a_completed);
        PR("AMPDUs Retried:  ", a_retries);
        PR("AMPDUs XRetried: ", a_xretries);
@@ -618,6 +543,223 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
        PR("DELIM Underrun:  ", delim_underrun);
        PR("TX-Pkts-All:     ", tx_pkts_all);
        PR("TX-Bytes-All:    ", tx_bytes_all);
+       PR("hw-put-tx-buf:   ", puttxbuf);
+       PR("hw-tx-start:     ", txstart);
+       PR("hw-tx-proc-desc: ", txprocdesc);
+       len += snprintf(buf + len, size - len,
+                       "%s%11p%11p%10p%10p\n", "txq-memory-address:",
+                       &(sc->tx.txq_map[WME_AC_BE]),
+                       &(sc->tx.txq_map[WME_AC_BK]),
+                       &(sc->tx.txq_map[WME_AC_VI]),
+                       &(sc->tx.txq_map[WME_AC_VO]));
+       if (len >= size)
+               goto done;
+
+       PRX("axq-qnum:        ", axq_qnum);
+       PRX("axq-depth:       ", axq_depth);
+       PRX("axq-ampdu_depth: ", axq_ampdu_depth);
+       PRX("axq-stopped      ", stopped);
+       PRX("tx-in-progress   ", axq_tx_inprogress);
+       PRX("pending-frames   ", pending_frames);
+       PRX("txq_headidx:     ", txq_headidx);
+       PRX("txq_tailidx:     ", txq_headidx);
+
+       PRQLE("axq_q empty:       ", axq_q);
+       PRQLE("axq_acq empty:     ", axq_acq);
+       PRQLE("txq_fifo_pending:  ", txq_fifo_pending);
+       for (i = 0; i < ATH_TXFIFO_DEPTH; i++) {
+               snprintf(tmp, sizeof(tmp) - 1, "txq_fifo[%i] empty: ", i);
+               PRQLE(tmp, txq_fifo[i]);
+       }
+
+       /* Print out more detailed queue-info */
+       for (i = 0; i <= WME_AC_BK; i++) {
+               struct ath_txq *txq = &(sc->tx.txq[i]);
+               struct ath_atx_ac *ac;
+               struct ath_atx_tid *tid;
+               if (len >= size)
+                       goto done;
+               spin_lock_bh(&txq->axq_lock);
+               if (!list_empty(&txq->axq_acq)) {
+                       ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac,
+                                             list);
+                       len += snprintf(buf + len, size - len,
+                                       "txq[%i] first-ac: %p sched: %i\n",
+                                       i, ac, ac->sched);
+                       if (list_empty(&ac->tid_q) || (len >= size))
+                               goto done_for;
+                       tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
+                                              list);
+                       len += snprintf(buf + len, size - len,
+                                       " first-tid: %p sched: %i paused: %i\n",
+                                       tid, tid->sched, tid->paused);
+               }
+       done_for:
+               spin_unlock_bh(&txq->axq_lock);
+       }
+
+done:
+       if (len > size)
+               len = size;
+
+       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+       kfree(buf);
+
+       return retval;
+}
+
+static ssize_t read_file_stations(struct file *file, char __user *user_buf,
+                                 size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       char *buf;
+       unsigned int len = 0, size = 64000;
+       struct ath_node *an = NULL;
+       ssize_t retval = 0;
+       int q;
+
+       buf = kzalloc(size, GFP_KERNEL);
+       if (buf == NULL)
+               return -ENOMEM;
+
+       len += snprintf(buf + len, size - len,
+                       "Stations:\n"
+                       " tid: addr sched paused buf_q-empty an ac\n"
+                       " ac: addr sched tid_q-empty txq\n");
+
+       spin_lock(&sc->nodes_lock);
+       list_for_each_entry(an, &sc->nodes, list) {
+               len += snprintf(buf + len, size - len,
+                               "%pM\n", an->sta->addr);
+               if (len >= size)
+                       goto done;
+
+               for (q = 0; q < WME_NUM_TID; q++) {
+                       struct ath_atx_tid *tid = &(an->tid[q]);
+                       len += snprintf(buf + len, size - len,
+                                       " tid: %p %s %s %i %p %p\n",
+                                       tid, tid->sched ? "sched" : "idle",
+                                       tid->paused ? "paused" : "running",
+                                       list_empty(&tid->buf_q),
+                                       tid->an, tid->ac);
+                       if (len >= size)
+                               goto done;
+               }
+
+               for (q = 0; q < WME_NUM_AC; q++) {
+                       struct ath_atx_ac *ac = &(an->ac[q]);
+                       len += snprintf(buf + len, size - len,
+                                       " ac: %p %s %i %p\n",
+                                       ac, ac->sched ? "sched" : "idle",
+                                       list_empty(&ac->tid_q), ac->txq);
+                       if (len >= size)
+                               goto done;
+               }
+       }
+
+done:
+       spin_unlock(&sc->nodes_lock);
+       if (len > size)
+               len = size;
+
+       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+       kfree(buf);
+
+       return retval;
+}
+
+static ssize_t read_file_misc(struct file *file, char __user *user_buf,
+                             size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       struct ath_hw *ah = sc->sc_ah;
+       struct ieee80211_hw *hw = sc->hw;
+       char *buf;
+       unsigned int len = 0, size = 8000;
+       ssize_t retval = 0;
+       const char *tmp;
+       unsigned int reg;
+       struct ath9k_vif_iter_data iter_data;
+
+       ath9k_calculate_iter_data(hw, NULL, &iter_data);
+
+       buf = kzalloc(size, GFP_KERNEL);
+       if (buf == NULL)
+               return -ENOMEM;
+
+       switch (sc->sc_ah->opmode) {
+       case NL80211_IFTYPE_ADHOC:
+               tmp = "ADHOC";
+               break;
+       case NL80211_IFTYPE_MESH_POINT:
+               tmp = "MESH";
+               break;
+       case NL80211_IFTYPE_AP:
+               tmp = "AP";
+               break;
+       case NL80211_IFTYPE_STATION:
+               tmp = "STATION";
+               break;
+       default:
+               tmp = "???";
+               break;
+       }
+
+       len += snprintf(buf + len, size - len,
+                       "curbssid: %pM\n"
+                       "OP-Mode: %s(%i)\n"
+                       "Beacon-Timer-Register: 0x%x\n",
+                       common->curbssid,
+                       tmp, (int)(sc->sc_ah->opmode),
+                       REG_READ(ah, AR_BEACON_PERIOD));
+
+       reg = REG_READ(ah, AR_TIMER_MODE);
+       len += snprintf(buf + len, size - len, "Timer-Mode-Register: 0x%x (",
+                       reg);
+       if (reg & AR_TBTT_TIMER_EN)
+               len += snprintf(buf + len, size - len, "TBTT ");
+       if (reg & AR_DBA_TIMER_EN)
+               len += snprintf(buf + len, size - len, "DBA ");
+       if (reg & AR_SWBA_TIMER_EN)
+               len += snprintf(buf + len, size - len, "SWBA ");
+       if (reg & AR_HCF_TIMER_EN)
+               len += snprintf(buf + len, size - len, "HCF ");
+       if (reg & AR_TIM_TIMER_EN)
+               len += snprintf(buf + len, size - len, "TIM ");
+       if (reg & AR_DTIM_TIMER_EN)
+               len += snprintf(buf + len, size - len, "DTIM ");
+       len += snprintf(buf + len, size - len, ")\n");
+
+       reg = sc->sc_ah->imask;
+       len += snprintf(buf + len, size - len, "imask: 0x%x (", reg);
+       if (reg & ATH9K_INT_SWBA)
+               len += snprintf(buf + len, size - len, "SWBA ");
+       if (reg & ATH9K_INT_BMISS)
+               len += snprintf(buf + len, size - len, "BMISS ");
+       if (reg & ATH9K_INT_CST)
+               len += snprintf(buf + len, size - len, "CST ");
+       if (reg & ATH9K_INT_RX)
+               len += snprintf(buf + len, size - len, "RX ");
+       if (reg & ATH9K_INT_RXHP)
+               len += snprintf(buf + len, size - len, "RXHP ");
+       if (reg & ATH9K_INT_RXLP)
+               len += snprintf(buf + len, size - len, "RXLP ");
+       if (reg & ATH9K_INT_BB_WATCHDOG)
+               len += snprintf(buf + len, size - len, "BB_WATCHDOG ");
+       /* Other interrupt bits could be added here if needed. */
+       len += snprintf(buf + len, size - len, ")\n");
+
+       len += snprintf(buf + len, size - len,
+                       "VIF Counts: AP: %i STA: %i MESH: %i WDS: %i"
+                       " ADHOC: %i OTHER: %i nvifs: %hi beacon-vifs: %hi\n",
+                       iter_data.naps, iter_data.nstations, iter_data.nmeshes,
+                       iter_data.nwds, iter_data.nadhocs, iter_data.nothers,
+                       sc->nvifs, sc->nbcnvifs);
+
+       len += snprintf(buf + len, size - len,
+                       "Calculated-BSSID-Mask: %pM\n",
+                       iter_data.mask);
 
        if (len > size)
                len = size;
@@ -629,9 +771,9 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
 }
 
 void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
-                      struct ath_tx_status *ts)
+                      struct ath_tx_status *ts, struct ath_txq *txq)
 {
-       int qnum = skb_get_queue_mapping(bf->bf_mpdu);
+       int qnum = txq->axq_qnum;
 
        TX_STAT_INC(qnum, tx_pkts_all);
        sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
@@ -666,6 +808,20 @@ static const struct file_operations fops_xmit = {
        .llseek = default_llseek,
 };
 
+static const struct file_operations fops_stations = {
+       .read = read_file_stations,
+       .open = ath9k_debugfs_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+static const struct file_operations fops_misc = {
+       .read = read_file_misc,
+       .open = ath9k_debugfs_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
 static ssize_t read_file_recv(struct file *file, char __user *user_buf,
                              size_t count, loff_t *ppos)
 {
@@ -903,6 +1059,14 @@ int ath9k_init_debug(struct ath_hw *ah)
                        sc, &fops_xmit))
                goto err;
 
+       if (!debugfs_create_file("stations", S_IRUSR, sc->debug.debugfs_phy,
+                       sc, &fops_stations))
+               goto err;
+
+       if (!debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy,
+                       sc, &fops_misc))
+               goto err;
+
        if (!debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy,
                        sc, &fops_recv))
                goto err;
index 1e5078bd03444481b1e64794d9d7944398b91d80..59338de0ce19bdaef71d4a0a4c5b1a7594628eb6 100644 (file)
@@ -89,7 +89,8 @@ struct ath_interrupt_stats {
  * @queued: Total MPDUs (non-aggr) queued
  * @completed: Total MPDUs (non-aggr) completed
  * @a_aggr: Total no. of aggregates queued
- * @a_queued: Total AMPDUs queued
+ * @a_queued_hw: Total AMPDUs queued to hardware
+ * @a_queued_sw: Total AMPDUs queued to software queues
  * @a_completed: Total AMPDUs completed
  * @a_retries: No. of AMPDUs retried (SW)
  * @a_xretries: No. of AMPDUs dropped due to xretries
@@ -102,6 +103,9 @@ struct ath_interrupt_stats {
  * @desc_cfg_err: Descriptor configuration errors
  * @data_urn: TX data underrun errors
  * @delim_urn: TX delimiter underrun errors
+ * @puttxbuf: Number of times hardware was given a txbuf to write.
+ * @txstart: Number of times hardware was told to start tx.
+ * @txprocdesc: Number of times a tx descriptor was processed.
  */
 struct ath_tx_stats {
        u32 tx_pkts_all;
@@ -109,7 +113,8 @@ struct ath_tx_stats {
        u32 queued;
        u32 completed;
        u32 a_aggr;
-       u32 a_queued;
+       u32 a_queued_hw;
+       u32 a_queued_sw;
        u32 a_completed;
        u32 a_retries;
        u32 a_xretries;
@@ -119,6 +124,9 @@ struct ath_tx_stats {
        u32 desc_cfg_err;
        u32 data_underrun;
        u32 delim_underrun;
+       u32 puttxbuf;
+       u32 txstart;
+       u32 txprocdesc;
 };
 
 /**
@@ -167,7 +175,7 @@ int ath9k_init_debug(struct ath_hw *ah);
 
 void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
 void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
-                      struct ath_tx_status *ts);
+                      struct ath_tx_status *ts, struct ath_txq *txq);
 void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
 
 #else
@@ -184,7 +192,8 @@ static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
 
 static inline void ath_debug_stat_tx(struct ath_softc *sc,
                                     struct ath_buf *bf,
-                                    struct ath_tx_status *ts)
+                                    struct ath_tx_status *ts,
+                                    struct ath_txq *txq)
 {
 }
 
index d05163159572ab220a795257965d28f5ec2a73fe..8c18bed3a55890e3fbc9195bc37802aaab2a9015 100644 (file)
@@ -89,6 +89,38 @@ bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
        return false;
 }
 
+void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
+                                 int eep_start_loc, int size)
+{
+       int i = 0, j, addr;
+       u32 addrdata[8];
+       u32 data[8];
+
+       for (addr = 0; addr < size; addr++) {
+               addrdata[i] = AR5416_EEPROM_OFFSET +
+                       ((addr + eep_start_loc) << AR5416_EEPROM_S);
+               i++;
+               if (i == 8) {
+                       REG_READ_MULTI(ah, addrdata, data, i);
+
+                       for (j = 0; j < i; j++) {
+                               *eep_data = data[j];
+                               eep_data++;
+                       }
+                       i = 0;
+               }
+       }
+
+       if (i != 0) {
+               REG_READ_MULTI(ah, addrdata, data, i);
+
+               for (j = 0; j < i; j++) {
+                       *eep_data = data[j];
+                       eep_data++;
+               }
+       }
+}
+
 bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data)
 {
        return common->bus_ops->eeprom_read(common, off, data);
index 58e2ddc927a9878309c17cc3fb021e7466319c4d..bd82447f5b780b26df88c138191610c34e2a582b 100644 (file)
@@ -665,6 +665,8 @@ int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
 bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
                                    u16 *indexL, u16 *indexR);
 bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data);
+void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
+                                 int eep_start_loc, int size);
 void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
                             u8 *pVpdList, u16 numIntercepts,
                             u8 *pRetVpdList);
index fbdff7e4795299277c5ae43b5901f3985956114f..bc77a308c901eaf8ccd4c111f2600a754d0774b9 100644 (file)
@@ -27,19 +27,13 @@ static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah)
        return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF);
 }
 
-static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
-{
 #define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
+
+static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
+{
        struct ath_common *common = ath9k_hw_common(ah);
        u16 *eep_data = (u16 *)&ah->eeprom.map4k;
-       int addr, eep_start_loc = 0;
-
-       eep_start_loc = 64;
-
-       if (!ath9k_hw_use_flash(ah)) {
-               ath_dbg(common, ATH_DBG_EEPROM,
-                       "Reading from EEPROM, not flash\n");
-       }
+       int addr, eep_start_loc = 64;
 
        for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
                if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
@@ -51,9 +45,34 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
        }
 
        return true;
-#undef SIZE_EEPROM_4K
 }
 
+static bool __ath9k_hw_usb_4k_fill_eeprom(struct ath_hw *ah)
+{
+       u16 *eep_data = (u16 *)&ah->eeprom.map4k;
+
+       ath9k_hw_usb_gen_fill_eeprom(ah, eep_data, 64, SIZE_EEPROM_4K);
+
+       return true;
+}
+
+static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       if (!ath9k_hw_use_flash(ah)) {
+               ath_dbg(common, ATH_DBG_EEPROM,
+                       "Reading from EEPROM, not flash\n");
+       }
+
+       if (common->bus_ops->ath_bus_type == ATH_USB)
+               return __ath9k_hw_usb_4k_fill_eeprom(ah);
+       else
+               return __ath9k_hw_4k_fill_eeprom(ah);
+}
+
+#undef SIZE_EEPROM_4K
+
 static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
 {
 #define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
index 9b6bc8a953bc17f982e74a4ba7b54c41700f7b3e..8cd8333cc0865f71556e163f2ae9c3e4c4c3333a 100644 (file)
@@ -17,7 +17,7 @@
 #include "hw.h"
 #include "ar9002_phy.h"
 
-#define NUM_EEP_WORDS (sizeof(struct ar9287_eeprom) / sizeof(u16))
+#define SIZE_EEPROM_AR9287 (sizeof(struct ar9287_eeprom) / sizeof(u16))
 
 static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah)
 {
@@ -29,25 +29,15 @@ static int ath9k_hw_ar9287_get_eeprom_rev(struct ath_hw *ah)
        return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF;
 }
 
-static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
+static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
 {
        struct ar9287_eeprom *eep = &ah->eeprom.map9287;
        struct ath_common *common = ath9k_hw_common(ah);
        u16 *eep_data;
-       int addr, eep_start_loc;
+       int addr, eep_start_loc = AR9287_EEP_START_LOC;
        eep_data = (u16 *)eep;
 
-       if (common->bus_ops->ath_bus_type == ATH_USB)
-               eep_start_loc = AR9287_HTC_EEP_START_LOC;
-       else
-               eep_start_loc = AR9287_EEP_START_LOC;
-
-       if (!ath9k_hw_use_flash(ah)) {
-               ath_dbg(common, ATH_DBG_EEPROM,
-                       "Reading from EEPROM, not flash\n");
-       }
-
-       for (addr = 0; addr < NUM_EEP_WORDS; addr++) {
+       for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
                if (!ath9k_hw_nvram_read(common, addr + eep_start_loc,
                                         eep_data)) {
                        ath_dbg(common, ATH_DBG_EEPROM,
@@ -60,6 +50,31 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
        return true;
 }
 
+static bool __ath9k_hw_usb_ar9287_fill_eeprom(struct ath_hw *ah)
+{
+       u16 *eep_data = (u16 *)&ah->eeprom.map9287;
+
+       ath9k_hw_usb_gen_fill_eeprom(ah, eep_data,
+                                    AR9287_HTC_EEP_START_LOC,
+                                    SIZE_EEPROM_AR9287);
+       return true;
+}
+
+static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       if (!ath9k_hw_use_flash(ah)) {
+               ath_dbg(common, ATH_DBG_EEPROM,
+                       "Reading from EEPROM, not flash\n");
+       }
+
+       if (common->bus_ops->ath_bus_type == ATH_USB)
+               return __ath9k_hw_usb_ar9287_fill_eeprom(ah);
+       else
+               return __ath9k_hw_ar9287_fill_eeprom(ah);
+}
+
 static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
 {
        u32 sum = 0, el, integer;
@@ -86,7 +101,7 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
                                need_swap = true;
                                eepdata = (u16 *)(&ah->eeprom);
 
-                               for (addr = 0; addr < NUM_EEP_WORDS; addr++) {
+                               for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
                                        temp = swab16(*eepdata);
                                        *eepdata = temp;
                                        eepdata++;
index 749a93608664916f4c737bb15fa1d58fa03e204a..fccd87df7300b27a91ed896325af85a134e8a9b0 100644 (file)
@@ -86,9 +86,10 @@ static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah)
        return ((ah->eeprom.def.baseEepHeader.version) & 0xFFF);
 }
 
-static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
-{
 #define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16))
+
+static bool __ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
+{
        struct ath_common *common = ath9k_hw_common(ah);
        u16 *eep_data = (u16 *)&ah->eeprom.def;
        int addr, ar5416_eep_start_loc = 0x100;
@@ -103,9 +104,34 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
                eep_data++;
        }
        return true;
-#undef SIZE_EEPROM_DEF
 }
 
+static bool __ath9k_hw_usb_def_fill_eeprom(struct ath_hw *ah)
+{
+       u16 *eep_data = (u16 *)&ah->eeprom.def;
+
+       ath9k_hw_usb_gen_fill_eeprom(ah, eep_data,
+                                    0x100, SIZE_EEPROM_DEF);
+       return true;
+}
+
+static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       if (!ath9k_hw_use_flash(ah)) {
+               ath_dbg(common, ATH_DBG_EEPROM,
+                       "Reading from EEPROM, not flash\n");
+       }
+
+       if (common->bus_ops->ath_bus_type == ATH_USB)
+               return __ath9k_hw_usb_def_fill_eeprom(ah);
+       else
+               return __ath9k_hw_def_fill_eeprom(ah);
+}
+
+#undef SIZE_EEPROM_DEF
+
 static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
 {
        struct ar5416_eeprom_def *eep =
@@ -221,9 +247,9 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
        }
 
        /* Enable fixup for AR_AN_TOP2 if necessary */
-       if (AR_SREV_9280_20_OR_LATER(ah) &&
-           (eep->baseEepHeader.version & 0xff) > 0x0a &&
-           eep->baseEepHeader.pwdclkind == 0)
+       if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
+           ((eep->baseEepHeader.version & 0xff) > 0x0a) &&
+           (eep->baseEepHeader.pwdclkind == 0))
                ah->need_an_top2_fixup = 1;
 
        if ((common->bus_ops->ath_bus_type == ATH_USB) &&
index 1337640692463428c6599acfd2b0556fa06a7ff9..0fb8f8ac275aceeaac60bc595cc0657b6e8ce8d9 100644 (file)
 /*      LED functions          */
 /********************************/
 
-static void ath_led_blink_work(struct work_struct *work)
-{
-       struct ath_softc *sc = container_of(work, struct ath_softc,
-                                           ath_led_blink_work.work);
-
-       if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
-               return;
-
-       if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
-           (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
-               ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
-       else
-               ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
-                                 (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
-
-       ieee80211_queue_delayed_work(sc->hw,
-                                    &sc->ath_led_blink_work,
-                                    (sc->sc_flags & SC_OP_LED_ON) ?
-                                       msecs_to_jiffies(sc->led_off_duration) :
-                                       msecs_to_jiffies(sc->led_on_duration));
-
-       sc->led_on_duration = sc->led_on_cnt ?
-                       max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
-                       ATH_LED_ON_DURATION_IDLE;
-       sc->led_off_duration = sc->led_off_cnt ?
-                       max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
-                       ATH_LED_OFF_DURATION_IDLE;
-       sc->led_on_cnt = sc->led_off_cnt = 0;
-       if (sc->sc_flags & SC_OP_LED_ON)
-               sc->sc_flags &= ~SC_OP_LED_ON;
-       else
-               sc->sc_flags |= SC_OP_LED_ON;
-}
-
+#ifdef CONFIG_MAC80211_LEDS
 static void ath_led_brightness(struct led_classdev *led_cdev,
                               enum led_brightness brightness)
 {
-       struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
-       struct ath_softc *sc = led->sc;
-
-       switch (brightness) {
-       case LED_OFF:
-               if (led->led_type == ATH_LED_ASSOC ||
-                   led->led_type == ATH_LED_RADIO) {
-                       ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
-                               (led->led_type == ATH_LED_RADIO));
-                       sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
-                       if (led->led_type == ATH_LED_RADIO)
-                               sc->sc_flags &= ~SC_OP_LED_ON;
-               } else {
-                       sc->led_off_cnt++;
-               }
-               break;
-       case LED_FULL:
-               if (led->led_type == ATH_LED_ASSOC) {
-                       sc->sc_flags |= SC_OP_LED_ASSOCIATED;
-                       if (led_blink)
-                               ieee80211_queue_delayed_work(sc->hw,
-                                                    &sc->ath_led_blink_work, 0);
-               } else if (led->led_type == ATH_LED_RADIO) {
-                       ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
-                       sc->sc_flags |= SC_OP_LED_ON;
-               } else {
-                       sc->led_on_cnt++;
-               }
-               break;
-       default:
-               break;
-       }
-}
-
-static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
-                           char *trigger)
-{
-       int ret;
-
-       led->sc = sc;
-       led->led_cdev.name = led->name;
-       led->led_cdev.default_trigger = trigger;
-       led->led_cdev.brightness_set = ath_led_brightness;
-
-       ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
-       if (ret)
-               ath_err(ath9k_hw_common(sc->sc_ah),
-                       "Failed to register led:%s", led->name);
-       else
-               led->registered = 1;
-       return ret;
-}
-
-static void ath_unregister_led(struct ath_led *led)
-{
-       if (led->registered) {
-               led_classdev_unregister(&led->led_cdev);
-               led->registered = 0;
-       }
+       struct ath_softc *sc = container_of(led_cdev, struct ath_softc, led_cdev);
+       ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, (brightness == LED_OFF));
 }
 
 void ath_deinit_leds(struct ath_softc *sc)
 {
-       ath_unregister_led(&sc->assoc_led);
-       sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
-       ath_unregister_led(&sc->tx_led);
-       ath_unregister_led(&sc->rx_led);
-       ath_unregister_led(&sc->radio_led);
-       ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
+       if (!sc->led_registered)
+               return;
+
+       ath_led_brightness(&sc->led_cdev, LED_OFF);
+       led_classdev_unregister(&sc->led_cdev);
 }
 
 void ath_init_leds(struct ath_softc *sc)
 {
-       char *trigger;
        int ret;
 
        if (AR_SREV_9287(sc->sc_ah))
                sc->sc_ah->led_pin = ATH_LED_PIN_9287;
+       else if (AR_SREV_9485(sc->sc_ah))
+               sc->sc_ah->led_pin = ATH_LED_PIN_9485;
        else
                sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
 
@@ -144,48 +54,22 @@ void ath_init_leds(struct ath_softc *sc)
        /* LED off, active low */
        ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
 
-       if (led_blink)
-               INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
-
-       trigger = ieee80211_get_radio_led_name(sc->hw);
-       snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
-               "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
-       ret = ath_register_led(sc, &sc->radio_led, trigger);
-       sc->radio_led.led_type = ATH_LED_RADIO;
-       if (ret)
-               goto fail;
-
-       trigger = ieee80211_get_assoc_led_name(sc->hw);
-       snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
-               "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
-       ret = ath_register_led(sc, &sc->assoc_led, trigger);
-       sc->assoc_led.led_type = ATH_LED_ASSOC;
-       if (ret)
-               goto fail;
-
-       trigger = ieee80211_get_tx_led_name(sc->hw);
-       snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
-               "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
-       ret = ath_register_led(sc, &sc->tx_led, trigger);
-       sc->tx_led.led_type = ATH_LED_TX;
-       if (ret)
-               goto fail;
-
-       trigger = ieee80211_get_rx_led_name(sc->hw);
-       snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
-               "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
-       ret = ath_register_led(sc, &sc->rx_led, trigger);
-       sc->rx_led.led_type = ATH_LED_RX;
-       if (ret)
-               goto fail;
-
-       return;
-
-fail:
-       if (led_blink)
-               cancel_delayed_work_sync(&sc->ath_led_blink_work);
-       ath_deinit_leds(sc);
+       if (!led_blink)
+               sc->led_cdev.default_trigger =
+                       ieee80211_get_radio_led_name(sc->hw);
+
+       snprintf(sc->led_name, sizeof(sc->led_name),
+               "ath9k-%s", wiphy_name(sc->hw->wiphy));
+       sc->led_cdev.name = sc->led_name;
+       sc->led_cdev.brightness_set = ath_led_brightness;
+
+       ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &sc->led_cdev);
+       if (ret < 0)
+               return;
+
+       sc->led_registered = true;
 }
+#endif
 
 /*******************/
 /*     Rfkill     */
@@ -201,8 +85,7 @@ static bool ath_is_rfkill_set(struct ath_softc *sc)
 
 void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        bool blocked = !!ath_is_rfkill_set(sc);
 
        wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
index 07b1633b7f3ffe3beb4a41af65e8947eb1e9b30a..f1b8af64569cbdeea7d80316d071aea329eab494 100644 (file)
@@ -52,6 +52,9 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
        { USB_DEVICE(0x083A, 0xA704),
          .driver_info = AR9280_USB },  /* SMC Networks */
 
+       { USB_DEVICE(0x0cf3, 0x20ff),
+         .driver_info = STORAGE_DEVICE },
+
        { },
 };
 
@@ -914,13 +917,11 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev, u32 drv_info)
        if (ret) {
                dev_err(&hif_dev->udev->dev,
                        "ath9k_htc: Unable to allocate URBs\n");
-               goto err_urb;
+               goto err_fw_download;
        }
 
        return 0;
 
-err_urb:
-       ath9k_hif_usb_dealloc_urbs(hif_dev);
 err_fw_download:
        release_firmware(hif_dev->firmware);
 err_fw_req:
@@ -935,6 +936,61 @@ static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev)
                release_firmware(hif_dev->firmware);
 }
 
+/*
+ * An exact copy of the function from zd1211rw.
+ */
+static int send_eject_command(struct usb_interface *interface)
+{
+       struct usb_device *udev = interface_to_usbdev(interface);
+       struct usb_host_interface *iface_desc = &interface->altsetting[0];
+       struct usb_endpoint_descriptor *endpoint;
+       unsigned char *cmd;
+       u8 bulk_out_ep;
+       int r;
+
+       /* Find bulk out endpoint */
+       for (r = 1; r >= 0; r--) {
+               endpoint = &iface_desc->endpoint[r].desc;
+               if (usb_endpoint_dir_out(endpoint) &&
+                   usb_endpoint_xfer_bulk(endpoint)) {
+                       bulk_out_ep = endpoint->bEndpointAddress;
+                       break;
+               }
+       }
+       if (r == -1) {
+               dev_err(&udev->dev,
+                       "ath9k_htc: Could not find bulk out endpoint\n");
+               return -ENODEV;
+       }
+
+       cmd = kzalloc(31, GFP_KERNEL);
+       if (cmd == NULL)
+               return -ENODEV;
+
+       /* USB bulk command block */
+       cmd[0] = 0x55;  /* bulk command signature */
+       cmd[1] = 0x53;  /* bulk command signature */
+       cmd[2] = 0x42;  /* bulk command signature */
+       cmd[3] = 0x43;  /* bulk command signature */
+       cmd[14] = 6;    /* command length */
+
+       cmd[15] = 0x1b; /* SCSI command: START STOP UNIT */
+       cmd[19] = 0x2;  /* eject disc */
+
+       dev_info(&udev->dev, "Ejecting storage device...\n");
+       r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, bulk_out_ep),
+               cmd, 31, NULL, 2000);
+       kfree(cmd);
+       if (r)
+               return r;
+
+       /* At this point, the device disconnects and reconnects with the real
+        * ID numbers. */
+
+       usb_set_intfdata(interface, NULL);
+       return 0;
+}
+
 static int ath9k_hif_usb_probe(struct usb_interface *interface,
                               const struct usb_device_id *id)
 {
@@ -942,6 +998,9 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
        struct hif_device_usb *hif_dev;
        int ret = 0;
 
+       if (id->driver_info == STORAGE_DEVICE)
+               return send_eject_command(interface);
+
        hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL);
        if (!hif_dev) {
                ret = -ENOMEM;
@@ -1028,12 +1087,13 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
        struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
        bool unplugged = (udev->state == USB_STATE_NOTATTACHED) ? true : false;
 
-       if (hif_dev) {
-               ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
-               ath9k_htc_hw_free(hif_dev->htc_handle);
-               ath9k_hif_usb_dev_deinit(hif_dev);
-               usb_set_intfdata(interface, NULL);
-       }
+       if (!hif_dev)
+               return;
+
+       ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
+       ath9k_htc_hw_free(hif_dev->htc_handle);
+       ath9k_hif_usb_dev_deinit(hif_dev);
+       usb_set_intfdata(interface, NULL);
 
        if (!unplugged && (hif_dev->flags & HIF_USB_START))
                ath9k_hif_usb_reboot(udev);
index 780ac5eac501d401470e6fdb99d14c5f085f4cc7..753a245c5ad117201c89e74799110c6e4fe88b2c 100644 (file)
@@ -32,6 +32,7 @@
 #include "wmi.h"
 
 #define ATH_STA_SHORT_CALINTERVAL 1000    /* 1 second */
+#define ATH_AP_SHORT_CALINTERVAL  100     /* 100 ms */
 #define ATH_ANI_POLLINTERVAL      100     /* 100 ms */
 #define ATH_LONG_CALINTERVAL      30000   /* 30 seconds */
 #define ATH_RESTART_CALINTERVAL   1200000 /* 20 minutes */
@@ -204,8 +205,50 @@ struct ath9k_htc_target_stats {
        __be32 ht_tx_xretries;
 } __packed;
 
+#define ATH9K_HTC_MAX_VIF 2
+#define ATH9K_HTC_MAX_BCN_VIF 2
+
+#define INC_VIF(_priv, _type) do {             \
+               switch (_type) {                \
+               case NL80211_IFTYPE_STATION:    \
+                       _priv->num_sta_vif++;   \
+                       break;                  \
+               case NL80211_IFTYPE_ADHOC:      \
+                       _priv->num_ibss_vif++;  \
+                       break;                  \
+               case NL80211_IFTYPE_AP:         \
+                       _priv->num_ap_vif++;    \
+                       break;                  \
+               default:                        \
+                       break;                  \
+               }                               \
+       } while (0)
+
+#define DEC_VIF(_priv, _type) do {             \
+               switch (_type) {                \
+               case NL80211_IFTYPE_STATION:    \
+                       _priv->num_sta_vif--;   \
+                       break;                  \
+               case NL80211_IFTYPE_ADHOC:      \
+                       _priv->num_ibss_vif--;  \
+                       break;                  \
+               case NL80211_IFTYPE_AP:         \
+                       _priv->num_ap_vif--;    \
+                       break;                  \
+               default:                        \
+                       break;                  \
+               }                               \
+       } while (0)
+
 struct ath9k_htc_vif {
        u8 index;
+       u16 seq_no;
+       bool beacon_configured;
+};
+
+struct ath9k_vif_iter_data {
+       const u8 *hw_macaddr;
+       u8 mask[ETH_ALEN];
 };
 
 #define ATH9K_HTC_MAX_STA 8
@@ -310,10 +353,8 @@ struct ath_led {
 
 struct htc_beacon_config {
        u16 beacon_interval;
-       u16 listen_interval;
        u16 dtim_period;
        u16 bmiss_timeout;
-       u8 dtim_count;
 };
 
 struct ath_btcoex {
@@ -333,13 +374,12 @@ void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv);
 #define OP_SCANNING               BIT(1)
 #define OP_LED_ASSOCIATED         BIT(2)
 #define OP_LED_ON                 BIT(3)
-#define OP_PREAMBLE_SHORT         BIT(4)
-#define OP_PROTECT_ENABLE         BIT(5)
-#define OP_ASSOCIATED             BIT(6)
-#define OP_ENABLE_BEACON          BIT(7)
-#define OP_LED_DEINIT             BIT(8)
-#define OP_BT_PRIORITY_DETECTED    BIT(9)
-#define OP_BT_SCAN                 BIT(10)
+#define OP_ENABLE_BEACON          BIT(4)
+#define OP_LED_DEINIT             BIT(5)
+#define OP_BT_PRIORITY_DETECTED    BIT(6)
+#define OP_BT_SCAN                 BIT(7)
+#define OP_ANI_RUNNING             BIT(8)
+#define OP_TSF_RESET               BIT(9)
 
 struct ath9k_htc_priv {
        struct device *dev;
@@ -358,15 +398,24 @@ struct ath9k_htc_priv {
        enum htc_endpoint_id data_vi_ep;
        enum htc_endpoint_id data_vo_ep;
 
+       u8 vif_slot;
+       u8 mon_vif_idx;
+       u8 sta_slot;
+       u8 vif_sta_pos[ATH9K_HTC_MAX_VIF];
+       u8 num_ibss_vif;
+       u8 num_sta_vif;
+       u8 num_ap_vif;
+
        u16 op_flags;
        u16 curtxpow;
        u16 txpowlimit;
        u16 nvifs;
        u16 nstations;
-       u16 seq_no;
        u32 bmiss_cnt;
+       bool rearm_ani;
+       bool reconfig_beacon;
 
-       struct ath9k_hw_cal_data caldata[ATH9K_NUM_CHANNELS];
+       struct ath9k_hw_cal_data caldata;
 
        spinlock_t beacon_lock;
 
@@ -382,7 +431,7 @@ struct ath9k_htc_priv {
        struct ath9k_htc_rx rx;
        struct tasklet_struct tx_tasklet;
        struct sk_buff_head tx_queue;
-       struct delayed_work ath9k_ani_work;
+       struct delayed_work ani_work;
        struct work_struct ps_work;
        struct work_struct fatal_work;
 
@@ -424,6 +473,7 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv);
 void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv);
 void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
                             struct ieee80211_vif *vif);
+void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv);
 void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending);
 
 void ath9k_htc_rxep(void *priv, struct sk_buff *skb,
@@ -436,8 +486,9 @@ void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
 int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv);
 void ath9k_htc_station_work(struct work_struct *work);
 void ath9k_htc_aggr_work(struct work_struct *work);
-void ath9k_ani_work(struct work_struct *work);;
-void ath_start_ani(struct ath9k_htc_priv *priv);
+void ath9k_htc_ani_work(struct work_struct *work);
+void ath9k_htc_start_ani(struct ath9k_htc_priv *priv);
+void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv);
 
 int ath9k_tx_init(struct ath9k_htc_priv *priv);
 void ath9k_tx_tasklet(unsigned long data);
@@ -460,7 +511,6 @@ void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv);
 void ath9k_ps_work(struct work_struct *work);
 bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
                        enum ath9k_power_mode mode);
-void ath_update_txpow(struct ath9k_htc_priv *priv);
 
 void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
 void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
index 87cc65a78a3f1509bd2032200e82495e6858251b..8d1d8792436d201fd103e3e8d7343b4e82406e58 100644 (file)
@@ -123,8 +123,9 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
        /* TSF out of range threshold fixed at 1 second */
        bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
 
-       ath_dbg(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
-       ath_dbg(common, ATH_DBG_BEACON,
+       ath_dbg(common, ATH_DBG_CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
+               intval, tsf, tsftu);
+       ath_dbg(common, ATH_DBG_CONFIG,
                "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
                bs.bs_bmissthreshold, bs.bs_sleepduration,
                bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
@@ -138,25 +139,81 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
        WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
 }
 
+static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
+                                      struct htc_beacon_config *bss_conf)
+{
+       struct ath_common *common = ath9k_hw_common(priv->ah);
+       enum ath9k_int imask = 0;
+       u32 nexttbtt, intval, tsftu;
+       __be32 htc_imask = 0;
+       int ret;
+       u8 cmd_rsp;
+       u64 tsf;
+
+       intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
+       intval /= ATH9K_HTC_MAX_BCN_VIF;
+       nexttbtt = intval;
+
+       if (priv->op_flags & OP_TSF_RESET) {
+               intval |= ATH9K_BEACON_RESET_TSF;
+               priv->op_flags &= ~OP_TSF_RESET;
+       } else {
+               /*
+                * Pull nexttbtt forward to reflect the current TSF.
+                */
+               tsf = ath9k_hw_gettsf64(priv->ah);
+               tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
+               do {
+                       nexttbtt += intval;
+               } while (nexttbtt < tsftu);
+       }
+
+       intval |= ATH9K_BEACON_ENA;
+
+       if (priv->op_flags & OP_ENABLE_BEACON)
+               imask |= ATH9K_INT_SWBA;
+
+       ath_dbg(common, ATH_DBG_CONFIG,
+               "AP Beacon config, intval: %d, nexttbtt: %u imask: 0x%x\n",
+               bss_conf->beacon_interval, nexttbtt, imask);
+
+       WMI_CMD(WMI_DISABLE_INTR_CMDID);
+       ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
+       priv->bmiss_cnt = 0;
+       htc_imask = cpu_to_be32(imask);
+       WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
+}
+
 static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
                                          struct htc_beacon_config *bss_conf)
 {
        struct ath_common *common = ath9k_hw_common(priv->ah);
        enum ath9k_int imask = 0;
-       u32 nexttbtt, intval;
+       u32 nexttbtt, intval, tsftu;
        __be32 htc_imask = 0;
        int ret;
        u8 cmd_rsp;
+       u64 tsf;
 
        intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
        nexttbtt = intval;
+
+       /*
+        * Pull nexttbtt forward to reflect the current TSF.
+        */
+       tsf = ath9k_hw_gettsf64(priv->ah);
+       tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
+       do {
+               nexttbtt += intval;
+       } while (nexttbtt < tsftu);
+
        intval |= ATH9K_BEACON_ENA;
        if (priv->op_flags & OP_ENABLE_BEACON)
                imask |= ATH9K_INT_SWBA;
 
-       ath_dbg(common, ATH_DBG_BEACON,
-               "IBSS Beacon config, intval: %d, imask: 0x%x\n",
-               bss_conf->beacon_interval, imask);
+       ath_dbg(common, ATH_DBG_CONFIG,
+               "IBSS Beacon config, intval: %d, nexttbtt: %u, imask: 0x%x\n",
+               bss_conf->beacon_interval, nexttbtt, imask);
 
        WMI_CMD(WMI_DISABLE_INTR_CMDID);
        ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
@@ -207,9 +264,9 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending)
        if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
                struct ieee80211_hdr *hdr =
                        (struct ieee80211_hdr *) beacon->data;
-               priv->seq_no += 0x10;
+               avp->seq_no += 0x10;
                hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
-               hdr->seq_ctrl |= cpu_to_le16(priv->seq_no);
+               hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
        }
 
        tx_ctl.type = ATH9K_HTC_NORMAL;
@@ -253,30 +310,123 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
        }
 }
 
+static void ath9k_htc_beacon_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+       bool *beacon_configured = (bool *)data;
+       struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
+
+       if (vif->type == NL80211_IFTYPE_STATION &&
+           avp->beacon_configured)
+               *beacon_configured = true;
+}
+
+static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
+                                         struct ieee80211_vif *vif)
+{
+       struct ath_common *common = ath9k_hw_common(priv->ah);
+       struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+       bool beacon_configured;
+
+       /*
+        * Changing the beacon interval when multiple AP interfaces
+        * are configured will affect beacon transmission of all
+        * of them.
+        */
+       if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
+           (priv->num_ap_vif > 1) &&
+           (vif->type == NL80211_IFTYPE_AP) &&
+           (cur_conf->beacon_interval != bss_conf->beacon_int)) {
+               ath_dbg(common, ATH_DBG_CONFIG,
+                       "Changing beacon interval of multiple AP interfaces!\n");
+               return false;
+       }
+
+       /*
+        * If the HW is operating in AP mode, any new station interfaces that
+        * are added cannot change the beacon parameters.
+        */
+       if (priv->num_ap_vif &&
+           (vif->type != NL80211_IFTYPE_AP)) {
+               ath_dbg(common, ATH_DBG_CONFIG,
+                       "HW in AP mode, cannot set STA beacon parameters\n");
+               return false;
+       }
+
+       /*
+        * The beacon parameters are configured only for the first
+        * station interface.
+        */
+       if ((priv->ah->opmode == NL80211_IFTYPE_STATION) &&
+           (priv->num_sta_vif > 1) &&
+           (vif->type == NL80211_IFTYPE_STATION)) {
+               beacon_configured = false;
+               ieee80211_iterate_active_interfaces_atomic(priv->hw,
+                                                          ath9k_htc_beacon_iter,
+                                                          &beacon_configured);
+
+               if (beacon_configured) {
+                       ath_dbg(common, ATH_DBG_CONFIG,
+                               "Beacon already configured for a station interface\n");
+                       return false;
+               }
+       }
+
+       return true;
+}
+
 void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
                             struct ieee80211_vif *vif)
 {
        struct ath_common *common = ath9k_hw_common(priv->ah);
        struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
        struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+       struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
+
+       if (!ath9k_htc_check_beacon_config(priv, vif))
+               return;
 
        cur_conf->beacon_interval = bss_conf->beacon_int;
        if (cur_conf->beacon_interval == 0)
                cur_conf->beacon_interval = 100;
 
        cur_conf->dtim_period = bss_conf->dtim_period;
-       cur_conf->listen_interval = 1;
-       cur_conf->dtim_count = 1;
        cur_conf->bmiss_timeout =
                ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
 
        switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               ath9k_htc_beacon_config_sta(priv, cur_conf);
+               avp->beacon_configured = true;
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               ath9k_htc_beacon_config_adhoc(priv, cur_conf);
+               break;
+       case NL80211_IFTYPE_AP:
+               ath9k_htc_beacon_config_ap(priv, cur_conf);
+               break;
+       default:
+               ath_dbg(common, ATH_DBG_CONFIG,
+                       "Unsupported beaconing mode\n");
+               return;
+       }
+}
+
+void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv)
+{
+       struct ath_common *common = ath9k_hw_common(priv->ah);
+       struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+
+       switch (priv->ah->opmode) {
        case NL80211_IFTYPE_STATION:
                ath9k_htc_beacon_config_sta(priv, cur_conf);
                break;
        case NL80211_IFTYPE_ADHOC:
                ath9k_htc_beacon_config_adhoc(priv, cur_conf);
                break;
+       case NL80211_IFTYPE_AP:
+               ath9k_htc_beacon_config_ap(priv, cur_conf);
+               break;
        default:
                ath_dbg(common, ATH_DBG_CONFIG,
                        "Unsupported beaconing mode\n");
index fe70f67aa088d12c6cf75e58889d49d903272e28..7e630a81b4536e9f1786f6126577978b999a3d30 100644 (file)
@@ -389,7 +389,8 @@ void ath9k_htc_radio_enable(struct ieee80211_hw *hw)
                        ret, ah->curchan->channel);
        }
 
-       ath_update_txpow(priv);
+       ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+                              &priv->curtxpow);
 
        /* Start RX */
        WMI_CMD(WMI_START_RECV_CMDID);
index 0352f0994caa16f8480fecee5b52d5816b5f56ce..fc67c937e172f6200ef366dfa8b1d0dbfaa7a4d8 100644 (file)
@@ -294,6 +294,34 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
        return be32_to_cpu(val);
 }
 
+static void ath9k_multi_regread(void *hw_priv, u32 *addr,
+                               u32 *val, u16 count)
+{
+       struct ath_hw *ah = (struct ath_hw *) hw_priv;
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+       __be32 tmpaddr[8];
+       __be32 tmpval[8];
+       int i, ret;
+
+       for (i = 0; i < count; i++)
+               tmpaddr[i] = cpu_to_be32(addr[i]);
+
+       ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
+                           (u8 *)tmpaddr, sizeof(u32) * count,
+                           (u8 *)tmpval, sizeof(u32) * count,
+                           100);
+       if (unlikely(ret)) {
+               ath_dbg(common, ATH_DBG_WMI,
+                       "Multiple REGISTER READ FAILED (count: %d)\n", count);
+       }
+
+       for (i = 0; i < count; i++)
+               val[i] = be32_to_cpu(tmpval[i]);
+}
+
 static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
 {
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
@@ -404,6 +432,7 @@ static void ath9k_regwrite_flush(void *hw_priv)
 
 static const struct ath_ops ath9k_common_ops = {
        .read = ath9k_regread,
+       .multi_read = ath9k_multi_regread,
        .write = ath9k_regwrite,
        .enable_write_buffer = ath9k_enable_regwrite_buffer,
        .write_flush = ath9k_regwrite_flush,
@@ -650,7 +679,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
                     (unsigned long)priv);
        tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet,
                     (unsigned long)priv);
-       INIT_DELAYED_WORK(&priv->ath9k_ani_work, ath9k_ani_work);
+       INIT_DELAYED_WORK(&priv->ani_work, ath9k_htc_ani_work);
        INIT_WORK(&priv->ps_work, ath9k_ps_work);
        INIT_WORK(&priv->fatal_work, ath9k_fatal_work);
 
@@ -758,6 +787,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
        struct ath_hw *ah;
        int error = 0;
        struct ath_regulatory *reg;
+       char hw_name[64];
 
        /* Bring up device */
        error = ath9k_init_priv(priv, devid, product, drv_info);
@@ -798,6 +828,22 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
                        goto err_world;
        }
 
+       ath_dbg(common, ATH_DBG_CONFIG,
+               "WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, "
+               "BE:%d, BK:%d, VI:%d, VO:%d\n",
+               priv->wmi_cmd_ep,
+               priv->beacon_ep,
+               priv->cab_ep,
+               priv->uapsd_ep,
+               priv->mgmt_ep,
+               priv->data_be_ep,
+               priv->data_bk_ep,
+               priv->data_vi_ep,
+               priv->data_vo_ep);
+
+       ath9k_hw_name(priv->ah, hw_name, sizeof(hw_name));
+       wiphy_info(hw->wiphy, "%s\n", hw_name);
+
        ath9k_init_leds(priv);
        ath9k_start_rfkill_poll(priv);
 
index 6bb59958f71e471c58466bb8d1f3c43b8bc0d0d7..db8c0c044e9e8498a97dc4788906624b7087c531 100644 (file)
@@ -24,17 +24,6 @@ static struct dentry *ath9k_debugfs_root;
 /* Utilities */
 /*************/
 
-void ath_update_txpow(struct ath9k_htc_priv *priv)
-{
-       struct ath_hw *ah = priv->ah;
-
-       if (priv->curtxpow != priv->txpowlimit) {
-               ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit, false);
-               /* read back in case value is clamped */
-               priv->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
-       }
-}
-
 /* HACK Alert: Use 11NG for 2.4, use 11NA for 5 */
 static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
                                              struct ath9k_channel *ichan)
@@ -116,12 +105,88 @@ void ath9k_ps_work(struct work_struct *work)
        ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
 }
 
+static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct ath9k_htc_priv *priv = data;
+       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+       if ((vif->type == NL80211_IFTYPE_AP) && bss_conf->enable_beacon)
+               priv->reconfig_beacon = true;
+
+       if (bss_conf->assoc) {
+               priv->rearm_ani = true;
+               priv->reconfig_beacon = true;
+       }
+}
+
+static void ath9k_htc_vif_reconfig(struct ath9k_htc_priv *priv)
+{
+       priv->rearm_ani = false;
+       priv->reconfig_beacon = false;
+
+       ieee80211_iterate_active_interfaces_atomic(priv->hw,
+                                                  ath9k_htc_vif_iter, priv);
+       if (priv->rearm_ani)
+               ath9k_htc_start_ani(priv);
+
+       if (priv->reconfig_beacon) {
+               ath9k_htc_ps_wakeup(priv);
+               ath9k_htc_beacon_reconfig(priv);
+               ath9k_htc_ps_restore(priv);
+       }
+}
+
+static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct ath9k_vif_iter_data *iter_data = data;
+       int i;
+
+       for (i = 0; i < ETH_ALEN; i++)
+               iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
+}
+
+static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
+                                    struct ieee80211_vif *vif)
+{
+       struct ath_common *common = ath9k_hw_common(priv->ah);
+       struct ath9k_vif_iter_data iter_data;
+
+       /*
+        * Use the hardware MAC address as reference, the hardware uses it
+        * together with the BSSID mask when matching addresses.
+        */
+       iter_data.hw_macaddr = common->macaddr;
+       memset(&iter_data.mask, 0xff, ETH_ALEN);
+
+       if (vif)
+               ath9k_htc_bssid_iter(&iter_data, vif->addr, vif);
+
+       /* Get list of all active MAC addresses */
+       ieee80211_iterate_active_interfaces_atomic(priv->hw, ath9k_htc_bssid_iter,
+                                                  &iter_data);
+
+       memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
+       ath_hw_setbssidmask(common);
+}
+
+static void ath9k_htc_set_opmode(struct ath9k_htc_priv *priv)
+{
+       if (priv->num_ibss_vif)
+               priv->ah->opmode = NL80211_IFTYPE_ADHOC;
+       else if (priv->num_ap_vif)
+               priv->ah->opmode = NL80211_IFTYPE_AP;
+       else
+               priv->ah->opmode = NL80211_IFTYPE_STATION;
+
+       ath9k_hw_setopmode(priv->ah);
+}
+
 void ath9k_htc_reset(struct ath9k_htc_priv *priv)
 {
        struct ath_hw *ah = priv->ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_channel *channel = priv->hw->conf.channel;
-       struct ath9k_hw_cal_data *caldata;
+       struct ath9k_hw_cal_data *caldata = NULL;
        enum htc_phymode mode;
        __be16 htc_mode;
        u8 cmd_rsp;
@@ -130,16 +195,14 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
        mutex_lock(&priv->mutex);
        ath9k_htc_ps_wakeup(priv);
 
-       if (priv->op_flags & OP_ASSOCIATED)
-               cancel_delayed_work_sync(&priv->ath9k_ani_work);
-
+       ath9k_htc_stop_ani(priv);
        ieee80211_stop_queues(priv->hw);
        htc_stop(priv->htc);
        WMI_CMD(WMI_DISABLE_INTR_CMDID);
        WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
        WMI_CMD(WMI_STOP_RECV_CMDID);
 
-       caldata = &priv->caldata[channel->hw_value];
+       caldata = &priv->caldata;
        ret = ath9k_hw_reset(ah, ah->curchan, caldata, false);
        if (ret) {
                ath_err(common,
@@ -147,7 +210,8 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
                        channel->center_freq, ret);
        }
 
-       ath_update_txpow(priv);
+       ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+                              &priv->curtxpow);
 
        WMI_CMD(WMI_START_RECV_CMDID);
        ath9k_host_rx_init(priv);
@@ -158,12 +222,7 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
 
        WMI_CMD(WMI_ENABLE_INTR_CMDID);
        htc_start(priv->htc);
-
-       if (priv->op_flags & OP_ASSOCIATED) {
-               ath9k_htc_beacon_config(priv, priv->vif);
-               ath_start_ani(priv);
-       }
-
+       ath9k_htc_vif_reconfig(priv);
        ieee80211_wake_queues(priv->hw);
 
        ath9k_htc_ps_restore(priv);
@@ -179,7 +238,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
        struct ieee80211_conf *conf = &common->hw->conf;
        bool fastcc;
        struct ieee80211_channel *channel = hw->conf.channel;
-       struct ath9k_hw_cal_data *caldata;
+       struct ath9k_hw_cal_data *caldata = NULL;
        enum htc_phymode mode;
        __be16 htc_mode;
        u8 cmd_rsp;
@@ -202,7 +261,8 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
                channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf),
                fastcc);
 
-       caldata = &priv->caldata[channel->hw_value];
+       if (!fastcc)
+               caldata = &priv->caldata;
        ret = ath9k_hw_reset(ah, hchan, caldata, fastcc);
        if (ret) {
                ath_err(common,
@@ -211,7 +271,8 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
                goto err;
        }
 
-       ath_update_txpow(priv);
+       ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+                              &priv->curtxpow);
 
        WMI_CMD(WMI_START_RECV_CMDID);
        if (ret)
@@ -230,11 +291,23 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
                goto err;
 
        htc_start(priv->htc);
+
+       if (!(priv->op_flags & OP_SCANNING) &&
+           !(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+               ath9k_htc_vif_reconfig(priv);
+
 err:
        ath9k_htc_ps_restore(priv);
        return ret;
 }
 
+/*
+ * Monitor mode handling is a tad complicated because the firmware requires
+ * an interface to be created exclusively for it, while mac80211 doesn't
+ * associate an interface with the mode.
+ *
+ * So, for now, only one monitor interface can be configured.
+ */
 static void __ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
 {
        struct ath_common *common = ath9k_hw_common(priv->ah);
@@ -244,9 +317,10 @@ static void __ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
 
        memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
        memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
-       hvif.index = 0; /* Should do for now */
+       hvif.index = priv->mon_vif_idx;
        WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
        priv->nvifs--;
+       priv->vif_slot &= ~(1 << priv->mon_vif_idx);
 }
 
 static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
@@ -254,70 +328,87 @@ static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
        struct ath_common *common = ath9k_hw_common(priv->ah);
        struct ath9k_htc_target_vif hvif;
        struct ath9k_htc_target_sta tsta;
-       int ret = 0;
+       int ret = 0, sta_idx;
        u8 cmd_rsp;
 
-       if (priv->nvifs > 0)
-               return -ENOBUFS;
+       if ((priv->nvifs >= ATH9K_HTC_MAX_VIF) ||
+           (priv->nstations >= ATH9K_HTC_MAX_STA)) {
+               ret = -ENOBUFS;
+               goto err_vif;
+       }
 
-       if (priv->nstations >= ATH9K_HTC_MAX_STA)
-               return -ENOBUFS;
+       sta_idx = ffz(priv->sta_slot);
+       if ((sta_idx < 0) || (sta_idx > ATH9K_HTC_MAX_STA)) {
+               ret = -ENOBUFS;
+               goto err_vif;
+       }
 
        /*
         * Add an interface.
         */
-
        memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
        memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
 
        hvif.opmode = cpu_to_be32(HTC_M_MONITOR);
-       priv->ah->opmode = NL80211_IFTYPE_MONITOR;
-       hvif.index = priv->nvifs;
+       hvif.index = ffz(priv->vif_slot);
 
        WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
        if (ret)
-               return ret;
+               goto err_vif;
+
+       /*
+        * Assign the monitor interface index as a special case here.
+        * This is needed when the interface is brought down.
+        */
+       priv->mon_vif_idx = hvif.index;
+       priv->vif_slot |= (1 << hvif.index);
+
+       /*
+        * Set the hardware mode to monitor only if there are no
+        * other interfaces.
+        */
+       if (!priv->nvifs)
+               priv->ah->opmode = NL80211_IFTYPE_MONITOR;
 
        priv->nvifs++;
 
        /*
         * Associate a station with the interface for packet injection.
         */
-
        memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta));
 
        memcpy(&tsta.macaddr, common->macaddr, ETH_ALEN);
 
        tsta.is_vif_sta = 1;
-       tsta.sta_index = priv->nstations;
+       tsta.sta_index = sta_idx;
        tsta.vif_index = hvif.index;
        tsta.maxampdu = 0xffff;
 
        WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
        if (ret) {
                ath_err(common, "Unable to add station entry for monitor mode\n");
-               goto err_vif;
+               goto err_sta;
        }
 
+       priv->sta_slot |= (1 << sta_idx);
        priv->nstations++;
-
-       /*
-        * Set chainmask etc. on the target.
-        */
-       ret = ath9k_htc_update_cap_target(priv);
-       if (ret)
-               ath_dbg(common, ATH_DBG_CONFIG,
-                       "Failed to update capability in target\n");
-
+       priv->vif_sta_pos[priv->mon_vif_idx] = sta_idx;
        priv->ah->is_monitoring = true;
 
+       ath_dbg(common, ATH_DBG_CONFIG,
+               "Attached a monitor interface at idx: %d, sta idx: %d\n",
+               priv->mon_vif_idx, sta_idx);
+
        return 0;
 
-err_vif:
+err_sta:
        /*
         * Remove the interface from the target.
         */
        __ath9k_htc_remove_monitor_interface(priv);
+err_vif:
+       ath_dbg(common, ATH_DBG_FATAL, "Unable to attach a monitor interface\n");
+
        return ret;
 }
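
Note the reordered error labels in ath9k_htc_add_monitor_interface: a failing WMI_NODE_CREATE_CMDID jumps to err_sta, which tears down the VAP that was already created, while err_vif only logs, since nothing has been created at that point. A generic sketch of that unwind-in-reverse pattern (the two malloc() calls stand in for the two firmware commands; this is not driver code):

#include <stdio.h>
#include <stdlib.h>

/* Each error label undoes only what succeeded before the failure point. */
static int setup(void)
{
        void *vap, *sta;

        vap = malloc(16);               /* step 1: create the interface */
        if (!vap)
                goto err_vap;

        sta = malloc(16);               /* step 2: create its station entry */
        if (!sta)
                goto err_sta;

        free(sta);                      /* normal teardown, reverse order */
        free(vap);
        return 0;

err_sta:
        free(vap);                      /* undo step 1; step 2 never happened */
err_vap:
        fprintf(stderr, "setup failed\n");
        return -1;
}

int main(void)
{
        return setup() ? 1 : 0;
}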
 
@@ -329,7 +420,7 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
 
        __ath9k_htc_remove_monitor_interface(priv);
 
-       sta_idx = 0; /* Only single interface, for now */
+       sta_idx = priv->vif_sta_pos[priv->mon_vif_idx];
 
        WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
        if (ret) {
@@ -337,9 +428,14 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
                return ret;
        }
 
+       priv->sta_slot &= ~(1 << sta_idx);
        priv->nstations--;
        priv->ah->is_monitoring = false;
 
+       ath_dbg(common, ATH_DBG_CONFIG,
+               "Removed a monitor interface at idx: %d, sta idx: %d\n",
+               priv->mon_vif_idx, sta_idx);
+
        return 0;
 }
 
@@ -351,12 +447,16 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
        struct ath9k_htc_target_sta tsta;
        struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
        struct ath9k_htc_sta *ista;
-       int ret;
+       int ret, sta_idx;
        u8 cmd_rsp;
 
        if (priv->nstations >= ATH9K_HTC_MAX_STA)
                return -ENOBUFS;
 
+       sta_idx = ffz(priv->sta_slot);
+       if ((sta_idx < 0) || (sta_idx >= ATH9K_HTC_MAX_STA))
+               return -ENOBUFS;
+
        memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta));
 
        if (sta) {
@@ -366,13 +466,13 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
                tsta.associd = common->curaid;
                tsta.is_vif_sta = 0;
                tsta.valid = true;
-               ista->index = priv->nstations;
+               ista->index = sta_idx;
        } else {
                memcpy(&tsta.macaddr, vif->addr, ETH_ALEN);
                tsta.is_vif_sta = 1;
        }
 
-       tsta.sta_index = priv->nstations;
+       tsta.sta_index = sta_idx;
        tsta.vif_index = avp->index;
        tsta.maxampdu = 0xffff;
        if (sta && sta->ht_cap.ht_supported)
@@ -387,12 +487,21 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
                return ret;
        }
 
-       if (sta)
+       if (sta) {
                ath_dbg(common, ATH_DBG_CONFIG,
                        "Added a station entry for: %pM (idx: %d)\n",
                        sta->addr, tsta.sta_index);
+       } else {
+               ath_dbg(common, ATH_DBG_CONFIG,
+                       "Added a station entry for VIF %d (idx: %d)\n",
+                       avp->index, tsta.sta_index);
+       }
 
+       priv->sta_slot |= (1 << sta_idx);
        priv->nstations++;
+       if (!sta)
+               priv->vif_sta_pos[avp->index] = sta_idx;
+
        return 0;
 }
 
@@ -401,6 +510,7 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
                                    struct ieee80211_sta *sta)
 {
        struct ath_common *common = ath9k_hw_common(priv->ah);
+       struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
        struct ath9k_htc_sta *ista;
        int ret;
        u8 cmd_rsp, sta_idx;
@@ -409,7 +519,7 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
                ista = (struct ath9k_htc_sta *) sta->drv_priv;
                sta_idx = ista->index;
        } else {
-               sta_idx = 0;
+               sta_idx = priv->vif_sta_pos[avp->index];
        }
 
        WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
@@ -421,12 +531,19 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
                return ret;
        }
 
-       if (sta)
+       if (sta) {
                ath_dbg(common, ATH_DBG_CONFIG,
                        "Removed a station entry for: %pM (idx: %d)\n",
                        sta->addr, sta_idx);
+       } else {
+               ath_dbg(common, ATH_DBG_CONFIG,
+                       "Removed a station entry for VIF %d (idx: %d)\n",
+                       avp->index, sta_idx);
+       }
 
+       priv->sta_slot &= ~(1 << sta_idx);
        priv->nstations--;
+
        return 0;
 }
 
@@ -808,7 +925,7 @@ void ath9k_htc_debug_remove_root(void)
 /* ANI */
 /*******/
 
-void ath_start_ani(struct ath9k_htc_priv *priv)
+void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
 {
        struct ath_common *common = ath9k_hw_common(priv->ah);
        unsigned long timestamp = jiffies_to_msecs(jiffies);
@@ -817,15 +934,22 @@ void ath_start_ani(struct ath9k_htc_priv *priv)
        common->ani.shortcal_timer = timestamp;
        common->ani.checkani_timer = timestamp;
 
-       ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work,
+       priv->op_flags |= OP_ANI_RUNNING;
+
+       ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
                                     msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
 }
 
-void ath9k_ani_work(struct work_struct *work)
+void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv)
+{
+       cancel_delayed_work_sync(&priv->ani_work);
+       priv->op_flags &= ~OP_ANI_RUNNING;
+}
+
+void ath9k_htc_ani_work(struct work_struct *work)
 {
        struct ath9k_htc_priv *priv =
-               container_of(work, struct ath9k_htc_priv,
-                            ath9k_ani_work.work);
+               container_of(work, struct ath9k_htc_priv, ani_work.work);
        struct ath_hw *ah = priv->ah;
        struct ath_common *common = ath9k_hw_common(ah);
        bool longcal = false;
@@ -834,7 +958,8 @@ void ath9k_ani_work(struct work_struct *work)
        unsigned int timestamp = jiffies_to_msecs(jiffies);
        u32 cal_interval, short_cal_interval;
 
-       short_cal_interval = ATH_STA_SHORT_CALINTERVAL;
+       short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
+               ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
 
        /* Only calibrate if awake */
        if (ah->power_mode != ATH9K_PM_AWAKE)
@@ -903,7 +1028,7 @@ set_timer:
        if (!common->ani.caldone)
                cal_interval = min(cal_interval, (u32)short_cal_interval);
 
-       ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work,
+       ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
                                     msecs_to_jiffies(cal_interval));
 }
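
The ANI/calibration worker is renamed and gated on the new OP_ANI_RUNNING flag, and the short calibration interval now depends on the operating mode (AP vs. station). At the set_timer label the next poll interval is clamped against the ANI poll interval and, until calibration settles, against the short interval. A loose model of that clamping (the millisecond constants below are placeholders, not the driver's values):

#include <stdbool.h>
#include <stdio.h>

#define LONG_CAL_MS     30000           /* illustrative values only */
#define AP_SHORT_CAL_MS   100
#define STA_SHORT_CAL_MS 15000
#define ANI_POLL_MS      1000

static unsigned int next_poll_ms(bool is_ap, bool ani_enabled, bool caldone)
{
        unsigned int short_cal = is_ap ? AP_SHORT_CAL_MS : STA_SHORT_CAL_MS;
        unsigned int interval = LONG_CAL_MS;

        if (ani_enabled && interval > ANI_POLL_MS)
                interval = ANI_POLL_MS;
        if (!caldone && interval > short_cal)
                interval = short_cal;   /* poll faster until calibration is done */

        return interval;
}

int main(void)
{
        printf("AP, uncalibrated: %u ms\n", next_poll_ms(true, true, false));
        printf("STA, calibrated:  %u ms\n", next_poll_ms(false, true, true));
        return 0;
}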
 
@@ -911,7 +1036,7 @@ set_timer:
 /* mac80211 Callbacks */
 /**********************/
 
-static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr;
        struct ath9k_htc_priv *priv = hw->priv;
@@ -924,7 +1049,7 @@ static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        padsize = padpos & 3;
        if (padsize && skb->len > padpos) {
                if (skb_headroom(skb) < padsize)
-                       return -1;
+                       goto fail_tx;
                skb_push(skb, padsize);
                memmove(skb->data, skb->data + padsize, padpos);
        }
@@ -945,11 +1070,10 @@ static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                goto fail_tx;
        }
 
-       return 0;
+       return;
 
 fail_tx:
        dev_kfree_skb_any(skb);
-       return 0;
 }
 
 static int ath9k_htc_start(struct ieee80211_hw *hw)
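
Two things change in the transmit entry point: the mac80211 tx callback no longer returns a status, so the handler becomes void and simply frees frames it cannot send, and the header padding above aligns the payload on a 4-byte boundary. Since 802.11 header lengths are always even, (hdrlen & 3) is either 0 or 2 and doubles as the pad size. A stand-alone check of that arithmetic:

#include <assert.h>
#include <stdio.h>

/* Pad so the payload behind the 802.11 header starts 4-byte aligned. */
static unsigned int pad_size(unsigned int hdrlen)
{
        return hdrlen & 3;
}

int main(void)
{
        unsigned int hdrlens[] = { 24, 26, 30, 32 };    /* common header sizes */

        for (unsigned int i = 0; i < sizeof(hdrlens) / sizeof(hdrlens[0]); i++) {
                unsigned int pad = pad_size(hdrlens[i]);

                assert((hdrlens[i] + pad) % 4 == 0);
                printf("hdrlen %u -> pad %u\n", hdrlens[i], pad);
        }
        return 0;
}
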
@@ -987,7 +1111,8 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
                return ret;
        }
 
-       ath_update_txpow(priv);
+       ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+                              &priv->curtxpow);
 
        mode = ath9k_htc_get_curmode(priv, init_channel);
        htc_mode = cpu_to_be16(mode);
@@ -997,6 +1122,11 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
 
        ath9k_host_rx_init(priv);
 
+       ret = ath9k_htc_update_cap_target(priv);
+       if (ret)
+               ath_dbg(common, ATH_DBG_CONFIG,
+                       "Failed to update capability in target\n");
+
        priv->op_flags &= ~OP_INVALID;
        htc_start(priv->htc);
 
@@ -1051,25 +1181,21 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
        cancel_work_sync(&priv->fatal_work);
        cancel_work_sync(&priv->ps_work);
        cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
+       ath9k_htc_stop_ani(priv);
        ath9k_led_stop_brightness(priv);
 
        mutex_lock(&priv->mutex);
 
-       /* Remove monitor interface here */
-       if (ah->opmode == NL80211_IFTYPE_MONITOR) {
-               if (ath9k_htc_remove_monitor_interface(priv))
-                       ath_err(common, "Unable to remove monitor interface\n");
-               else
-                       ath_dbg(common, ATH_DBG_CONFIG,
-                               "Monitor interface removed\n");
-       }
-
        if (ah->btcoex_hw.enabled) {
                ath9k_hw_btcoex_disable(ah);
                if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
                        ath_htc_cancel_btcoex_work(priv);
        }
 
+       /* Remove a monitor interface if it's present. */
+       if (priv->ah->is_monitoring)
+               ath9k_htc_remove_monitor_interface(priv);
+
        ath9k_hw_phy_disable(ah);
        ath9k_hw_disable(ah);
        ath9k_htc_ps_restore(priv);
@@ -1093,10 +1219,24 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
 
        mutex_lock(&priv->mutex);
 
-       /* Only one interface for now */
-       if (priv->nvifs > 0) {
-               ret = -ENOBUFS;
-               goto out;
+       if (priv->nvifs >= ATH9K_HTC_MAX_VIF) {
+               mutex_unlock(&priv->mutex);
+               return -ENOBUFS;
+       }
+
+       if (priv->num_ibss_vif ||
+           (priv->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
+               ath_err(common, "IBSS coexistence with other modes is not allowed\n");
+               mutex_unlock(&priv->mutex);
+               return -ENOBUFS;
+       }
+
+       if (((vif->type == NL80211_IFTYPE_AP) ||
+            (vif->type == NL80211_IFTYPE_ADHOC)) &&
+           ((priv->num_ap_vif + priv->num_ibss_vif) >= ATH9K_HTC_MAX_BCN_VIF)) {
+               ath_err(common, "Max. number of beaconing interfaces reached\n");
+               mutex_unlock(&priv->mutex);
+               return -ENOBUFS;
        }
 
        ath9k_htc_ps_wakeup(priv);
@@ -1110,6 +1250,9 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
        case NL80211_IFTYPE_ADHOC:
                hvif.opmode = cpu_to_be32(HTC_M_IBSS);
                break;
+       case NL80211_IFTYPE_AP:
+               hvif.opmode = cpu_to_be32(HTC_M_HOSTAP);
+               break;
        default:
                ath_err(common,
                        "Interface type %d not yet supported\n", vif->type);
@@ -1117,34 +1260,39 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
                goto out;
        }
 
-       ath_dbg(common, ATH_DBG_CONFIG,
-               "Attach a VIF of type: %d\n", vif->type);
-
-       priv->ah->opmode = vif->type;
-
        /* Index starts from zero on the target */
-       avp->index = hvif.index = priv->nvifs;
+       avp->index = hvif.index = ffz(priv->vif_slot);
        hvif.rtsthreshold = cpu_to_be16(2304);
        WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
        if (ret)
                goto out;
 
-       priv->nvifs++;
-
        /*
         * We need a node in target to tx mgmt frames
         * before association.
         */
        ret = ath9k_htc_add_station(priv, vif, NULL);
-       if (ret)
+       if (ret) {
+               WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
                goto out;
+       }
 
-       ret = ath9k_htc_update_cap_target(priv);
-       if (ret)
-               ath_dbg(common, ATH_DBG_CONFIG,
-                       "Failed to update capability in target\n");
+       ath9k_htc_set_bssid_mask(priv, vif);
 
+       priv->vif_slot |= (1 << avp->index);
+       priv->nvifs++;
        priv->vif = vif;
+
+       INC_VIF(priv, vif->type);
+       ath9k_htc_set_opmode(priv);
+
+       if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
+           !(priv->op_flags & OP_ANI_RUNNING))
+               ath9k_htc_start_ani(priv);
+
+       ath_dbg(common, ATH_DBG_CONFIG,
+               "Attach a VIF of type: %d at idx: %d\n", vif->type, avp->index);
+
 out:
        ath9k_htc_ps_restore(priv);
        mutex_unlock(&priv->mutex);
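
ath9k_htc_add_interface now performs three admission checks before any WMI command is sent: a global cap on interfaces (ATH9K_HTC_MAX_VIF), IBSS exclusivity, and a separate cap on beaconing (AP/IBSS) interfaces (ATH9K_HTC_MAX_BCN_VIF). A compact sketch of that policy, with invented limit values and a simplified counter struct:

#include <stdbool.h>
#include <stdio.h>

enum iftype { IF_STATION, IF_ADHOC, IF_AP };

struct vif_counts {
        int total, ap, ibss;
};

#define MAX_VIF         2               /* illustrative limits only */
#define MAX_BCN_VIF     2

/* Returns true if an interface of @type may still be added. */
static bool can_add_interface(const struct vif_counts *c, enum iftype type)
{
        if (c->total >= MAX_VIF)
                return false;
        /* IBSS must not coexist with any other interface. */
        if (c->ibss || (c->total && type == IF_ADHOC))
                return false;
        /* AP and IBSS interfaces both consume beacon slots. */
        if ((type == IF_AP || type == IF_ADHOC) &&
            (c->ap + c->ibss) >= MAX_BCN_VIF)
                return false;
        return true;
}

int main(void)
{
        struct vif_counts c = { .total = 1, .ap = 1, .ibss = 0 };

        printf("add STA:  %d\n", can_add_interface(&c, IF_STATION));
        printf("add IBSS: %d\n", can_add_interface(&c, IF_ADHOC));
        return 0;
}
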
@@ -1162,8 +1310,6 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
        int ret = 0;
        u8 cmd_rsp;
 
-       ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
-
        mutex_lock(&priv->mutex);
        ath9k_htc_ps_wakeup(priv);
 
@@ -1172,10 +1318,27 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
        hvif.index = avp->index;
        WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
        priv->nvifs--;
+       priv->vif_slot &= ~(1 << avp->index);
 
        ath9k_htc_remove_station(priv, vif, NULL);
        priv->vif = NULL;
 
+       DEC_VIF(priv, vif->type);
+       ath9k_htc_set_opmode(priv);
+
+       /*
+        * Stop ANI only if there are no associated station interfaces.
+        */
+       if ((vif->type == NL80211_IFTYPE_AP) && (priv->num_ap_vif == 0)) {
+               priv->rearm_ani = false;
+               ieee80211_iterate_active_interfaces_atomic(priv->hw,
+                                                  ath9k_htc_vif_iter, priv);
+               if (!priv->rearm_ani)
+                       ath9k_htc_stop_ani(priv);
+       }
+
+       ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface at idx: %d\n", avp->index);
+
        ath9k_htc_ps_restore(priv);
        mutex_unlock(&priv->mutex);
 }
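
On removal, ANI is stopped only when the last AP interface disappears and no remaining interface still needs it; the driver iterates the active interfaces and lets ath9k_htc_vif_iter (defined elsewhere in this patch) set rearm_ani. The decision reduces to a "does anyone still need the timer" scan, roughly like this (types invented for illustration):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct iface {
        bool needs_ani;                 /* e.g. an associated station interface */
};

static bool any_iface_needs_ani(const struct iface *ifs, size_t n)
{
        for (size_t i = 0; i < n; i++)
                if (ifs[i].needs_ani)
                        return true;
        return false;
}

int main(void)
{
        struct iface ifs[] = { { .needs_ani = false }, { .needs_ani = true } };

        printf(any_iface_needs_ani(ifs, 2) ?
               "keep ANI running\n" : "stop ANI\n");
        return 0;
}
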
@@ -1211,13 +1374,11 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
         * IEEE80211_CONF_CHANGE_CHANNEL is handled.
         */
        if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
-               if (conf->flags & IEEE80211_CONF_MONITOR) {
-                       if (ath9k_htc_add_monitor_interface(priv))
-                               ath_err(common, "Failed to set monitor mode\n");
-                       else
-                               ath_dbg(common, ATH_DBG_CONFIG,
-                                       "HW opmode set to Monitor mode\n");
-               }
+               if ((conf->flags & IEEE80211_CONF_MONITOR) &&
+                   !priv->ah->is_monitoring)
+                       ath9k_htc_add_monitor_interface(priv);
+               else if (priv->ah->is_monitoring)
+                       ath9k_htc_remove_monitor_interface(priv);
        }
 
        if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
@@ -1252,7 +1413,8 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
 
        if (changed & IEEE80211_CONF_CHANGE_POWER) {
                priv->txpowlimit = 2 * conf->power_level;
-               ath_update_txpow(priv);
+               ath9k_cmn_update_txpow(priv->ah, priv->curtxpow,
+                                      priv->txpowlimit, &priv->curtxpow);
        }
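
The factor of two in priv->txpowlimit exists because mac80211 reports conf->power_level in dBm while ath9k keeps its limits in half-dBm steps; ath9k_cmn_update_txpow() then applies the limit and reads back the value actually in effect. The conversion in isolation:

#include <stdio.h>

/* mac80211 power level is in dBm; ath9k limits are in 0.5 dBm units. */
static unsigned int dbm_to_half_dbm(unsigned int dbm)
{
        return 2 * dbm;
}

int main(void)
{
        printf("20 dBm -> %u half-dBm units\n", dbm_to_half_dbm(20));
        return 0;
}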
 
        if (changed & IEEE80211_CONF_CHANGE_IDLE) {
@@ -1439,66 +1601,81 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
        struct ath9k_htc_priv *priv = hw->priv;
        struct ath_hw *ah = priv->ah;
        struct ath_common *common = ath9k_hw_common(ah);
+       bool set_assoc;
 
        mutex_lock(&priv->mutex);
        ath9k_htc_ps_wakeup(priv);
 
+       /*
+        * Set the HW AID/BSSID only for the first station interface
+        * or in IBSS mode.
+        */
+       set_assoc = !!((priv->ah->opmode == NL80211_IFTYPE_ADHOC) ||
+                      ((priv->ah->opmode == NL80211_IFTYPE_STATION) &&
+                       (priv->num_sta_vif == 1)));
+
        if (changed & BSS_CHANGED_ASSOC) {
-               common->curaid = bss_conf->assoc ?
-                                bss_conf->aid : 0;
-               ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
-                       bss_conf->assoc);
-
-               if (bss_conf->assoc) {
-                       priv->op_flags |= OP_ASSOCIATED;
-                       ath_start_ani(priv);
-               } else {
-                       priv->op_flags &= ~OP_ASSOCIATED;
-                       cancel_delayed_work_sync(&priv->ath9k_ani_work);
+               if (set_assoc) {
+                       ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
+                               bss_conf->assoc);
+
+                       common->curaid = bss_conf->assoc ?
+                               bss_conf->aid : 0;
+
+                       if (bss_conf->assoc)
+                               ath9k_htc_start_ani(priv);
+                       else
+                               ath9k_htc_stop_ani(priv);
                }
        }
 
        if (changed & BSS_CHANGED_BSSID) {
-               /* Set BSSID */
-               memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
-               ath9k_hw_write_associd(ah);
+               if (set_assoc) {
+                       memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+                       ath9k_hw_write_associd(ah);
 
-               ath_dbg(common, ATH_DBG_CONFIG,
-                       "BSSID: %pM aid: 0x%x\n",
-                       common->curbssid, common->curaid);
+                       ath_dbg(common, ATH_DBG_CONFIG,
+                               "BSSID: %pM aid: 0x%x\n",
+                               common->curbssid, common->curaid);
+               }
        }
 
-       if ((changed & BSS_CHANGED_BEACON_INT) ||
-           (changed & BSS_CHANGED_BEACON) ||
-           ((changed & BSS_CHANGED_BEACON_ENABLED) &&
-           bss_conf->enable_beacon)) {
+       if ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon) {
+               ath_dbg(common, ATH_DBG_CONFIG,
+                       "Beacon enabled for BSS: %pM\n", bss_conf->bssid);
                priv->op_flags |= OP_ENABLE_BEACON;
                ath9k_htc_beacon_config(priv, vif);
        }
 
-       if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
-           !bss_conf->enable_beacon) {
-               priv->op_flags &= ~OP_ENABLE_BEACON;
-               ath9k_htc_beacon_config(priv, vif);
-       }
-
-       if (changed & BSS_CHANGED_ERP_PREAMBLE) {
-               ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
-                       bss_conf->use_short_preamble);
-               if (bss_conf->use_short_preamble)
-                       priv->op_flags |= OP_PREAMBLE_SHORT;
-               else
-                       priv->op_flags &= ~OP_PREAMBLE_SHORT;
+       if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) {
+               /*
+                * Disable SWBA interrupt only if there are no
+                * AP/IBSS interfaces.
+                */
+               if ((priv->num_ap_vif <= 1) || priv->num_ibss_vif) {
+                       ath_dbg(common, ATH_DBG_CONFIG,
+                               "Beacon disabled for BSS: %pM\n",
+                               bss_conf->bssid);
+                       priv->op_flags &= ~OP_ENABLE_BEACON;
+                       ath9k_htc_beacon_config(priv, vif);
+               }
        }
 
-       if (changed & BSS_CHANGED_ERP_CTS_PROT) {
-               ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
-                       bss_conf->use_cts_prot);
-               if (bss_conf->use_cts_prot &&
-                   hw->conf.channel->band != IEEE80211_BAND_5GHZ)
-                       priv->op_flags |= OP_PROTECT_ENABLE;
-               else
-                       priv->op_flags &= ~OP_PROTECT_ENABLE;
+       if (changed & BSS_CHANGED_BEACON_INT) {
+               /*
+                * Reset the HW TSF for the first AP interface.
+                */
+               if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
+                   (priv->nvifs == 1) &&
+                   (priv->num_ap_vif == 1) &&
+                   (vif->type == NL80211_IFTYPE_AP)) {
+                       priv->op_flags |= OP_TSF_RESET;
+               }
+               ath_dbg(common, ATH_DBG_CONFIG,
+                       "Beacon interval changed for BSS: %pM\n",
+                       bss_conf->bssid);
+               ath9k_htc_beacon_config(priv, vif);
        }
 
        if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -1557,12 +1734,14 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
                                  struct ieee80211_vif *vif,
                                  enum ieee80211_ampdu_mlme_action action,
                                  struct ieee80211_sta *sta,
-                                 u16 tid, u16 *ssn)
+                                 u16 tid, u16 *ssn, u8 buf_size)
 {
        struct ath9k_htc_priv *priv = hw->priv;
        struct ath9k_htc_sta *ista;
        int ret = 0;
 
+       mutex_lock(&priv->mutex);
+
        switch (action) {
        case IEEE80211_AMPDU_RX_START:
                break;
@@ -1587,6 +1766,8 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
                ath_err(ath9k_hw_common(priv->ah), "Unknown AMPDU action\n");
        }
 
+       mutex_unlock(&priv->mutex);
+
        return ret;
 }
 
@@ -1599,8 +1780,7 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
        priv->op_flags |= OP_SCANNING;
        spin_unlock_bh(&priv->beacon_lock);
        cancel_work_sync(&priv->ps_work);
-       if (priv->op_flags & OP_ASSOCIATED)
-               cancel_delayed_work_sync(&priv->ath9k_ani_work);
+       ath9k_htc_stop_ani(priv);
        mutex_unlock(&priv->mutex);
 }
 
@@ -1609,14 +1789,11 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
        struct ath9k_htc_priv *priv = hw->priv;
 
        mutex_lock(&priv->mutex);
-       ath9k_htc_ps_wakeup(priv);
        spin_lock_bh(&priv->beacon_lock);
        priv->op_flags &= ~OP_SCANNING;
        spin_unlock_bh(&priv->beacon_lock);
-       if (priv->op_flags & OP_ASSOCIATED) {
-               ath9k_htc_beacon_config(priv, priv->vif);
-               ath_start_ani(priv);
-       }
+       ath9k_htc_ps_wakeup(priv);
+       ath9k_htc_vif_reconfig(priv);
        ath9k_htc_ps_restore(priv);
        mutex_unlock(&priv->mutex);
 }
index 7a5ffca21958a45657e1cf23370e01bb31e498f8..4a4f27ba96afdbbba1f47632203464815122e316 100644 (file)
@@ -84,7 +84,9 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_sta *sta = tx_info->control.sta;
+       struct ieee80211_vif *vif = tx_info->control.vif;
        struct ath9k_htc_sta *ista;
+       struct ath9k_htc_vif *avp;
        struct ath9k_htc_tx_ctl tx_ctl;
        enum htc_endpoint_id epid;
        u16 qnum;
@@ -95,18 +97,31 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
        hdr = (struct ieee80211_hdr *) skb->data;
        fc = hdr->frame_control;
 
-       if (tx_info->control.vif &&
-                       (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv)
-               vif_idx = ((struct ath9k_htc_vif *)
-                               tx_info->control.vif->drv_priv)->index;
-       else
-               vif_idx = priv->nvifs;
+       /*
+        * Find out on which interface this packet has to be
+        * sent out.
+        */
+       if (vif) {
+               avp = (struct ath9k_htc_vif *) vif->drv_priv;
+               vif_idx = avp->index;
+       } else {
+               if (!priv->ah->is_monitoring) {
+                       ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+                               "VIF is NULL, but no monitor interface is present\n");
+                       return -EINVAL;
+               }
 
+               vif_idx = priv->mon_vif_idx;
+       }
+
+       /*
+        * Find out which station this packet is destined for.
+        */
        if (sta) {
                ista = (struct ath9k_htc_sta *) sta->drv_priv;
                sta_idx = ista->index;
        } else {
-               sta_idx = 0;
+               sta_idx = priv->vif_sta_pos[vif_idx];
        }
 
        memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));
@@ -141,7 +156,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
 
                /* CTS-to-self */
                if (!(flags & ATH9K_HTC_TX_RTSCTS) &&
-                   (priv->op_flags & OP_PROTECT_ENABLE))
+                   (vif && vif->bss_conf.use_cts_prot))
                        flags |= ATH9K_HTC_TX_CTSONLY;
 
                tx_hdr.flags = cpu_to_be32(flags);
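
The transmit start path stops assuming a single interface: the target VIF index comes from the frame's control.vif (falling back to the monitor VIF for injected frames), and the station index comes either from the peer's drv_priv or from the per-VIF "self" station recorded in vif_sta_pos[]. A condensed model of that lookup (struct dev_state and resolve_indices are simplified stand-ins, not driver structures):

#include <stdio.h>

#define MAX_VIF 2

struct dev_state {
        int mon_vif_idx;                /* slot of the monitor interface, if any */
        int vif_sta_pos[MAX_VIF];       /* per-VIF "self" station index */
};

/* vif < 0 means no vif attached (monitor injection); sta < 0 means no peer. */
static void resolve_indices(const struct dev_state *d, int vif, int sta,
                            int *vif_idx, int *sta_idx)
{
        *vif_idx = (vif >= 0) ? vif : d->mon_vif_idx;
        *sta_idx = (sta >= 0) ? sta : d->vif_sta_pos[*vif_idx];
}

int main(void)
{
        struct dev_state d = { .mon_vif_idx = 1, .vif_sta_pos = { 0, 3 } };
        int vi, si;

        resolve_indices(&d, -1, -1, &vi, &si);  /* injected frame, no vif/peer */
        printf("vif %d, sta %d\n", vi, si);
        return 0;
}
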
@@ -217,6 +232,7 @@ static bool ath9k_htc_check_tx_aggr(struct ath9k_htc_priv *priv,
 void ath9k_tx_tasklet(unsigned long data)
 {
        struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+       struct ieee80211_vif *vif;
        struct ieee80211_sta *sta;
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info;
@@ -228,12 +244,16 @@ void ath9k_tx_tasklet(unsigned long data)
                hdr = (struct ieee80211_hdr *) skb->data;
                fc = hdr->frame_control;
                tx_info = IEEE80211_SKB_CB(skb);
+               vif = tx_info->control.vif;
 
                memset(&tx_info->status, 0, sizeof(tx_info->status));
 
+               if (!vif)
+                       goto send_mac80211;
+
                rcu_read_lock();
 
-               sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+               sta = ieee80211_find_sta(vif, hdr->addr1);
                if (!sta) {
                        rcu_read_unlock();
                        ieee80211_tx_status(priv->hw, skb);
@@ -263,6 +283,7 @@ void ath9k_tx_tasklet(unsigned long data)
 
                rcu_read_unlock();
 
+       send_mac80211:
                /* Send status to mac80211 */
                ieee80211_tx_status(priv->hw, skb);
        }
@@ -386,7 +407,7 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
         */
        if (((ah->opmode != NL80211_IFTYPE_AP) &&
             (priv->rxfilter & FIF_PROMISC_IN_BSS)) ||
-           (ah->opmode == NL80211_IFTYPE_MONITOR))
+           ah->is_monitoring)
                rfilt |= ATH9K_RX_FILTER_PROM;
 
        if (priv->rxfilter & FIF_CONTROL)
@@ -398,8 +419,13 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
        else
                rfilt |= ATH9K_RX_FILTER_BEACON;
 
-       if (conf_is_ht(&priv->hw->conf))
+       if (conf_is_ht(&priv->hw->conf)) {
                rfilt |= ATH9K_RX_FILTER_COMP_BAR;
+               rfilt |= ATH9K_RX_FILTER_UNCOMP_BA_BAR;
+       }
+
+       if (priv->rxfilter & FIF_PSPOLL)
+               rfilt |= ATH9K_RX_FILTER_PSPOLL;
 
        return rfilt;
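
The receive filter now also admits block-ack traffic in HT configurations (compressed BAR plus uncompressed BA/BAR) and honours FIF_PSPOLL. The composition is plain bit OR-ing; a toy version with made-up flag values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit values; the real ones live in the ath9k mac headers. */
#define RXF_COMP_BAR            0x001
#define RXF_UNCOMP_BA_BAR       0x002
#define RXF_PSPOLL              0x004

static uint32_t calc_rx_filter(bool is_ht, bool want_pspoll)
{
        uint32_t rfilt = 0;

        if (is_ht)
                rfilt |= RXF_COMP_BAR | RXF_UNCOMP_BA_BAR;
        if (want_pspoll)
                rfilt |= RXF_PSPOLL;
        return rfilt;
}

int main(void)
{
        printf("HT + PS-Poll filter: 0x%x\n", calc_rx_filter(true, true));
        return 0;
}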
 
@@ -412,20 +438,12 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
 static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
 {
        struct ath_hw *ah = priv->ah;
-       struct ath_common *common = ath9k_hw_common(ah);
-
        u32 rfilt, mfilt[2];
 
        /* configure rx filter */
        rfilt = ath9k_htc_calcrxfilter(priv);
        ath9k_hw_setrxfilter(ah, rfilt);
 
-       /* configure bssid mask */
-       ath_hw_setbssidmask(common);
-
-       /* configure operational mode */
-       ath9k_hw_setopmode(ah);
-
        /* calculate and install multicast filter */
        mfilt[0] = mfilt[1] = ~0;
        ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
@@ -576,31 +594,29 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
        ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
                           rxbuf->rxstatus.rs_flags);
 
-       if (priv->op_flags & OP_ASSOCIATED) {
-               if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
-                   !rxbuf->rxstatus.rs_moreaggr)
-                       ATH_RSSI_LPF(priv->rx.last_rssi,
-                                    rxbuf->rxstatus.rs_rssi);
+       if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
+           !rxbuf->rxstatus.rs_moreaggr)
+               ATH_RSSI_LPF(priv->rx.last_rssi,
+                            rxbuf->rxstatus.rs_rssi);
 
-               last_rssi = priv->rx.last_rssi;
+       last_rssi = priv->rx.last_rssi;
 
-               if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
-                       rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
-                                                            ATH_RSSI_EP_MULTIPLIER);
+       if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+               rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
+                                                    ATH_RSSI_EP_MULTIPLIER);
 
-               if (rxbuf->rxstatus.rs_rssi < 0)
-                       rxbuf->rxstatus.rs_rssi = 0;
+       if (rxbuf->rxstatus.rs_rssi < 0)
+               rxbuf->rxstatus.rs_rssi = 0;
 
-               if (ieee80211_is_beacon(fc))
-                       priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
-       }
+       if (ieee80211_is_beacon(fc))
+               priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
 
        rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
        rx_status->band = hw->conf.channel->band;
        rx_status->freq = hw->conf.channel->center_freq;
        rx_status->signal =  rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
        rx_status->antenna = rxbuf->rxstatus.rs_antenna;
-       rx_status->flag |= RX_FLAG_TSFT;
+       rx_status->flag |= RX_FLAG_MACTIME_MPDU;
 
        return true;
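
RSSI smoothing (ATH_RSSI_LPF plus the ATH_EP_RND read-out) now runs for every received frame instead of only while associated. Underneath it is a fixed-point exponential moving average of the hardware's rs_rssi value, which is reported in dB above the noise floor and clamped to zero if negative. A sketch with assumed weight and scale (the driver's own constants differ):

#include <stdio.h>

#define SCALE   128     /* fixed-point scale of the running average */
#define WEIGHT  10      /* avg = (avg * (WEIGHT - 1) + sample) / WEIGHT */

/* avg_scaled < 0 marks "no history yet", like ATH_RSSI_DUMMY_MARKER. */
static int rssi_lpf(int avg_scaled, int sample)
{
        if (avg_scaled < 0)
                return sample * SCALE;
        return (avg_scaled * (WEIGHT - 1) + sample * SCALE) / WEIGHT;
}

int main(void)
{
        int samples[] = { 28, 30, 18, 25 };     /* dB above the noise floor */
        int avg = -1;

        for (unsigned int i = 0; i < 4; i++) {
                avg = rssi_lpf(avg, samples[i]);
                printf("sample %d -> smoothed %d\n", samples[i], avg / SCALE);
        }
        return 0;
}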
 
index 9f01e50d5cda718e391f9582f71064f82c5f8733..9a3438174f862d0bcfbffa44db6046c0d6169434 100644 (file)
@@ -495,6 +495,17 @@ static int __ath9k_hw_init(struct ath_hw *ah)
        if (ah->hw_version.devid == AR5416_AR9100_DEVID)
                ah->hw_version.macVersion = AR_SREV_VERSION_9100;
 
+       ath9k_hw_read_revisions(ah);
+
+       /*
+        * Read back AR_WA into a permanent copy and set bits 14 and 17.
+        * We need to do this to avoid RMW of this register. We cannot
+        * read the reg when chip is asleep.
+        */
+       ah->WARegVal = REG_READ(ah, AR_WA);
+       ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
+                        AR_WA_ASPM_TIMER_BASED_DISABLE);
+
        if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
                ath_err(common, "Couldn't reset chip\n");
                return -EIO;
@@ -563,14 +574,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
 
        ath9k_hw_init_mode_regs(ah);
 
-       /*
-        * Read back AR_WA into a permanent copy and set bits 14 and 17.
-        * We need to do this to avoid RMW of this register. We cannot
-        * read the reg when chip is asleep.
-        */
-       ah->WARegVal = REG_READ(ah, AR_WA);
-       ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
-                        AR_WA_ASPM_TIMER_BASED_DISABLE);
 
        if (ah->is_pciexpress)
                ath9k_hw_configpcipowersave(ah, 0, 0);
@@ -668,14 +671,51 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
        REGWRITE_BUFFER_FLUSH(ah);
 }
 
+unsigned long ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
+{
+       REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) & ~(PLL3_DO_MEAS_MASK)));
+       udelay(100);
+       REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) | PLL3_DO_MEAS_MASK));
+
+       while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
+               udelay(100);
+
+       return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
+}
+EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
+
+#define DPLL2_KD_VAL            0x3D
+#define DPLL2_KI_VAL            0x06
+#define DPLL3_PHASE_SHIFT_VAL   0x1
+
 static void ath9k_hw_init_pll(struct ath_hw *ah,
                              struct ath9k_channel *chan)
 {
        u32 pll;
 
-       if (AR_SREV_9485(ah))
+       if (AR_SREV_9485(ah)) {
+               REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666);
+               REG_WRITE(ah, AR_CH0_DDR_DPLL2, 0x19e82f01);
+
+               REG_RMW_FIELD(ah, AR_CH0_DDR_DPLL3,
+                             AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL);
+
+               REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
+               udelay(100);
+
                REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666);
 
+               REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+                             AR_CH0_DPLL2_KD, DPLL2_KD_VAL);
+               REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+                             AR_CH0_DPLL2_KI, DPLL2_KI_VAL);
+
+               REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
+                             AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL);
+               REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x142c);
+               udelay(110);
+       }
+
        pll = ath9k_hw_compute_pll_control(ah, chan);
 
        REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
@@ -1060,7 +1100,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
                REG_WRITE(ah, AR_RC, AR_RC_AHB);
 
        REG_WRITE(ah, AR_RTC_RESET, 0);
-       udelay(2);
 
        REGWRITE_BUFFER_FLUSH(ah);
 
@@ -1082,8 +1121,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
                return false;
        }
 
-       ath9k_hw_read_revisions(ah);
-
        return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM);
 }
 
@@ -1348,8 +1385,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        ath9k_hw_spur_mitigate_freq(ah, chan);
        ah->eep_ops->set_board_values(ah, chan);
 
-       ath9k_hw_set_operating_mode(ah, ah->opmode);
-
        ENABLE_REGWRITE_BUFFER(ah);
 
        REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
@@ -1367,6 +1402,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 
        REGWRITE_BUFFER_FLUSH(ah);
 
+       ath9k_hw_set_operating_mode(ah, ah->opmode);
+
        r = ath9k_hw_rf_set_freq(ah, chan);
        if (r)
                return r;
index ea9fde6706461cb12560b53018ef101a87be5481..ef79f4c876ca9e0577b8ee440076b3aab57dd430 100644 (file)
@@ -70,6 +70,9 @@
 #define REG_READ(_ah, _reg) \
        ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
 
+#define REG_READ_MULTI(_ah, _addr, _val, _cnt)         \
+       ath9k_hw_common(_ah)->ops->multi_read((_ah), (_addr), (_val), (_cnt))
+
 #define ENABLE_REGWRITE_BUFFER(_ah)                                    \
        do {                                                            \
                if (ath9k_hw_common(_ah)->ops->enable_write_buffer)     \
@@ -926,6 +929,7 @@ void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
 void ath9k_hw_reset_tsf(struct ath_hw *ah);
 void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
 void ath9k_hw_init_global_settings(struct ath_hw *ah);
+unsigned long ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
 void ath9k_hw_set11nmac2040(struct ath_hw *ah);
 void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
 void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
index a033d01bf8a0bfe4203847c71458474b5e3a38cc..79aec983279ff3d6ada9020054034af96c0c5578 100644 (file)
@@ -140,6 +140,21 @@ static struct ieee80211_rate ath9k_legacy_rates[] = {
        RATE(540, 0x0c, 0),
 };
 
+#ifdef CONFIG_MAC80211_LEDS
+static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
+       { .throughput = 0 * 1024, .blink_time = 334 },
+       { .throughput = 1 * 1024, .blink_time = 260 },
+       { .throughput = 5 * 1024, .blink_time = 220 },
+       { .throughput = 10 * 1024, .blink_time = 190 },
+       { .throughput = 20 * 1024, .blink_time = 170 },
+       { .throughput = 50 * 1024, .blink_time = 150 },
+       { .throughput = 70 * 1024, .blink_time = 130 },
+       { .throughput = 100 * 1024, .blink_time = 110 },
+       { .throughput = 200 * 1024, .blink_time = 80 },
+       { .throughput = 300 * 1024, .blink_time = 50 },
+};
+#endif
+
 static void ath9k_deinit_softc(struct ath_softc *sc);
 
 /*
@@ -250,8 +265,7 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
                              struct regulatory_request *request)
 {
        struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
 
        return ath_reg_notifier_apply(wiphy, request, reg);
@@ -438,9 +452,10 @@ static int ath9k_init_queues(struct ath_softc *sc)
        sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
        ath_cabq_update(sc);
 
-       for (i = 0; i < WME_NUM_AC; i++)
+       for (i = 0; i < WME_NUM_AC; i++) {
                sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
-
+               sc->tx.txq_map[i]->mac80211_qnum = i;
+       }
        return 0;
 }
 
@@ -512,10 +527,8 @@ static void ath9k_init_misc(struct ath_softc *sc)
 
        sc->beacon.slottime = ATH9K_SLOT_TIME_9;
 
-       for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
+       for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
                sc->beacon.bslot[i] = NULL;
-               sc->beacon.bslot_aphy[i] = NULL;
-       }
 
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
                sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
@@ -533,6 +546,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
        if (!ah)
                return -ENOMEM;
 
+       ah->hw = sc->hw;
        ah->hw_version.devid = devid;
        ah->hw_version.subsysid = subsysid;
        sc->sc_ah = ah;
@@ -550,10 +564,13 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
        common->btcoex_enabled = ath9k_btcoex_enable == 1;
        spin_lock_init(&common->cc_lock);
 
-       spin_lock_init(&sc->wiphy_lock);
        spin_lock_init(&sc->sc_serial_rw);
        spin_lock_init(&sc->sc_pm_lock);
        mutex_init(&sc->mutex);
+#ifdef CONFIG_ATH9K_DEBUGFS
+       spin_lock_init(&sc->nodes_lock);
+       INIT_LIST_HEAD(&sc->nodes);
+#endif
        tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
        tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
                     (unsigned long)sc);
@@ -695,7 +712,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
                    const struct ath_bus_ops *bus_ops)
 {
        struct ieee80211_hw *hw = sc->hw;
-       struct ath_wiphy *aphy = hw->priv;
        struct ath_common *common;
        struct ath_hw *ah;
        int error = 0;
@@ -730,6 +746,13 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
 
        ath9k_init_txpower_limits(sc);
 
+#ifdef CONFIG_MAC80211_LEDS
+       /* must be initialized before ieee80211_register_hw */
+       sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
+               IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
+               ARRAY_SIZE(ath9k_tpt_blink));
+#endif
+
        /* Register with mac80211 */
        error = ieee80211_register_hw(hw);
        if (error)
@@ -750,10 +773,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
 
        INIT_WORK(&sc->hw_check_work, ath_hw_check);
        INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
-       INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
-       INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
-       sc->wiphy_scheduler_int = msecs_to_jiffies(500);
-       aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
+       sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
 
        ath_init_leds(sc);
        ath_start_rfkill_poll(sc);
@@ -805,7 +825,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
 void ath9k_deinit_device(struct ath_softc *sc)
 {
        struct ieee80211_hw *hw = sc->hw;
-       int i = 0;
 
        ath9k_ps_wakeup(sc);
 
@@ -814,20 +833,10 @@ void ath9k_deinit_device(struct ath_softc *sc)
 
        ath9k_ps_restore(sc);
 
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               struct ath_wiphy *aphy = sc->sec_wiphy[i];
-               if (aphy == NULL)
-                       continue;
-               sc->sec_wiphy[i] = NULL;
-               ieee80211_unregister_hw(aphy->hw);
-               ieee80211_free_hw(aphy->hw);
-       }
-
        ieee80211_unregister_hw(hw);
        ath_rx_cleanup(sc);
        ath_tx_cleanup(sc);
        ath9k_deinit_softc(sc);
-       kfree(sc->sec_wiphy);
 }
 
 void ath_descdma_cleanup(struct ath_softc *sc,
index 2915b11edefb915ca89321a7e2a349e1f525dcf6..5efc869d65ff3772591203bbdc82368a49dffa42 100644 (file)
@@ -690,17 +690,23 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
                rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
 
        if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
+               /*
+                * Treat these errors as mutually exclusive to avoid spurious
+                * extra error reports from the hardware. If a CRC error is
+                * reported, then decryption and MIC errors are irrelevant,
+                * the frame is going to be dropped either way
+                */
                if (ads.ds_rxstatus8 & AR_CRCErr)
                        rs->rs_status |= ATH9K_RXERR_CRC;
-               if (ads.ds_rxstatus8 & AR_PHYErr) {
+               else if (ads.ds_rxstatus8 & AR_PHYErr) {
                        rs->rs_status |= ATH9K_RXERR_PHY;
                        phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
                        rs->rs_phyerr = phyerr;
-               }
-               if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
+               } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
                        rs->rs_status |= ATH9K_RXERR_DECRYPT;
-               if (ads.ds_rxstatus8 & AR_MichaelErr)
+               else if (ads.ds_rxstatus8 & AR_MichaelErr)
                        rs->rs_status |= ATH9K_RXERR_MIC;
+
                if (ads.ds_rxstatus8 & AR_KeyMiss)
                        rs->rs_status |= ATH9K_RXERR_DECRYPT;
        }
index 7512f97e8f49a07839551833db8ca9206e6a02fc..04d58ae923bb602602fe9bccb40fe918e7776424 100644 (file)
@@ -639,6 +639,8 @@ enum ath9k_rx_filter {
        ATH9K_RX_FILTER_PHYERR = 0x00000100,
        ATH9K_RX_FILTER_MYBEACON = 0x00000200,
        ATH9K_RX_FILTER_COMP_BAR = 0x00000400,
+       ATH9K_RX_FILTER_COMP_BA = 0x00000800,
+       ATH9K_RX_FILTER_UNCOMP_BA_BAR = 0x00001000,
        ATH9K_RX_FILTER_PSPOLL = 0x00004000,
        ATH9K_RX_FILTER_PHYRADAR = 0x00002000,
        ATH9K_RX_FILTER_MCAST_BCAST_ALL = 0x00008000,
index a09d15f7aa6e0eff95cadfcb04ca280b3c292528..2e228aada1a906fdd6298d63fb6d66f08dfc7536 100644 (file)
  */
 
 #include <linux/nl80211.h>
+#include <linux/delay.h>
 #include "ath9k.h"
 #include "btcoex.h"
 
-static void ath_update_txpow(struct ath_softc *sc)
-{
-       struct ath_hw *ah = sc->sc_ah;
-
-       if (sc->curtxpow != sc->config.txpowlimit) {
-               ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
-               /* read back in case value is clamped */
-               sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
-       }
-}
-
 static u8 parse_mpdudensity(u8 mpdudensity)
 {
        /*
@@ -64,17 +54,19 @@ static u8 parse_mpdudensity(u8 mpdudensity)
        }
 }
 
-static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
-                                               struct ieee80211_hw *hw)
+static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq)
 {
-       struct ieee80211_channel *curchan = hw->conf.channel;
-       struct ath9k_channel *channel;
-       u8 chan_idx;
+       bool pending = false;
+
+       spin_lock_bh(&txq->axq_lock);
 
-       chan_idx = curchan->hw_value;
-       channel = &sc->sc_ah->channels[chan_idx];
-       ath9k_update_ichannel(sc, hw, channel);
-       return channel;
+       if (txq->axq_depth || !list_empty(&txq->axq_acq))
+               pending = true;
+       else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+               pending = !list_empty(&txq->txq_fifo_pending);
+
+       spin_unlock_bh(&txq->axq_lock);
+       return pending;
 }
 
 bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
@@ -177,7 +169,12 @@ static void ath_update_survey_nf(struct ath_softc *sc, int channel)
        }
 }
 
-static void ath_update_survey_stats(struct ath_softc *sc)
+/*
+ * Updates the survey statistics and returns the busy time since last
+ * update in %, if the measurement duration was long enough for the
+ * result to be useful, -1 otherwise.
+ */
+static int ath_update_survey_stats(struct ath_softc *sc)
 {
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
@@ -185,9 +182,10 @@ static void ath_update_survey_stats(struct ath_softc *sc)
        struct survey_info *survey = &sc->survey[pos];
        struct ath_cycle_counters *cc = &common->cc_survey;
        unsigned int div = common->clockrate * 1000;
+       int ret = 0;
 
        if (!ah->curchan)
-               return;
+               return -1;
 
        if (ah->power_mode == ATH9K_PM_AWAKE)
                ath_hw_cycle_counters_update(common);
@@ -202,9 +200,18 @@ static void ath_update_survey_stats(struct ath_softc *sc)
                survey->channel_time_rx += cc->rx_frame / div;
                survey->channel_time_tx += cc->tx_frame / div;
        }
+
+       if (cc->cycles < div)
+               return -1;
+
+       if (cc->cycles > 0)
+               ret = cc->rx_busy * 100 / cc->cycles;
+
        memset(cc, 0, sizeof(*cc));
 
        ath_update_survey_nf(sc, pos);
+
+       return ret;
 }
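
ath_update_survey_stats() now doubles as a busy-time probe: it returns the rx-busy share of the elapsed cycle counter in percent, or -1 when fewer cycles elapsed than one millisecond's worth (cycles < clockrate * 1000), so callers can discard measurements that are too short. The arithmetic in isolation:

#include <stdint.h>
#include <stdio.h>

/* Busy percentage over a cycle window; -1 if the window is too short. */
static int busy_percent(uint64_t busy_cycles, uint64_t total_cycles,
                        uint64_t min_cycles)
{
        if (total_cycles < min_cycles || total_cycles == 0)
                return -1;
        return (int)(busy_cycles * 100 / total_cycles);
}

int main(void)
{
        /* e.g. a 44 MHz MAC clock: one millisecond is 44000 cycles */
        printf("busy: %d%%\n", busy_percent(39600, 44000, 44000));
        printf("too short: %d\n", busy_percent(10, 100, 44000));
        return 0;
}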
 
 /*
@@ -215,7 +222,6 @@ static void ath_update_survey_stats(struct ath_softc *sc)
 int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
                    struct ath9k_channel *hchan)
 {
-       struct ath_wiphy *aphy = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_conf *conf = &common->hw->conf;
@@ -227,10 +233,13 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
        if (sc->sc_flags & SC_OP_INVALID)
                return -EIO;
 
+       sc->hw_busy_count = 0;
+
        del_timer_sync(&common->ani.timer);
        cancel_work_sync(&sc->paprd_work);
        cancel_work_sync(&sc->hw_check_work);
        cancel_delayed_work_sync(&sc->tx_complete_work);
+       cancel_delayed_work_sync(&sc->hw_pll_work);
 
        ath9k_ps_wakeup(sc);
 
@@ -251,6 +260,9 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
        if (!ath_stoprecv(sc))
                stopped = false;
 
+       if (!ath9k_hw_check_alive(ah))
+               stopped = false;
+
        /* XXX: do not flush receive queue here. We don't want
         * to flush data frames already in queue because of
         * changing channel. */
@@ -259,7 +271,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
                fastcc = false;
 
        if (!(sc->sc_flags & SC_OP_OFFCHANNEL))
-               caldata = &aphy->caldata;
+               caldata = &sc->caldata;
 
        ath_dbg(common, ATH_DBG_CONFIG,
                "(%u MHz) -> (%u MHz), conf_is_ht40: %d fastcc: %d\n",
@@ -281,17 +293,21 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
                goto ps_restore;
        }
 
-       ath_update_txpow(sc);
+       ath9k_cmn_update_txpow(ah, sc->curtxpow,
+                              sc->config.txpowlimit, &sc->curtxpow);
        ath9k_hw_set_interrupts(ah, ah->imask);
 
        if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) {
                if (sc->sc_flags & SC_OP_BEACONS)
                        ath_beacon_config(sc, NULL);
                ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
+               ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
                ath_start_ani(common);
        }
 
  ps_restore:
+       ieee80211_wake_queues(hw);
+
        spin_unlock_bh(&sc->sc_pcu_lock);
 
        ath9k_ps_restore(sc);
@@ -549,6 +565,12 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
        struct ath_hw *ah = sc->sc_ah;
        an = (struct ath_node *)sta->drv_priv;
 
+#ifdef CONFIG_ATH9K_DEBUGFS
+       spin_lock(&sc->nodes_lock);
+       list_add(&an->list, &sc->nodes);
+       spin_unlock(&sc->nodes_lock);
+       an->sta = sta;
+#endif
        if ((ah->caps.hw_caps) & ATH9K_HW_CAP_APM)
                sc->sc_flags |= SC_OP_ENABLE_APM;
 
@@ -564,6 +586,13 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
 {
        struct ath_node *an = (struct ath_node *)sta->drv_priv;
 
+#ifdef CONFIG_ATH9K_DEBUGFS
+       spin_lock(&sc->nodes_lock);
+       list_del(&an->list);
+       spin_unlock(&sc->nodes_lock);
+       an->sta = NULL;
+#endif
+
        if (sc->sc_flags & SC_OP_TXAGGR)
                ath_tx_node_cleanup(sc, an);
 }
@@ -571,17 +600,25 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
 void ath_hw_check(struct work_struct *work)
 {
        struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
-       int i;
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       unsigned long flags;
+       int busy;
 
        ath9k_ps_wakeup(sc);
+       if (ath9k_hw_check_alive(sc->sc_ah))
+               goto out;
 
-       for (i = 0; i < 3; i++) {
-               if (ath9k_hw_check_alive(sc->sc_ah))
-                       goto out;
+       spin_lock_irqsave(&common->cc_lock, flags);
+       busy = ath_update_survey_stats(sc);
+       spin_unlock_irqrestore(&common->cc_lock, flags);
 
-               msleep(1);
-       }
-       ath_reset(sc, true);
+       ath_dbg(common, ATH_DBG_RESET, "Possible baseband hang, "
+               "busy=%d (try %d)\n", busy, sc->hw_busy_count + 1);
+       if (busy >= 99) {
+               if (++sc->hw_busy_count >= 3)
+                       ath_reset(sc, true);
+       } else if (busy >= 0)
+               sc->hw_busy_count = 0;
 
 out:
        ath9k_ps_restore(sc);
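
The hardware check no longer retries ath9k_hw_check_alive() three times and resets unconditionally; it resets only after three consecutive samples in which the medium looks essentially fully busy (busy >= 99). A healthy sample clears the counter, while a too-short measurement (busy < 0) leaves it untouched. The counter logic by itself:

#include <stdbool.h>
#include <stdio.h>

/*
 * Returns true when a chip reset should be triggered: three consecutive
 * samples with >= 99% busy time.  busy < 0 means the measurement was too
 * short, so the running count is kept as-is.
 */
static bool hang_suspected(int busy, int *busy_count)
{
        if (busy >= 99) {
                if (++(*busy_count) >= 3)
                        return true;
        } else if (busy >= 0) {
                *busy_count = 0;        /* healthy sample, start over */
        }
        return false;
}

int main(void)
{
        int samples[] = { 100, 99, -1, 100 };
        int count = 0;

        for (unsigned int i = 0; i < 4; i++)
                printf("busy %3d -> reset %d\n", samples[i],
                       hang_suspected(samples[i], &count));
        return 0;
}
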
@@ -604,7 +641,15 @@ void ath9k_tasklet(unsigned long data)
        ath9k_ps_wakeup(sc);
        spin_lock(&sc->sc_pcu_lock);
 
-       if (!ath9k_hw_check_alive(ah))
+       /*
+        * Only run the baseband hang check if beacons stop working in AP or
+        * IBSS mode, because it has a high false positive rate. For station
+        * mode it should not be necessary, since the upper layers will detect
+        * this through a beacon miss automatically and the following channel
+        * change will trigger a hardware reset anyway
+        */
+       if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0 &&
+           !ath9k_hw_check_alive(ah))
                ieee80211_queue_work(sc->hw, &sc->hw_check_work);
 
        if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
@@ -783,54 +828,11 @@ chip_reset:
 #undef SCHED_INTR
 }
 
-static u32 ath_get_extchanmode(struct ath_softc *sc,
-                              struct ieee80211_channel *chan,
-                              enum nl80211_channel_type channel_type)
-{
-       u32 chanmode = 0;
-
-       switch (chan->band) {
-       case IEEE80211_BAND_2GHZ:
-               switch(channel_type) {
-               case NL80211_CHAN_NO_HT:
-               case NL80211_CHAN_HT20:
-                       chanmode = CHANNEL_G_HT20;
-                       break;
-               case NL80211_CHAN_HT40PLUS:
-                       chanmode = CHANNEL_G_HT40PLUS;
-                       break;
-               case NL80211_CHAN_HT40MINUS:
-                       chanmode = CHANNEL_G_HT40MINUS;
-                       break;
-               }
-               break;
-       case IEEE80211_BAND_5GHZ:
-               switch(channel_type) {
-               case NL80211_CHAN_NO_HT:
-               case NL80211_CHAN_HT20:
-                       chanmode = CHANNEL_A_HT20;
-                       break;
-               case NL80211_CHAN_HT40PLUS:
-                       chanmode = CHANNEL_A_HT40PLUS;
-                       break;
-               case NL80211_CHAN_HT40MINUS:
-                       chanmode = CHANNEL_A_HT40MINUS;
-                       break;
-               }
-               break;
-       default:
-               break;
-       }
-
-       return chanmode;
-}
-
 static void ath9k_bss_assoc_info(struct ath_softc *sc,
                                 struct ieee80211_hw *hw,
                                 struct ieee80211_vif *vif,
                                 struct ieee80211_bss_conf *bss_conf)
 {
-       struct ath_wiphy *aphy = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
 
@@ -854,7 +856,7 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
                ath_beacon_config(sc, vif);
 
                /* Reset rssi stats */
-               aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
+               sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
                sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
 
                sc->sc_flags |= SC_OP_ANI_RUN;
@@ -881,7 +883,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
        ath9k_hw_configpcipowersave(ah, 0, 0);
 
        if (!ah->curchan)
-               ah->curchan = ath_get_curchannel(sc, sc->hw);
+               ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah);
 
        r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
        if (r) {
@@ -890,7 +892,8 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
                        channel->center_freq, r);
        }
 
-       ath_update_txpow(sc);
+       ath9k_cmn_update_txpow(ah, sc->curtxpow,
+                              sc->config.txpowlimit, &sc->curtxpow);
        if (ath_startrecv(sc) != 0) {
                ath_err(common, "Unable to restart recv logic\n");
                goto out;
@@ -907,6 +910,8 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
        ath9k_hw_set_gpio(ah, ah->led_pin, 0);
 
        ieee80211_wake_queues(hw);
+       ieee80211_queue_delayed_work(hw, &sc->hw_pll_work, HZ/2);
+
 out:
        spin_unlock_bh(&sc->sc_pcu_lock);
 
@@ -920,6 +925,8 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
        int r;
 
        ath9k_ps_wakeup(sc);
+       cancel_delayed_work_sync(&sc->hw_pll_work);
+
        spin_lock_bh(&sc->sc_pcu_lock);
 
        ieee80211_stop_queues(hw);
@@ -942,7 +949,7 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
        ath_flushrecv(sc);              /* flush recv queue */
 
        if (!ah->curchan)
-               ah->curchan = ath_get_curchannel(sc, hw);
+               ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
 
        r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
        if (r) {
@@ -966,6 +973,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
        struct ieee80211_hw *hw = sc->hw;
        int r;
 
+       sc->hw_busy_count = 0;
+
        /* Stop ANI */
        del_timer_sync(&common->ani.timer);
 
@@ -993,7 +1002,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
         * that changes the channel so update any state that
         * might change as a result.
         */
-       ath_update_txpow(sc);
+       ath9k_cmn_update_txpow(ah, sc->curtxpow,
+                              sc->config.txpowlimit, &sc->curtxpow);
 
        if ((sc->sc_flags & SC_OP_BEACONS) || !(sc->sc_flags & (SC_OP_OFFCHANNEL)))
                ath_beacon_config(sc, NULL);    /* restart beacons */
@@ -1021,38 +1031,13 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
        return r;
 }
 
-/* XXX: Remove me once we don't depend on ath9k_channel for all
- * this redundant data */
-void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
-                          struct ath9k_channel *ichan)
-{
-       struct ieee80211_channel *chan = hw->conf.channel;
-       struct ieee80211_conf *conf = &hw->conf;
-
-       ichan->channel = chan->center_freq;
-       ichan->chan = chan;
-
-       if (chan->band == IEEE80211_BAND_2GHZ) {
-               ichan->chanmode = CHANNEL_G;
-               ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM | CHANNEL_G;
-       } else {
-               ichan->chanmode = CHANNEL_A;
-               ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
-       }
-
-       if (conf_is_ht(conf))
-               ichan->chanmode = ath_get_extchanmode(sc, chan,
-                                           conf->channel_type);
-}
-
 /**********************/
 /* mac80211 callbacks */
 /**********************/
 
 static int ath9k_start(struct ieee80211_hw *hw)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_channel *curchan = hw->conf.channel;
@@ -1065,32 +1050,10 @@ static int ath9k_start(struct ieee80211_hw *hw)
 
        mutex_lock(&sc->mutex);
 
-       if (ath9k_wiphy_started(sc)) {
-               if (sc->chan_idx == curchan->hw_value) {
-                       /*
-                        * Already on the operational channel, the new wiphy
-                        * can be marked active.
-                        */
-                       aphy->state = ATH_WIPHY_ACTIVE;
-                       ieee80211_wake_queues(hw);
-               } else {
-                       /*
-                        * Another wiphy is on another channel, start the new
-                        * wiphy in paused state.
-                        */
-                       aphy->state = ATH_WIPHY_PAUSED;
-                       ieee80211_stop_queues(hw);
-               }
-               mutex_unlock(&sc->mutex);
-               return 0;
-       }
-       aphy->state = ATH_WIPHY_ACTIVE;
-
        /* setup initial channel */
-
        sc->chan_idx = curchan->hw_value;
 
-       init_channel = ath_get_curchannel(sc, hw);
+       init_channel = ath9k_cmn_get_curchannel(hw, ah);
 
        /* Reset SERDES registers */
        ath9k_hw_configpcipowersave(ah, 0, 0);
@@ -1116,7 +1079,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
         * This is needed only to setup initial state
         * but it's best done after a reset.
         */
-       ath_update_txpow(sc);
+       ath9k_cmn_update_txpow(ah, sc->curtxpow,
+                       sc->config.txpowlimit, &sc->curtxpow);
 
        /*
         * Setup the hardware after reset:
@@ -1182,22 +1146,13 @@ mutex_unlock:
        return r;
 }
 
-static int ath9k_tx(struct ieee80211_hw *hw,
-                   struct sk_buff *skb)
+static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_tx_control txctl;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 
-       if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
-               ath_dbg(common, ATH_DBG_XMIT,
-                       "ath9k: %s: TX in unexpected wiphy state %d\n",
-                       wiphy_name(hw->wiphy), aphy->state);
-               goto exit;
-       }
-
        if (sc->ps_enabled) {
                /*
                 * mac80211 does not set PM field for normal data frames, so we
@@ -1248,52 +1203,30 @@ static int ath9k_tx(struct ieee80211_hw *hw,
                goto exit;
        }
 
-       return 0;
+       return;
 exit:
        dev_kfree_skb_any(skb);
-       return 0;
 }
 
 static void ath9k_stop(struct ieee80211_hw *hw)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
-       int i;
 
        mutex_lock(&sc->mutex);
 
-       aphy->state = ATH_WIPHY_INACTIVE;
-
-       if (led_blink)
-               cancel_delayed_work_sync(&sc->ath_led_blink_work);
-
        cancel_delayed_work_sync(&sc->tx_complete_work);
+       cancel_delayed_work_sync(&sc->hw_pll_work);
        cancel_work_sync(&sc->paprd_work);
        cancel_work_sync(&sc->hw_check_work);
 
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               if (sc->sec_wiphy[i])
-                       break;
-       }
-
-       if (i == sc->num_sec_wiphy) {
-               cancel_delayed_work_sync(&sc->wiphy_work);
-               cancel_work_sync(&sc->chan_work);
-       }
-
        if (sc->sc_flags & SC_OP_INVALID) {
                ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
                mutex_unlock(&sc->mutex);
                return;
        }
 
-       if (ath9k_wiphy_started(sc)) {
-               mutex_unlock(&sc->mutex);
-               return; /* another wiphy still in use */
-       }
-
        /* Ensure HW is awake when we try to shut it down. */
        ath9k_ps_wakeup(sc);
 
@@ -1319,6 +1252,11 @@ static void ath9k_stop(struct ieee80211_hw *hw)
        } else
                sc->rx.rxlink = NULL;
 
+       if (sc->rx.frag) {
+               dev_kfree_skb_any(sc->rx.frag);
+               sc->rx.frag = NULL;
+       }
+
        /* disable HAL and put h/w to sleep */
        ath9k_hw_disable(ah);
        ath9k_hw_configpcipowersave(ah, 1, 1);
@@ -1334,7 +1272,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
        ath9k_ps_restore(sc);
 
        sc->ps_idle = true;
-       ath9k_set_wiphy_idle(aphy, true);
        ath_radio_disable(sc, hw);
 
        sc->sc_flags |= SC_OP_INVALID;
@@ -1344,112 +1281,225 @@ static void ath9k_stop(struct ieee80211_hw *hw)
        ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
 }
 
-static int ath9k_add_interface(struct ieee80211_hw *hw,
-                              struct ieee80211_vif *vif)
+bool ath9k_uses_beacons(int type)
+{
+       switch (type) {
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_ADHOC:
+       case NL80211_IFTYPE_MESH_POINT:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static void ath9k_reclaim_beacon(struct ath_softc *sc,
+                                struct ieee80211_vif *vif)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
-       struct ath_hw *ah = sc->sc_ah;
-       struct ath_common *common = ath9k_hw_common(ah);
        struct ath_vif *avp = (void *)vif->drv_priv;
-       enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
-       int ret = 0;
 
-       mutex_lock(&sc->mutex);
+       ath9k_set_beaconing_status(sc, false);
+       ath_beacon_return(sc, avp);
+       ath9k_set_beaconing_status(sc, true);
+       sc->sc_flags &= ~SC_OP_BEACONS;
+}
+
+static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct ath9k_vif_iter_data *iter_data = data;
+       int i;
+
+       if (iter_data->hw_macaddr)
+               for (i = 0; i < ETH_ALEN; i++)
+                       iter_data->mask[i] &=
+                               ~(iter_data->hw_macaddr[i] ^ mac[i]);
 
        switch (vif->type) {
-       case NL80211_IFTYPE_STATION:
-               ic_opmode = NL80211_IFTYPE_STATION;
+       case NL80211_IFTYPE_AP:
+               iter_data->naps++;
                break;
-       case NL80211_IFTYPE_WDS:
-               ic_opmode = NL80211_IFTYPE_WDS;
+       case NL80211_IFTYPE_STATION:
+               iter_data->nstations++;
                break;
        case NL80211_IFTYPE_ADHOC:
-       case NL80211_IFTYPE_AP:
+               iter_data->nadhocs++;
+               break;
        case NL80211_IFTYPE_MESH_POINT:
-               if (sc->nbcnvifs >= ATH_BCBUF) {
-                       ret = -ENOBUFS;
-                       goto out;
-               }
-               ic_opmode = vif->type;
+               iter_data->nmeshes++;
+               break;
+       case NL80211_IFTYPE_WDS:
+               iter_data->nwds++;
                break;
        default:
-               ath_err(common, "Interface type %d not yet supported\n",
-                       vif->type);
-               ret = -EOPNOTSUPP;
-               goto out;
+               iter_data->nothers++;
+               break;
        }
+}
 
-       ath_dbg(common, ATH_DBG_CONFIG,
-               "Attach a VIF of type: %d\n", ic_opmode);
+/* Called with sc->mutex held. */
+void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
+                              struct ieee80211_vif *vif,
+                              struct ath9k_vif_iter_data *iter_data)
+{
+       struct ath_softc *sc = hw->priv;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
 
-       /* Set the VIF opmode */
-       avp->av_opmode = ic_opmode;
-       avp->av_bslot = -1;
+       /*
+        * Use the hardware MAC address as reference, the hardware uses it
+        * together with the BSSID mask when matching addresses.
+        */
+       memset(iter_data, 0, sizeof(*iter_data));
+       iter_data->hw_macaddr = common->macaddr;
+       memset(&iter_data->mask, 0xff, ETH_ALEN);
 
-       sc->nvifs++;
+       if (vif)
+               ath9k_vif_iter(iter_data, vif->addr, vif);
 
-       ath9k_set_bssid_mask(hw, vif);
+       /* Get list of all active MAC addresses */
+       ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
+                                                  iter_data);
+}
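The mask that ath9k_vif_iter() accumulates keeps a bit set only where the hardware MAC address and every active interface address agree; per the comment above, the hardware uses this mask together with its own address when matching received frames, so only the unmasked bits have to match. A worked sketch with made-up addresses (illustrative only, not taken from the driver):

/* hw MAC  00:03:7f:aa:bb:01, vif MAC  00:03:7f:aa:bb:02  (examples)
 * last byte: 0x01 ^ 0x02 = 0x03, ~0x03 = 0xfc
 * resulting mask: ff:ff:ff:ff:ff:fc, i.e. only the shared bits must match. */
static void example_bssid_mask(const u8 hw_mac[ETH_ALEN],
                               const u8 vif_mac[ETH_ALEN],
                               u8 mask[ETH_ALEN])
{
        int i;

        memset(mask, 0xff, ETH_ALEN);
        for (i = 0; i < ETH_ALEN; i++)
                mask[i] &= ~(hw_mac[i] ^ vif_mac[i]);
}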
 
-       if (sc->nvifs > 1)
-               goto out; /* skip global settings for secondary vif */
+/* Called with sc->mutex held. */
+static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
+                                         struct ieee80211_vif *vif)
+{
+       struct ath_softc *sc = hw->priv;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_vif_iter_data iter_data;
+
+       ath9k_calculate_iter_data(hw, vif, &iter_data);
+
+       ath9k_ps_wakeup(sc);
+       /* Set BSSID mask. */
+       memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
+       ath_hw_setbssidmask(common);
 
-       if (ic_opmode == NL80211_IFTYPE_AP) {
+       /* Set op-mode & TSF */
+       if (iter_data.naps > 0) {
                ath9k_hw_set_tsfadjust(ah, 1);
                sc->sc_flags |= SC_OP_TSF_RESET;
-       }
+               ah->opmode = NL80211_IFTYPE_AP;
+       } else {
+               ath9k_hw_set_tsfadjust(ah, 0);
+               sc->sc_flags &= ~SC_OP_TSF_RESET;
 
-       /* Set the device opmode */
-       ah->opmode = ic_opmode;
+               if (iter_data.nwds + iter_data.nmeshes)
+                       ah->opmode = NL80211_IFTYPE_AP;
+               else if (iter_data.nadhocs)
+                       ah->opmode = NL80211_IFTYPE_ADHOC;
+               else
+                       ah->opmode = NL80211_IFTYPE_STATION;
+       }
 
        /*
         * Enable MIB interrupts when there are hardware phy counters.
-        * Note we only do this (at the moment) for station mode.
         */
-       if ((vif->type == NL80211_IFTYPE_STATION) ||
-           (vif->type == NL80211_IFTYPE_ADHOC) ||
-           (vif->type == NL80211_IFTYPE_MESH_POINT)) {
+       if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0) {
                if (ah->config.enable_ani)
                        ah->imask |= ATH9K_INT_MIB;
                ah->imask |= ATH9K_INT_TSFOOR;
+       } else {
+               ah->imask &= ~ATH9K_INT_MIB;
+               ah->imask &= ~ATH9K_INT_TSFOOR;
        }
 
        ath9k_hw_set_interrupts(ah, ah->imask);
+       ath9k_ps_restore(sc);
 
-       if (vif->type == NL80211_IFTYPE_AP    ||
-           vif->type == NL80211_IFTYPE_ADHOC) {
+       /* Set up ANI */
+       if ((iter_data.naps + iter_data.nadhocs) > 0) {
                sc->sc_flags |= SC_OP_ANI_RUN;
                ath_start_ani(common);
+       } else {
+               sc->sc_flags &= ~SC_OP_ANI_RUN;
+               del_timer_sync(&common->ani.timer);
        }
+}
 
-out:
-       mutex_unlock(&sc->mutex);
-       return ret;
+/* Called with sc->mutex held, vif counts set up properly. */
+static void ath9k_do_vif_add_setup(struct ieee80211_hw *hw,
+                                  struct ieee80211_vif *vif)
+{
+       struct ath_softc *sc = hw->priv;
+
+       ath9k_calculate_summary_state(hw, vif);
+
+       if (ath9k_uses_beacons(vif->type)) {
+               int error;
+               /* This may fail because upper levels do not have beacons
+                * properly configured yet.  That's OK, we assume it
+                * will be properly configured and then we will be notified
+                * in the info_changed method and set up beacons properly
+                * there.
+                */
+               ath9k_set_beaconing_status(sc, false);
+               error = ath_beacon_alloc(sc, vif);
+               if (!error)
+                       ath_beacon_config(sc, vif);
+               ath9k_set_beaconing_status(sc, true);
+       }
 }
 
-static void ath9k_reclaim_beacon(struct ath_softc *sc,
-                                struct ieee80211_vif *vif)
+
+static int ath9k_add_interface(struct ieee80211_hw *hw,
+                              struct ieee80211_vif *vif)
 {
+       struct ath_softc *sc = hw->priv;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
        struct ath_vif *avp = (void *)vif->drv_priv;
+       int ret = 0;
 
-       /* Disable SWBA interrupt */
-       sc->sc_ah->imask &= ~ATH9K_INT_SWBA;
-       ath9k_ps_wakeup(sc);
-       ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
-       ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
-       tasklet_kill(&sc->bcon_tasklet);
-       ath9k_ps_restore(sc);
+       mutex_lock(&sc->mutex);
 
-       ath_beacon_return(sc, avp);
-       sc->sc_flags &= ~SC_OP_BEACONS;
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_WDS:
+       case NL80211_IFTYPE_ADHOC:
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_MESH_POINT:
+               break;
+       default:
+               ath_err(common, "Interface type %d not yet supported\n",
+                       vif->type);
+               ret = -EOPNOTSUPP;
+               goto out;
+       }
 
-       if (sc->nbcnvifs > 0) {
-               /* Re-enable beaconing */
-               sc->sc_ah->imask |= ATH9K_INT_SWBA;
-               ath9k_ps_wakeup(sc);
-               ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
-               ath9k_ps_restore(sc);
+       if (ath9k_uses_beacons(vif->type)) {
+               if (sc->nbcnvifs >= ATH_BCBUF) {
+                       ath_err(common, "Not enough beacon buffers when adding"
+                               " new interface of type: %i\n",
+                               vif->type);
+                       ret = -ENOBUFS;
+                       goto out;
+               }
+       }
+
+       if ((vif->type == NL80211_IFTYPE_ADHOC) &&
+           sc->nvifs > 0) {
+               ath_err(common, "Cannot create ADHOC interface when other"
+                       " interfaces already exist.\n");
+               ret = -EINVAL;
+               goto out;
        }
+
+       ath_dbg(common, ATH_DBG_CONFIG,
+               "Attach a VIF of type: %d\n", vif->type);
+
+       /* Set the VIF opmode */
+       avp->av_opmode = vif->type;
+       avp->av_bslot = -1;
+
+       sc->nvifs++;
+
+       ath9k_do_vif_add_setup(hw, vif);
+out:
+       mutex_unlock(&sc->mutex);
+       return ret;
 }
 
 static int ath9k_change_interface(struct ieee80211_hw *hw,
@@ -1457,40 +1507,40 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
                                  enum nl80211_iftype new_type,
                                  bool p2p)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int ret = 0;
 
        ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
        mutex_lock(&sc->mutex);
 
-       switch (new_type) {
-       case NL80211_IFTYPE_AP:
-       case NL80211_IFTYPE_ADHOC:
+       /* See if new interface type is valid. */
+       if ((new_type == NL80211_IFTYPE_ADHOC) &&
+           (sc->nvifs > 1)) {
+               ath_err(common, "When using ADHOC, it must be the only"
+                       " interface.\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (ath9k_uses_beacons(new_type) &&
+           !ath9k_uses_beacons(vif->type)) {
                if (sc->nbcnvifs >= ATH_BCBUF) {
                        ath_err(common, "No beacon slot available\n");
                        ret = -ENOBUFS;
                        goto out;
                }
-               break;
-       case NL80211_IFTYPE_STATION:
-               /* Stop ANI */
-               sc->sc_flags &= ~SC_OP_ANI_RUN;
-               del_timer_sync(&common->ani.timer);
-               if ((vif->type == NL80211_IFTYPE_AP) ||
-                   (vif->type == NL80211_IFTYPE_ADHOC))
-                       ath9k_reclaim_beacon(sc, vif);
-               break;
-       default:
-               ath_err(common, "Interface type %d not yet supported\n",
-                               vif->type);
-               ret = -ENOTSUPP;
-               goto out;
        }
+
+       /* Clean up old vif stuff */
+       if (ath9k_uses_beacons(vif->type))
+               ath9k_reclaim_beacon(sc, vif);
+
+       /* Add new settings */
        vif->type = new_type;
        vif->p2p = p2p;
 
+       ath9k_do_vif_add_setup(hw, vif);
 out:
        mutex_unlock(&sc->mutex);
        return ret;
@@ -1499,25 +1549,20 @@ out:
 static void ath9k_remove_interface(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 
        ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
 
        mutex_lock(&sc->mutex);
 
-       /* Stop ANI */
-       sc->sc_flags &= ~SC_OP_ANI_RUN;
-       del_timer_sync(&common->ani.timer);
+       sc->nvifs--;
 
        /* Reclaim beacon resources */
-       if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
-           (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
-           (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT))
+       if (ath9k_uses_beacons(vif->type))
                ath9k_reclaim_beacon(sc, vif);
 
-       sc->nvifs--;
+       ath9k_calculate_summary_state(hw, NULL);
 
        mutex_unlock(&sc->mutex);
 }
@@ -1558,12 +1603,11 @@ static void ath9k_disable_ps(struct ath_softc *sc)
 
 static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_conf *conf = &hw->conf;
-       bool disable_radio;
+       bool disable_radio = false;
 
        mutex_lock(&sc->mutex);
 
@@ -1574,29 +1618,13 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
         * the end.
         */
        if (changed & IEEE80211_CONF_CHANGE_IDLE) {
-               bool enable_radio;
-               bool all_wiphys_idle;
-               bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
-
-               spin_lock_bh(&sc->wiphy_lock);
-               all_wiphys_idle =  ath9k_all_wiphys_idle(sc);
-               ath9k_set_wiphy_idle(aphy, idle);
-
-               enable_radio = (!idle && all_wiphys_idle);
-
-               /*
-                * After we unlock here its possible another wiphy
-                * can be re-renabled so to account for that we will
-                * only disable the radio toward the end of this routine
-                * if by then all wiphys are still idle.
-                */
-               spin_unlock_bh(&sc->wiphy_lock);
-
-               if (enable_radio) {
-                       sc->ps_idle = false;
+               sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
+               if (!sc->ps_idle) {
                        ath_radio_enable(sc, hw);
                        ath_dbg(common, ATH_DBG_CONFIG,
                                "not-idle: enabling radio\n");
+               } else {
+                       disable_radio = true;
                }
        }
 
@@ -1637,29 +1665,17 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
                if (ah->curchan)
                        old_pos = ah->curchan - &ah->channels[0];
 
-               aphy->chan_idx = pos;
-               aphy->chan_is_ht = conf_is_ht(conf);
                if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
                        sc->sc_flags |= SC_OP_OFFCHANNEL;
                else
                        sc->sc_flags &= ~SC_OP_OFFCHANNEL;
 
-               if (aphy->state == ATH_WIPHY_SCAN ||
-                   aphy->state == ATH_WIPHY_ACTIVE)
-                       ath9k_wiphy_pause_all_forced(sc, aphy);
-               else {
-                       /*
-                        * Do not change operational channel based on a paused
-                        * wiphy changes.
-                        */
-                       goto skip_chan_change;
-               }
-
-               ath_dbg(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
-                       curchan->center_freq);
+               ath_dbg(common, ATH_DBG_CONFIG,
+                       "Set channel: %d MHz type: %d\n",
+                       curchan->center_freq, conf->channel_type);
 
-               /* XXX: remove me eventualy */
-               ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]);
+               ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
+                                         curchan, conf->channel_type);
 
                /* update survey stats for the old channel before switching */
                spin_lock_irqsave(&common->cc_lock, flags);
@@ -1701,21 +1717,18 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
                        ath_update_survey_nf(sc, old_pos);
        }
 
-skip_chan_change:
        if (changed & IEEE80211_CONF_CHANGE_POWER) {
+               ath_dbg(common, ATH_DBG_CONFIG,
+                       "Set power: %d\n", conf->power_level);
                sc->config.txpowlimit = 2 * conf->power_level;
                ath9k_ps_wakeup(sc);
-               ath_update_txpow(sc);
+               ath9k_cmn_update_txpow(ah, sc->curtxpow,
+                                      sc->config.txpowlimit, &sc->curtxpow);
                ath9k_ps_restore(sc);
        }
 
-       spin_lock_bh(&sc->wiphy_lock);
-       disable_radio = ath9k_all_wiphys_idle(sc);
-       spin_unlock_bh(&sc->wiphy_lock);
-
        if (disable_radio) {
                ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
-               sc->ps_idle = true;
                ath_radio_disable(sc, hw);
        }
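One remark on the IEEE80211_CONF_CHANGE_POWER branch above: mac80211 hands conf->power_level in whole dBm, while the limit ath9k tracks (and feeds to ath9k_cmn_update_txpow()) is kept in half-dBm steps, which is what the factor of two provides. A trivial worked example of the conversion:

/* mac80211 power level is in dBm; ath9k's txpowlimit is in 0.5 dBm units. */
static u16 example_txpowlimit(int power_level_dbm)
{
        return 2 * power_level_dbm;     /* e.g. 17 dBm -> 34 half-dBm units */
}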
 
@@ -1740,8 +1753,7 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
                                   unsigned int *total_flags,
                                   u64 multicast)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        u32 rfilt;
 
        changed_flags &= SUPPORTED_FILTERS;
@@ -1761,8 +1773,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
                         struct ieee80211_vif *vif,
                         struct ieee80211_sta *sta)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
 
        ath_node_attach(sc, sta);
 
@@ -1773,8 +1784,7 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw,
                            struct ieee80211_vif *vif,
                            struct ieee80211_sta *sta)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
 
        ath_node_detach(sc, sta);
 
@@ -1784,8 +1794,7 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw,
 static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
                         const struct ieee80211_tx_queue_params *params)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_txq *txq;
        struct ath9k_tx_queue_info qi;
@@ -1829,8 +1838,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
                         struct ieee80211_sta *sta,
                         struct ieee80211_key_conf *key)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int ret = 0;
 
@@ -1874,8 +1882,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
                                   struct ieee80211_bss_conf *bss_conf,
                                   u32 changed)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
+       struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_vif *avp = (void *)vif->drv_priv;
@@ -1904,10 +1912,11 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
        /* Enable transmission of beacons (AP, IBSS, MESH) */
        if ((changed & BSS_CHANGED_BEACON) ||
            ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon)) {
-               ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
-               error = ath_beacon_alloc(aphy, vif);
+               ath9k_set_beaconing_status(sc, false);
+               error = ath_beacon_alloc(sc, vif);
                if (!error)
                        ath_beacon_config(sc, vif);
+               ath9k_set_beaconing_status(sc, true);
        }
 
        if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -1930,21 +1939,26 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
        }
 
        /* Disable transmission of beacons */
-       if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon)
-               ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
+       if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
+           !bss_conf->enable_beacon) {
+               ath9k_set_beaconing_status(sc, false);
+               avp->is_bslot_active = false;
+               ath9k_set_beaconing_status(sc, true);
+       }
 
        if (changed & BSS_CHANGED_BEACON_INT) {
-               sc->beacon_interval = bss_conf->beacon_int;
+               cur_conf->beacon_interval = bss_conf->beacon_int;
                /*
                 * In case of AP mode, the HW TSF has to be reset
                 * when the beacon interval changes.
                 */
                if (vif->type == NL80211_IFTYPE_AP) {
                        sc->sc_flags |= SC_OP_TSF_RESET;
-                       ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
-                       error = ath_beacon_alloc(aphy, vif);
+                       ath9k_set_beaconing_status(sc, false);
+                       error = ath_beacon_alloc(sc, vif);
                        if (!error)
                                ath_beacon_config(sc, vif);
+                       ath9k_set_beaconing_status(sc, true);
                } else {
                        ath_beacon_config(sc, vif);
                }
@@ -1980,9 +1994,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
 
 static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
 {
+       struct ath_softc *sc = hw->priv;
        u64 tsf;
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
 
        mutex_lock(&sc->mutex);
        ath9k_ps_wakeup(sc);
@@ -1995,8 +2008,7 @@ static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
 
 static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
 
        mutex_lock(&sc->mutex);
        ath9k_ps_wakeup(sc);
@@ -2007,8 +2019,7 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
 
 static void ath9k_reset_tsf(struct ieee80211_hw *hw)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
 
        mutex_lock(&sc->mutex);
 
@@ -2023,10 +2034,9 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
                              struct ieee80211_vif *vif,
                              enum ieee80211_ampdu_mlme_action action,
                              struct ieee80211_sta *sta,
-                             u16 tid, u16 *ssn)
+                             u16 tid, u16 *ssn, u8 buf_size)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        int ret = 0;
 
        local_bh_disable();
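The extra buf_size argument on ampdu_action() comes from a mac80211 interface change: for a TX block-ack session it carries the buffer size negotiated in the ADDBA response, i.e. how many MPDUs the peer can reorder, so a driver can cap its aggregates accordingly. ath9k does not use the value in this patch; a hypothetical use would look like:

/* Illustrative only: this helper and the cap are not part of this patch. */
static u8 example_ampdu_subframe_limit(u8 buf_size)
{
        /* never aggregate more subframes than the peer's BA window,
         * and never more than the 802.11n maximum of 64 */
        return min_t(u8, buf_size, 64);
}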
@@ -2071,8 +2081,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
 static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
                             struct survey_info *survey)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ieee80211_supported_band *sband;
        struct ieee80211_channel *chan;
@@ -2106,52 +2115,68 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
        return 0;
 }
 
-static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
+static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
+       struct ath_hw *ah = sc->sc_ah;
 
        mutex_lock(&sc->mutex);
-       if (ath9k_wiphy_scanning(sc)) {
-               /*
-                * There is a race here in mac80211 but fixing it requires
-                * we revisit how we handle the scan complete callback.
-                * After mac80211 fixes we will not have configured hardware
-                * to the home channel nor would we have configured the RX
-                * filter yet.
-                */
-               mutex_unlock(&sc->mutex);
-               return;
-       }
-
-       aphy->state = ATH_WIPHY_SCAN;
-       ath9k_wiphy_pause_all_forced(sc, aphy);
+       ah->coverage_class = coverage_class;
+       ath9k_hw_init_global_settings(ah);
        mutex_unlock(&sc->mutex);
 }
 
-/*
- * XXX: this requires a revisit after the driver
- * scan_complete gets moved to another place/removed in mac80211.
- */
-static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
+static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+#define ATH_FLUSH_TIMEOUT      60 /* ms */
+       struct ath_softc *sc = hw->priv;
+       struct ath_txq *txq = NULL;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
+       int i, j, npend = 0;
 
        mutex_lock(&sc->mutex);
-       aphy->state = ATH_WIPHY_ACTIVE;
-       mutex_unlock(&sc->mutex);
-}
 
-static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
-{
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
-       struct ath_hw *ah = sc->sc_ah;
+       cancel_delayed_work_sync(&sc->tx_complete_work);
 
-       mutex_lock(&sc->mutex);
-       ah->coverage_class = coverage_class;
-       ath9k_hw_init_global_settings(ah);
+       for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+               if (!ATH_TXQ_SETUP(sc, i))
+                       continue;
+               txq = &sc->tx.txq[i];
+
+               if (!drop) {
+                       for (j = 0; j < ATH_FLUSH_TIMEOUT; j++) {
+                               if (!ath9k_has_pending_frames(sc, txq))
+                                       break;
+                               usleep_range(1000, 2000);
+                       }
+               }
+
+               if (drop || ath9k_has_pending_frames(sc, txq)) {
+                       ath_dbg(common, ATH_DBG_QUEUE, "Drop frames from hw queue:%d\n",
+                               txq->axq_qnum);
+                       spin_lock_bh(&txq->axq_lock);
+                       txq->txq_flush_inprogress = true;
+                       spin_unlock_bh(&txq->axq_lock);
+
+                       ath9k_ps_wakeup(sc);
+                       ath9k_hw_stoptxdma(ah, txq->axq_qnum);
+                       npend = ath9k_hw_numtxpending(ah, txq->axq_qnum);
+                       ath9k_ps_restore(sc);
+                       if (npend)
+                               break;
+
+                       ath_draintxq(sc, txq, false);
+                       txq->txq_flush_inprogress = false;
+               }
+       }
+
+       if (npend) {
+               ath_reset(sc, false);
+               txq->txq_flush_inprogress = false;
+       }
+
+       ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
        mutex_unlock(&sc->mutex);
 }
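The new ath9k_flush() above prefers draining to dropping: unless mac80211 asked for a drop, each queue is polled for up to ATH_FLUSH_TIMEOUT milliseconds before TX DMA is stopped and whatever is left gets discarded, falling back to a full ath_reset() if the queue refuses to stop. A distilled sketch of the per-queue wait loop, taking ath9k_has_pending_frames() as given from the driver:

/* Sketch of the drain-or-drop decision for one queue. */
static bool example_wait_for_drain(struct ath_softc *sc, struct ath_txq *txq,
                                   int timeout_ms)
{
        int i;

        for (i = 0; i < timeout_ms; i++) {
                if (!ath9k_has_pending_frames(sc, txq))
                        return true;            /* drained cleanly */
                usleep_range(1000, 2000);       /* ~1-2 ms, sleeping context */
        }

        return false;   /* caller stops DMA and drops what is left */
}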
 
@@ -2174,8 +2199,7 @@ struct ieee80211_ops ath9k_ops = {
        .reset_tsf          = ath9k_reset_tsf,
        .ampdu_action       = ath9k_ampdu_action,
        .get_survey         = ath9k_get_survey,
-       .sw_scan_start      = ath9k_sw_scan_start,
-       .sw_scan_complete   = ath9k_sw_scan_complete,
        .rfkill_poll        = ath9k_rfkill_poll_state,
        .set_coverage_class = ath9k_set_coverage_class,
+       .flush              = ath9k_flush,
 };
index 78ef1f13386fa6b8c38e3674a53fa869b4b8518c..e83128c50f7b60bdf70f0f0eb3e674e8b62f6fe0 100644 (file)
@@ -126,7 +126,6 @@ static const struct ath_bus_ops ath_pci_bus_ops = {
 static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        void __iomem *mem;
-       struct ath_wiphy *aphy;
        struct ath_softc *sc;
        struct ieee80211_hw *hw;
        u8 csz;
@@ -198,8 +197,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto err_iomap;
        }
 
-       hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) +
-                               sizeof(struct ath_softc), &ath9k_ops);
+       hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
        if (!hw) {
                dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
                ret = -ENOMEM;
@@ -209,11 +207,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        SET_IEEE80211_DEV(hw, &pdev->dev);
        pci_set_drvdata(pdev, hw);
 
-       aphy = hw->priv;
-       sc = (struct ath_softc *) (aphy + 1);
-       aphy->sc = sc;
-       aphy->hw = hw;
-       sc->pri_wiphy = aphy;
+       sc = hw->priv;
        sc->hw = hw;
        sc->dev = &pdev->dev;
        sc->mem = mem;
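With the virtual-wiphy layer gone, the driver-private area returned by ieee80211_alloc_hw() is the ath_softc itself. The removed lines above show why the old probe needed the (aphy + 1) arithmetic; a short illustration of the two layouts:

/* Old layout (removed above): one allocation carried both structures,
 *
 *     hw->priv --> [ struct ath_wiphy ][ struct ath_softc ]
 *
 * hence:  sc = (struct ath_softc *)(aphy + 1);   (step over the wiphy)
 *
 * New layout: the private area is the softc and is used directly: */
struct ath_softc *sc = hw->priv;
sc->hw = hw;            /* back-pointer kept for the rest of the driver */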
@@ -260,8 +254,7 @@ err_dma:
 static void ath_pci_remove(struct pci_dev *pdev)
 {
        struct ieee80211_hw *hw = pci_get_drvdata(pdev);
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        void __iomem *mem = sc->mem;
 
        if (!is_ath9k_unloaded)
@@ -281,8 +274,7 @@ static int ath_pci_suspend(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct ieee80211_hw *hw = pci_get_drvdata(pdev);
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
 
        ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
 
@@ -293,8 +285,7 @@ static int ath_pci_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct ieee80211_hw *hw = pci_get_drvdata(pdev);
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        u32 val;
 
        /*
@@ -320,7 +311,6 @@ static int ath_pci_resume(struct device *device)
        ath9k_ps_restore(sc);
 
        sc->ps_idle = true;
-       ath9k_set_wiphy_idle(aphy, true);
        ath_radio_disable(sc, hw);
 
        return 0;
index e45147820eae4cac44cece134585cc3bcd8f7865..960d717ca7c2458ea25788239c9fb4f25c19d41a 100644 (file)
@@ -1560,8 +1560,7 @@ static void ath_rate_add_sta_debugfs(void *priv, void *priv_sta,
 
 static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       return aphy->sc;
+       return hw->priv;
 }
 
 static void ath_rate_free(void *priv)
index b2497b8601e5bb6e4f5411968d2bc5faedac8eb7..cb559e345b865ebb8aba346a72a6dbefe89ac3b0 100644 (file)
@@ -34,27 +34,6 @@ static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
               (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
 }
 
-static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
-                                            struct ieee80211_hdr *hdr)
-{
-       struct ieee80211_hw *hw = sc->pri_wiphy->hw;
-       int i;
-
-       spin_lock_bh(&sc->wiphy_lock);
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               struct ath_wiphy *aphy = sc->sec_wiphy[i];
-               if (aphy == NULL)
-                       continue;
-               if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
-                   == 0) {
-                       hw = aphy->hw;
-                       break;
-               }
-       }
-       spin_unlock_bh(&sc->wiphy_lock);
-       return hw;
-}
-
 /*
  * Setup and link descriptors.
  *
@@ -230,11 +209,6 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
        int error = 0, i;
        u32 size;
 
-
-       common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
-                                    ah->caps.rx_status_len,
-                                    min(common->cachelsz, (u16)64));
-
        ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
                                    ah->caps.rx_status_len);
 
@@ -321,12 +295,12 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
        sc->sc_flags &= ~SC_OP_RXFLUSH;
        spin_lock_init(&sc->rx.rxbuflock);
 
+       common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
+                            sc->sc_ah->caps.rx_status_len;
+
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                return ath_rx_edma_init(sc, nbufs);
        } else {
-               common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
-                               min(common->cachelsz, (u16)64));
-
                ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
                        common->cachelsz, common->rx_bufsize);
 
@@ -463,8 +437,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
        if (conf_is_ht(&sc->hw->conf))
                rfilt |= ATH9K_RX_FILTER_COMP_BAR;
 
-       if (sc->sec_wiphy || (sc->nvifs > 1) ||
-           (sc->rx.rxfilter & FIF_OTHER_BSS)) {
+       if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
                /* The following may also be needed for other older chips */
                if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
                        rfilt |= ATH9K_RX_FILTER_PROM;
@@ -588,8 +561,14 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
                return;
 
        mgmt = (struct ieee80211_mgmt *)skb->data;
-       if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
+       if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) {
+               /* TODO:  This doesn't work well if you have stations
+                * associated to two different APs because curbssid
+                * is just the last AP that any of the stations associated
+                * with.
+                */
                return; /* not from our current AP */
+       }
 
        sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
 
@@ -662,37 +641,6 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
        }
 }
 
-static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
-                                   struct ath_softc *sc, struct sk_buff *skb)
-{
-       struct ieee80211_hdr *hdr;
-
-       hdr = (struct ieee80211_hdr *)skb->data;
-
-       /* Send the frame to mac80211 */
-       if (is_multicast_ether_addr(hdr->addr1)) {
-               int i;
-               /*
-                * Deliver broadcast/multicast frames to all suitable
-                * virtual wiphys.
-                */
-               /* TODO: filter based on channel configuration */
-               for (i = 0; i < sc->num_sec_wiphy; i++) {
-                       struct ath_wiphy *aphy = sc->sec_wiphy[i];
-                       struct sk_buff *nskb;
-                       if (aphy == NULL)
-                               continue;
-                       nskb = skb_copy(skb, GFP_ATOMIC);
-                       if (!nskb)
-                               continue;
-                       ieee80211_rx(aphy->hw, nskb);
-               }
-               ieee80211_rx(sc->hw, skb);
-       } else
-               /* Deliver unicast frames based on receiver address */
-               ieee80211_rx(hw, skb);
-}
-
 static bool ath_edma_get_buffers(struct ath_softc *sc,
                                 enum ath9k_rx_qtype qtype)
 {
@@ -862,15 +810,9 @@ static bool ath9k_rx_accept(struct ath_common *common,
        if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
                return false;
 
-       /*
-        * rs_more indicates chained descriptors which can be used
-        * to link buffers together for a sort of scatter-gather
-        * operation.
-        * reject the frame, we don't support scatter-gather yet and
-        * the frame is probably corrupt anyway
-        */
+       /* Only use error bits from the last fragment */
        if (rx_stats->rs_more)
-               return false;
+               return true;
 
        /*
         * The rx_stats->rs_status will not be set until the end of the
@@ -974,7 +916,7 @@ static void ath9k_process_rssi(struct ath_common *common,
                               struct ieee80211_hdr *hdr,
                               struct ath_rx_status *rx_stats)
 {
-       struct ath_wiphy *aphy = hw->priv;
+       struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = common->ah;
        int last_rssi;
        __le16 fc;
@@ -984,13 +926,19 @@ static void ath9k_process_rssi(struct ath_common *common,
 
        fc = hdr->frame_control;
        if (!ieee80211_is_beacon(fc) ||
-           compare_ether_addr(hdr->addr3, common->curbssid))
+           compare_ether_addr(hdr->addr3, common->curbssid)) {
+               /* TODO:  This doesn't work well if you have stations
+                * associated to two different APs because curbssid
+                * is just the last AP that any of the stations associated
+                * with.
+                */
                return;
+       }
 
        if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
-               ATH_RSSI_LPF(aphy->last_rssi, rx_stats->rs_rssi);
+               ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
 
-       last_rssi = aphy->last_rssi;
+       last_rssi = sc->last_rssi;
        if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
                rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
                                              ATH_RSSI_EP_MULTIPLIER);
@@ -1022,6 +970,10 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
        if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
                return -EINVAL;
 
+       /* Only use status info from the last fragment */
+       if (rx_stats->rs_more)
+               return 0;
+
        ath9k_process_rssi(common, hw, hdr, rx_stats);
 
        if (ath9k_process_rate(common, hw, rx_stats, rx_status))
@@ -1031,7 +983,7 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
        rx_status->freq = hw->conf.channel->center_freq;
        rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
        rx_status->antenna = rx_stats->rs_antenna;
-       rx_status->flag |= RX_FLAG_TSFT;
+       rx_status->flag |= RX_FLAG_MACTIME_MPDU;
 
        return 0;
 }
@@ -1623,7 +1575,7 @@ div_comb_done:
 int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
 {
        struct ath_buf *bf;
-       struct sk_buff *skb = NULL, *requeue_skb;
+       struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
        struct ieee80211_rx_status *rxs;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
@@ -1632,7 +1584,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
         * virtual wiphy so to account for that we iterate over the active
         * wiphys and find the appropriate wiphy and therefore hw.
         */
-       struct ieee80211_hw *hw = NULL;
+       struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_hdr *hdr;
        int retval;
        bool decrypt_error = false;
@@ -1674,10 +1626,17 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                if (!skb)
                        continue;
 
-               hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len);
-               rxs =  IEEE80211_SKB_RXCB(skb);
+               /*
+                * Take frame header from the first fragment and RX status from
+                * the last one.
+                */
+               if (sc->rx.frag)
+                       hdr_skb = sc->rx.frag;
+               else
+                       hdr_skb = skb;
 
-               hw = ath_get_virt_hw(sc, hdr);
+               hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
+               rxs = IEEE80211_SKB_RXCB(hdr_skb);
 
                ath_debug_stat_rx(sc, &rs);
 
@@ -1686,12 +1645,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                 * chain it back at the queue without processing it.
                 */
                if (flush)
-                       goto requeue;
+                       goto requeue_drop_frag;
 
                retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
                                                 rxs, &decrypt_error);
                if (retval)
-                       goto requeue;
+                       goto requeue_drop_frag;
 
                rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
                if (rs.rs_tstamp > tsf_lower &&
@@ -1711,7 +1670,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                 * skb and put it at the tail of the sc->rx.rxbuf list for
                 * processing. */
                if (!requeue_skb)
-                       goto requeue;
+                       goto requeue_drop_frag;
 
                /* Unmap the frame */
                dma_unmap_single(sc->dev, bf->bf_buf_addr,
@@ -1722,8 +1681,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                if (ah->caps.rx_status_len)
                        skb_pull(skb, ah->caps.rx_status_len);
 
-               ath9k_rx_skb_postprocess(common, skb, &rs,
-                                        rxs, decrypt_error);
+               if (!rs.rs_more)
+                       ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
+                                                rxs, decrypt_error);
 
                /* We will now give hardware our shiny new allocated skb */
                bf->bf_mpdu = requeue_skb;
@@ -1736,10 +1696,42 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                        bf->bf_mpdu = NULL;
                        bf->bf_buf_addr = 0;
                        ath_err(common, "dma_mapping_error() on RX\n");
-                       ath_rx_send_to_mac80211(hw, sc, skb);
+                       ieee80211_rx(hw, skb);
                        break;
                }
 
+               if (rs.rs_more) {
+                       /*
+                        * rs_more indicates chained descriptors which can be
+                        * used to link buffers together for a sort of
+                        * scatter-gather operation.
+                        */
+                       if (sc->rx.frag) {
+                               /* too many fragments - cannot handle frame */
+                               dev_kfree_skb_any(sc->rx.frag);
+                               dev_kfree_skb_any(skb);
+                               skb = NULL;
+                       }
+                       sc->rx.frag = skb;
+                       goto requeue;
+               }
+
+               if (sc->rx.frag) {
+                       int space = skb->len - skb_tailroom(hdr_skb);
+
+                       sc->rx.frag = NULL;
+
+                       if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
+                               dev_kfree_skb(skb);
+                               goto requeue_drop_frag;
+                       }
+
+                       skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
+                                                 skb->len);
+                       dev_kfree_skb_any(skb);
+                       skb = hdr_skb;
+               }
+
                /*
                 * change the default rx antenna if rx diversity chooses the
                 * other antenna 3 times in a row.
@@ -1763,8 +1755,13 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
                        ath_ant_comb_scan(sc, &rs);
 
-               ath_rx_send_to_mac80211(hw, sc, skb);
+               ieee80211_rx(hw, skb);
 
+requeue_drop_frag:
+               if (sc->rx.frag) {
+                       dev_kfree_skb_any(sc->rx.frag);
+                       sc->rx.frag = NULL;
+               }
 requeue:
                if (edma) {
                        list_add_tail(&bf->list, &sc->rx.rxbuf);
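The rs_more handling added above reassembles chained RX descriptors in software: the first part of the frame is parked in sc->rx.frag, and each following part is copied onto the tail of that head skb, with status and error bits taken only from the last part as the earlier hunks note. A minimal sketch of the append step, with error handling trimmed down:

/* Grow the head skb's tailroom if needed, then copy the next fragment in. */
static int example_append_frag(struct sk_buff *hdr_skb, struct sk_buff *frag)
{
        int space = frag->len - skb_tailroom(hdr_skb);

        if (space > 0 && pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0)
                return -ENOMEM;         /* caller drops both skbs */

        skb_copy_from_linear_data(frag, skb_put(hdr_skb, frag->len),
                                  frag->len);
        dev_kfree_skb_any(frag);
        return 0;
}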
index 4df5659c6c165a3c77d4ccf037e64298e5f52b8f..8fa8acfde62ee22df8922134501020fa0afb320b 100644 (file)
 #define AR_SREV_REVISION_9300_20       2 /* 2.0 and 2.1 */
 #define AR_SREV_VERSION_9485           0x240
 #define AR_SREV_REVISION_9485_10       0
+#define AR_SREV_REVISION_9485_11        1
 
 #define AR_SREV_5416(_ah) \
        (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
 #define AR_SREV_9485_10(_ah) \
        (AR_SREV_9485(_ah) && \
         ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_10))
+#define AR_SREV_9485_11(_ah) \
+       (AR_SREV_9485(_ah) && \
+        ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_11))
 
 #define AR_SREV_9285E_20(_ah) \
     (AR_SREV_9285_12_OR_LATER(_ah) && \
 enum ath_usb_dev {
        AR9280_USB = 1, /* AR7010 + AR9280, UB94 */
        AR9287_USB = 2, /* AR7010 + AR9287, UB95 */
+       STORAGE_DEVICE = 3,
 };
 
 #define AR_DEVID_7010(_ah) \
@@ -1083,6 +1088,17 @@ enum {
 #define AR_ENT_OTP               0x40d8
 #define AR_ENT_OTP_CHAIN2_DISABLE               0x00020000
 #define AR_ENT_OTP_MPSD                0x00800000
+#define AR_CH0_BB_DPLL2          0x16184
+#define AR_CH0_BB_DPLL3          0x16188
+#define AR_CH0_DDR_DPLL2         0x16244
+#define AR_CH0_DDR_DPLL3         0x16248
+#define AR_CH0_DPLL2_KD              0x03F80000
+#define AR_CH0_DPLL2_KD_S            19
+#define AR_CH0_DPLL2_KI              0x3C000000
+#define AR_CH0_DPLL2_KI_S            26
+#define AR_CH0_DPLL3_PHASE_SHIFT     0x3F800000
+#define AR_CH0_DPLL3_PHASE_SHIFT_S   23
+#define AR_PHY_CCA_NOM_VAL_2GHZ      -118
 
 #define AR_RTC_9300_PLL_DIV          0x000003ff
 #define AR_RTC_9300_PLL_DIV_S        0
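The new AR_CH0_* definitions follow the usual ath9k pattern of a field mask paired with a matching _S shift constant. A generic sketch of how such a field is read and rewritten; REG_READ()/REG_WRITE() stand in for the driver's register accessors, and new_kd is an arbitrary value:

u32 reg = REG_READ(ah, AR_CH0_BB_DPLL2);
u32 kd  = (reg & AR_CH0_DPLL2_KD) >> AR_CH0_DPLL2_KD_S;    /* extract KD  */

reg &= ~AR_CH0_DPLL2_KD;                                   /* clear field */
reg |= (new_kd << AR_CH0_DPLL2_KD_S) & AR_CH0_DPLL2_KD;    /* insert value */
REG_WRITE(ah, AR_CH0_BB_DPLL2, reg);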
@@ -1129,6 +1145,12 @@ enum {
 #define AR_RTC_PLL_CLKSEL       0x00000300
 #define AR_RTC_PLL_CLKSEL_S     8
 
+#define PLL3 0x16188
+#define PLL3_DO_MEAS_MASK 0x40000000
+#define PLL4 0x1618c
+#define PLL4_MEAS_DONE    0x8
+#define SQSUM_DVC_MASK 0x007ffff8
+
 #define AR_RTC_RESET \
        ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040)
 #define AR_RTC_RESET_EN                (0x00000001)
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
deleted file mode 100644 (file)
index 2dc7095..0000000
+++ /dev/null
@@ -1,717 +0,0 @@
-/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/slab.h>
-
-#include "ath9k.h"
-
-struct ath9k_vif_iter_data {
-       const u8 *hw_macaddr;
-       u8 mask[ETH_ALEN];
-};
-
-static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
-       struct ath9k_vif_iter_data *iter_data = data;
-       int i;
-
-       for (i = 0; i < ETH_ALEN; i++)
-               iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
-}
-
-void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
-       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ath9k_vif_iter_data iter_data;
-       int i;
-
-       /*
-        * Use the hardware MAC address as reference, the hardware uses it
-        * together with the BSSID mask when matching addresses.
-        */
-       iter_data.hw_macaddr = common->macaddr;
-       memset(&iter_data.mask, 0xff, ETH_ALEN);
-
-       if (vif)
-               ath9k_vif_iter(&iter_data, vif->addr, vif);
-
-       /* Get list of all active MAC addresses */
-       spin_lock_bh(&sc->wiphy_lock);
-       ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
-                                                  &iter_data);
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               if (sc->sec_wiphy[i] == NULL)
-                       continue;
-               ieee80211_iterate_active_interfaces_atomic(
-                       sc->sec_wiphy[i]->hw, ath9k_vif_iter, &iter_data);
-       }
-       spin_unlock_bh(&sc->wiphy_lock);
-
-       memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
-       ath_hw_setbssidmask(common);
-}
-
-int ath9k_wiphy_add(struct ath_softc *sc)
-{
-       int i, error;
-       struct ath_wiphy *aphy;
-       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ieee80211_hw *hw;
-       u8 addr[ETH_ALEN];
-
-       hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops);
-       if (hw == NULL)
-               return -ENOMEM;
-
-       spin_lock_bh(&sc->wiphy_lock);
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               if (sc->sec_wiphy[i] == NULL)
-                       break;
-       }
-
-       if (i == sc->num_sec_wiphy) {
-               /* No empty slot available; increase array length */
-               struct ath_wiphy **n;
-               n = krealloc(sc->sec_wiphy,
-                            (sc->num_sec_wiphy + 1) *
-                            sizeof(struct ath_wiphy *),
-                            GFP_ATOMIC);
-               if (n == NULL) {
-                       spin_unlock_bh(&sc->wiphy_lock);
-                       ieee80211_free_hw(hw);
-                       return -ENOMEM;
-               }
-               n[i] = NULL;
-               sc->sec_wiphy = n;
-               sc->num_sec_wiphy++;
-       }
-
-       SET_IEEE80211_DEV(hw, sc->dev);
-
-       aphy = hw->priv;
-       aphy->sc = sc;
-       aphy->hw = hw;
-       sc->sec_wiphy[i] = aphy;
-       aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
-       spin_unlock_bh(&sc->wiphy_lock);
-
-       memcpy(addr, common->macaddr, ETH_ALEN);
-       addr[0] |= 0x02; /* Locally managed address */
-       /*
-        * XOR virtual wiphy index into the least significant bits to generate
-        * a different MAC address for each virtual wiphy.
-        */
-       addr[5] ^= i & 0xff;
-       addr[4] ^= (i & 0xff00) >> 8;
-       addr[3] ^= (i & 0xff0000) >> 16;
-
-       SET_IEEE80211_PERM_ADDR(hw, addr);
-
-       ath9k_set_hw_capab(sc, hw);
-
-       error = ieee80211_register_hw(hw);
-
-       if (error == 0) {
-               /* Make sure wiphy scheduler is started (if enabled) */
-               ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int);
-       }
-
-       return error;
-}
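
The per-slot address derivation above is likewise easy to check in isolation: set the locally administered bit and XOR the secondary-wiphy slot index into the three least significant bytes. A small stand-alone sketch, with an invented base address:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Same derivation as above: copy the base MAC, set the locally administered
 * bit, XOR the secondary-wiphy slot index into the low three bytes. */
static void derive_wiphy_addr(uint8_t addr[ETH_ALEN],
                              const uint8_t base[ETH_ALEN], unsigned int idx)
{
        memcpy(addr, base, ETH_ALEN);
        addr[0] |= 0x02;
        addr[5] ^= idx & 0xff;
        addr[4] ^= (idx & 0xff00) >> 8;
        addr[3] ^= (idx & 0xff0000) >> 16;
}

int main(void)
{
        /* invented base address, not taken from real hardware */
        const uint8_t base[ETH_ALEN] = { 0x00, 0x03, 0x7f, 0x11, 0x22, 0x33 };
        uint8_t addr[ETH_ALEN];
        unsigned int idx;
        int i;

        for (idx = 0; idx < 3; idx++) {
                derive_wiphy_addr(addr, base, idx);
                for (i = 0; i < ETH_ALEN; i++)
                        printf("%02x%c", addr[i],
                               i == ETH_ALEN - 1 ? '\n' : ':');
        }
        /* slot 0: 02:03:7f:11:22:33, slot 1: ...:22:32, slot 2: ...:22:31 */
        return 0;
}
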
-
-int ath9k_wiphy_del(struct ath_wiphy *aphy)
-{
-       struct ath_softc *sc = aphy->sc;
-       int i;
-
-       spin_lock_bh(&sc->wiphy_lock);
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               if (aphy == sc->sec_wiphy[i]) {
-                       sc->sec_wiphy[i] = NULL;
-                       spin_unlock_bh(&sc->wiphy_lock);
-                       ieee80211_unregister_hw(aphy->hw);
-                       ieee80211_free_hw(aphy->hw);
-                       return 0;
-               }
-       }
-       spin_unlock_bh(&sc->wiphy_lock);
-       return -ENOENT;
-}
-
-static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
-                              struct ieee80211_vif *vif, const u8 *bssid,
-                              int ps)
-{
-       struct ath_softc *sc = aphy->sc;
-       struct ath_tx_control txctl;
-       struct sk_buff *skb;
-       struct ieee80211_hdr *hdr;
-       __le16 fc;
-       struct ieee80211_tx_info *info;
-
-       skb = dev_alloc_skb(24);
-       if (skb == NULL)
-               return -ENOMEM;
-       hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
-       memset(hdr, 0, 24);
-       fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
-                        IEEE80211_FCTL_TODS);
-       if (ps)
-               fc |= cpu_to_le16(IEEE80211_FCTL_PM);
-       hdr->frame_control = fc;
-       memcpy(hdr->addr1, bssid, ETH_ALEN);
-       memcpy(hdr->addr2, aphy->hw->wiphy->perm_addr, ETH_ALEN);
-       memcpy(hdr->addr3, bssid, ETH_ALEN);
-
-       info = IEEE80211_SKB_CB(skb);
-       memset(info, 0, sizeof(*info));
-       info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS;
-       info->control.vif = vif;
-       info->control.rates[0].idx = 0;
-       info->control.rates[0].count = 4;
-       info->control.rates[1].idx = -1;
-
-       memset(&txctl, 0, sizeof(struct ath_tx_control));
-       txctl.txq = sc->tx.txq_map[WME_AC_VO];
-       txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
-
-       if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
-               goto exit;
-
-       return 0;
-exit:
-       dev_kfree_skb_any(skb);
-       return -1;
-}
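
The nullfunc frame built above carries little beyond a frame-control word and three addresses; the pause variant differs from the unpause variant by the power-management bit alone. A small sketch of the frame-control arithmetic, assuming the standard 802.11 encoding used by include/linux/ieee80211.h (protocol version in bits 0-1, type in bits 2-3, subtype in bits 4-7):

#include <stdint.h>
#include <stdio.h>

/* Host-order values; the driver converts with cpu_to_le16() before the
 * word goes on the air. */
#define IEEE80211_FCTL_TODS             0x0100
#define IEEE80211_FCTL_PM               0x1000
#define IEEE80211_FTYPE_DATA            0x0008  /* type 2 in bits 2-3 */
#define IEEE80211_STYPE_NULLFUNC        0x0040  /* subtype 4 in bits 4-7 */

int main(void)
{
        uint16_t fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
                      IEEE80211_FCTL_TODS;

        printf("unpause nullfunc fc = 0x%04x\n", (unsigned int)fc);   /* 0x0148 */
        printf("pause   nullfunc fc = 0x%04x\n",
               (unsigned int)(fc | IEEE80211_FCTL_PM));               /* 0x1148 */
        return 0;
}
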
-
-static bool __ath9k_wiphy_pausing(struct ath_softc *sc)
-{
-       int i;
-       if (sc->pri_wiphy->state == ATH_WIPHY_PAUSING)
-               return true;
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               if (sc->sec_wiphy[i] &&
-                   sc->sec_wiphy[i]->state == ATH_WIPHY_PAUSING)
-                       return true;
-       }
-       return false;
-}
-
-static bool ath9k_wiphy_pausing(struct ath_softc *sc)
-{
-       bool ret;
-       spin_lock_bh(&sc->wiphy_lock);
-       ret = __ath9k_wiphy_pausing(sc);
-       spin_unlock_bh(&sc->wiphy_lock);
-       return ret;
-}
-
-static bool __ath9k_wiphy_scanning(struct ath_softc *sc)
-{
-       int i;
-       if (sc->pri_wiphy->state == ATH_WIPHY_SCAN)
-               return true;
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               if (sc->sec_wiphy[i] &&
-                   sc->sec_wiphy[i]->state == ATH_WIPHY_SCAN)
-                       return true;
-       }
-       return false;
-}
-
-bool ath9k_wiphy_scanning(struct ath_softc *sc)
-{
-       bool ret;
-       spin_lock_bh(&sc->wiphy_lock);
-       ret = __ath9k_wiphy_scanning(sc);
-       spin_unlock_bh(&sc->wiphy_lock);
-       return ret;
-}
-
-static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy);
-
-/* caller must hold wiphy_lock */
-static void __ath9k_wiphy_unpause_ch(struct ath_wiphy *aphy)
-{
-       if (aphy == NULL)
-               return;
-       if (aphy->chan_idx != aphy->sc->chan_idx)
-               return; /* wiphy not on the selected channel */
-       __ath9k_wiphy_unpause(aphy);
-}
-
-static void ath9k_wiphy_unpause_channel(struct ath_softc *sc)
-{
-       int i;
-       spin_lock_bh(&sc->wiphy_lock);
-       __ath9k_wiphy_unpause_ch(sc->pri_wiphy);
-       for (i = 0; i < sc->num_sec_wiphy; i++)
-               __ath9k_wiphy_unpause_ch(sc->sec_wiphy[i]);
-       spin_unlock_bh(&sc->wiphy_lock);
-}
-
-void ath9k_wiphy_chan_work(struct work_struct *work)
-{
-       struct ath_softc *sc = container_of(work, struct ath_softc, chan_work);
-       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ath_wiphy *aphy = sc->next_wiphy;
-
-       if (aphy == NULL)
-               return;
-
-       /*
-        * All pending interfaces paused; ready to change
-        * channels.
-        */
-
-       /* Change channels */
-       mutex_lock(&sc->mutex);
-       /* XXX: remove me eventually */
-       ath9k_update_ichannel(sc, aphy->hw,
-                             &sc->sc_ah->channels[sc->chan_idx]);
-
-       /* sync hw configuration for hw code */
-       common->hw = aphy->hw;
-
-       if (ath_set_channel(sc, aphy->hw,
-                           &sc->sc_ah->channels[sc->chan_idx]) < 0) {
-               printk(KERN_DEBUG "ath9k: Failed to set channel for new "
-                      "virtual wiphy\n");
-               mutex_unlock(&sc->mutex);
-               return;
-       }
-       mutex_unlock(&sc->mutex);
-
-       ath9k_wiphy_unpause_channel(sc);
-}
-
-/*
- * ath9k version of ieee80211_tx_status() for TX frames that are generated
- * internally in the driver.
- */
-void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype)
-{
-       struct ath_wiphy *aphy = hw->priv;
-       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-
-       if (ftype == ATH9K_IFT_PAUSE && aphy->state == ATH_WIPHY_PAUSING) {
-               if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
-                       printk(KERN_DEBUG "ath9k: %s: no ACK for pause "
-                              "frame\n", wiphy_name(hw->wiphy));
-                       /*
-                        * The AP did not reply; ignore this to allow us to
-                        * continue.
-                        */
-               }
-               aphy->state = ATH_WIPHY_PAUSED;
-               if (!ath9k_wiphy_pausing(aphy->sc)) {
-                       /*
-                        * Drop from tasklet to work to allow mutex for channel
-                        * change.
-                        */
-                       ieee80211_queue_work(aphy->sc->hw,
-                                  &aphy->sc->chan_work);
-               }
-       }
-
-       dev_kfree_skb(skb);
-}
-
-static void ath9k_mark_paused(struct ath_wiphy *aphy)
-{
-       struct ath_softc *sc = aphy->sc;
-       aphy->state = ATH_WIPHY_PAUSED;
-       if (!__ath9k_wiphy_pausing(sc))
-               ieee80211_queue_work(sc->hw, &sc->chan_work);
-}
-
-static void ath9k_pause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
-       struct ath_wiphy *aphy = data;
-       struct ath_vif *avp = (void *) vif->drv_priv;
-
-       switch (vif->type) {
-       case NL80211_IFTYPE_STATION:
-               if (!vif->bss_conf.assoc) {
-                       ath9k_mark_paused(aphy);
-                       break;
-               }
-               /* TODO: could avoid this if already in PS mode */
-               if (ath9k_send_nullfunc(aphy, vif, avp->bssid, 1)) {
-                       printk(KERN_DEBUG "%s: failed to send PS nullfunc\n",
-                              __func__);
-                       ath9k_mark_paused(aphy);
-               }
-               break;
-       case NL80211_IFTYPE_AP:
-               /* Beacon transmission is paused by aphy->state change */
-               ath9k_mark_paused(aphy);
-               break;
-       default:
-               break;
-       }
-}
-
-/* caller must hold wiphy_lock */
-static int __ath9k_wiphy_pause(struct ath_wiphy *aphy)
-{
-       ieee80211_stop_queues(aphy->hw);
-       aphy->state = ATH_WIPHY_PAUSING;
-       /*
-        * TODO: handle PAUSING->PAUSED for the case where there are multiple
-        * active vifs (now we do it on the first vif getting ready; should be
-        * on the last)
-        */
-       ieee80211_iterate_active_interfaces_atomic(aphy->hw, ath9k_pause_iter,
-                                                  aphy);
-       return 0;
-}
-
-int ath9k_wiphy_pause(struct ath_wiphy *aphy)
-{
-       int ret;
-       spin_lock_bh(&aphy->sc->wiphy_lock);
-       ret = __ath9k_wiphy_pause(aphy);
-       spin_unlock_bh(&aphy->sc->wiphy_lock);
-       return ret;
-}
-
-static void ath9k_unpause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
-       struct ath_wiphy *aphy = data;
-       struct ath_vif *avp = (void *) vif->drv_priv;
-
-       switch (vif->type) {
-       case NL80211_IFTYPE_STATION:
-               if (!vif->bss_conf.assoc)
-                       break;
-               ath9k_send_nullfunc(aphy, vif, avp->bssid, 0);
-               break;
-       case NL80211_IFTYPE_AP:
-               /* Beacon transmission is re-enabled by aphy->state change */
-               break;
-       default:
-               break;
-       }
-}
-
-/* caller must hold wiphy_lock */
-static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy)
-{
-       ieee80211_iterate_active_interfaces_atomic(aphy->hw,
-                                                  ath9k_unpause_iter, aphy);
-       aphy->state = ATH_WIPHY_ACTIVE;
-       ieee80211_wake_queues(aphy->hw);
-       return 0;
-}
-
-int ath9k_wiphy_unpause(struct ath_wiphy *aphy)
-{
-       int ret;
-       spin_lock_bh(&aphy->sc->wiphy_lock);
-       ret = __ath9k_wiphy_unpause(aphy);
-       spin_unlock_bh(&aphy->sc->wiphy_lock);
-       return ret;
-}
-
-static void __ath9k_wiphy_mark_all_paused(struct ath_softc *sc)
-{
-       int i;
-       if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE)
-               sc->pri_wiphy->state = ATH_WIPHY_PAUSED;
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               if (sc->sec_wiphy[i] &&
-                   sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE)
-                       sc->sec_wiphy[i]->state = ATH_WIPHY_PAUSED;
-       }
-}
-
-/* caller must hold wiphy_lock */
-static void __ath9k_wiphy_pause_all(struct ath_softc *sc)
-{
-       int i;
-       if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
-               __ath9k_wiphy_pause(sc->pri_wiphy);
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               if (sc->sec_wiphy[i] &&
-                   sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
-                       __ath9k_wiphy_pause(sc->sec_wiphy[i]);
-       }
-}
-
-int ath9k_wiphy_select(struct ath_wiphy *aphy)
-{
-       struct ath_softc *sc = aphy->sc;
-       bool now;
-
-       spin_lock_bh(&sc->wiphy_lock);
-       if (__ath9k_wiphy_scanning(sc)) {
-               /*
-                * For now, we are using mac80211 sw scan and it expects to
-                * have full control over channel changes, so avoid wiphy
-                * scheduling during a scan. This could be optimized if the
-                * scanning control were moved into the driver.
-                */
-               spin_unlock_bh(&sc->wiphy_lock);
-               return -EBUSY;
-       }
-       if (__ath9k_wiphy_pausing(sc)) {
-               if (sc->wiphy_select_failures == 0)
-                       sc->wiphy_select_first_fail = jiffies;
-               sc->wiphy_select_failures++;
-               if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2))
-               {
-                       printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
-                              "out; disable/enable hw to recover\n");
-                       __ath9k_wiphy_mark_all_paused(sc);
-                       /*
-                        * TODO: this workaround to fix hardware is unlikely to
-                        * be specific to virtual wiphy changes. It can happen
-                        * on normal channel change, too, and as such, this
-                        * should really be made more generic. For example,
-                        * trigger radio disable/enable on GTT interrupt burst
-                        * (say, 10 GTT interrupts received without any TX
-                        * frame being completed)
-                        */
-                       spin_unlock_bh(&sc->wiphy_lock);
-                       ath_radio_disable(sc, aphy->hw);
-                       ath_radio_enable(sc, aphy->hw);
-                       /* Only the primary wiphy hw is used for queuing work */
-                       ieee80211_queue_work(aphy->sc->hw,
-                                  &aphy->sc->chan_work);
-                       return -EBUSY; /* previous select still in progress */
-               }
-               spin_unlock_bh(&sc->wiphy_lock);
-               return -EBUSY; /* previous select still in progress */
-       }
-       sc->wiphy_select_failures = 0;
-
-       /* Store the new channel */
-       sc->chan_idx = aphy->chan_idx;
-       sc->chan_is_ht = aphy->chan_is_ht;
-       sc->next_wiphy = aphy;
-
-       __ath9k_wiphy_pause_all(sc);
-       now = !__ath9k_wiphy_pausing(aphy->sc);
-       spin_unlock_bh(&sc->wiphy_lock);
-
-       if (now) {
-               /* Ready to request channel change immediately */
-               ieee80211_queue_work(aphy->sc->hw, &aphy->sc->chan_work);
-       }
-
-       /*
-        * If any wiphy needs time to become paused, the wiphys will be
-        * unpaused in ath9k_tx_status() once the channel has been changed.
-        */
-
-       return 0;
-}
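
The half-second timeout check above relies on the kernel's wrap-safe jiffies comparison. A user-space sketch of the idea behind time_after() follows; the macro here is a simplified stand-in (the kernel definition additionally type-checks its arguments), shown only to illustrate why the signed difference survives counter wrap-around:

#include <stdio.h>

/* Simplified stand-in for the kernel's time_after(): the signed difference
 * keeps the comparison correct even after the counter wraps around. */
#define time_after(a, b)        ((long)((b) - (a)) < 0)

int main(void)
{
        unsigned long first_fail = (unsigned long)-10;  /* just before wrap */
        unsigned long deadline = first_fail + 50;       /* wraps around to 40 */
        unsigned long now;

        for (now = first_fail; now != first_fail + 100; now += 25)
                printf("now=%lu timed_out=%d\n", now,
                       time_after(now, deadline) ? 1 : 0);
        /* prints 0, 0, 0, 1: only the sample past the deadline times out,
         * despite the counter having wrapped in between */
        return 0;
}
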
-
-bool ath9k_wiphy_started(struct ath_softc *sc)
-{
-       int i;
-       spin_lock_bh(&sc->wiphy_lock);
-       if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) {
-               spin_unlock_bh(&sc->wiphy_lock);
-               return true;
-       }
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               if (sc->sec_wiphy[i] &&
-                   sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) {
-                       spin_unlock_bh(&sc->wiphy_lock);
-                       return true;
-               }
-       }
-       spin_unlock_bh(&sc->wiphy_lock);
-       return false;
-}
-
-static void ath9k_wiphy_pause_chan(struct ath_wiphy *aphy,
-                                  struct ath_wiphy *selected)
-{
-       if (selected->state == ATH_WIPHY_SCAN) {
-               if (aphy == selected)
-                       return;
-               /*
-                * Pause all other wiphys for the duration of the scan even if
-                * they are on the current channel now.
-                */
-       } else if (aphy->chan_idx == selected->chan_idx)
-               return;
-       aphy->state = ATH_WIPHY_PAUSED;
-       ieee80211_stop_queues(aphy->hw);
-}
-
-void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
-                                 struct ath_wiphy *selected)
-{
-       int i;
-       spin_lock_bh(&sc->wiphy_lock);
-       if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
-               ath9k_wiphy_pause_chan(sc->pri_wiphy, selected);
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               if (sc->sec_wiphy[i] &&
-                   sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
-                       ath9k_wiphy_pause_chan(sc->sec_wiphy[i], selected);
-       }
-       spin_unlock_bh(&sc->wiphy_lock);
-}
-
-void ath9k_wiphy_work(struct work_struct *work)
-{
-       struct ath_softc *sc = container_of(work, struct ath_softc,
-                                           wiphy_work.work);
-       struct ath_wiphy *aphy = NULL;
-       bool first = true;
-
-       spin_lock_bh(&sc->wiphy_lock);
-
-       if (sc->wiphy_scheduler_int == 0) {
-               /* wiphy scheduler is disabled */
-               spin_unlock_bh(&sc->wiphy_lock);
-               return;
-       }
-
-try_again:
-       sc->wiphy_scheduler_index++;
-       while (sc->wiphy_scheduler_index <= sc->num_sec_wiphy) {
-               aphy = sc->sec_wiphy[sc->wiphy_scheduler_index - 1];
-               if (aphy && aphy->state != ATH_WIPHY_INACTIVE)
-                       break;
-
-               sc->wiphy_scheduler_index++;
-               aphy = NULL;
-       }
-       if (aphy == NULL) {
-               sc->wiphy_scheduler_index = 0;
-               if (sc->pri_wiphy->state == ATH_WIPHY_INACTIVE) {
-                       if (first) {
-                               first = false;
-                               goto try_again;
-                       }
-                       /* No wiphy is ready to be scheduled */
-               } else
-                       aphy = sc->pri_wiphy;
-       }
-
-       spin_unlock_bh(&sc->wiphy_lock);
-
-       if (aphy &&
-           aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN &&
-           ath9k_wiphy_select(aphy)) {
-               printk(KERN_DEBUG "ath9k: Failed to schedule virtual wiphy "
-                      "change\n");
-       }
-
-       ieee80211_queue_delayed_work(sc->hw,
-                                    &sc->wiphy_work,
-                                    sc->wiphy_scheduler_int);
-}
-
-void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int)
-{
-       cancel_delayed_work_sync(&sc->wiphy_work);
-       sc->wiphy_scheduler_int = msecs_to_jiffies(msec_int);
-       if (sc->wiphy_scheduler_int)
-               ieee80211_queue_delayed_work(sc->hw, &sc->wiphy_work,
-                                            sc->wiphy_scheduler_int);
-}
-
-/* caller must hold wiphy_lock */
-bool ath9k_all_wiphys_idle(struct ath_softc *sc)
-{
-       unsigned int i;
-       if (!sc->pri_wiphy->idle)
-               return false;
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               struct ath_wiphy *aphy = sc->sec_wiphy[i];
-               if (!aphy)
-                       continue;
-               if (!aphy->idle)
-                       return false;
-       }
-       return true;
-}
-
-/* caller must hold wiphy_lock */
-void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle)
-{
-       struct ath_softc *sc = aphy->sc;
-
-       aphy->idle = idle;
-       ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
-               "Marking %s as %sidle\n",
-               wiphy_name(aphy->hw->wiphy), idle ? "" : "not-");
-}
-/* Only bother starting a queue on an active virtual wiphy */
-bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue)
-{
-       struct ieee80211_hw *hw = sc->pri_wiphy->hw;
-       unsigned int i;
-       bool txq_started = false;
-
-       spin_lock_bh(&sc->wiphy_lock);
-
-       /* Start the primary wiphy */
-       if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) {
-               ieee80211_wake_queue(hw, skb_queue);
-               txq_started = true;
-               goto unlock;
-       }
-
-       /* Now start the secondary wiphy queues */
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               struct ath_wiphy *aphy = sc->sec_wiphy[i];
-               if (!aphy)
-                       continue;
-               if (aphy->state != ATH_WIPHY_ACTIVE)
-                       continue;
-
-               hw = aphy->hw;
-               ieee80211_wake_queue(hw, skb_queue);
-               txq_started = true;
-               break;
-       }
-
-unlock:
-       spin_unlock_bh(&sc->wiphy_lock);
-       return txq_started;
-}
-
-/* Go ahead and propagate information to all virtual wiphys; it won't hurt */
-void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue)
-{
-       struct ieee80211_hw *hw = sc->pri_wiphy->hw;
-       unsigned int i;
-
-       spin_lock_bh(&sc->wiphy_lock);
-
-       /* Stop the primary wiphy */
-       ieee80211_stop_queue(hw, skb_queue);
-
-       /* Now stop the secondary wiphy queues */
-       for (i = 0; i < sc->num_sec_wiphy; i++) {
-               struct ath_wiphy *aphy = sc->sec_wiphy[i];
-               if (!aphy)
-                       continue;
-               hw = aphy->hw;
-               ieee80211_stop_queue(hw, skb_queue);
-       }
-       spin_unlock_bh(&sc->wiphy_lock);
-}
index dc862f5e1162b7b24f67efaf3e233a1cf8e69c4d..d3d24904f62f89d46ff270914f7946b9f43bdf5e 100644 (file)
@@ -123,12 +123,8 @@ void ath9k_deinit_wmi(struct ath9k_htc_priv *priv)
 void ath9k_swba_tasklet(unsigned long data)
 {
        struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
-       struct ath_common *common = ath9k_hw_common(priv->ah);
-
-       ath_dbg(common, ATH_DBG_WMI, "SWBA Event received\n");
 
        ath9k_htc_swba(priv, priv->wmi->beacon_pending);
-
 }
 
 void ath9k_fatal_work(struct work_struct *work)
index 07b7804aec5babf15202ce17955d7727852c39e0..e16136d617999a03ea2032febc9c465e489623ee 100644 (file)
@@ -19,7 +19,6 @@
 
 #define BITS_PER_BYTE           8
 #define OFDM_PLCP_BITS          22
-#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
 #define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
 #define L_STF                   8
 #define L_LTF                   8
@@ -32,7 +31,6 @@
 #define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
 #define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
 
-#define OFDM_SIFS_TIME             16
 
 static u16 bits_per_symbol[][2] = {
        /* 20MHz 40MHz */
@@ -57,8 +55,9 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head);
 static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
-static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
-                            int nframes, int nbad, int txok, bool update_rc);
+static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
+                            struct ath_tx_status *ts, int nframes, int nbad,
+                            int txok, bool update_rc);
 static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno);
 
@@ -169,7 +168,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
                        ath_tx_update_baw(sc, tid, fi->seqno);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
                } else {
-                       ath_tx_send_normal(sc, txq, tid, &bf_head);
+                       ath_tx_send_normal(sc, txq, NULL, &bf_head);
                }
                spin_lock_bh(&txq->axq_lock);
        }
@@ -297,7 +296,6 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
 
        ATH_TXBUF_RESET(tbf);
 
-       tbf->aphy = bf->aphy;
        tbf->bf_mpdu = bf->bf_mpdu;
        tbf->bf_buf_addr = bf->bf_buf_addr;
        memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
@@ -345,7 +343,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
        struct ath_node *an = NULL;
        struct sk_buff *skb;
        struct ieee80211_sta *sta;
-       struct ieee80211_hw *hw;
+       struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info;
        struct ath_atx_tid *tid = NULL;
@@ -364,7 +362,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
        hdr = (struct ieee80211_hdr *)skb->data;
 
        tx_info = IEEE80211_SKB_CB(skb);
-       hw = bf->aphy->hw;
 
        memcpy(rates, tx_info->control.rates, sizeof(rates));
 
@@ -383,7 +380,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                            !bf->bf_stale || bf_next != NULL)
                                list_move_tail(&bf->list, &bf_head);
 
-                       ath_tx_rc_status(bf, ts, 1, 1, 0, false);
+                       ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                0, 0);
 
@@ -429,7 +426,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 
        ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
        while (bf) {
-               txfail = txpending = 0;
+               txfail = txpending = sendbar = 0;
                bf_next = bf->bf_next;
 
                skb = bf->bf_mpdu;
@@ -489,10 +486,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 
                        if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
                                memcpy(tx_info->control.rates, rates, sizeof(rates));
-                               ath_tx_rc_status(bf, ts, nframes, nbad, txok, true);
+                               ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
                                rc_update = false;
                        } else {
-                               ath_tx_rc_status(bf, ts, nframes, nbad, txok, false);
+                               ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
                        }
 
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
@@ -516,7 +513,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 
                                                bf->bf_state.bf_type |=
                                                        BUF_XRETRY;
-                                               ath_tx_rc_status(bf, ts, nframes,
+                                               ath_tx_rc_status(sc, bf, ts, nframes,
                                                                nbad, 0, false);
                                                ath_tx_complete_buf(sc, bf, txq,
                                                                    &bf_head,
@@ -566,8 +563,11 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 
        rcu_read_unlock();
 
-       if (needreset)
+       if (needreset) {
+               spin_unlock_bh(&sc->sc_pcu_lock);
                ath_reset(sc, false);
+               spin_lock_bh(&sc->sc_pcu_lock);
+       }
 }
 
 static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
@@ -856,7 +856,10 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
 
        txtid->state |= AGGR_ADDBA_PROGRESS;
        txtid->paused = true;
-       *ssn = txtid->seq_start;
+       *ssn = txtid->seq_start = txtid->seq_next;
+
+       memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
+       txtid->baw_head = txtid->baw_tail = 0;
 
        return 0;
 }
@@ -942,7 +945,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
                [WME_AC_VI] = ATH_TXQ_AC_VI,
                [WME_AC_VO] = ATH_TXQ_AC_VO,
        };
-       int qnum, i;
+       int axq_qnum, i;
 
        memset(&qi, 0, sizeof(qi));
        qi.tqi_subtype = subtype_txq_to_hwq[subtype];
@@ -976,24 +979,25 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
                        qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
                                        TXQ_FLAG_TXDESCINT_ENABLE;
        }
-       qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
-       if (qnum == -1) {
+       axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
+       if (axq_qnum == -1) {
                /*
                 * NB: don't print a message, this happens
                 * normally on parts with too few tx queues
                 */
                return NULL;
        }
-       if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
+       if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
                ath_err(common, "qnum %u out of range, max %zu!\n",
-                       qnum, ARRAY_SIZE(sc->tx.txq));
-               ath9k_hw_releasetxqueue(ah, qnum);
+                       axq_qnum, ARRAY_SIZE(sc->tx.txq));
+               ath9k_hw_releasetxqueue(ah, axq_qnum);
                return NULL;
        }
-       if (!ATH_TXQ_SETUP(sc, qnum)) {
-               struct ath_txq *txq = &sc->tx.txq[qnum];
+       if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
+               struct ath_txq *txq = &sc->tx.txq[axq_qnum];
 
-               txq->axq_qnum = qnum;
+               txq->axq_qnum = axq_qnum;
+               txq->mac80211_qnum = -1;
                txq->axq_link = NULL;
                INIT_LIST_HEAD(&txq->axq_q);
                INIT_LIST_HEAD(&txq->axq_acq);
@@ -1001,14 +1005,14 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
                txq->axq_depth = 0;
                txq->axq_ampdu_depth = 0;
                txq->axq_tx_inprogress = false;
-               sc->tx.txqsetup |= 1<<qnum;
+               sc->tx.txqsetup |= 1<<axq_qnum;
 
                txq->txq_headidx = txq->txq_tailidx = 0;
                for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
                        INIT_LIST_HEAD(&txq->txq_fifo[i]);
                INIT_LIST_HEAD(&txq->txq_fifo_pending);
        }
-       return &sc->tx.txq[qnum];
+       return &sc->tx.txq[axq_qnum];
 }
 
 int ath_txq_update(struct ath_softc *sc, int qnum,
@@ -1051,6 +1055,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
 int ath_cabq_update(struct ath_softc *sc)
 {
        struct ath9k_tx_queue_info qi;
+       struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
        int qnum = sc->beacon.cabq->axq_qnum;
 
        ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
@@ -1062,7 +1067,7 @@ int ath_cabq_update(struct ath_softc *sc)
        else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
                sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
 
-       qi.tqi_readyTime = (sc->beacon_interval *
+       qi.tqi_readyTime = (cur_conf->beacon_interval *
                            sc->config.cabqReadytime) / 100;
        ath_txq_update(sc, qnum, &qi);
 
@@ -1205,8 +1210,17 @@ bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
                ath_err(common, "Failed to stop TX DMA!\n");
 
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
-               if (ATH_TXQ_SETUP(sc, i))
-                       ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
+               if (!ATH_TXQ_SETUP(sc, i))
+                       continue;
+
+               /*
+                * The caller will resume queues with ieee80211_wake_queues.
+                * Mark the queue as not stopped to prevent ath_tx_complete
+                * from waking the queue too early.
+                */
+               txq = &sc->tx.txq[i];
+               txq->stopped = false;
+               ath_draintxq(sc, txq, retry_tx);
        }
 
        return !npend;
@@ -1218,46 +1232,59 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
        sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
 }
 
+/* For each axq_acq entry, for each tid, try to schedule packets
+ * for transmit until ampdu_depth has reached min Q depth.
+ */
 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 {
-       struct ath_atx_ac *ac;
-       struct ath_atx_tid *tid;
+       struct ath_atx_ac *ac, *ac_tmp, *last_ac;
+       struct ath_atx_tid *tid, *last_tid;
 
-       if (list_empty(&txq->axq_acq))
+       if (list_empty(&txq->axq_acq) ||
+           txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
                return;
 
        ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
-       list_del(&ac->list);
-       ac->sched = false;
+       last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
 
-       do {
-               if (list_empty(&ac->tid_q))
-                       return;
+       list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
+               last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
+               list_del(&ac->list);
+               ac->sched = false;
 
-               tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
-               list_del(&tid->list);
-               tid->sched = false;
+               while (!list_empty(&ac->tid_q)) {
+                       tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
+                                              list);
+                       list_del(&tid->list);
+                       tid->sched = false;
 
-               if (tid->paused)
-                       continue;
+                       if (tid->paused)
+                               continue;
 
-               ath_tx_sched_aggr(sc, txq, tid);
+                       ath_tx_sched_aggr(sc, txq, tid);
 
-               /*
-                * add tid to round-robin queue if more frames
-                * are pending for the tid
-                */
-               if (!list_empty(&tid->buf_q))
-                       ath_tx_queue_tid(txq, tid);
+                       /*
+                        * add tid to round-robin queue if more frames
+                        * are pending for the tid
+                        */
+                       if (!list_empty(&tid->buf_q))
+                               ath_tx_queue_tid(txq, tid);
 
-               break;
-       } while (!list_empty(&ac->tid_q));
+                       if (tid == last_tid ||
+                           txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
+                               break;
+               }
 
-       if (!list_empty(&ac->tid_q)) {
-               if (!ac->sched) {
-                       ac->sched = true;
-                       list_add_tail(&ac->list, &txq->axq_acq);
+               if (!list_empty(&ac->tid_q)) {
+                       if (!ac->sched) {
+                               ac->sched = true;
+                               list_add_tail(&ac->list, &txq->axq_acq);
+                       }
                }
+
+               if (ac == last_ac ||
+                   txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
+                       return;
        }
 }
 
@@ -1301,6 +1328,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
                list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
                INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
+               TX_STAT_INC(txq->axq_qnum, puttxbuf);
                ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
                        txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
@@ -1308,6 +1336,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                list_splice_tail_init(head, &txq->axq_q);
 
                if (txq->axq_link == NULL) {
+                       TX_STAT_INC(txq->axq_qnum, puttxbuf);
                        ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                        ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
                                txq->axq_qnum, ito64(bf->bf_daddr),
@@ -1321,6 +1350,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                }
                ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
                                       &txq->axq_link);
+               TX_STAT_INC(txq->axq_qnum, txstart);
                ath9k_hw_txstart(ah, txq->axq_qnum);
        }
        txq->axq_depth++;
@@ -1335,7 +1365,6 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
        struct list_head bf_head;
 
        bf->bf_state.bf_type |= BUF_AMPDU;
-       TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
 
        /*
         * Do not queue to h/w when any of the following conditions is true:
@@ -1351,6 +1380,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
                 * Add this frame to software queue for scheduling later
                 * for aggregation.
                 */
+               TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
                list_add_tail(&bf->list, &tid->buf_q);
                ath_tx_queue_tid(txctl->txq, tid);
                return;
@@ -1364,6 +1394,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
                ath_tx_addto_baw(sc, tid, fi->seqno);
 
        /* Queue to h/w without aggregation */
+       TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
        bf->bf_lastbf = bf;
        ath_buf_set_rate(sc, bf, fi->framelen);
        ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
@@ -1416,8 +1447,7 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
 static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
                             int framelen)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_sta *sta = tx_info->control.sta;
        struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
@@ -1635,8 +1665,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
                                           struct ath_txq *txq,
                                           struct sk_buff *skb)
 {
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_frame_info *fi = get_frame_info(skb);
@@ -1652,7 +1681,6 @@ static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
 
        ATH_TXBUF_RESET(bf);
 
-       bf->aphy = aphy;
        bf->bf_flags = setup_tx_flags(skb);
        bf->bf_mpdu = skb;
 
@@ -1741,8 +1769,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_sta *sta = info->control.sta;
-       struct ath_wiphy *aphy = hw->priv;
-       struct ath_softc *sc = aphy->sc;
+       struct ath_softc *sc = hw->priv;
        struct ath_txq *txq = txctl->txq;
        struct ath_buf *bf;
        int padpos, padsize;
@@ -1794,7 +1821,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
        spin_lock_bh(&txq->axq_lock);
        if (txq == sc->tx.txq_map[q] &&
            ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
-               ath_mac80211_stop_queue(sc, q);
+               ieee80211_stop_queue(sc->hw, q);
                txq->stopped = 1;
        }
        spin_unlock_bh(&txq->axq_lock);
@@ -1809,8 +1836,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 /*****************/
 
 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
-                           struct ath_wiphy *aphy, int tx_flags, int ftype,
-                           struct ath_txq *txq)
+                           int tx_flags, int ftype, struct ath_txq *txq)
 {
        struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -1820,9 +1846,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
 
        ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
 
-       if (aphy)
-               hw = aphy->hw;
-
        if (tx_flags & ATH_TX_BAR)
                tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
 
@@ -1852,19 +1875,20 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                                        PS_WAIT_FOR_TX_ACK));
        }
 
-       if (unlikely(ftype))
-               ath9k_tx_status(hw, skb, ftype);
-       else {
-               q = skb_get_queue_mapping(skb);
-               if (txq == sc->tx.txq_map[q]) {
-                       spin_lock_bh(&txq->axq_lock);
-                       if (WARN_ON(--txq->pending_frames < 0))
-                               txq->pending_frames = 0;
-                       spin_unlock_bh(&txq->axq_lock);
-               }
+       q = skb_get_queue_mapping(skb);
+       if (txq == sc->tx.txq_map[q]) {
+               spin_lock_bh(&txq->axq_lock);
+               if (WARN_ON(--txq->pending_frames < 0))
+                       txq->pending_frames = 0;
 
-               ieee80211_tx_status(hw, skb);
+               if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
+                       ieee80211_wake_queue(sc->hw, q);
+                       txq->stopped = 0;
+               }
+               spin_unlock_bh(&txq->axq_lock);
        }
+
+       ieee80211_tx_status(hw, skb);
 }
 
 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
@@ -1896,8 +1920,8 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                else
                        complete(&sc->paprd_complete);
        } else {
-               ath_debug_stat_tx(sc, bf, ts);
-               ath_tx_complete(sc, skb, bf->aphy, tx_flags,
+               ath_debug_stat_tx(sc, bf, ts, txq);
+               ath_tx_complete(sc, skb, tx_flags,
                                bf->bf_state.bfs_ftype, txq);
        }
        /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
@@ -1913,14 +1937,14 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
        spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
 }
 
-static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
-                            int nframes, int nbad, int txok, bool update_rc)
+static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
+                            struct ath_tx_status *ts, int nframes, int nbad,
+                            int txok, bool update_rc)
 {
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_hw *hw = bf->aphy->hw;
-       struct ath_softc *sc = bf->aphy->sc;
+       struct ieee80211_hw *hw = sc->hw;
        struct ath_hw *ah = sc->sc_ah;
        u8 i, tx_rateindex;
 
@@ -1971,19 +1995,6 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
        tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
 }
 
-static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
-{
-       struct ath_txq *txq;
-
-       txq = sc->tx.txq_map[qnum];
-       spin_lock_bh(&txq->axq_lock);
-       if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
-               if (ath_mac80211_start_queue(sc, qnum))
-                       txq->stopped = 0;
-       }
-       spin_unlock_bh(&txq->axq_lock);
-}
-
 static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 {
        struct ath_hw *ah = sc->sc_ah;
@@ -1994,7 +2005,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
        struct ath_tx_status ts;
        int txok;
        int status;
-       int qnum;
 
        ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
                txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
@@ -2004,6 +2014,9 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
                spin_lock_bh(&txq->axq_lock);
                if (list_empty(&txq->axq_q)) {
                        txq->axq_link = NULL;
+                       if (sc->sc_flags & SC_OP_TXAGGR &&
+                           !txq->txq_flush_inprogress)
+                               ath_txq_schedule(sc, txq);
                        spin_unlock_bh(&txq->axq_lock);
                        break;
                }
@@ -2038,6 +2051,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
                        spin_unlock_bh(&txq->axq_lock);
                        break;
                }
+               TX_STAT_INC(txq->axq_qnum, txprocdesc);
 
                /*
                 * Remove ath_buf's of the same transmit unit from txq,
@@ -2058,6 +2072,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 
                if (bf_is_ampdu_not_probing(bf))
                        txq->axq_ampdu_depth--;
+
                spin_unlock_bh(&txq->axq_lock);
 
                if (bf_held)
@@ -2070,27 +2085,45 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
                         */
                        if (ts.ts_status & ATH9K_TXERR_XRETRY)
                                bf->bf_state.bf_type |= BUF_XRETRY;
-                       ath_tx_rc_status(bf, &ts, 1, txok ? 0 : 1, txok, true);
+                       ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
                }
 
-               qnum = skb_get_queue_mapping(bf->bf_mpdu);
-
                if (bf_isampdu(bf))
                        ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
                                             true);
                else
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
 
-               if (txq == sc->tx.txq_map[qnum])
-                       ath_wake_mac80211_queue(sc, qnum);
-
                spin_lock_bh(&txq->axq_lock);
-               if (sc->sc_flags & SC_OP_TXAGGR)
+
+               if (sc->sc_flags & SC_OP_TXAGGR && !txq->txq_flush_inprogress)
                        ath_txq_schedule(sc, txq);
                spin_unlock_bh(&txq->axq_lock);
        }
 }
 
+static void ath_hw_pll_work(struct work_struct *work)
+{
+       struct ath_softc *sc = container_of(work, struct ath_softc,
+                                           hw_pll_work.work);
+       static int count;
+
+       if (AR_SREV_9485(sc->sc_ah)) {
+               if (ar9003_get_pll_sqsum_dvc(sc->sc_ah) >= 0x40000) {
+                       count++;
+
+                       if (count == 3) {
+                               /* Rx is hung for more than 500ms. Reset it */
+                               ath_reset(sc, true);
+                               count = 0;
+                       }
+               } else
+                       count = 0;
+
+               ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
+       }
+}
+
 static void ath_tx_complete_poll_work(struct work_struct *work)
 {
        struct ath_softc *sc = container_of(work, struct ath_softc,
@@ -2098,6 +2131,9 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
        struct ath_txq *txq;
        int i;
        bool needreset = false;
+#ifdef CONFIG_ATH9K_DEBUGFS
+       sc->tx_complete_poll_work_seen++;
+#endif
 
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i)) {
@@ -2111,6 +2147,33 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
                                } else {
                                        txq->axq_tx_inprogress = true;
                                }
+                       } else {
+                               /* If the queue has pending buffers, then it
+                                * should be doing tx work (and have axq_depth).
+                                * We should not get into this state, but we do.
+                                */
+                               if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) &&
+                                   (txq->pending_frames > 0 ||
+                                    !list_empty(&txq->axq_acq) ||
+                                    txq->stopped)) {
+                                       ath_err(ath9k_hw_common(sc->sc_ah),
+                                               "txq: %p axq_qnum: %u,"
+                                               " mac80211_qnum: %i"
+                                               " axq_link: %p"
+                                               " pending frames: %i"
+                                               " axq_acq empty: %i"
+                                               " stopped: %i"
+                                               " axq_depth: 0  Attempting to"
+                                               " restart tx logic.\n",
+                                               txq, txq->axq_qnum,
+                                               txq->mac80211_qnum,
+                                               txq->axq_link,
+                                               txq->pending_frames,
+                                               list_empty(&txq->axq_acq),
+                                               txq->stopped);
+                                       ath_txq_schedule(sc, txq);
+                               }
                        }
                        spin_unlock_bh(&txq->axq_lock);
                }
@@ -2150,7 +2213,6 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
        struct list_head bf_head;
        int status;
        int txok;
-       int qnum;
 
        for (;;) {
                status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
@@ -2193,11 +2255,9 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
                if (!bf_isampdu(bf)) {
                        if (txs.ts_status & ATH9K_TXERR_XRETRY)
                                bf->bf_state.bf_type |= BUF_XRETRY;
-                       ath_tx_rc_status(bf, &txs, 1, txok ? 0 : 1, txok, true);
+                       ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
                }
 
-               qnum = skb_get_queue_mapping(bf->bf_mpdu);
-
                if (bf_isampdu(bf))
                        ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
                                             txok, true);
@@ -2205,19 +2265,20 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
                        ath_tx_complete_buf(sc, bf, txq, &bf_head,
                                            &txs, txok, 0);
 
-               if (txq == sc->tx.txq_map[qnum])
-                       ath_wake_mac80211_queue(sc, qnum);
-
                spin_lock_bh(&txq->axq_lock);
-               if (!list_empty(&txq->txq_fifo_pending)) {
-                       INIT_LIST_HEAD(&bf_head);
-                       bf = list_first_entry(&txq->txq_fifo_pending,
-                               struct ath_buf, list);
-                       list_cut_position(&bf_head, &txq->txq_fifo_pending,
-                               &bf->bf_lastbf->list);
-                       ath_tx_txqaddbuf(sc, txq, &bf_head);
-               } else if (sc->sc_flags & SC_OP_TXAGGR)
-                       ath_txq_schedule(sc, txq);
+
+               if (!txq->txq_flush_inprogress) {
+                       if (!list_empty(&txq->txq_fifo_pending)) {
+                               INIT_LIST_HEAD(&bf_head);
+                               bf = list_first_entry(&txq->txq_fifo_pending,
+                                                     struct ath_buf, list);
+                               list_cut_position(&bf_head,
+                                                 &txq->txq_fifo_pending,
+                                                 &bf->bf_lastbf->list);
+                               ath_tx_txqaddbuf(sc, txq, &bf_head);
+                       } else if (sc->sc_flags & SC_OP_TXAGGR)
+                               ath_txq_schedule(sc, txq);
+               }
                spin_unlock_bh(&txq->axq_lock);
        }
 }
@@ -2285,6 +2346,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
        }
 
        INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
+       INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
 
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                error = ath_tx_edma_init(sc);
index d07ff7f2fd92d653103538b86353c6c986f9aca9..c6a5fae634a0f14718ff60e212ad20c123a58ca0 100644 (file)
@@ -283,6 +283,7 @@ struct ar9170 {
                unsigned int mem_blocks;
                unsigned int mem_block_size;
                unsigned int rx_size;
+               unsigned int tx_seq_table;
        } fw;
 
        /* reset / stuck frames/queue detection */
@@ -533,7 +534,7 @@ void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len);
 void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
 
 /* TX */
-int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
 void carl9170_tx_janitor(struct work_struct *work);
 void carl9170_tx_process_status(struct ar9170 *ar,
                                const struct carl9170_rsp *cmd);
index 546b4e4ec5ea090aaf6ed0ce7b77ed024e3c8fd6..9517ede9e2dfdbb104042c3f520e9cb36a18c778 100644 (file)
@@ -150,6 +150,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
        const struct carl9170fw_otus_desc *otus_desc;
        const struct carl9170fw_chk_desc *chk_desc;
        const struct carl9170fw_last_desc *last_desc;
+       const struct carl9170fw_txsq_desc *txsq_desc;
 
        last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC,
                sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER);
@@ -264,6 +265,9 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
                        FIF_PROMISC_IN_BSS;
        }
 
+       if (SUPP(CARL9170FW_WOL))
+               device_set_wakeup_enable(&ar->udev->dev, true);
+
        ar->fw.vif_num = otus_desc->vif_num;
        ar->fw.cmd_bufs = otus_desc->cmd_bufs;
        ar->fw.address = le32_to_cpu(otus_desc->fw_address);
@@ -296,6 +300,17 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
                }
        }
 
+       txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC,
+               sizeof(*txsq_desc), CARL9170FW_TXSQ_DESC_CUR_VER);
+
+       if (txsq_desc) {
+               ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr);
+               if (!valid_cpu_addr(ar->fw.tx_seq_table))
+                       return -EINVAL;
+       } else {
+               ar->fw.tx_seq_table = 0;
+       }
+
 #undef SUPPORTED
        return 0;
 }
index 3680dfc70f4659179977620a753dcfac05e987ce..30449d21b7624e9a356005830cea2cc80a60e396 100644 (file)
@@ -167,6 +167,7 @@ struct carl9170_rx_filter_cmd {
 #define CARL9170_RX_FILTER_CTL_BACKR   0x20
 #define CARL9170_RX_FILTER_MGMT                0x40
 #define CARL9170_RX_FILTER_DATA                0x80
+#define CARL9170_RX_FILTER_EVERYTHING  (~0)
 
 struct carl9170_bcn_ctrl_cmd {
        __le32          vif_id;
index 71f3821f60581d721bd203cea46ed31245e26679..921066822dd5a77529b9834fd958b1d7c7da3dbc 100644 (file)
@@ -69,6 +69,9 @@ enum carl9170fw_feature_list {
        /* Firmware RX filter | CARL9170_CMD_RX_FILTER */
        CARL9170FW_RX_FILTER,
 
+       /* Wake up on WLAN */
+       CARL9170FW_WOL,
+
        /* KEEP LAST */
        __CARL9170FW_FEATURE_NUM
 };
@@ -78,6 +81,7 @@ enum carl9170fw_feature_list {
 #define FIX_MAGIC      "FIX\0"
 #define DBG_MAGIC      "DBG\0"
 #define CHK_MAGIC      "CHK\0"
+#define TXSQ_MAGIC     "TXSQ"
 #define LAST_MAGIC     "LAST"
 
 #define CARL9170FW_SET_DAY(d) (((d) - 1) % 31)
@@ -88,8 +92,10 @@ enum carl9170fw_feature_list {
 #define CARL9170FW_GET_MONTH(m) ((((m) / 31) % 12) + 1)
 #define CARL9170FW_GET_YEAR(y) ((y) / 372 + 10)
 
+#define CARL9170FW_MAGIC_SIZE                  4
+
 struct carl9170fw_desc_head {
-       u8      magic[4];
+       u8      magic[CARL9170FW_MAGIC_SIZE];
        __le16 length;
        u8 min_ver;
        u8 cur_ver;
@@ -170,6 +176,16 @@ struct carl9170fw_chk_desc {
 #define CARL9170FW_CHK_DESC_SIZE                       \
        (sizeof(struct carl9170fw_chk_desc))
 
+#define CARL9170FW_TXSQ_DESC_MIN_VER                   1
+#define CARL9170FW_TXSQ_DESC_CUR_VER                   1
+struct carl9170fw_txsq_desc {
+       struct carl9170fw_desc_head head;
+
+       __le32 seq_table_addr;
+} __packed;
+#define CARL9170FW_TXSQ_DESC_SIZE                      \
+       (sizeof(struct carl9170fw_txsq_desc))
+
 #define CARL9170FW_LAST_DESC_MIN_VER                   1
 #define CARL9170FW_LAST_DESC_CUR_VER                   2
 struct carl9170fw_last_desc {
@@ -189,8 +205,8 @@ struct carl9170fw_last_desc {
        }
 
 static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head,
-                                        u8 magic[4], __le16 length,
-                                        u8 min_ver, u8 cur_ver)
+                                        u8 magic[CARL9170FW_MAGIC_SIZE],
+                                        __le16 length, u8 min_ver, u8 cur_ver)
 {
        head->magic[0] = magic[0];
        head->magic[1] = magic[1];
@@ -204,7 +220,7 @@ static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head,
 
 #define carl9170fw_for_each_hdr(desc, fw_desc)                         \
        for (desc = fw_desc;                                            \
-            memcmp(desc->magic, LAST_MAGIC, 4) &&                      \
+            memcmp(desc->magic, LAST_MAGIC, CARL9170FW_MAGIC_SIZE) &&  \
             le16_to_cpu(desc->length) >= CARL9170FW_DESC_HEAD_SIZE &&  \
             le16_to_cpu(desc->length) < CARL9170FW_DESC_MAX_LENGTH;    \
             desc = (void *)((unsigned long)desc + le16_to_cpu(desc->length)))
@@ -218,8 +234,8 @@ static inline bool carl9170fw_supports(__le32 list, u8 feature)
 }
 
 static inline bool carl9170fw_desc_cmp(const struct carl9170fw_desc_head *head,
-                                      const u8 descid[4], u16 min_len,
-                                      u8 compatible_revision)
+                                      const u8 descid[CARL9170FW_MAGIC_SIZE],
+                                      u16 min_len, u8 compatible_revision)
 {
        if (descid[0] == head->magic[0] && descid[1] == head->magic[1] &&
            descid[2] == head->magic[2] && descid[3] == head->magic[3] &&
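
A minimal sketch of how the iterator and comparison helper above combine into a descriptor lookup, assuming an already length-checked firmware blob; the function name find_desc_by_magic() is hypothetical:

static const struct carl9170fw_desc_head *
find_desc_by_magic(const struct carl9170fw_desc_head *fw_desc,
		   const u8 descid[CARL9170FW_MAGIC_SIZE],
		   u16 min_len, u8 compatible_revision)
{
	const struct carl9170fw_desc_head *desc;

	/* Walk the chain until LAST_MAGIC or a malformed length stops it. */
	carl9170fw_for_each_hdr(desc, fw_desc) {
		if (carl9170fw_desc_cmp(desc, descid, min_len,
					compatible_revision))
			return desc;
	}

	return NULL;
}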
index e85df6edfed32ed6d58888b7fb18476aab9d40cd..4e30762dd903de04a704789ac823e53f28311f1d 100644 (file)
 
 #define        AR9170_PWR_REG_CHIP_REVISION            (AR9170_PWR_REG_BASE + 0x010)
 #define AR9170_PWR_REG_PLL_ADDAC               (AR9170_PWR_REG_BASE + 0x014)
+#define                AR9170_PWR_PLL_ADDAC_DIV_S              2
+#define                AR9170_PWR_PLL_ADDAC_DIV                0xffc
 #define        AR9170_PWR_REG_WATCH_DOG_MAGIC          (AR9170_PWR_REG_BASE + 0x020)
 
 /* Faraday USB Controller */
 #define        AR9170_USB_REG_MAIN_CTRL                (AR9170_USB_REG_BASE + 0x000)
 #define                AR9170_USB_MAIN_CTRL_REMOTE_WAKEUP      BIT(0)
 #define                AR9170_USB_MAIN_CTRL_ENABLE_GLOBAL_INT  BIT(2)
+#define                AR9170_USB_MAIN_CTRL_GO_TO_SUSPEND      BIT(3)
+#define                AR9170_USB_MAIN_CTRL_RESET              BIT(4)
+#define                AR9170_USB_MAIN_CTRL_CHIP_ENABLE        BIT(5)
 #define                AR9170_USB_MAIN_CTRL_HIGHSPEED          BIT(6)
 
 #define        AR9170_USB_REG_DEVICE_ADDRESS           (AR9170_USB_REG_BASE + 0x001)
 #define        AR9170_USB_REG_INTR_GROUP               (AR9170_USB_REG_BASE + 0x020)
 
 #define        AR9170_USB_REG_INTR_SOURCE_0            (AR9170_USB_REG_BASE + 0x021)
+#define                AR9170_USB_INTR_SRC0_SETUP              BIT(0)
+#define                AR9170_USB_INTR_SRC0_IN                 BIT(1)
+#define                AR9170_USB_INTR_SRC0_OUT                BIT(2)
+#define                AR9170_USB_INTR_SRC0_FAIL               BIT(3) /* ??? */
+#define                AR9170_USB_INTR_SRC0_END                BIT(4) /* ??? */
+#define                AR9170_USB_INTR_SRC0_ABORT              BIT(7)
+
 #define        AR9170_USB_REG_INTR_SOURCE_1            (AR9170_USB_REG_BASE + 0x022)
 #define        AR9170_USB_REG_INTR_SOURCE_2            (AR9170_USB_REG_BASE + 0x023)
 #define        AR9170_USB_REG_INTR_SOURCE_3            (AR9170_USB_REG_BASE + 0x024)
 #define        AR9170_USB_REG_INTR_SOURCE_5            (AR9170_USB_REG_BASE + 0x026)
 #define        AR9170_USB_REG_INTR_SOURCE_6            (AR9170_USB_REG_BASE + 0x027)
 #define        AR9170_USB_REG_INTR_SOURCE_7            (AR9170_USB_REG_BASE + 0x028)
+#define                AR9170_USB_INTR_SRC7_USB_RESET          BIT(1)
+#define                AR9170_USB_INTR_SRC7_USB_SUSPEND        BIT(2)
+#define                AR9170_USB_INTR_SRC7_USB_RESUME         BIT(3)
+#define                AR9170_USB_INTR_SRC7_ISO_SEQ_ERR        BIT(4)
+#define                AR9170_USB_INTR_SRC7_ISO_SEQ_ABORT      BIT(5)
+#define                AR9170_USB_INTR_SRC7_TX0BYTE            BIT(6)
+#define                AR9170_USB_INTR_SRC7_RX0BYTE            BIT(7)
+
+#define        AR9170_USB_REG_IDLE_COUNT               (AR9170_USB_REG_BASE + 0x02f)
 
 #define        AR9170_USB_REG_EP_MAP                   (AR9170_USB_REG_BASE + 0x030)
 #define        AR9170_USB_REG_EP1_MAP                  (AR9170_USB_REG_BASE + 0x030)
 
 #define        AR9170_USB_REG_MAX_AGG_UPLOAD           (AR9170_USB_REG_BASE + 0x110)
 #define        AR9170_USB_REG_UPLOAD_TIME_CTL          (AR9170_USB_REG_BASE + 0x114)
+
+#define AR9170_USB_REG_WAKE_UP                 (AR9170_USB_REG_BASE + 0x120)
+#define                AR9170_USB_WAKE_UP_WAKE                 BIT(0)
+
 #define        AR9170_USB_REG_CBUS_CTRL                (AR9170_USB_REG_BASE + 0x1f0)
 #define                AR9170_USB_CBUS_CTRL_BUFFER_END         (BIT(1))
 
index 870df8c42622bab94db2aab3da880ad61381bb16..ede3d7e5a048d3a356cc56890318ea391df81241 100644 (file)
@@ -662,6 +662,13 @@ init:
                        goto unlock;
        }
 
+       if (ar->fw.tx_seq_table) {
+               err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
+                                        0);
+               if (err)
+                       goto unlock;
+       }
+
 unlock:
        if (err && (vif_id >= 0)) {
                vif_priv->active = false;
@@ -1279,7 +1286,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
                                    struct ieee80211_vif *vif,
                                    enum ieee80211_ampdu_mlme_action action,
                                    struct ieee80211_sta *sta,
-                                   u16 tid, u16 *ssn)
+                                   u16 tid, u16 *ssn, u8 buf_size)
 {
        struct ar9170 *ar = hw->priv;
        struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
index 6cc58e052d101727c00e084667ba4527d7d74ef1..0ef70b6fc512b672d97360e0b48bab40796a2794 100644 (file)
@@ -862,6 +862,9 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
        if (unlikely(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM))
                txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB;
 
+       if (unlikely(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
+               txc->s.misc |= CARL9170_TX_SUPER_MISC_ASSIGN_SEQ;
+
        if (unlikely(ieee80211_is_probe_resp(hdr->frame_control)))
                txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF;
 
@@ -1336,7 +1339,7 @@ err_unlock_rcu:
        return false;
 }
 
-int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct ar9170 *ar = hw->priv;
        struct ieee80211_tx_info *info;
@@ -1370,12 +1373,11 @@ int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        }
 
        carl9170_tx(ar);
-       return NETDEV_TX_OK;
+       return;
 
 err_free:
        ar->tx_dropped++;
        dev_kfree_skb_any(skb);
-       return NETDEV_TX_OK;
 }
 
 void carl9170_tx_scheduler(struct ar9170 *ar)
index ee0f84f2a2f6e791b64ac627e1934597d19b2d53..15095c035169c71c2f0b0576329d50e45be3e004 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef __CARL9170_SHARED_VERSION_H
 #define __CARL9170_SHARED_VERSION_H
-#define CARL9170FW_VERSION_YEAR 10
-#define CARL9170FW_VERSION_MONTH 10
-#define CARL9170FW_VERSION_DAY 29
-#define CARL9170FW_VERSION_GIT "1.9.0"
+#define CARL9170FW_VERSION_YEAR 11
+#define CARL9170FW_VERSION_MONTH 1
+#define CARL9170FW_VERSION_DAY 22
+#define CARL9170FW_VERSION_GIT "1.9.2"
 #endif /* __CARL9170_SHARED_VERSION_H */
index 24d63b583b6b08459d657055d70836c621106177..9e1324b67e08e19cc0bea538b74ed9ab0b5fb924 100644 (file)
@@ -251,7 +251,7 @@ struct carl9170_tx_superdesc {
        u8 ampdu_commit_factor:1;
        u8 ampdu_unused_bit:1;
        u8 queue:2;
-       u8 reserved:1;
+       u8 assign_seq:1;
        u8 vif_id:3;
        u8 fill_in_tsf:1;
        u8 cab:1;
@@ -299,6 +299,7 @@ struct _ar9170_tx_hwdesc {
 
 #define CARL9170_TX_SUPER_MISC_QUEUE                   0x3
 #define CARL9170_TX_SUPER_MISC_QUEUE_S                 0
+#define CARL9170_TX_SUPER_MISC_ASSIGN_SEQ              0x4
 #define        CARL9170_TX_SUPER_MISC_VIF_ID                   0x38
 #define        CARL9170_TX_SUPER_MISC_VIF_ID_S                 3
 #define        CARL9170_TX_SUPER_MISC_FILL_IN_TSF              0x40
@@ -413,6 +414,23 @@ enum ar9170_txq {
        __AR9170_NUM_TXQ,
 };
 
+/*
+ * This is a workaround for several undocumented bugs.
+ * Don't mess with the QoS/AC <-> HW Queue map, if you don't
+ * know what you are doing.
+ *
+ * Known problems [hardware]:
+ *  * The MAC does not aggregate frames on anything other
+ *    than the first HW queue.
+ *  * When an AMPDU is placed [in the first hw queue] and
+ *    additional frames are already queued on a different
+ *    hw queue, the MAC will ALWAYS freeze.
+ *
+ * In a nutshell: The hardware can either do QoS or
+ * Aggregation but not both at the same time. As a
+ * result, this makes the device pretty much useless
+ * for any serious 802.11n setup.
+ */
 static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 2, 1, 0, 3 };
 
 #define        AR9170_TXQ_DEPTH                        32
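
A minimal illustration of the remapping that the comment above warns about, assuming the frame's mac80211 queue index (0..3) is used to select the hardware queue; the helper name carl9170_hw_queue() is hypothetical:

static inline u8 carl9170_hw_queue(struct sk_buff *skb)
{
	/* skb_get_queue_mapping() yields the AC index chosen by mac80211;
	 * ar9170_qmap[] swaps it into the quirky hardware queue order. */
	return ar9170_qmap[skb_get_queue_mapping(skb)];
}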
index 5d465e5fcf24560850eba244f3d266cbbda0c8da..37b8e115375ad84f2137afa28734faba4220637a 100644 (file)
@@ -58,8 +58,11 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
                REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
                REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
                REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
-               if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)
+               if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) {
                        REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
+                       REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
+                                 AR_KEYTABLE_TYPE_CLR);
+               }
 
        }
 
index 2b14775e6bc63adc44e5a46faa790123f97271bf..f828f294ba89f90efc966db7ec922739b8767980 100644 (file)
@@ -158,6 +158,13 @@ ieee80211_regdomain *ath_world_regdomain(struct ath_regulatory *reg)
        }
 }
 
+bool ath_is_49ghz_allowed(u16 regdomain)
+{
+       /* possibly more */
+       return regdomain == MKK9_MKKC;
+}
+EXPORT_SYMBOL(ath_is_49ghz_allowed);
+
 /* Frequency is one where radar detection is required */
 static bool ath_is_radar_freq(u16 center_freq)
 {
index 345dd9721b415972d82d8309ce04aeec6c6bbc37..172f63f671cff13ba1ec728b933caddeff41d6b9 100644 (file)
@@ -250,6 +250,7 @@ enum CountryCode {
 };
 
 bool ath_is_world_regd(struct ath_regulatory *reg);
+bool ath_is_49ghz_allowed(u16 regdomain);
 int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy,
                  int (*reg_notifier)(struct wiphy *wiphy,
                  struct regulatory_request *request));
index 22bc9f17f634dff29054bd38ad2d99e684f078ca..57eb5b6497308d7cbb6b77816915ab15132ac8d6 100644 (file)
@@ -3203,7 +3203,7 @@ static void b43_tx_work(struct work_struct *work)
        mutex_unlock(&wl->mutex);
 }
 
-static int b43_op_tx(struct ieee80211_hw *hw,
+static void b43_op_tx(struct ieee80211_hw *hw,
                     struct sk_buff *skb)
 {
        struct b43_wl *wl = hw_to_b43_wl(hw);
@@ -3211,14 +3211,12 @@ static int b43_op_tx(struct ieee80211_hw *hw,
        if (unlikely(skb->len < 2 + 2 + 6)) {
                /* Too short, this can't be a valid frame. */
                dev_kfree_skb_any(skb);
-               return NETDEV_TX_OK;
+               return;
        }
        B43_WARN_ON(skb_shinfo(skb)->nr_frags);
 
        skb_queue_tail(&wl->tx_queue, skb);
        ieee80211_queue_work(wl->hw, &wl->tx_work);
-
-       return NETDEV_TX_OK;
 }
 
 static void b43_qos_params_upload(struct b43_wldev *dev,
index ab81ed8b19d7e9030327123b5417be4d6ae10f9c..9f5a3c99323900ddc0b35de9d45b7d773fbe95fe 100644 (file)
@@ -430,9 +430,9 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
        bool workaround = false;
 
        if (sprom->revision < 4)
-               workaround = (binfo->vendor != PCI_VENDOR_ID_BROADCOM ||
-                               binfo->type != 0x46D ||
-                               binfo->rev < 0x41);
+               workaround = (binfo->vendor != PCI_VENDOR_ID_BROADCOM &&
+                               binfo->type == 0x46D &&
+                               binfo->rev >= 0x41);
        else
                workaround =
                        !(sprom->boardflags2_lo & B43_BFL2_RXBB_INT_REG_DIS);
@@ -1281,17 +1281,17 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
                                                B43_NPHY_TABLE_DATALO, tmp);
                                }
                        }
+               }
 
-                       b43_nphy_set_rf_sequence(dev, 5,
-                                       rfseq_events, rfseq_delays, 3);
-                       b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
-                               ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
-                               0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
+               b43_nphy_set_rf_sequence(dev, 5,
+                               rfseq_events, rfseq_delays, 3);
+               b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
+                       ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
+                       0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
 
-                       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
-                               b43_phy_maskset(dev, B43_PHY_N(0xC5D),
-                                               0xFF80, 4);
-               }
+               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+                       b43_phy_maskset(dev, B43_PHY_N(0xC5D),
+                                       0xFF80, 4);
        }
 }
 
@@ -2128,7 +2128,7 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
                save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
                save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0);
                save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1);
-       } else if (dev->phy.rev == 2) {
+       } else {
                save_regs_phy[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
                save_regs_phy[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
                save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
@@ -2179,7 +2179,7 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
                b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]);
                b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]);
                b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]);
-       } else if (dev->phy.rev == 2) {
+       } else {
                b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[0]);
                b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[1]);
                b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[2]);
index dc8ef09a85529d846dd2407fc5a70e40ecbbf789..c42b2acea24e7450bdc99eff360e9e355678b023 100644 (file)
@@ -1097,6 +1097,1080 @@ static const u32 b43_ntab_tmap[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 };
 
+/* static tables, PHY revision >= 3 */
+static const u32 b43_ntab_framestruct_r3[] = {
+       0x08004a04, 0x00100000, 0x01000a05, 0x00100020,
+       0x09804506, 0x00100030, 0x09804507, 0x00100030,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x08004a0c, 0x00100004, 0x01000a0d, 0x00100024,
+       0x0980450e, 0x00100034, 0x0980450f, 0x00100034,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000a04, 0x00100000, 0x11008a05, 0x00100020,
+       0x1980c506, 0x00100030, 0x21810506, 0x00100030,
+       0x21810506, 0x00100030, 0x01800504, 0x00100030,
+       0x11808505, 0x00100030, 0x29814507, 0x01100030,
+       0x00000a04, 0x00100000, 0x11008a05, 0x00100020,
+       0x21810506, 0x00100030, 0x21810506, 0x00100030,
+       0x29814507, 0x01100030, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028,
+       0x1980c50e, 0x00100038, 0x2181050e, 0x00100038,
+       0x2181050e, 0x00100038, 0x0180050c, 0x00100038,
+       0x1180850d, 0x00100038, 0x2981450f, 0x01100038,
+       0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028,
+       0x2181050e, 0x00100038, 0x2181050e, 0x00100038,
+       0x2981450f, 0x01100038, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x08004a04, 0x00100000, 0x01000a05, 0x00100020,
+       0x1980c506, 0x00100030, 0x1980c506, 0x00100030,
+       0x11808504, 0x00100030, 0x3981ca05, 0x00100030,
+       0x29814507, 0x01100030, 0x00000000, 0x00000000,
+       0x10008a04, 0x00100000, 0x3981ca05, 0x00100030,
+       0x1980c506, 0x00100030, 0x29814507, 0x01100030,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x08004a0c, 0x00100008, 0x01000a0d, 0x00100028,
+       0x1980c50e, 0x00100038, 0x1980c50e, 0x00100038,
+       0x1180850c, 0x00100038, 0x3981ca0d, 0x00100038,
+       0x2981450f, 0x01100038, 0x00000000, 0x00000000,
+       0x10008a0c, 0x00100008, 0x3981ca0d, 0x00100038,
+       0x1980c50e, 0x00100038, 0x2981450f, 0x01100038,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x40021404, 0x00100000, 0x02001405, 0x00100040,
+       0x0b004a06, 0x01900060, 0x13008a06, 0x01900060,
+       0x13008a06, 0x01900060, 0x43020a04, 0x00100060,
+       0x1b00ca05, 0x00100060, 0x23010a07, 0x01500060,
+       0x40021404, 0x00100000, 0x1a00d405, 0x00100040,
+       0x13008a06, 0x01900060, 0x13008a06, 0x01900060,
+       0x23010a07, 0x01500060, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x4002140c, 0x00100010, 0x0200140d, 0x00100050,
+       0x0b004a0e, 0x01900070, 0x13008a0e, 0x01900070,
+       0x13008a0e, 0x01900070, 0x43020a0c, 0x00100070,
+       0x1b00ca0d, 0x00100070, 0x23010a0f, 0x01500070,
+       0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
+       0x13008a0e, 0x01900070, 0x13008a0e, 0x01900070,
+       0x23010a0f, 0x01500070, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x50029404, 0x00100000, 0x32019405, 0x00100040,
+       0x0b004a06, 0x01900060, 0x0b004a06, 0x01900060,
+       0x5b02ca04, 0x00100060, 0x3b01d405, 0x00100060,
+       0x23010a07, 0x01500060, 0x00000000, 0x00000000,
+       0x5802d404, 0x00100000, 0x3b01d405, 0x00100060,
+       0x0b004a06, 0x01900060, 0x23010a07, 0x01500060,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x5002940c, 0x00100010, 0x3201940d, 0x00100050,
+       0x0b004a0e, 0x01900070, 0x0b004a0e, 0x01900070,
+       0x5b02ca0c, 0x00100070, 0x3b01d40d, 0x00100070,
+       0x23010a0f, 0x01500070, 0x00000000, 0x00000000,
+       0x5802d40c, 0x00100010, 0x3b01d40d, 0x00100070,
+       0x0b004a0e, 0x01900070, 0x23010a0f, 0x01500070,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x40021404, 0x000f4800, 0x62031405, 0x00100040,
+       0x53028a06, 0x01900060, 0x53028a07, 0x01900060,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x4002140c, 0x000f4808, 0x6203140d, 0x00100048,
+       0x53028a0e, 0x01900068, 0x53028a0f, 0x01900068,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000a0c, 0x00100004, 0x11008a0d, 0x00100024,
+       0x1980c50e, 0x00100034, 0x2181050e, 0x00100034,
+       0x2181050e, 0x00100034, 0x0180050c, 0x00100038,
+       0x1180850d, 0x00100038, 0x1181850d, 0x00100038,
+       0x2981450f, 0x01100038, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028,
+       0x2181050e, 0x00100038, 0x2181050e, 0x00100038,
+       0x1181850d, 0x00100038, 0x2981450f, 0x01100038,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x08004a04, 0x00100000, 0x01000a05, 0x00100020,
+       0x0180c506, 0x00100030, 0x0180c506, 0x00100030,
+       0x2180c50c, 0x00100030, 0x49820a0d, 0x0016a130,
+       0x41824a0d, 0x0016a130, 0x2981450f, 0x01100030,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x2000ca0c, 0x00100000, 0x49820a0d, 0x0016a130,
+       0x1980c50e, 0x00100030, 0x41824a0d, 0x0016a130,
+       0x2981450f, 0x01100030, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x4002140c, 0x00100008, 0x0200140d, 0x00100048,
+       0x0b004a0e, 0x01900068, 0x13008a0e, 0x01900068,
+       0x13008a0e, 0x01900068, 0x43020a0c, 0x00100070,
+       0x1b00ca0d, 0x00100070, 0x1b014a0d, 0x00100070,
+       0x23010a0f, 0x01500070, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
+       0x13008a0e, 0x01900070, 0x13008a0e, 0x01900070,
+       0x1b014a0d, 0x00100070, 0x23010a0f, 0x01500070,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x50029404, 0x00100000, 0x32019405, 0x00100040,
+       0x03004a06, 0x01900060, 0x03004a06, 0x01900060,
+       0x6b030a0c, 0x00100060, 0x4b02140d, 0x0016a160,
+       0x4302540d, 0x0016a160, 0x23010a0f, 0x01500060,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x6b03140c, 0x00100060, 0x4b02140d, 0x0016a160,
+       0x0b004a0e, 0x01900060, 0x4302540d, 0x0016a160,
+       0x23010a0f, 0x01500060, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x40021404, 0x00100000, 0x1a00d405, 0x00100040,
+       0x53028a06, 0x01900060, 0x5b02ca06, 0x01900060,
+       0x5b02ca06, 0x01900060, 0x43020a04, 0x00100060,
+       0x1b00ca05, 0x00100060, 0x53028a07, 0x0190c060,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
+       0x53028a0e, 0x01900070, 0x5b02ca0e, 0x01900070,
+       0x5b02ca0e, 0x01900070, 0x43020a0c, 0x00100070,
+       0x1b00ca0d, 0x00100070, 0x53028a0f, 0x0190c070,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x40021404, 0x00100000, 0x1a00d405, 0x00100040,
+       0x5b02ca06, 0x01900060, 0x5b02ca06, 0x01900060,
+       0x53028a07, 0x0190c060, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
+       0x5b02ca0e, 0x01900070, 0x5b02ca0e, 0x01900070,
+       0x53028a0f, 0x0190c070, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u16 b43_ntab_pilot_r3[] = {
+       0xff08, 0xff08, 0xff08, 0xff08, 0xff08, 0xff08,
+       0xff08, 0xff08, 0x80d5, 0x80d5, 0x80d5, 0x80d5,
+       0x80d5, 0x80d5, 0x80d5, 0x80d5, 0xff0a, 0xff82,
+       0xffa0, 0xff28, 0xffff, 0xffff, 0xffff, 0xffff,
+       0xff82, 0xffa0, 0xff28, 0xff0a, 0xffff, 0xffff,
+       0xffff, 0xffff, 0xf83f, 0xfa1f, 0xfa97, 0xfab5,
+       0xf2bd, 0xf0bf, 0xffff, 0xffff, 0xf017, 0xf815,
+       0xf215, 0xf095, 0xf035, 0xf01d, 0xffff, 0xffff,
+       0xff08, 0xff02, 0xff80, 0xff20, 0xff08, 0xff02,
+       0xff80, 0xff20, 0xf01f, 0xf817, 0xfa15, 0xf295,
+       0xf0b5, 0xf03d, 0xffff, 0xffff, 0xf82a, 0xfa0a,
+       0xfa82, 0xfaa0, 0xf2a8, 0xf0aa, 0xffff, 0xffff,
+       0xf002, 0xf800, 0xf200, 0xf080, 0xf020, 0xf008,
+       0xffff, 0xffff, 0xf00a, 0xf802, 0xfa00, 0xf280,
+       0xf0a0, 0xf028, 0xffff, 0xffff,
+};
+
+static const u32 b43_ntab_tmap_r3[] = {
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0xf1111110, 0x11111111, 0x11f11111, 0x00000111,
+       0x11000000, 0x1111f111, 0x11111111, 0x111111f1,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x000aa888,
+       0x88880000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
+       0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
+       0xa2222220, 0x22222222, 0x22c22222, 0x00000222,
+       0x22000000, 0x2222a222, 0x22222222, 0x222222a2,
+       0xf1111110, 0x11111111, 0x11f11111, 0x00011111,
+       0x11110000, 0x1111f111, 0x11111111, 0x111111f1,
+       0xa8aa88a0, 0xa88888a8, 0xa8a8a88a, 0x00088aaa,
+       0xaaaa0000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a,
+       0xaaa8aaa0, 0x8aaa8aaa, 0xaa8a8a8a, 0x000aaa88,
+       0x8aaa0000, 0xaaa8a888, 0x8aa88a8a, 0x8a88a888,
+       0x08080a00, 0x0a08080a, 0x080a0a08, 0x00080808,
+       0x080a0000, 0x080a0808, 0x080a0808, 0x0a0a0a08,
+       0xa0a0a0a0, 0x80a0a080, 0x8080a0a0, 0x00008080,
+       0x80a00000, 0x80a080a0, 0xa080a0a0, 0x8080a0a0,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x99999000, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9,
+       0x9b99bb90, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00aaa888,
+       0x22000000, 0x2222b222, 0x22222222, 0x222222b2,
+       0xb2222220, 0x22222222, 0x22d22222, 0x00000222,
+       0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
+       0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
+       0x33000000, 0x3333b333, 0x33333333, 0x333333b3,
+       0xb3333330, 0x33333333, 0x33d33333, 0x00000333,
+       0x22000000, 0x2222a222, 0x22222222, 0x222222a2,
+       0xa2222220, 0x22222222, 0x22c22222, 0x00000222,
+       0x99b99b00, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9,
+       0x9b99bb99, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x08aaa888,
+       0x22222200, 0x2222f222, 0x22222222, 0x222222f2,
+       0x22222222, 0x22222222, 0x22f22222, 0x00000222,
+       0x11000000, 0x1111f111, 0x11111111, 0x11111111,
+       0xf1111111, 0x11111111, 0x11f11111, 0x01111111,
+       0xbb9bb900, 0xb9b9bb99, 0xb99bbbbb, 0xbbbb9b9b,
+       0xb9bb99bb, 0xb99999b9, 0xb9b9b99b, 0x00000bbb,
+       0xaa000000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a,
+       0xa8aa88aa, 0xa88888a8, 0xa8a8a88a, 0x0a888aaa,
+       0xaa000000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a,
+       0xa8aa88a0, 0xa88888a8, 0xa8a8a88a, 0x00000aaa,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+       0xbbbbbb00, 0x999bbbbb, 0x9bb99b9b, 0xb9b9b9bb,
+       0xb9b99bbb, 0xb9b9b9bb, 0xb9bb9b99, 0x00000999,
+       0x8a000000, 0xaa88a888, 0xa88888aa, 0xa88a8a88,
+       0xa88aa88a, 0x88a8aaaa, 0xa8aa8aaa, 0x0888a88a,
+       0x0b0b0b00, 0x090b0b0b, 0x0b090b0b, 0x0909090b,
+       0x09090b0b, 0x09090b0b, 0x09090b09, 0x00000909,
+       0x0a000000, 0x0a080808, 0x080a080a, 0x080a0a08,
+       0x080a080a, 0x0808080a, 0x0a0a0a08, 0x0808080a,
+       0xb0b0b000, 0x9090b0b0, 0x90b09090, 0xb0b0b090,
+       0xb0b090b0, 0x90b0b0b0, 0xb0b09090, 0x00000090,
+       0x80000000, 0xa080a080, 0xa08080a0, 0xa0808080,
+       0xa080a080, 0x80a0a0a0, 0xa0a080a0, 0x00a0a0a0,
+       0x22000000, 0x2222f222, 0x22222222, 0x222222f2,
+       0xf2222220, 0x22222222, 0x22f22222, 0x00000222,
+       0x11000000, 0x1111f111, 0x11111111, 0x111111f1,
+       0xf1111110, 0x11111111, 0x11f11111, 0x00000111,
+       0x33000000, 0x3333f333, 0x33333333, 0x333333f3,
+       0xf3333330, 0x33333333, 0x33f33333, 0x00000333,
+       0x22000000, 0x2222f222, 0x22222222, 0x222222f2,
+       0xf2222220, 0x22222222, 0x22f22222, 0x00000222,
+       0x99000000, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9,
+       0x9b99bb90, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+       0x88888000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00aaa888,
+       0x88a88a00, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x08aaa888,
+       0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
+       0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
+       0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
+       0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_intlevel_r3[] = {
+       0x00802070, 0x0671188d, 0x0a60192c, 0x0a300e46,
+       0x00c1188d, 0x080024d2, 0x00000070,
+};
+
+static const u32 b43_ntab_tdtrn_r3[] = {
+       0x061c061c, 0x0050ee68, 0xf592fe36, 0xfe5212f6,
+       0x00000c38, 0xfe5212f6, 0xf592fe36, 0x0050ee68,
+       0x061c061c, 0xee680050, 0xfe36f592, 0x12f6fe52,
+       0x0c380000, 0x12f6fe52, 0xfe36f592, 0xee680050,
+       0x061c061c, 0x0050ee68, 0xf592fe36, 0xfe5212f6,
+       0x00000c38, 0xfe5212f6, 0xf592fe36, 0x0050ee68,
+       0x061c061c, 0xee680050, 0xfe36f592, 0x12f6fe52,
+       0x0c380000, 0x12f6fe52, 0xfe36f592, 0xee680050,
+       0x05e305e3, 0x004def0c, 0xf5f3fe47, 0xfe611246,
+       0x00000bc7, 0xfe611246, 0xf5f3fe47, 0x004def0c,
+       0x05e305e3, 0xef0c004d, 0xfe47f5f3, 0x1246fe61,
+       0x0bc70000, 0x1246fe61, 0xfe47f5f3, 0xef0c004d,
+       0x05e305e3, 0x004def0c, 0xf5f3fe47, 0xfe611246,
+       0x00000bc7, 0xfe611246, 0xf5f3fe47, 0x004def0c,
+       0x05e305e3, 0xef0c004d, 0xfe47f5f3, 0x1246fe61,
+       0x0bc70000, 0x1246fe61, 0xfe47f5f3, 0xef0c004d,
+       0xfa58fa58, 0xf895043b, 0xff4c09c0, 0xfbc6ffa8,
+       0xfb84f384, 0x0798f6f9, 0x05760122, 0x058409f6,
+       0x0b500000, 0x05b7f542, 0x08860432, 0x06ddfee7,
+       0xfb84f384, 0xf9d90664, 0xf7e8025c, 0x00fff7bd,
+       0x05a805a8, 0xf7bd00ff, 0x025cf7e8, 0x0664f9d9,
+       0xf384fb84, 0xfee706dd, 0x04320886, 0xf54205b7,
+       0x00000b50, 0x09f60584, 0x01220576, 0xf6f90798,
+       0xf384fb84, 0xffa8fbc6, 0x09c0ff4c, 0x043bf895,
+       0x02d402d4, 0x07de0270, 0xfc96079c, 0xf90afe94,
+       0xfe00ff2c, 0x02d4065d, 0x092a0096, 0x0014fbb8,
+       0xfd2cfd2c, 0x076afb3c, 0x0096f752, 0xf991fd87,
+       0xfb2c0200, 0xfeb8f960, 0x08e0fc96, 0x049802a8,
+       0xfd2cfd2c, 0x02a80498, 0xfc9608e0, 0xf960feb8,
+       0x0200fb2c, 0xfd87f991, 0xf7520096, 0xfb3c076a,
+       0xfd2cfd2c, 0xfbb80014, 0x0096092a, 0x065d02d4,
+       0xff2cfe00, 0xfe94f90a, 0x079cfc96, 0x027007de,
+       0x02d402d4, 0x027007de, 0x079cfc96, 0xfe94f90a,
+       0xff2cfe00, 0x065d02d4, 0x0096092a, 0xfbb80014,
+       0xfd2cfd2c, 0xfb3c076a, 0xf7520096, 0xfd87f991,
+       0x0200fb2c, 0xf960feb8, 0xfc9608e0, 0x02a80498,
+       0xfd2cfd2c, 0x049802a8, 0x08e0fc96, 0xfeb8f960,
+       0xfb2c0200, 0xf991fd87, 0x0096f752, 0x076afb3c,
+       0xfd2cfd2c, 0x0014fbb8, 0x092a0096, 0x02d4065d,
+       0xfe00ff2c, 0xf90afe94, 0xfc96079c, 0x07de0270,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x062a0000, 0xfefa0759, 0x08b80908, 0xf396fc2d,
+       0xf9d6045c, 0xfc4ef608, 0xf748f596, 0x07b207bf,
+       0x062a062a, 0xf84ef841, 0xf748f596, 0x03b209f8,
+       0xf9d6045c, 0x0c6a03d3, 0x08b80908, 0x0106f8a7,
+       0x062a0000, 0xfefaf8a7, 0x08b8f6f8, 0xf39603d3,
+       0xf9d6fba4, 0xfc4e09f8, 0xf7480a6a, 0x07b2f841,
+       0x062af9d6, 0xf84e07bf, 0xf7480a6a, 0x03b2f608,
+       0xf9d6fba4, 0x0c6afc2d, 0x08b8f6f8, 0x01060759,
+       0x062a0000, 0xfefa0759, 0x08b80908, 0xf396fc2d,
+       0xf9d6045c, 0xfc4ef608, 0xf748f596, 0x07b207bf,
+       0x062a062a, 0xf84ef841, 0xf748f596, 0x03b209f8,
+       0xf9d6045c, 0x0c6a03d3, 0x08b80908, 0x0106f8a7,
+       0x062a0000, 0xfefaf8a7, 0x08b8f6f8, 0xf39603d3,
+       0xf9d6fba4, 0xfc4e09f8, 0xf7480a6a, 0x07b2f841,
+       0x062af9d6, 0xf84e07bf, 0xf7480a6a, 0x03b2f608,
+       0xf9d6fba4, 0x0c6afc2d, 0x08b8f6f8, 0x01060759,
+       0x061c061c, 0xff30009d, 0xffb21141, 0xfd87fb54,
+       0xf65dfe59, 0x02eef99e, 0x0166f03c, 0xfff809b6,
+       0x000008a4, 0x000af42b, 0x00eff577, 0xfa840bf2,
+       0xfc02ff51, 0x08260f67, 0xfff0036f, 0x0842f9c3,
+       0x00000000, 0x063df7be, 0xfc910010, 0xf099f7da,
+       0x00af03fe, 0xf40e057c, 0x0a89ff11, 0x0bd5fff6,
+       0xf75c0000, 0xf64a0008, 0x0fc4fe9a, 0x0662fd12,
+       0x01a709a3, 0x04ac0279, 0xeebf004e, 0xff6300d0,
+       0xf9e4f9e4, 0x00d0ff63, 0x004eeebf, 0x027904ac,
+       0x09a301a7, 0xfd120662, 0xfe9a0fc4, 0x0008f64a,
+       0x0000f75c, 0xfff60bd5, 0xff110a89, 0x057cf40e,
+       0x03fe00af, 0xf7daf099, 0x0010fc91, 0xf7be063d,
+       0x00000000, 0xf9c30842, 0x036ffff0, 0x0f670826,
+       0xff51fc02, 0x0bf2fa84, 0xf57700ef, 0xf42b000a,
+       0x08a40000, 0x09b6fff8, 0xf03c0166, 0xf99e02ee,
+       0xfe59f65d, 0xfb54fd87, 0x1141ffb2, 0x009dff30,
+       0x05e30000, 0xff060705, 0x085408a0, 0xf425fc59,
+       0xfa1d042a, 0xfc78f67a, 0xf7acf60e, 0x075a0766,
+       0x05e305e3, 0xf8a6f89a, 0xf7acf60e, 0x03880986,
+       0xfa1d042a, 0x0bdb03a7, 0x085408a0, 0x00faf8fb,
+       0x05e30000, 0xff06f8fb, 0x0854f760, 0xf42503a7,
+       0xfa1dfbd6, 0xfc780986, 0xf7ac09f2, 0x075af89a,
+       0x05e3fa1d, 0xf8a60766, 0xf7ac09f2, 0x0388f67a,
+       0xfa1dfbd6, 0x0bdbfc59, 0x0854f760, 0x00fa0705,
+       0x05e30000, 0xff060705, 0x085408a0, 0xf425fc59,
+       0xfa1d042a, 0xfc78f67a, 0xf7acf60e, 0x075a0766,
+       0x05e305e3, 0xf8a6f89a, 0xf7acf60e, 0x03880986,
+       0xfa1d042a, 0x0bdb03a7, 0x085408a0, 0x00faf8fb,
+       0x05e30000, 0xff06f8fb, 0x0854f760, 0xf42503a7,
+       0xfa1dfbd6, 0xfc780986, 0xf7ac09f2, 0x075af89a,
+       0x05e3fa1d, 0xf8a60766, 0xf7ac09f2, 0x0388f67a,
+       0xfa1dfbd6, 0x0bdbfc59, 0x0854f760, 0x00fa0705,
+       0xfa58fa58, 0xf8f0fe00, 0x0448073d, 0xfdc9fe46,
+       0xf9910258, 0x089d0407, 0xfd5cf71a, 0x02affde0,
+       0x083e0496, 0xff5a0740, 0xff7afd97, 0x00fe01f1,
+       0x0009082e, 0xfa94ff75, 0xfecdf8ea, 0xffb0f693,
+       0xfd2cfa58, 0x0433ff16, 0xfba405dd, 0xfa610341,
+       0x06a606cb, 0x0039fd2d, 0x0677fa97, 0x01fa05e0,
+       0xf896003e, 0x075a068b, 0x012cfc3e, 0xfa23f98d,
+       0xfc7cfd43, 0xff90fc0d, 0x01c10982, 0x00c601d6,
+       0xfd2cfd2c, 0x01d600c6, 0x098201c1, 0xfc0dff90,
+       0xfd43fc7c, 0xf98dfa23, 0xfc3e012c, 0x068b075a,
+       0x003ef896, 0x05e001fa, 0xfa970677, 0xfd2d0039,
+       0x06cb06a6, 0x0341fa61, 0x05ddfba4, 0xff160433,
+       0xfa58fd2c, 0xf693ffb0, 0xf8eafecd, 0xff75fa94,
+       0x082e0009, 0x01f100fe, 0xfd97ff7a, 0x0740ff5a,
+       0x0496083e, 0xfde002af, 0xf71afd5c, 0x0407089d,
+       0x0258f991, 0xfe46fdc9, 0x073d0448, 0xfe00f8f0,
+       0xfd2cfd2c, 0xfce00500, 0xfc09fddc, 0xfe680157,
+       0x04c70571, 0xfc3aff21, 0xfcd70228, 0x056d0277,
+       0x0200fe00, 0x0022f927, 0xfe3c032b, 0xfc44ff3c,
+       0x03e9fbdb, 0x04570313, 0x04c9ff5c, 0x000d03b8,
+       0xfa580000, 0xfbe900d2, 0xf9d0fe0b, 0x0125fdf9,
+       0x042501bf, 0x0328fa2b, 0xffa902f0, 0xfa250157,
+       0x0200fe00, 0x03740438, 0xff0405fd, 0x030cfe52,
+       0x0037fb39, 0xff6904c5, 0x04f8fd23, 0xfd31fc1b,
+       0xfd2cfd2c, 0xfc1bfd31, 0xfd2304f8, 0x04c5ff69,
+       0xfb390037, 0xfe52030c, 0x05fdff04, 0x04380374,
+       0xfe000200, 0x0157fa25, 0x02f0ffa9, 0xfa2b0328,
+       0x01bf0425, 0xfdf90125, 0xfe0bf9d0, 0x00d2fbe9,
+       0x0000fa58, 0x03b8000d, 0xff5c04c9, 0x03130457,
+       0xfbdb03e9, 0xff3cfc44, 0x032bfe3c, 0xf9270022,
+       0xfe000200, 0x0277056d, 0x0228fcd7, 0xff21fc3a,
+       0x057104c7, 0x0157fe68, 0xfddcfc09, 0x0500fce0,
+       0xfd2cfd2c, 0x0500fce0, 0xfddcfc09, 0x0157fe68,
+       0x057104c7, 0xff21fc3a, 0x0228fcd7, 0x0277056d,
+       0xfe000200, 0xf9270022, 0x032bfe3c, 0xff3cfc44,
+       0xfbdb03e9, 0x03130457, 0xff5c04c9, 0x03b8000d,
+       0x0000fa58, 0x00d2fbe9, 0xfe0bf9d0, 0xfdf90125,
+       0x01bf0425, 0xfa2b0328, 0x02f0ffa9, 0x0157fa25,
+       0xfe000200, 0x04380374, 0x05fdff04, 0xfe52030c,
+       0xfb390037, 0x04c5ff69, 0xfd2304f8, 0xfc1bfd31,
+       0xfd2cfd2c, 0xfd31fc1b, 0x04f8fd23, 0xff6904c5,
+       0x0037fb39, 0x030cfe52, 0xff0405fd, 0x03740438,
+       0x0200fe00, 0xfa250157, 0xffa902f0, 0x0328fa2b,
+       0x042501bf, 0x0125fdf9, 0xf9d0fe0b, 0xfbe900d2,
+       0xfa580000, 0x000d03b8, 0x04c9ff5c, 0x04570313,
+       0x03e9fbdb, 0xfc44ff3c, 0xfe3c032b, 0x0022f927,
+       0x0200fe00, 0x056d0277, 0xfcd70228, 0xfc3aff21,
+       0x04c70571, 0xfe680157, 0xfc09fddc, 0xfce00500,
+       0x05a80000, 0xff1006be, 0x0800084a, 0xf49cfc7e,
+       0xfa580400, 0xfc9cf6da, 0xf800f672, 0x0710071c,
+       0x05a805a8, 0xf8f0f8e4, 0xf800f672, 0x03640926,
+       0xfa580400, 0x0b640382, 0x0800084a, 0x00f0f942,
+       0x05a80000, 0xff10f942, 0x0800f7b6, 0xf49c0382,
+       0xfa58fc00, 0xfc9c0926, 0xf800098e, 0x0710f8e4,
+       0x05a8fa58, 0xf8f0071c, 0xf800098e, 0x0364f6da,
+       0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be,
+       0x05a80000, 0xff1006be, 0x0800084a, 0xf49cfc7e,
+       0xfa580400, 0xfc9cf6da, 0xf800f672, 0x0710071c,
+       0x05a805a8, 0xf8f0f8e4, 0xf800f672, 0x03640926,
+       0xfa580400, 0x0b640382, 0x0800084a, 0x00f0f942,
+       0x05a80000, 0xff10f942, 0x0800f7b6, 0xf49c0382,
+       0xfa58fc00, 0xfc9c0926, 0xf800098e, 0x0710f8e4,
+       0x05a8fa58, 0xf8f0071c, 0xf800098e, 0x0364f6da,
+       0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be,
+};
+
+static const u32 b43_ntab_noisevar0_r3[] = {
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+};
+
+static const u32 b43_ntab_noisevar1_r3[] = {
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+};
+
+static const u16 b43_ntab_mcs_r3[] = {
+       0x0000, 0x0008, 0x000a, 0x0010, 0x0012, 0x0019,
+       0x001a, 0x001c, 0x0080, 0x0088, 0x008a, 0x0090,
+       0x0092, 0x0099, 0x009a, 0x009c, 0x0100, 0x0108,
+       0x010a, 0x0110, 0x0112, 0x0119, 0x011a, 0x011c,
+       0x0180, 0x0188, 0x018a, 0x0190, 0x0192, 0x0199,
+       0x019a, 0x019c, 0x0000, 0x0098, 0x00a0, 0x00a8,
+       0x009a, 0x00a2, 0x00aa, 0x0120, 0x0128, 0x0128,
+       0x0130, 0x0138, 0x0138, 0x0140, 0x0122, 0x012a,
+       0x012a, 0x0132, 0x013a, 0x013a, 0x0142, 0x01a8,
+       0x01b0, 0x01b8, 0x01b0, 0x01b8, 0x01c0, 0x01c8,
+       0x01c0, 0x01c8, 0x01d0, 0x01d0, 0x01d8, 0x01aa,
+       0x01b2, 0x01ba, 0x01b2, 0x01ba, 0x01c2, 0x01ca,
+       0x01c2, 0x01ca, 0x01d2, 0x01d2, 0x01da, 0x0001,
+       0x0002, 0x0004, 0x0009, 0x000c, 0x0011, 0x0014,
+       0x0018, 0x0020, 0x0021, 0x0022, 0x0024, 0x0081,
+       0x0082, 0x0084, 0x0089, 0x008c, 0x0091, 0x0094,
+       0x0098, 0x00a0, 0x00a1, 0x00a2, 0x00a4, 0x0007,
+       0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
+       0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
+       0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
+       0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
+       0x0007, 0x0007,
+};
+
+static const u32 b43_ntab_tdi20a0_r3[] = {
+       0x00091226, 0x000a1429, 0x000b56ad, 0x000c58b0,
+       0x000d5ab3, 0x000e9cb6, 0x000f9eba, 0x0000c13d,
+       0x00020301, 0x00030504, 0x00040708, 0x0005090b,
+       0x00064b8e, 0x00095291, 0x000a5494, 0x000b9718,
+       0x000c9927, 0x000d9b2a, 0x000edd2e, 0x000fdf31,
+       0x000101b4, 0x000243b7, 0x000345bb, 0x000447be,
+       0x00058982, 0x00068c05, 0x00099309, 0x000a950c,
+       0x000bd78f, 0x000cd992, 0x000ddb96, 0x000f1d99,
+       0x00005fa8, 0x0001422c, 0x0002842f, 0x00038632,
+       0x00048835, 0x0005ca38, 0x0006ccbc, 0x0009d3bf,
+       0x000b1603, 0x000c1806, 0x000d1a0a, 0x000e1c0d,
+       0x000f5e10, 0x00008093, 0x00018297, 0x0002c49a,
+       0x0003c680, 0x0004c880, 0x00060b00, 0x00070d00,
+       0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_tdi20a1_r3[] = {
+       0x00014b26, 0x00028d29, 0x000393ad, 0x00049630,
+       0x0005d833, 0x0006da36, 0x00099c3a, 0x000a9e3d,
+       0x000bc081, 0x000cc284, 0x000dc488, 0x000f068b,
+       0x0000488e, 0x00018b91, 0x0002d214, 0x0003d418,
+       0x0004d6a7, 0x000618aa, 0x00071aae, 0x0009dcb1,
+       0x000b1eb4, 0x000c0137, 0x000d033b, 0x000e053e,
+       0x000f4702, 0x00008905, 0x00020c09, 0x0003128c,
+       0x0004148f, 0x00051712, 0x00065916, 0x00091b19,
+       0x000a1d28, 0x000b5f2c, 0x000c41af, 0x000d43b2,
+       0x000e85b5, 0x000f87b8, 0x0000c9bc, 0x00024cbf,
+       0x00035303, 0x00045506, 0x0005978a, 0x0006998d,
+       0x00095b90, 0x000a5d93, 0x000b9f97, 0x000c821a,
+       0x000d8400, 0x000ec600, 0x000fc800, 0x00010a00,
+       0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_tdi40a0_r3[] = {
+       0x0011a346, 0x00136ccf, 0x0014f5d9, 0x001641e2,
+       0x0017cb6b, 0x00195475, 0x001b2383, 0x001cad0c,
+       0x001e7616, 0x0000821f, 0x00020ba8, 0x0003d4b2,
+       0x00056447, 0x00072dd0, 0x0008b6da, 0x000a02e3,
+       0x000b8c6c, 0x000d15f6, 0x0011e484, 0x0013ae0d,
+       0x00153717, 0x00168320, 0x00180ca9, 0x00199633,
+       0x001b6548, 0x001ceed1, 0x001eb7db, 0x0000c3e4,
+       0x00024d6d, 0x000416f7, 0x0005a585, 0x00076f0f,
+       0x0008f818, 0x000a4421, 0x000bcdab, 0x000d9734,
+       0x00122649, 0x0013efd2, 0x001578dc, 0x0016c4e5,
+       0x00184e6e, 0x001a17f8, 0x001ba686, 0x001d3010,
+       0x001ef999, 0x00010522, 0x00028eac, 0x00045835,
+       0x0005e74a, 0x0007b0d3, 0x00093a5d, 0x000a85e6,
+       0x000c0f6f, 0x000dd8f9, 0x00126787, 0x00143111,
+       0x0015ba9a, 0x00170623, 0x00188fad, 0x001a5936,
+       0x001be84b, 0x001db1d4, 0x001f3b5e, 0x000146e7,
+       0x00031070, 0x000499fa, 0x00062888, 0x0007f212,
+       0x00097b9b, 0x000ac7a4, 0x000c50ae, 0x000e1a37,
+       0x0012a94c, 0x001472d5, 0x0015fc5f, 0x00174868,
+       0x0018d171, 0x001a9afb, 0x001c2989, 0x001df313,
+       0x001f7c9c, 0x000188a5, 0x000351af, 0x0004db38,
+       0x0006aa4d, 0x000833d7, 0x0009bd60, 0x000b0969,
+       0x000c9273, 0x000e5bfc, 0x00132a8a, 0x0014b414,
+       0x00163d9d, 0x001789a6, 0x001912b0, 0x001adc39,
+       0x001c6bce, 0x001e34d8, 0x001fbe61, 0x0001ca6a,
+       0x00039374, 0x00051cfd, 0x0006ec0b, 0x00087515,
+       0x0009fe9e, 0x000b4aa7, 0x000cd3b1, 0x000e9d3a,
+       0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_tdi40a1_r3[] = {
+       0x001edb36, 0x000129ca, 0x0002b353, 0x00047cdd,
+       0x0005c8e6, 0x000791ef, 0x00091bf9, 0x000aaa07,
+       0x000c3391, 0x000dfd1a, 0x00120923, 0x0013d22d,
+       0x00155c37, 0x0016eacb, 0x00187454, 0x001a3dde,
+       0x001b89e7, 0x001d12f0, 0x001f1cfa, 0x00016b88,
+       0x00033492, 0x0004be1b, 0x00060a24, 0x0007d32e,
+       0x00095d38, 0x000aec4c, 0x000c7555, 0x000e3edf,
+       0x00124ae8, 0x001413f1, 0x0015a37b, 0x00172c89,
+       0x0018b593, 0x001a419c, 0x001bcb25, 0x001d942f,
+       0x001f63b9, 0x0001ad4d, 0x00037657, 0x0004c260,
+       0x00068be9, 0x000814f3, 0x0009a47c, 0x000b2d8a,
+       0x000cb694, 0x000e429d, 0x00128c26, 0x001455b0,
+       0x0015e4ba, 0x00176e4e, 0x0018f758, 0x001a8361,
+       0x001c0cea, 0x001dd674, 0x001fa57d, 0x0001ee8b,
+       0x0003b795, 0x0005039e, 0x0006cd27, 0x000856b1,
+       0x0009e5c6, 0x000b6f4f, 0x000cf859, 0x000e8462,
+       0x00130deb, 0x00149775, 0x00162603, 0x0017af8c,
+       0x00193896, 0x001ac49f, 0x001c4e28, 0x001e17b2,
+       0x0000a6c7, 0x00023050, 0x0003f9da, 0x00054563,
+       0x00070eec, 0x00089876, 0x000a2704, 0x000bb08d,
+       0x000d3a17, 0x001185a0, 0x00134f29, 0x0014d8b3,
+       0x001667c8, 0x0017f151, 0x00197adb, 0x001b0664,
+       0x001c8fed, 0x001e5977, 0x0000e805, 0x0002718f,
+       0x00043b18, 0x000586a1, 0x0007502b, 0x0008d9b4,
+       0x000a68c9, 0x000bf252, 0x000dbbdc, 0x0011c7e5,
+       0x001390ee, 0x00151a78, 0x0016a906, 0x00183290,
+       0x0019bc19, 0x001b4822, 0x001cd12c, 0x001e9ab5,
+       0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_pilotlt_r3[] = {
+       0x76540213, 0x62407351, 0x76543210, 0x76540213,
+       0x76540213, 0x76430521,
+};
+
+static const u32 b43_ntab_channelest_r3[] = {
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x44444444, 0x44444444, 0x44444444, 0x44444444,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+       0x10101010, 0x10101010, 0x10101010, 0x10101010,
+};
+
+static const u8 b43_ntab_framelookup_r3[] = {
+       0x02, 0x04, 0x14, 0x14, 0x03, 0x05, 0x16, 0x16,
+       0x0a, 0x0c, 0x1c, 0x1c, 0x0b, 0x0d, 0x1e, 0x1e,
+       0x06, 0x08, 0x18, 0x18, 0x07, 0x09, 0x1a, 0x1a,
+       0x0e, 0x10, 0x20, 0x28, 0x0f, 0x11, 0x22, 0x2a,
+};
+
+static const u8 b43_ntab_estimatepowerlt0_r3[] = {
+       0x55, 0x54, 0x54, 0x53, 0x52, 0x52, 0x51, 0x51,
+       0x50, 0x4f, 0x4f, 0x4e, 0x4e, 0x4d, 0x4c, 0x4c,
+       0x4b, 0x4a, 0x49, 0x49, 0x48, 0x47, 0x46, 0x46,
+       0x45, 0x44, 0x43, 0x42, 0x41, 0x40, 0x40, 0x3f,
+       0x3e, 0x3d, 0x3c, 0x3a, 0x39, 0x38, 0x37, 0x36,
+       0x35, 0x33, 0x32, 0x31, 0x2f, 0x2e, 0x2c, 0x2b,
+       0x29, 0x27, 0x25, 0x23, 0x21, 0x1f, 0x1d, 0x1a,
+       0x18, 0x15, 0x12, 0x0e, 0x0b, 0x07, 0x02, 0xfd,
+};
+
+static const u8 b43_ntab_estimatepowerlt1_r3[] = {
+       0x55, 0x54, 0x54, 0x53, 0x52, 0x52, 0x51, 0x51,
+       0x50, 0x4f, 0x4f, 0x4e, 0x4e, 0x4d, 0x4c, 0x4c,
+       0x4b, 0x4a, 0x49, 0x49, 0x48, 0x47, 0x46, 0x46,
+       0x45, 0x44, 0x43, 0x42, 0x41, 0x40, 0x40, 0x3f,
+       0x3e, 0x3d, 0x3c, 0x3a, 0x39, 0x38, 0x37, 0x36,
+       0x35, 0x33, 0x32, 0x31, 0x2f, 0x2e, 0x2c, 0x2b,
+       0x29, 0x27, 0x25, 0x23, 0x21, 0x1f, 0x1d, 0x1a,
+       0x18, 0x15, 0x12, 0x0e, 0x0b, 0x07, 0x02, 0xfd,
+};
+
+static const u8 b43_ntab_adjustpower0_r3[] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 b43_ntab_adjustpower1_r3[] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u32 b43_ntab_gainctl0_r3[] = {
+       0x5bf70044, 0x5bf70042, 0x5bf70040, 0x5bf7003e,
+       0x5bf7003c, 0x5bf7003b, 0x5bf70039, 0x5bf70037,
+       0x5bf70036, 0x5bf70034, 0x5bf70033, 0x5bf70031,
+       0x5bf70030, 0x5ba70044, 0x5ba70042, 0x5ba70040,
+       0x5ba7003e, 0x5ba7003c, 0x5ba7003b, 0x5ba70039,
+       0x5ba70037, 0x5ba70036, 0x5ba70034, 0x5ba70033,
+       0x5b770044, 0x5b770042, 0x5b770040, 0x5b77003e,
+       0x5b77003c, 0x5b77003b, 0x5b770039, 0x5b770037,
+       0x5b770036, 0x5b770034, 0x5b770033, 0x5b770031,
+       0x5b770030, 0x5b77002f, 0x5b77002d, 0x5b77002c,
+       0x5b470044, 0x5b470042, 0x5b470040, 0x5b47003e,
+       0x5b47003c, 0x5b47003b, 0x5b470039, 0x5b470037,
+       0x5b470036, 0x5b470034, 0x5b470033, 0x5b470031,
+       0x5b470030, 0x5b47002f, 0x5b47002d, 0x5b47002c,
+       0x5b47002b, 0x5b47002a, 0x5b270044, 0x5b270042,
+       0x5b270040, 0x5b27003e, 0x5b27003c, 0x5b27003b,
+       0x5b270039, 0x5b270037, 0x5b270036, 0x5b270034,
+       0x5b270033, 0x5b270031, 0x5b270030, 0x5b27002f,
+       0x5b170044, 0x5b170042, 0x5b170040, 0x5b17003e,
+       0x5b17003c, 0x5b17003b, 0x5b170039, 0x5b170037,
+       0x5b170036, 0x5b170034, 0x5b170033, 0x5b170031,
+       0x5b170030, 0x5b17002f, 0x5b17002d, 0x5b17002c,
+       0x5b17002b, 0x5b17002a, 0x5b170028, 0x5b170027,
+       0x5b170026, 0x5b170025, 0x5b170024, 0x5b170023,
+       0x5b070044, 0x5b070042, 0x5b070040, 0x5b07003e,
+       0x5b07003c, 0x5b07003b, 0x5b070039, 0x5b070037,
+       0x5b070036, 0x5b070034, 0x5b070033, 0x5b070031,
+       0x5b070030, 0x5b07002f, 0x5b07002d, 0x5b07002c,
+       0x5b07002b, 0x5b07002a, 0x5b070028, 0x5b070027,
+       0x5b070026, 0x5b070025, 0x5b070024, 0x5b070023,
+       0x5b070022, 0x5b070021, 0x5b070020, 0x5b07001f,
+       0x5b07001e, 0x5b07001d, 0x5b07001d, 0x5b07001c,
+};
+
+static const u32 b43_ntab_gainctl1_r3[] = {
+       0x5bf70044, 0x5bf70042, 0x5bf70040, 0x5bf7003e,
+       0x5bf7003c, 0x5bf7003b, 0x5bf70039, 0x5bf70037,
+       0x5bf70036, 0x5bf70034, 0x5bf70033, 0x5bf70031,
+       0x5bf70030, 0x5ba70044, 0x5ba70042, 0x5ba70040,
+       0x5ba7003e, 0x5ba7003c, 0x5ba7003b, 0x5ba70039,
+       0x5ba70037, 0x5ba70036, 0x5ba70034, 0x5ba70033,
+       0x5b770044, 0x5b770042, 0x5b770040, 0x5b77003e,
+       0x5b77003c, 0x5b77003b, 0x5b770039, 0x5b770037,
+       0x5b770036, 0x5b770034, 0x5b770033, 0x5b770031,
+       0x5b770030, 0x5b77002f, 0x5b77002d, 0x5b77002c,
+       0x5b470044, 0x5b470042, 0x5b470040, 0x5b47003e,
+       0x5b47003c, 0x5b47003b, 0x5b470039, 0x5b470037,
+       0x5b470036, 0x5b470034, 0x5b470033, 0x5b470031,
+       0x5b470030, 0x5b47002f, 0x5b47002d, 0x5b47002c,
+       0x5b47002b, 0x5b47002a, 0x5b270044, 0x5b270042,
+       0x5b270040, 0x5b27003e, 0x5b27003c, 0x5b27003b,
+       0x5b270039, 0x5b270037, 0x5b270036, 0x5b270034,
+       0x5b270033, 0x5b270031, 0x5b270030, 0x5b27002f,
+       0x5b170044, 0x5b170042, 0x5b170040, 0x5b17003e,
+       0x5b17003c, 0x5b17003b, 0x5b170039, 0x5b170037,
+       0x5b170036, 0x5b170034, 0x5b170033, 0x5b170031,
+       0x5b170030, 0x5b17002f, 0x5b17002d, 0x5b17002c,
+       0x5b17002b, 0x5b17002a, 0x5b170028, 0x5b170027,
+       0x5b170026, 0x5b170025, 0x5b170024, 0x5b170023,
+       0x5b070044, 0x5b070042, 0x5b070040, 0x5b07003e,
+       0x5b07003c, 0x5b07003b, 0x5b070039, 0x5b070037,
+       0x5b070036, 0x5b070034, 0x5b070033, 0x5b070031,
+       0x5b070030, 0x5b07002f, 0x5b07002d, 0x5b07002c,
+       0x5b07002b, 0x5b07002a, 0x5b070028, 0x5b070027,
+       0x5b070026, 0x5b070025, 0x5b070024, 0x5b070023,
+       0x5b070022, 0x5b070021, 0x5b070020, 0x5b07001f,
+       0x5b07001e, 0x5b07001d, 0x5b07001d, 0x5b07001c,
+};
+
+static const u32 b43_ntab_iqlt0_r3[] = {
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_iqlt1_r3[] = {
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u16 b43_ntab_loftlt0_r3[] = {
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000,
+};
+
+static const u16 b43_ntab_loftlt1_r3[] = {
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+       0x0000, 0x0000,
+};
+
+/* TX gain tables */
 const u32 b43_ntab_tx_gain_rev0_1_2[] = {
        0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
        0x03cc2944, 0x03c82b44, 0x03c82b42, 0x03c82a44,
@@ -1813,7 +2887,6 @@ void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
 #define ntab_upload(dev, offset, data) do { \
                b43_ntab_write_bulk(dev, offset, offset##_SIZE, data);  \
        } while (0)
-
 void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
 {
        /* Static tables */
@@ -1847,10 +2920,39 @@ void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
        ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
 }
 
+#define ntab_upload_r3(dev, offset, data) do { \
+               b43_ntab_write_bulk(dev, offset, ARRAY_SIZE(data), data); \
+       } while (0)
 void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
 {
        /* Static tables */
-       /* TODO */
+       ntab_upload_r3(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
+       ntab_upload_r3(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
+       ntab_upload_r3(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3);
+       ntab_upload_r3(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3);
+       ntab_upload_r3(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3);
+       ntab_upload_r3(dev, B43_NTAB_NOISEVAR0_R3, b43_ntab_noisevar0_r3);
+       ntab_upload_r3(dev, B43_NTAB_NOISEVAR1_R3, b43_ntab_noisevar1_r3);
+       ntab_upload_r3(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);
+       ntab_upload_r3(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3);
+       ntab_upload_r3(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3);
+       ntab_upload_r3(dev, B43_NTAB_TDI40A0_R3, b43_ntab_tdi40a0_r3);
+       ntab_upload_r3(dev, B43_NTAB_TDI40A1_R3, b43_ntab_tdi40a1_r3);
+       ntab_upload_r3(dev, B43_NTAB_PILOTLT_R3, b43_ntab_pilotlt_r3);
+       ntab_upload_r3(dev, B43_NTAB_CHANEST_R3, b43_ntab_channelest_r3);
+       ntab_upload_r3(dev, B43_NTAB_FRAMELT_R3, b43_ntab_framelookup_r3);
+       ntab_upload_r3(dev, B43_NTAB_C0_ESTPLT_R3,
+                      b43_ntab_estimatepowerlt0_r3);
+       ntab_upload_r3(dev, B43_NTAB_C1_ESTPLT_R3,
+                      b43_ntab_estimatepowerlt1_r3);
+       ntab_upload_r3(dev, B43_NTAB_C0_ADJPLT_R3, b43_ntab_adjustpower0_r3);
+       ntab_upload_r3(dev, B43_NTAB_C1_ADJPLT_R3, b43_ntab_adjustpower1_r3);
+       ntab_upload_r3(dev, B43_NTAB_C0_GAINCTL_R3, b43_ntab_gainctl0_r3);
+       ntab_upload_r3(dev, B43_NTAB_C1_GAINCTL_R3, b43_ntab_gainctl1_r3);
+       ntab_upload_r3(dev, B43_NTAB_C0_IQLT_R3, b43_ntab_iqlt0_r3);
+       ntab_upload_r3(dev, B43_NTAB_C1_IQLT_R3, b43_ntab_iqlt1_r3);
+       ntab_upload_r3(dev, B43_NTAB_C0_LOFEEDTH_R3, b43_ntab_loftlt0_r3);
+       ntab_upload_r3(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
 
        /* Volatile tables */
        /* TODO */
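Both upload macros wrap b43_ntab_write_bulk(): the pre-rev3 ntab_upload() takes the element count from a companion *_SIZE constant, while ntab_upload_r3() derives it from the array itself via ARRAY_SIZE(), so a table and its declared length cannot drift apart. A minimal sketch of the two expansions, reusing names from the hunks above (illustrative only, not an additional change in this patch):

        /* pre-rev3: element count comes from the B43_NTAB_..._SIZE define */
        b43_ntab_write_bulk(dev, B43_NTAB_C1_LOFEEDTH,
                            B43_NTAB_C1_LOFEEDTH_SIZE, b43_ntab_loftlt1);

        /* rev3+: element count is computed from the array at compile time */
        b43_ntab_write_bulk(dev, B43_NTAB_FRAMESTRUCT_R3,
                            ARRAY_SIZE(b43_ntab_framestruct_r3),
                            b43_ntab_framestruct_r3);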
index 4ec593ba3eef16930974ee0aeaf4dea64e2eaa53..016a480b2dc6cccc109e65b7488e2f216e55664d 100644 (file)
@@ -109,6 +109,33 @@ b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq);
 #define B43_NTAB_C1_LOFEEDTH           B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
 #define B43_NTAB_C1_LOFEEDTH_SIZE      128
 
+/* Static N-PHY tables, PHY revision >= 3 */
+#define B43_NTAB_FRAMESTRUCT_R3                B43_NTAB32(10, 000) /* frame struct  */
+#define B43_NTAB_PILOT_R3              B43_NTAB16(11, 000) /* pilot  */
+#define B43_NTAB_TMAP_R3               B43_NTAB32(12, 000) /* TM AP  */
+#define B43_NTAB_INTLEVEL_R3           B43_NTAB32(13, 000) /* INT LV  */
+#define B43_NTAB_TDTRN_R3              B43_NTAB32(14, 000) /* TD TRN  */
+#define B43_NTAB_NOISEVAR0_R3          B43_NTAB32(16, 000) /* noise variance 0  */
+#define B43_NTAB_NOISEVAR1_R3          B43_NTAB32(16, 128) /* noise variance 1  */
+#define B43_NTAB_MCS_R3                        B43_NTAB16(18, 000) /* MCS  */
+#define B43_NTAB_TDI20A0_R3            B43_NTAB32(19, 128) /* TDI 20/0  */
+#define B43_NTAB_TDI20A1_R3            B43_NTAB32(19, 256) /* TDI 20/1  */
+#define B43_NTAB_TDI40A0_R3            B43_NTAB32(19, 640) /* TDI 40/0  */
+#define B43_NTAB_TDI40A1_R3            B43_NTAB32(19, 768) /* TDI 40/1  */
+#define B43_NTAB_PILOTLT_R3            B43_NTAB32(20, 000) /* PLT lookup  */
+#define B43_NTAB_CHANEST_R3            B43_NTAB32(22, 000) /* channel estimate  */
+#define B43_NTAB_FRAMELT_R3            B43_NTAB8 (24, 000) /* frame lookup  */
+#define B43_NTAB_C0_ESTPLT_R3          B43_NTAB8 (26, 000) /* estimated power lookup 0  */
+#define B43_NTAB_C1_ESTPLT_R3          B43_NTAB8 (27, 000) /* estimated power lookup 1  */
+#define B43_NTAB_C0_ADJPLT_R3          B43_NTAB8 (26, 064) /* adjusted power lookup 0  */
+#define B43_NTAB_C1_ADJPLT_R3          B43_NTAB8 (27, 064) /* adjusted power lookup 1  */
+#define B43_NTAB_C0_GAINCTL_R3         B43_NTAB32(26, 192) /* gain control lookup 0  */
+#define B43_NTAB_C1_GAINCTL_R3         B43_NTAB32(27, 192) /* gain control lookup 1  */
+#define B43_NTAB_C0_IQLT_R3            B43_NTAB32(26, 320) /* I/Q lookup 0  */
+#define B43_NTAB_C1_IQLT_R3            B43_NTAB32(27, 320) /* I/Q lookup 1  */
+#define B43_NTAB_C0_LOFEEDTH_R3                B43_NTAB16(26, 448) /* Local Oscillator Feed Through lookup 0  */
+#define B43_NTAB_C1_LOFEEDTH_R3                B43_NTAB16(27, 448) /* Local Oscillator Feed Through lookup 1 */
+
 #define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_40_SIZE       18
 #define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_20_SIZE       18
 #define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_40_SIZE      18
index e6b0528f3b52a31d38fb2c93e3a609f1eae14d2e..e5be381c17bce02ff4e43269f56c80feb787a8b4 100644 (file)
 #include "dma.h"
 #include "pio.h"
 
+static const struct b43_tx_legacy_rate_phy_ctl_entry b43_tx_legacy_rate_phy_ctl[] = {
+       { B43_CCK_RATE_1MB,     0x0,                    0x0 },
+       { B43_CCK_RATE_2MB,     0x0,                    0x1 },
+       { B43_CCK_RATE_5MB,     0x0,                    0x2 },
+       { B43_CCK_RATE_11MB,    0x0,                    0x3 },
+       { B43_OFDM_RATE_6MB,    B43_TXH_PHY1_CRATE_1_2, B43_TXH_PHY1_MODUL_BPSK },
+       { B43_OFDM_RATE_9MB,    B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_BPSK },
+       { B43_OFDM_RATE_12MB,   B43_TXH_PHY1_CRATE_1_2, B43_TXH_PHY1_MODUL_QPSK },
+       { B43_OFDM_RATE_18MB,   B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_QPSK },
+       { B43_OFDM_RATE_24MB,   B43_TXH_PHY1_CRATE_1_2, B43_TXH_PHY1_MODUL_QAM16 },
+       { B43_OFDM_RATE_36MB,   B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_QAM16 },
+       { B43_OFDM_RATE_48MB,   B43_TXH_PHY1_CRATE_2_3, B43_TXH_PHY1_MODUL_QAM64 },
+       { B43_OFDM_RATE_54MB,   B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_QAM64 },
+};
+
+static const struct b43_tx_legacy_rate_phy_ctl_entry *
+b43_tx_legacy_rate_phy_ctl_ent(u8 bitrate)
+{
+       const struct b43_tx_legacy_rate_phy_ctl_entry *e;
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(b43_tx_legacy_rate_phy_ctl); i++) {
+               e = &(b43_tx_legacy_rate_phy_ctl[i]);
+               if (e->bitrate == bitrate)
+                       return e;
+       }
+
+       B43_WARN_ON(1);
+       return NULL;
+}
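The helper above is a linear scan over the 13-entry b43_tx_legacy_rate_phy_ctl table and hands back the PHY control word 1 coding-rate and modulation bits for a legacy bitrate. A minimal call sketch (illustrative only; the real caller is b43_generate_tx_phy_ctl1() in the next hunk):

        const struct b43_tx_legacy_rate_phy_ctl_entry *e;
        u16 ctl = 0;

        /* 24 Mb/s OFDM maps to coding rate 1/2 with 16-QAM modulation */
        e = b43_tx_legacy_rate_phy_ctl_ent(B43_OFDM_RATE_24MB);
        if (e)
                ctl |= e->coding_rate | e->modulation;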
 
 /* Extract the bitrate index out of a CCK PLCP header. */
 static int b43_plcp_get_bitrate_idx_cck(struct b43_plcp_hdr6 *plcp)
@@ -145,6 +175,34 @@ void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp,
        }
 }
 
+static u16 b43_generate_tx_phy_ctl1(struct b43_wldev *dev, u8 bitrate)
+{
+       const struct b43_phy *phy = &dev->phy;
+       const struct b43_tx_legacy_rate_phy_ctl_entry *e;
+       u16 control = 0;
+       u16 bw;
+
+       if (phy->type == B43_PHYTYPE_LP)
+               bw = B43_TXH_PHY1_BW_20;
+       else /* FIXME */
+               bw = B43_TXH_PHY1_BW_20;
+
+       if (0) { /* FIXME: MIMO */
+       } else if (b43_is_cck_rate(bitrate) && phy->type != B43_PHYTYPE_LP) {
+               control = bw;
+       } else {
+               control = bw;
+               e = b43_tx_legacy_rate_phy_ctl_ent(bitrate);
+               if (e) {
+                       control |= e->coding_rate;
+                       control |= e->modulation;
+               }
+               control |= B43_TXH_PHY1_MODE_SISO;
+       }
+
+       return control;
+}
+
 static u8 b43_calc_fallback_rate(u8 bitrate)
 {
        switch (bitrate) {
@@ -437,6 +495,14 @@ int b43_generate_txhdr(struct b43_wldev *dev,
                        extra_ft |= B43_TXH_EFT_RTSFB_OFDM;
                else
                        extra_ft |= B43_TXH_EFT_RTSFB_CCK;
+
+               if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS &&
+                   phy->type == B43_PHYTYPE_N) {
+                       txhdr->phy_ctl1_rts = cpu_to_le16(
+                               b43_generate_tx_phy_ctl1(dev, rts_rate));
+                       txhdr->phy_ctl1_rts_fb = cpu_to_le16(
+                               b43_generate_tx_phy_ctl1(dev, rts_rate_fb));
+               }
        }
 
        /* Magic cookie */
@@ -445,6 +511,13 @@ int b43_generate_txhdr(struct b43_wldev *dev,
        else
                txhdr->new_format.cookie = cpu_to_le16(cookie);
 
+       if (phy->type == B43_PHYTYPE_N) {
+               txhdr->phy_ctl1 =
+                       cpu_to_le16(b43_generate_tx_phy_ctl1(dev, rate));
+               txhdr->phy_ctl1_fb =
+                       cpu_to_le16(b43_generate_tx_phy_ctl1(dev, rate_fb));
+       }
+
        /* Apply the bitfields */
        txhdr->mac_ctl = cpu_to_le32(mac_ctl);
        txhdr->phy_ctl = cpu_to_le16(phy_ctl);
@@ -652,7 +725,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
                status.mactime += mactime;
                if (low_mactime_now <= mactime)
                        status.mactime -= 0x10000;
-               status.flag |= RX_FLAG_TSFT;
+               status.flag |= RX_FLAG_MACTIME_MPDU;
        }
 
        chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT;
index d4cf9b390af3348fdef0c13e00d52d6f0b87e944..42debb5cd6fad0b042c579079ae325f2ecbb0a50 100644 (file)
@@ -73,6 +73,12 @@ struct b43_txhdr {
        } __packed;
 } __packed;
 
+struct b43_tx_legacy_rate_phy_ctl_entry {
+       u8 bitrate;
+       u16 coding_rate;
+       u16 modulation;
+};
+
 /* MAC TX control */
 #define B43_TXH_MAC_USEFBR             0x10000000 /* Use fallback rate for this AMPDU */
 #define B43_TXH_MAC_KEYIDX             0x0FF00000 /* Security key index */
index 1f11e1670bf08feb809b1271819486dec64d98fd..c7fd73e3ad76a1fae828aaef60cf1fcfeffcf8b1 100644 (file)
@@ -2442,8 +2442,8 @@ static int b43legacy_rng_init(struct b43legacy_wl *wl)
        return err;
 }
 
-static int b43legacy_op_tx(struct ieee80211_hw *hw,
-                          struct sk_buff *skb)
+static void b43legacy_op_tx(struct ieee80211_hw *hw,
+                           struct sk_buff *skb)
 {
        struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
        struct b43legacy_wldev *dev = wl->current_dev;
@@ -2466,7 +2466,6 @@ out:
                /* Drop the packet. */
                dev_kfree_skb_any(skb);
        }
-       return NETDEV_TX_OK;
 }
 
 static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
index 7d177d97f1f7ff82803ed6dbbeabd4ce3bdb594f..3a95541708a6f2e244890c0e73da940534986185 100644 (file)
@@ -572,7 +572,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
                status.mactime += mactime;
                if (low_mactime_now <= mactime)
                        status.mactime -= 0x10000;
-               status.flag |= RX_FLAG_TSFT;
+               status.flag |= RX_FLAG_MACTIME_MPDU;
        }
 
        chanid = (chanstat & B43legacy_RX_CHAN_ID) >>
index 61915f371416e490cbf73d89d65595aadc1e6a98..da60faee74fc18e8e0f93a3b20942bf1bd09a875 100644 (file)
@@ -1397,7 +1397,7 @@ static int ipw2100_power_cycle_adapter(struct ipw2100_priv *priv)
 }
 
 /*
- * Send the CARD_DISABLE_PHY_OFF comamnd to the card to disable it
+ * Send the CARD_DISABLE_PHY_OFF command to the card to disable it
  *
  * After disabling, if the card was associated, a STATUS_ASSN_LOST will be sent.
  *
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig
new file mode 100644 (file)
index 0000000..2a45dd4
--- /dev/null
@@ -0,0 +1,116 @@
+config IWLWIFI_LEGACY
+       tristate "Intel Wireless Wifi legacy devices"
+       depends on PCI && MAC80211
+       select FW_LOADER
+       select NEW_LEDS
+       select LEDS_CLASS
+       select LEDS_TRIGGERS
+       select MAC80211_LEDS
+
+menu "Debugging Options"
+       depends on IWLWIFI_LEGACY
+
+config IWLWIFI_LEGACY_DEBUG
+       bool "Enable full debugging output in 4965 and 3945 drivers"
+       depends on IWLWIFI_LEGACY
+       ---help---
+         This option will enable debug tracing output for the iwlwifilegacy
+         drivers.
+
+         This will result in the kernel module being ~100k larger.  You can
+         control which debug output is sent to the kernel log by setting the
+         value in
+
+               /sys/class/net/wlan0/device/debug_level
+
+         This entry will only exist if this option is enabled.
+
+         To set a value, simply echo a hex debug mask value to the same file:
+
+                 % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
+
+         You can find the list of debug mask values in:
+                 drivers/net/wireless/iwlwifilegacy/iwl-debug.h
+
+         If this is your first time using this driver, you should say Y here
+         as the debug information can assist others in helping you resolve
+         any problems you may encounter.
+
+config IWLWIFI_LEGACY_DEBUGFS
+        bool "4965 and 3945 debugfs support"
+        depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS
+        ---help---
+         Enable creation of debugfs files for the iwlwifilegacy drivers. This
+         is a low-impact option that allows getting insight into the
+         driver's state at runtime.
+
+config IWLWIFI_LEGACY_DEVICE_TRACING
+       bool "iwlwifilegacy device access tracing"
+       depends on IWLWIFI_LEGACY
+       depends on EVENT_TRACING
+       help
+         Say Y here to trace all commands, including TX frames and IO
+         accesses, sent to the device. If you say yes, iwlwifilegacy will
+         register with the ftrace framework for event tracing and dump
+         all this information to the ringbuffer; you may need to
+         increase the ringbuffer size. See the ftrace documentation
+         for more information.
+
+         When tracing is not enabled, this option still has some
+         (though rather small) overhead.
+
+         If unsure, say Y so we can help you better when problems
+         occur.
+endmenu
+
+config IWL4965
+       tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
+       depends on IWLWIFI_LEGACY
+       ---help---
+         Select to build the driver supporting the:
+
+         Intel Wireless WiFi Link 4965AGN
+
+         This driver uses the kernel's mac80211 subsystem.
+
+         In order to use this driver, you will need a microcode (uCode)
+         image for it. You can obtain the microcode from:
+
+                 <http://intellinuxwireless.org/>.
+
+         The microcode is typically installed in /lib/firmware. You can
+         look in the hotplug script /etc/hotplug/firmware.agent to
+         determine which directory FIRMWARE_DIR is set to when the script
+         runs.
+
+         If you want to compile the driver as a module ( = code which can be
+         inserted in and removed from the running kernel whenever you want),
+         say M here and read <file:Documentation/kbuild/modules.txt>.  The
+         module will be called iwl4965.
+
+config IWL3945
+       tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
+       depends on IWLWIFI_LEGACY
+       ---help---
+         Select to build the driver supporting the:
+
+         Intel PRO/Wireless 3945ABG/BG Network Connection
+
+         This driver uses the kernel's mac80211 subsystem.
+
+         In order to use this driver, you will need a microcode (uCode)
+         image for it. You can obtain the microcode from:
+
+                 <http://intellinuxwireless.org/>.
+
+         The microcode is typically installed in /lib/firmware. You can
+         look in the hotplug script /etc/hotplug/firmware.agent to
+         determine which directory FIRMWARE_DIR is set to when the script
+         runs.
+
+         If you want to compile the driver as a module ( = code which can be
+         inserted in and removed from the running kernel whenever you want),
+         say M here and read <file:Documentation/kbuild/modules.txt>.  The
+         module will be called iwl3945.
diff --git a/drivers/net/wireless/iwlegacy/Makefile b/drivers/net/wireless/iwlegacy/Makefile
new file mode 100644 (file)
index 0000000..d56aeb3
--- /dev/null
@@ -0,0 +1,25 @@
+obj-$(CONFIG_IWLWIFI_LEGACY)   += iwl-legacy.o
+iwl-legacy-objs                := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
+iwl-legacy-objs                += iwl-rx.o iwl-tx.o iwl-sta.o
+iwl-legacy-objs                += iwl-scan.o iwl-led.o
+iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o
+iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o
+
+iwl-legacy-objs += $(iwl-legacy-m)
+
+CFLAGS_iwl-devtrace.o := -I$(src)
+
+# 4965
+obj-$(CONFIG_IWL4965)  += iwl4965.o
+iwl4965-objs           := iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o
+iwl4965-objs           += iwl-4965-ucode.o iwl-4965-tx.o
+iwl4965-objs           += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o
+iwl4965-objs           += iwl-4965-sta.o iwl-4965-eeprom.o
+iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o
+
+# 3945
+obj-$(CONFIG_IWL3945)  += iwl3945.o
+iwl3945-objs           := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
+iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
new file mode 100644 (file)
index 0000000..cfabb38
--- /dev/null
@@ -0,0 +1,523 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "iwl-3945-debugfs.h"
+
+
+static int iwl3945_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
+{
+       int p = 0;
+
+       p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
+                      le32_to_cpu(priv->_3945.statistics.flag));
+       if (le32_to_cpu(priv->_3945.statistics.flag) &
+                       UCODE_STATISTICS_CLEAR_MSK)
+               p += scnprintf(buf + p, bufsz - p,
+                              "\tStatistics have been cleared\n");
+       p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+                      (le32_to_cpu(priv->_3945.statistics.flag) &
+                       UCODE_STATISTICS_FREQUENCY_MSK)
+                       ? "2.4 GHz" : "5.2 GHz");
+       p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+                      (le32_to_cpu(priv->_3945.statistics.flag) &
+                       UCODE_STATISTICS_NARROW_BAND_MSK)
+                       ? "enabled" : "disabled");
+       return p;
+}
+
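Each of the *_stats_read handlers below follows the same pattern: allocate a generously sized buffer, append formatted lines with scnprintf() (which, unlike snprintf(), returns the number of bytes actually written, so pos can never run past bufsz), then hand the result to simple_read_from_buffer() and free the buffer. Condensed shape of that pattern (illustrative only, not additional code in the patch):

        char *buf = kzalloc(bufsz, GFP_KERNEL);
        int pos = 0;
        ssize_t ret;

        if (!buf)
                return -ENOMEM;
        pos += iwl3945_statistics_flag(priv, buf, bufsz);
        pos += scnprintf(buf + pos, bufsz - pos, "...");  /* repeated per counter */
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
        kfree(buf);
        return ret;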
+ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
+                                   char __user *user_buf,
+                                   size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0;
+       char *buf;
+       int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
+                   sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
+       ssize_t ret;
+       struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm,
+                                       *max_ofdm;
+       struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+       struct iwl39_statistics_rx_non_phy *general, *accum_general;
+       struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
+
+       if (!iwl_legacy_is_alive(priv))
+               return -EAGAIN;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IWL_ERR(priv, "Cannot allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       /*
+        * The statistics information displayed here is based on
+        * the last statistics notification from uCode and
+        * might not reflect the current uCode activity.
+        */
+       ofdm = &priv->_3945.statistics.rx.ofdm;
+       cck = &priv->_3945.statistics.rx.cck;
+       general = &priv->_3945.statistics.rx.general;
+       accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm;
+       accum_cck = &priv->_3945.accum_statistics.rx.cck;
+       accum_general = &priv->_3945.accum_statistics.rx.general;
+       delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm;
+       delta_cck = &priv->_3945.delta_statistics.rx.cck;
+       delta_general = &priv->_3945.delta_statistics.rx.general;
+       max_ofdm = &priv->_3945.max_delta.rx.ofdm;
+       max_cck = &priv->_3945.max_delta.rx.cck;
+       max_general = &priv->_3945.max_delta.rx.general;
+
+       pos += iwl3945_statistics_flag(priv, buf, bufsz);
+       pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
+                        " accumulative     delta         max\n",
+                        "Statistics_Rx - OFDM:");
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
+                        accum_ofdm->ina_cnt,
+                        delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "fina_cnt:",
+                        le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+                        delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n", "plcp_err:",
+                        le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+                        delta_ofdm->plcp_err, max_ofdm->plcp_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",  "crc32_err:",
+                        le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+                        delta_ofdm->crc32_err, max_ofdm->crc32_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n", "overrun_err:",
+                        le32_to_cpu(ofdm->overrun_err),
+                        accum_ofdm->overrun_err, delta_ofdm->overrun_err,
+                        max_ofdm->overrun_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "early_overrun_err:",
+                        le32_to_cpu(ofdm->early_overrun_err),
+                        accum_ofdm->early_overrun_err,
+                        delta_ofdm->early_overrun_err,
+                        max_ofdm->early_overrun_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "crc32_good:", le32_to_cpu(ofdm->crc32_good),
+                        accum_ofdm->crc32_good, delta_ofdm->crc32_good,
+                        max_ofdm->crc32_good);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n", "false_alarm_cnt:",
+                        le32_to_cpu(ofdm->false_alarm_cnt),
+                        accum_ofdm->false_alarm_cnt,
+                        delta_ofdm->false_alarm_cnt,
+                        max_ofdm->false_alarm_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "fina_sync_err_cnt:",
+                        le32_to_cpu(ofdm->fina_sync_err_cnt),
+                        accum_ofdm->fina_sync_err_cnt,
+                        delta_ofdm->fina_sync_err_cnt,
+                        max_ofdm->fina_sync_err_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "sfd_timeout:",
+                        le32_to_cpu(ofdm->sfd_timeout),
+                        accum_ofdm->sfd_timeout,
+                        delta_ofdm->sfd_timeout,
+                        max_ofdm->sfd_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "fina_timeout:",
+                        le32_to_cpu(ofdm->fina_timeout),
+                        accum_ofdm->fina_timeout,
+                        delta_ofdm->fina_timeout,
+                        max_ofdm->fina_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "unresponded_rts:",
+                        le32_to_cpu(ofdm->unresponded_rts),
+                        accum_ofdm->unresponded_rts,
+                        delta_ofdm->unresponded_rts,
+                        max_ofdm->unresponded_rts);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "rxe_frame_lmt_ovrun:",
+                        le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+                        accum_ofdm->rxe_frame_limit_overrun,
+                        delta_ofdm->rxe_frame_limit_overrun,
+                        max_ofdm->rxe_frame_limit_overrun);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "sent_ack_cnt:",
+                        le32_to_cpu(ofdm->sent_ack_cnt),
+                        accum_ofdm->sent_ack_cnt,
+                        delta_ofdm->sent_ack_cnt,
+                        max_ofdm->sent_ack_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "sent_cts_cnt:",
+                        le32_to_cpu(ofdm->sent_cts_cnt),
+                        accum_ofdm->sent_cts_cnt,
+                        delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
+                        " accumulative     delta         max\n",
+                        "Statistics_Rx - CCK:");
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "ina_cnt:",
+                        le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+                        delta_cck->ina_cnt, max_cck->ina_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "fina_cnt:",
+                        le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+                        delta_cck->fina_cnt, max_cck->fina_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "plcp_err:",
+                        le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+                        delta_cck->plcp_err, max_cck->plcp_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "crc32_err:",
+                        le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+                        delta_cck->crc32_err, max_cck->crc32_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "overrun_err:",
+                        le32_to_cpu(cck->overrun_err),
+                        accum_cck->overrun_err,
+                        delta_cck->overrun_err, max_cck->overrun_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "early_overrun_err:",
+                        le32_to_cpu(cck->early_overrun_err),
+                        accum_cck->early_overrun_err,
+                        delta_cck->early_overrun_err,
+                        max_cck->early_overrun_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "crc32_good:",
+                        le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+                        delta_cck->crc32_good,
+                        max_cck->crc32_good);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "false_alarm_cnt:",
+                        le32_to_cpu(cck->false_alarm_cnt),
+                        accum_cck->false_alarm_cnt,
+                        delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "fina_sync_err_cnt:",
+                        le32_to_cpu(cck->fina_sync_err_cnt),
+                        accum_cck->fina_sync_err_cnt,
+                        delta_cck->fina_sync_err_cnt,
+                        max_cck->fina_sync_err_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "sfd_timeout:",
+                        le32_to_cpu(cck->sfd_timeout),
+                        accum_cck->sfd_timeout,
+                        delta_cck->sfd_timeout, max_cck->sfd_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "fina_timeout:",
+                        le32_to_cpu(cck->fina_timeout),
+                        accum_cck->fina_timeout,
+                        delta_cck->fina_timeout, max_cck->fina_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "unresponded_rts:",
+                        le32_to_cpu(cck->unresponded_rts),
+                        accum_cck->unresponded_rts,
+                        delta_cck->unresponded_rts,
+                        max_cck->unresponded_rts);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "rxe_frame_lmt_ovrun:",
+                        le32_to_cpu(cck->rxe_frame_limit_overrun),
+                        accum_cck->rxe_frame_limit_overrun,
+                        delta_cck->rxe_frame_limit_overrun,
+                        max_cck->rxe_frame_limit_overrun);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "sent_ack_cnt:",
+                        le32_to_cpu(cck->sent_ack_cnt),
+                        accum_cck->sent_ack_cnt,
+                        delta_cck->sent_ack_cnt,
+                        max_cck->sent_ack_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "sent_cts_cnt:",
+                        le32_to_cpu(cck->sent_cts_cnt),
+                        accum_cck->sent_cts_cnt,
+                        delta_cck->sent_cts_cnt,
+                        max_cck->sent_cts_cnt);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
+                        " accumulative     delta         max\n",
+                        "Statistics_Rx - GENERAL:");
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "bogus_cts:",
+                        le32_to_cpu(general->bogus_cts),
+                        accum_general->bogus_cts,
+                        delta_general->bogus_cts, max_general->bogus_cts);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "bogus_ack:",
+                        le32_to_cpu(general->bogus_ack),
+                        accum_general->bogus_ack,
+                        delta_general->bogus_ack, max_general->bogus_ack);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "non_bssid_frames:",
+                        le32_to_cpu(general->non_bssid_frames),
+                        accum_general->non_bssid_frames,
+                        delta_general->non_bssid_frames,
+                        max_general->non_bssid_frames);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "filtered_frames:",
+                        le32_to_cpu(general->filtered_frames),
+                        accum_general->filtered_frames,
+                        delta_general->filtered_frames,
+                        max_general->filtered_frames);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "non_channel_beacons:",
+                        le32_to_cpu(general->non_channel_beacons),
+                        accum_general->non_channel_beacons,
+                        delta_general->non_channel_beacons,
+                        max_general->non_channel_beacons);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
+                                   char __user *user_buf,
+                                   size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0;
+       char *buf;
+       int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250;
+       ssize_t ret;
+       struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+       if (!iwl_legacy_is_alive(priv))
+               return -EAGAIN;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IWL_ERR(priv, "Cannot allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       /*
+        * The statistics information displayed here is based on
+        * the last statistics notification from uCode and
+        * might not reflect the current uCode activity.
+        */
+       tx = &priv->_3945.statistics.tx;
+       accum_tx = &priv->_3945.accum_statistics.tx;
+       delta_tx = &priv->_3945.delta_statistics.tx;
+       max_tx = &priv->_3945.max_delta.tx;
+       pos += iwl3945_statistics_flag(priv, buf, bufsz);
+       pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
+                        " accumulative     delta         max\n",
+                        "Statistics_Tx:");
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "preamble:",
+                        le32_to_cpu(tx->preamble_cnt),
+                        accum_tx->preamble_cnt,
+                        delta_tx->preamble_cnt, max_tx->preamble_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "rx_detected_cnt:",
+                        le32_to_cpu(tx->rx_detected_cnt),
+                        accum_tx->rx_detected_cnt,
+                        delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "bt_prio_defer_cnt:",
+                        le32_to_cpu(tx->bt_prio_defer_cnt),
+                        accum_tx->bt_prio_defer_cnt,
+                        delta_tx->bt_prio_defer_cnt,
+                        max_tx->bt_prio_defer_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "bt_prio_kill_cnt:",
+                        le32_to_cpu(tx->bt_prio_kill_cnt),
+                        accum_tx->bt_prio_kill_cnt,
+                        delta_tx->bt_prio_kill_cnt,
+                        max_tx->bt_prio_kill_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "few_bytes_cnt:",
+                        le32_to_cpu(tx->few_bytes_cnt),
+                        accum_tx->few_bytes_cnt,
+                        delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "cts_timeout:",
+                        le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+                        delta_tx->cts_timeout, max_tx->cts_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "ack_timeout:",
+                        le32_to_cpu(tx->ack_timeout),
+                        accum_tx->ack_timeout,
+                        delta_tx->ack_timeout, max_tx->ack_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "expected_ack_cnt:",
+                        le32_to_cpu(tx->expected_ack_cnt),
+                        accum_tx->expected_ack_cnt,
+                        delta_tx->expected_ack_cnt,
+                        max_tx->expected_ack_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "actual_ack_cnt:",
+                        le32_to_cpu(tx->actual_ack_cnt),
+                        accum_tx->actual_ack_cnt,
+                        delta_tx->actual_ack_cnt,
+                        max_tx->actual_ack_cnt);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+ssize_t iwl3945_ucode_general_stats_read(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0;
+       char *buf;
+       int bufsz = sizeof(struct iwl39_statistics_general) * 10 + 300;
+       ssize_t ret;
+       struct iwl39_statistics_general *general, *accum_general;
+       struct iwl39_statistics_general *delta_general, *max_general;
+       struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+       struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
+
+       if (!iwl_legacy_is_alive(priv))
+               return -EAGAIN;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IWL_ERR(priv, "Cannot allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       /*
+        * The statistics information displayed here is based on
+        * the last statistics notification from uCode and
+        * might not reflect the current uCode activity.
+        */
+       general = &priv->_3945.statistics.general;
+       dbg = &priv->_3945.statistics.general.dbg;
+       div = &priv->_3945.statistics.general.div;
+       accum_general = &priv->_3945.accum_statistics.general;
+       delta_general = &priv->_3945.delta_statistics.general;
+       max_general = &priv->_3945.max_delta.general;
+       accum_dbg = &priv->_3945.accum_statistics.general.dbg;
+       delta_dbg = &priv->_3945.delta_statistics.general.dbg;
+       max_dbg = &priv->_3945.max_delta.general.dbg;
+       accum_div = &priv->_3945.accum_statistics.general.div;
+       delta_div = &priv->_3945.delta_statistics.general.div;
+       max_div = &priv->_3945.max_delta.general.div;
+       pos += iwl3945_statistics_flag(priv, buf, bufsz);
+       pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
+                        " accumulative     delta         max\n",
+                        "Statistics_General:");
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "burst_check:",
+                        le32_to_cpu(dbg->burst_check),
+                        accum_dbg->burst_check,
+                        delta_dbg->burst_check, max_dbg->burst_check);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "burst_count:",
+                        le32_to_cpu(dbg->burst_count),
+                        accum_dbg->burst_count,
+                        delta_dbg->burst_count, max_dbg->burst_count);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "sleep_time:",
+                        le32_to_cpu(general->sleep_time),
+                        accum_general->sleep_time,
+                        delta_general->sleep_time, max_general->sleep_time);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "slots_out:",
+                        le32_to_cpu(general->slots_out),
+                        accum_general->slots_out,
+                        delta_general->slots_out, max_general->slots_out);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "slots_idle:",
+                        le32_to_cpu(general->slots_idle),
+                        accum_general->slots_idle,
+                        delta_general->slots_idle, max_general->slots_idle);
+       pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
+                        le32_to_cpu(general->ttl_timestamp));
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "tx_on_a:",
+                        le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+                        delta_div->tx_on_a, max_div->tx_on_a);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "tx_on_b:",
+                        le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+                        delta_div->tx_on_b, max_div->tx_on_b);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "exec_time:",
+                        le32_to_cpu(div->exec_time), accum_div->exec_time,
+                        delta_div->exec_time, max_div->exec_time);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "  %-30s %10u  %10u  %10u  %10u\n",
+                        "probe_time:",
+                        le32_to_cpu(div->probe_time), accum_div->probe_time,
+                        delta_div->probe_time, max_div->probe_time);
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
new file mode 100644 (file)
index 0000000..8fef4b3
--- /dev/null
@@ -0,0 +1,60 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-debug.h"
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+                                   size_t count, loff_t *ppos);
+ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+                                   size_t count, loff_t *ppos);
+ssize_t iwl3945_ucode_general_stats_read(struct file *file,
+                                        char __user *user_buf, size_t count,
+                                        loff_t *ppos);
+#else
+static ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
+                                          char __user *user_buf, size_t count,
+                                          loff_t *ppos)
+{
+       return 0;
+}
+static ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
+                                          char __user *user_buf, size_t count,
+                                          loff_t *ppos)
+{
+       return 0;
+}
+static ssize_t iwl3945_ucode_general_stats_read(struct file *file,
+                                               char __user *user_buf,
+                                               size_t count, loff_t *ppos)
+{
+       return 0;
+}
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
new file mode 100644 (file)
index 0000000..836c991
--- /dev/null
@@ -0,0 +1,187 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_3945_fh_h__
+#define __iwl_3945_fh_h__
+
+/************************************/
+/* iwl3945 Flow Handler Definitions */
+/************************************/
+
+/**
+ * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
+ * Addresses are offsets from device's PCI hardware base address.
+ */
+#define FH39_MEM_LOWER_BOUND                   (0x0800)
+#define FH39_MEM_UPPER_BOUND                   (0x1000)
+
+#define FH39_CBCC_TABLE                (FH39_MEM_LOWER_BOUND + 0x140)
+#define FH39_TFDB_TABLE                (FH39_MEM_LOWER_BOUND + 0x180)
+#define FH39_RCSR_TABLE                (FH39_MEM_LOWER_BOUND + 0x400)
+#define FH39_RSSR_TABLE                (FH39_MEM_LOWER_BOUND + 0x4c0)
+#define FH39_TCSR_TABLE                (FH39_MEM_LOWER_BOUND + 0x500)
+#define FH39_TSSR_TABLE                (FH39_MEM_LOWER_BOUND + 0x680)
+
+/* TFDB (Transmit Frame Buffer Descriptor) */
+#define FH39_TFDB(_ch, buf)                    (FH39_TFDB_TABLE + \
+                                                ((_ch) * 2 + (buf)) * 0x28)
+#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch)       (FH39_TFDB_TABLE + 0x50 * (_ch))
+
+/* CBCC channel is [0,2] */
+#define FH39_CBCC(_ch)         (FH39_CBCC_TABLE + (_ch) * 0x8)
+#define FH39_CBCC_CTRL(_ch)    (FH39_CBCC(_ch) + 0x00)
+#define FH39_CBCC_BASE(_ch)    (FH39_CBCC(_ch) + 0x04)
+
+/* RCSR channel is [0,2] */
+#define FH39_RCSR(_ch)                 (FH39_RCSR_TABLE + (_ch) * 0x40)
+#define FH39_RCSR_CONFIG(_ch)          (FH39_RCSR(_ch) + 0x00)
+#define FH39_RCSR_RBD_BASE(_ch)                (FH39_RCSR(_ch) + 0x04)
+#define FH39_RCSR_WPTR(_ch)            (FH39_RCSR(_ch) + 0x20)
+#define FH39_RCSR_RPTR_ADDR(_ch)       (FH39_RCSR(_ch) + 0x24)
+
+#define FH39_RSCSR_CHNL0_WPTR          (FH39_RCSR_WPTR(0))
+
+/* RSSR */
+#define FH39_RSSR_CTRL                 (FH39_RSSR_TABLE + 0x000)
+#define FH39_RSSR_STATUS               (FH39_RSSR_TABLE + 0x004)
+
+/* TCSR */
+#define FH39_TCSR(_ch)                 (FH39_TCSR_TABLE + (_ch) * 0x20)
+#define FH39_TCSR_CONFIG(_ch)          (FH39_TCSR(_ch) + 0x00)
+#define FH39_TCSR_CREDIT(_ch)          (FH39_TCSR(_ch) + 0x04)
+#define FH39_TCSR_BUFF_STTS(_ch)       (FH39_TCSR(_ch) + 0x08)
+
+/* TSSR */
+#define FH39_TSSR_CBB_BASE        (FH39_TSSR_TABLE + 0x000)
+#define FH39_TSSR_MSG_CONFIG      (FH39_TSSR_TABLE + 0x008)
+#define FH39_TSSR_TX_STATUS       (FH39_TSSR_TABLE + 0x010)
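+
+/*
+ * Example address computation (all offsets from the PCI base): the
+ * channel-0 RBD write pointer lives at FH39_RCSR_WPTR(0) =
+ * 0x0800 + 0x400 + 0x20 = 0x0C20, and the "channel idle" mask for Tx
+ * channel 0 further below works out to
+ * FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(0) =
+ * (BIT(0) << 24) | (BIT(0) << 16) = 0x01010000.
+ */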
+
+
+/* DBM */
+
+#define FH39_SRVC_CHNL                            (6)
+
+#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE     (20)
+#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH      (4)
+
+#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN    (0x08000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE        (0x80000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE           (0x20000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128          (0x01000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST          (0x00001000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH                        (0x00000000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF               (0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER            (0x00000001)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL     (0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL      (0x00000008)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD            (0x00200000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT             (0x00000000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE             (0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE            (0x80000000)
+
+#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID           (0x00004000)
+
+#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR            (0x00000001)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON       (0xFF000000)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON       (0x00FF0000)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B     (0x00000400)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON                (0x00000100)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON                (0x00000080)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH      (0x00000020)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH            (0x00000005)
+
+#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch)    (BIT(_ch) << 24)
+#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch)   (BIT(_ch) << 16)
+
+#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
+       (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
+        FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
+
+#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE                    (0x01000000)
+
+struct iwl3945_tfd_tb {
+       __le32 addr;
+       __le32 len;
+} __packed;
+
+struct iwl3945_tfd {
+       __le32 control_flags;
+       struct iwl3945_tfd_tb tbs[4];
+       u8 __pad[28];
+} __packed;
+
+
+#endif /* __iwl_3945_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
new file mode 100644 (file)
index 0000000..779d3cb
--- /dev/null
@@ -0,0 +1,293 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+/*
+ * Please use this file (iwl-3945-hw.h) only for hardware-related definitions.
+ * Please use iwl-commands.h for uCode API definitions.
+ * Please use iwl-3945.h for driver implementation definitions.
+ */
+
+#ifndef __iwl_3945_hw__
+#define __iwl_3945_hw__
+
+#include "iwl-eeprom.h"
+
+/* RSSI to dBm */
+#define IWL39_RSSI_OFFSET      95
+
+#define IWL_DEFAULT_TX_POWER   0x0F
+
+/*
+ * EEPROM related constants, enums, and structures.
+ */
+#define EEPROM_SKU_CAP_OP_MODE_MRC                      (1 << 7)
+
+/*
+ * Mapping of a Tx power level, at factory calibration temperature,
+ *   to a radio/DSP gain table index.
+ * One for each of 5 "sample" power levels in each band.
+ * v_det is measured at the factory, using the 3945's built-in power amplifier
+ *   (PA) output voltage detector.  This same detector is used during Tx of
+ *   long packets in normal operation to provide feedback as to proper output
+ *   level.
+ * Data copied from EEPROM.
+ * DO NOT ALTER THIS STRUCTURE!!!
+ */
+struct iwl3945_eeprom_txpower_sample {
+       u8 gain_index;          /* index into power (gain) setup table ... */
+       s8 power;               /* ... for this pwr level for this chnl group */
+       u16 v_det;              /* PA output voltage */
+} __packed;
+
+/*
+ * Mappings of Tx power levels -> nominal radio/DSP gain table indexes.
+ * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
+ * Tx power setup code interpolates between the 5 "sample" power levels
+ *    to determine the nominal setup for a requested power level.
+ * Data copied from EEPROM.
+ * DO NOT ALTER THIS STRUCTURE!!!
+ */
+struct iwl3945_eeprom_txpower_group {
+       struct iwl3945_eeprom_txpower_sample samples[5];  /* 5 power levels */
+       s32 a, b, c, d, e;      /* coefficients for voltage->power
+                                * formula (signed) */
+       s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
+                                * frequency (signed) */
+       s8 saturation_power;    /* highest power possible by h/w in this
+                                * band */
+       u8 group_channel;       /* "representative" channel # in this band */
+       s16 temperature;        /* h/w temperature at factory calib this band
+                                * (signed) */
+} __packed;
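+
+/*
+ * Illustrative use of the sample table above (a sketch, not necessarily
+ * the exact driver algorithm): for a requested power P falling between
+ * samples[i].power and samples[i+1].power, the setup code can linearly
+ * interpolate the corresponding gain indexes, e.g.
+ *
+ *   gain = samples[i].gain_index +
+ *          (P - samples[i].power) *
+ *          (samples[i+1].gain_index - samples[i].gain_index) /
+ *          (samples[i+1].power - samples[i].power);
+ *
+ * with the a..e / Fa..Fe coefficients and the temperature corrections
+ * below refining the result.
+ */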
+
+/*
+ * Temperature-based Tx-power compensation data, not band-specific.
+ * These coefficients are use to modify a/b/c/d/e coeffs based on
+ *   difference between current temperature and factory calib temperature.
+ * Data copied from EEPROM.
+ */
+struct iwl3945_eeprom_temperature_corr {
+       u32 Ta;
+       u32 Tb;
+       u32 Tc;
+       u32 Td;
+       u32 Te;
+} __packed;
+
+/*
+ * EEPROM map
+ */
+struct iwl3945_eeprom {
+       u8 reserved0[16];
+       u16 device_id;  /* abs.ofs: 16 */
+       u8 reserved1[2];
+       u16 pmc;                /* abs.ofs: 20 */
+       u8 reserved2[20];
+       u8 mac_address[6];      /* abs.ofs: 42 */
+       u8 reserved3[58];
+       u16 board_revision;     /* abs.ofs: 106 */
+       u8 reserved4[11];
+       u8 board_pba_number[9]; /* abs.ofs: 119 */
+       u8 reserved5[8];
+       u16 version;            /* abs.ofs: 136 */
+       u8 sku_cap;             /* abs.ofs: 138 */
+       u8 leds_mode;           /* abs.ofs: 139 */
+       u16 oem_mode;
+       u16 wowlan_mode;        /* abs.ofs: 142 */
+       u16 leds_time_interval; /* abs.ofs: 144 */
+       u8 leds_off_time;       /* abs.ofs: 146 */
+       u8 leds_on_time;        /* abs.ofs: 147 */
+       u8 almgor_m_version;    /* abs.ofs: 148 */
+       u8 antenna_switch_type; /* abs.ofs: 149 */
+       u8 reserved6[42];
+       u8 sku_id[4];           /* abs.ofs: 192 */
+
+/*
+ * Per-channel regulatory data.
+ *
+ * Each channel that *might* be supported by 3945 has a fixed location
+ * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
+ * txpower (MSB).
+ *
+ * Entries immediately below are for 20 MHz channel width.
+ *
+ * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ */
+       u16 band_1_count;       /* abs.ofs: 196 */
+       struct iwl_eeprom_channel band_1_channels[14];  /* abs.ofs: 198 */
+
+/*
+ * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
+ * 5.0 GHz channels 7, 8, 11, 12, 16
+ * (4915-5080MHz) (none of these is ever supported)
+ */
+       u16 band_2_count;       /* abs.ofs: 226 */
+       struct iwl_eeprom_channel band_2_channels[13];  /* abs.ofs: 228 */
+
+/*
+ * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+ * (5170-5320MHz)
+ */
+       u16 band_3_count;       /* abs.ofs: 254 */
+       struct iwl_eeprom_channel band_3_channels[12];  /* abs.ofs: 256 */
+
+/*
+ * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+ * (5500-5700MHz)
+ */
+       u16 band_4_count;       /* abs.ofs: 280 */
+       struct iwl_eeprom_channel band_4_channels[11];  /* abs.ofs: 282 */
+
+/*
+ * 5.7 GHz channels 145, 149, 153, 157, 161, 165
+ * (5725-5825MHz)
+ */
+       u16 band_5_count;       /* abs.ofs: 304 */
+       struct iwl_eeprom_channel band_5_channels[6];  /* abs.ofs: 306 */
+
+       u8 reserved9[194];
+
+/*
+ * 3945 Txpower calibration data.
+ */
+#define IWL_NUM_TX_CALIB_GROUPS 5
+       struct iwl3945_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS];
+/* abs.ofs: 512 */
+       struct iwl3945_eeprom_temperature_corr corrections;  /* abs.ofs: 832 */
+       u8 reserved16[172];     /* fill out to full 1024 byte block */
+} __packed;
+
+#define IWL3945_EEPROM_IMG_SIZE 1024
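+
+/*
+ * Sanity check on the layout above: each iwl3945_eeprom_txpower_group is
+ * 5 * 4 + 5 * 4 + 5 * 4 + 1 + 1 + 2 = 64 bytes, so the 5 groups occupy
+ * 320 bytes starting at offset 512, the 20-byte temperature corrections
+ * land at offset 832 as annotated, and 852 + 172 reserved bytes fill the
+ * image out to IWL3945_EEPROM_IMG_SIZE (1024).
+ */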
+
+/* End of EEPROM */
+
+#define PCI_CFG_REV_ID_BIT_BASIC_SKU                (0x40)     /* bit 6    */
+#define PCI_CFG_REV_ID_BIT_RTP                      (0x80)     /* bit 7    */
+
+/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
+#define IWL39_NUM_QUEUES        5
+#define IWL39_CMD_QUEUE_NUM    4
+
+#define IWL_DEFAULT_TX_RETRY  15
+
+/*********************************************/
+
+#define RFD_SIZE                              4
+#define NUM_TFD_CHUNKS                        4
+
+#define RX_QUEUE_SIZE                         256
+#define RX_QUEUE_MASK                         255
+#define RX_QUEUE_SIZE_LOG                     8
+
+#define U32_PAD(n)             ((4-(n))&0x3)
+
+#define TFD_CTL_COUNT_SET(n)       (n << 24)
+#define TFD_CTL_COUNT_GET(ctl)     ((ctl >> 24) & 7)
+#define TFD_CTL_PAD_SET(n)         (n << 28)
+#define TFD_CTL_PAD_GET(ctl)       (ctl >> 28)
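+
+/*
+ * Example: a Tx descriptor carrying two buffers whose total length needs
+ * 3 bytes of padding to reach a 32-bit boundary (U32_PAD(5) =
+ * (4 - 5) & 0x3 = 3) would encode its control word as
+ *
+ *   TFD_CTL_COUNT_SET(2) | TFD_CTL_PAD_SET(3)
+ *
+ * and TFD_CTL_COUNT_GET()/TFD_CTL_PAD_GET() recover the two fields from
+ * bits 24-26 and 28-31 respectively.
+ */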
+
+/* Sizes and addresses for instruction and data memory (SRAM) in
+ * 3945's embedded processor.  Driver access is via HBUS_TARG_MEM_* regs. */
+#define IWL39_RTC_INST_LOWER_BOUND             (0x000000)
+#define IWL39_RTC_INST_UPPER_BOUND             (0x014000)
+
+#define IWL39_RTC_DATA_LOWER_BOUND             (0x800000)
+#define IWL39_RTC_DATA_UPPER_BOUND             (0x808000)
+
+#define IWL39_RTC_INST_SIZE (IWL39_RTC_INST_UPPER_BOUND - \
+                               IWL39_RTC_INST_LOWER_BOUND)
+#define IWL39_RTC_DATA_SIZE (IWL39_RTC_DATA_UPPER_BOUND - \
+                               IWL39_RTC_DATA_LOWER_BOUND)
+
+#define IWL39_MAX_INST_SIZE IWL39_RTC_INST_SIZE
+#define IWL39_MAX_DATA_SIZE IWL39_RTC_DATA_SIZE
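+
+/*
+ * For reference: 0x14000 bytes = 80 KiB of instruction SRAM and
+ * 0x8000 bytes = 32 KiB of data SRAM, which is what IWL39_MAX_INST_SIZE
+ * and IWL39_MAX_DATA_SIZE above evaluate to.
+ */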
+
+/* Size of uCode instruction memory in bootstrap state machine */
+#define IWL39_MAX_BSM_SIZE IWL39_RTC_INST_SIZE
+
+static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr)
+{
+       return (addr >= IWL39_RTC_DATA_LOWER_BOUND) &&
+              (addr < IWL39_RTC_DATA_UPPER_BOUND);
+}
+
+/* Base physical address of iwl3945_shared is provided to FH_TSSR_CBB_BASE
+ * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */
+struct iwl3945_shared {
+       __le32 tx_base_ptr[8];
+} __packed;
+
+static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags)
+{
+       return le16_to_cpu(rate_n_flags) & 0xFF;
+}
+
+static inline u16 iwl3945_hw_get_rate_n_flags(__le16 rate_n_flags)
+{
+       return le16_to_cpu(rate_n_flags);
+}
+
+static inline __le16 iwl3945_hw_set_rate_n_flags(u8 rate, u16 flags)
+{
+       return cpu_to_le16((u16)rate|flags);
+}
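+
+/*
+ * Round-trip note: assuming the flag bits live in the upper byte of the
+ * 16-bit value, iwl3945_hw_get_rate(iwl3945_hw_set_rate_n_flags(r, f))
+ * returns r unchanged, while iwl3945_hw_get_rate_n_flags() returns the
+ * full host-order value.
+ */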
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.c b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
new file mode 100644 (file)
index 0000000..abd9235
--- /dev/null
@@ -0,0 +1,64 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "iwl-commands.h"
+#include "iwl-3945.h"
+#include "iwl-core.h"
+#include "iwl-dev.h"
+#include "iwl-3945-led.h"
+
+
+/* Send led command */
+static int iwl3945_send_led_cmd(struct iwl_priv *priv,
+                               struct iwl_led_cmd *led_cmd)
+{
+       struct iwl_host_cmd cmd = {
+               .id = REPLY_LEDS_CMD,
+               .len = sizeof(struct iwl_led_cmd),
+               .data = led_cmd,
+               .flags = CMD_ASYNC,
+               .callback = NULL,
+       };
+
+       return iwl_legacy_send_cmd(priv, &cmd);
+}
+
+const struct iwl_led_ops iwl3945_led_ops = {
+       .cmd = iwl3945_send_led_cmd,
+};
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.h b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
new file mode 100644 (file)
index 0000000..9671627
--- /dev/null
@@ -0,0 +1,32 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_3945_led_h__
+#define __iwl_3945_led_h__
+
+extern const struct iwl_led_ops iwl3945_led_ops;
+
+#endif /* __iwl_3945_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
new file mode 100644 (file)
index 0000000..977bd24
--- /dev/null
@@ -0,0 +1,994 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/wireless.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+
+#include "iwl-commands.h"
+#include "iwl-3945.h"
+#include "iwl-sta.h"
+
+#define RS_NAME "iwl-3945-rs"
+
+static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = {
+       7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
+};
+
+static s32 iwl3945_expected_tpt_g_prot[IWL_RATE_COUNT_3945] = {
+       7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
+};
+
+static s32 iwl3945_expected_tpt_a[IWL_RATE_COUNT_3945] = {
+       0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
+};
+
+static s32 iwl3945_expected_tpt_b[IWL_RATE_COUNT_3945] = {
+       7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+struct iwl3945_tpt_entry {
+       s8 min_rssi;
+       u8 index;
+};
+
+static struct iwl3945_tpt_entry iwl3945_tpt_table_a[] = {
+       {-60, IWL_RATE_54M_INDEX},
+       {-64, IWL_RATE_48M_INDEX},
+       {-72, IWL_RATE_36M_INDEX},
+       {-80, IWL_RATE_24M_INDEX},
+       {-84, IWL_RATE_18M_INDEX},
+       {-85, IWL_RATE_12M_INDEX},
+       {-87, IWL_RATE_9M_INDEX},
+       {-89, IWL_RATE_6M_INDEX}
+};
+
+static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
+       {-60, IWL_RATE_54M_INDEX},
+       {-64, IWL_RATE_48M_INDEX},
+       {-68, IWL_RATE_36M_INDEX},
+       {-80, IWL_RATE_24M_INDEX},
+       {-84, IWL_RATE_18M_INDEX},
+       {-85, IWL_RATE_12M_INDEX},
+       {-86, IWL_RATE_11M_INDEX},
+       {-88, IWL_RATE_5M_INDEX},
+       {-90, IWL_RATE_2M_INDEX},
+       {-92, IWL_RATE_1M_INDEX}
+};
+
+#define IWL_RATE_MAX_WINDOW          62
+#define IWL_RATE_FLUSH         (3*HZ)
+#define IWL_RATE_WIN_FLUSH       (HZ/2)
+#define IWL39_RATE_HIGH_TH          11520
+#define IWL_SUCCESS_UP_TH         8960
+#define IWL_SUCCESS_DOWN_TH      10880
+#define IWL_RATE_MIN_FAILURE_TH       6
+#define IWL_RATE_MIN_SUCCESS_TH       8
+#define IWL_RATE_DECREASE_TH       1920
+#define IWL_RATE_RETRY_TH           15
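+
+/*
+ * The throughput/success thresholds above are in the same fixed-point
+ * units as window->success_ratio (1/128 of a percent), i.e. roughly:
+ * IWL39_RATE_HIGH_TH ~ 90%, IWL_SUCCESS_UP_TH ~ 70%,
+ * IWL_SUCCESS_DOWN_TH ~ 85% and IWL_RATE_DECREASE_TH ~ 15%.
+ */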
+
+static u8 iwl3945_get_rate_index_by_rssi(s32 rssi, enum ieee80211_band band)
+{
+       u32 index = 0;
+       u32 table_size = 0;
+       struct iwl3945_tpt_entry *tpt_table = NULL;
+
+       if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL))
+               rssi = IWL_MIN_RSSI_VAL;
+
+       switch (band) {
+       case IEEE80211_BAND_2GHZ:
+               tpt_table = iwl3945_tpt_table_g;
+               table_size = ARRAY_SIZE(iwl3945_tpt_table_g);
+               break;
+
+       case IEEE80211_BAND_5GHZ:
+               tpt_table = iwl3945_tpt_table_a;
+               table_size = ARRAY_SIZE(iwl3945_tpt_table_a);
+               break;
+
+       default:
+               BUG();
+               break;
+       }
+
+       while ((index < table_size) && (rssi < tpt_table[index].min_rssi))
+               index++;
+
+       index = min(index, (table_size - 1));
+
+       return tpt_table[index].index;
+}
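+
+/*
+ * For example, an RSSI of -70 dBm on the 2.4 GHz band walks past the
+ * -60/-64/-68 dBm entries and stops at the -80 dBm one, yielding
+ * IWL_RATE_24M_INDEX as the starting rate.
+ */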
+
+static void iwl3945_clear_window(struct iwl3945_rate_scale_data *window)
+{
+       window->data = 0;
+       window->success_counter = 0;
+       window->success_ratio = -1;
+       window->counter = 0;
+       window->average_tpt = IWL_INVALID_VALUE;
+       window->stamp = 0;
+}
+
+/**
+ * iwl3945_rate_scale_flush_windows - flush out the rate scale windows
+ *
+ * Returns the number of windows that have gathered data but were
+ * not flushed.  If there were any that were not flushed, then
+ * reschedule the rate flushing routine.
+ */
+static int iwl3945_rate_scale_flush_windows(struct iwl3945_rs_sta *rs_sta)
+{
+       int unflushed = 0;
+       int i;
+       unsigned long flags;
+       struct iwl_priv *priv __maybe_unused = rs_sta->priv;
+
+       /*
+        * For each rate, if we have collected data on that rate
+        * and it has been more than IWL_RATE_WIN_FLUSH
+        * since we flushed, clear out the gathered statistics
+        */
+       for (i = 0; i < IWL_RATE_COUNT_3945; i++) {
+               if (!rs_sta->win[i].counter)
+                       continue;
+
+               spin_lock_irqsave(&rs_sta->lock, flags);
+               if (time_after(jiffies, rs_sta->win[i].stamp +
+                              IWL_RATE_WIN_FLUSH)) {
+                       IWL_DEBUG_RATE(priv, "flushing %d samples of rate "
+                                      "index %d\n",
+                                      rs_sta->win[i].counter, i);
+                       iwl3945_clear_window(&rs_sta->win[i]);
+               } else
+                       unflushed++;
+               spin_unlock_irqrestore(&rs_sta->lock, flags);
+       }
+
+       return unflushed;
+}
+
+#define IWL_RATE_FLUSH_MAX              5000   /* msec */
+#define IWL_RATE_FLUSH_MIN              50     /* msec */
+#define IWL_AVERAGE_PACKETS             1500
+
+static void iwl3945_bg_rate_scale_flush(unsigned long data)
+{
+       struct iwl3945_rs_sta *rs_sta = (void *)data;
+       struct iwl_priv *priv __maybe_unused = rs_sta->priv;
+       int unflushed = 0;
+       unsigned long flags;
+       u32 packet_count, duration, pps;
+
+       IWL_DEBUG_RATE(priv, "enter\n");
+
+       unflushed = iwl3945_rate_scale_flush_windows(rs_sta);
+
+       spin_lock_irqsave(&rs_sta->lock, flags);
+
+       /* Number of packets Tx'd since last time this timer ran */
+       packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
+
+       rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
+
+       if (unflushed) {
+               duration =
+                   jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
+
+               IWL_DEBUG_RATE(priv, "Tx'd %d packets in %dms\n",
+                              packet_count, duration);
+
+               /* Determine packets per second */
+               if (duration)
+                       pps = (packet_count * 1000) / duration;
+               else
+                       pps = 0;
+
+               if (pps) {
+                       duration = (IWL_AVERAGE_PACKETS * 1000) / pps;
+                       if (duration < IWL_RATE_FLUSH_MIN)
+                               duration = IWL_RATE_FLUSH_MIN;
+                       else if (duration > IWL_RATE_FLUSH_MAX)
+                               duration = IWL_RATE_FLUSH_MAX;
+               } else
+                       duration = IWL_RATE_FLUSH_MAX;
+
+               rs_sta->flush_time = msecs_to_jiffies(duration);
+
+               IWL_DEBUG_RATE(priv, "new flush period: %d msec ave %d\n",
+                              duration, packet_count);
+
+               mod_timer(&rs_sta->rate_scale_flush, jiffies +
+                         rs_sta->flush_time);
+
+               rs_sta->last_partial_flush = jiffies;
+       } else {
+               rs_sta->flush_time = IWL_RATE_FLUSH;
+               rs_sta->flush_pending = 0;
+       }
+       /* If there weren't any unflushed entries, we don't schedule the timer
+        * to run again */
+
+       rs_sta->last_flush = jiffies;
+
+       spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+       IWL_DEBUG_RATE(priv, "leave\n");
+}
+
+/**
+ * iwl3945_collect_tx_data - Update the success/failure sliding window
+ *
+ * We keep a sliding window of the last 62 packets transmitted
+ * at this rate.  window->data contains the bitmask of successful
+ * packets.
+ */
+static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
+                               struct iwl3945_rate_scale_data *window,
+                               int success, int retries, int index)
+{
+       unsigned long flags;
+       s32 fail_count;
+       struct iwl_priv *priv __maybe_unused = rs_sta->priv;
+
+       if (!retries) {
+               IWL_DEBUG_RATE(priv, "leave: retries == 0 -- should be at least 1\n");
+               return;
+       }
+
+       spin_lock_irqsave(&rs_sta->lock, flags);
+
+       /*
+        * Keep track of only the latest 62 tx frame attempts in this rate's
+        * history window; anything older isn't really relevant any more.
+        * If we have filled up the sliding window, drop the oldest attempt;
+        * if the oldest attempt (highest bit in bitmap) shows "success",
+        * subtract "1" from the success counter (this is the main reason
+        * we keep these bitmaps!).
+        */
+       while (retries > 0) {
+               if (window->counter >= IWL_RATE_MAX_WINDOW) {
+
+                       /* remove earliest */
+                       window->counter = IWL_RATE_MAX_WINDOW - 1;
+
+                       if (window->data & (1ULL << (IWL_RATE_MAX_WINDOW - 1))) {
+                               window->data &= ~(1ULL << (IWL_RATE_MAX_WINDOW - 1));
+                               window->success_counter--;
+                       }
+               }
+
+               /* Increment frames-attempted counter */
+               window->counter++;
+
+               /* Shift bitmap by one frame (throw away oldest history),
+                * OR in "1", and increment "success" if this
+                * frame was successful. */
+               window->data <<= 1;
+               if (success > 0) {
+                       window->success_counter++;
+                       window->data |= 0x1;
+                       success--;
+               }
+
+               retries--;
+       }
+
+       /* Calculate current success ratio, avoid divide-by-0! */
+       if (window->counter > 0)
+               window->success_ratio = 128 * (100 * window->success_counter)
+                                       / window->counter;
+       else
+               window->success_ratio = IWL_INVALID_VALUE;
+
+       fail_count = window->counter - window->success_counter;
+
+       /* Calculate average throughput, if we have enough history. */
+       if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
+           (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
+               window->average_tpt = ((window->success_ratio *
+                               rs_sta->expected_tpt[index] + 64) / 128);
+       else
+               window->average_tpt = IWL_INVALID_VALUE;
+
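+       /*
+        * Example of the fixed-point math above: 40 successes out of 50
+        * attempts gives success_ratio = 128 * (100 * 40) / 50 = 10240
+        * (80% in units of 1/128 percent); with an expected throughput
+        * of 104 for this rate, average_tpt = (10240 * 104 + 64) / 128
+        * = 8320 in integer arithmetic, i.e. roughly 100x the effective
+        * expected throughput (0.8 * 104).
+        */
+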
+       /* Tag this window as having been updated */
+       window->stamp = jiffies;
+
+       spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+}
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
+{
+       struct ieee80211_hw *hw = priv->hw;
+       struct ieee80211_conf *conf = &priv->hw->conf;
+       struct iwl3945_sta_priv *psta;
+       struct iwl3945_rs_sta *rs_sta;
+       struct ieee80211_supported_band *sband;
+       int i;
+
+       IWL_DEBUG_INFO(priv, "enter\n");
+       if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
+               goto out;
+
+       psta = (struct iwl3945_sta_priv *) sta->drv_priv;
+       rs_sta = &psta->rs_sta;
+       sband = hw->wiphy->bands[conf->channel->band];
+
+       rs_sta->priv = priv;
+
+       rs_sta->start_rate = IWL_RATE_INVALID;
+
+       /* default to just 802.11b */
+       rs_sta->expected_tpt = iwl3945_expected_tpt_b;
+
+       rs_sta->last_partial_flush = jiffies;
+       rs_sta->last_flush = jiffies;
+       rs_sta->flush_time = IWL_RATE_FLUSH;
+       rs_sta->last_tx_packets = 0;
+
+       rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
+       rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;
+
+       for (i = 0; i < IWL_RATE_COUNT_3945; i++)
+               iwl3945_clear_window(&rs_sta->win[i]);
+
+       /* TODO: what is a good starting rate for STA? About middle? Maybe not
+        * the lowest or the highest rate.. Could consider using RSSI from
+        * previous packets? Need to have IEEE 802.1X auth succeed immediately
+        * after assoc.. */
+
+       for (i = sband->n_bitrates - 1; i >= 0; i--) {
+               if (sta->supp_rates[sband->band] & (1 << i)) {
+                       rs_sta->last_txrate_idx = i;
+                       break;
+               }
+       }
+
+       priv->_3945.sta_supp_rates = sta->supp_rates[sband->band];
+       /* For the 5 GHz band, rates start at IWL_FIRST_OFDM_RATE */
+       if (sband->band == IEEE80211_BAND_5GHZ) {
+               rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
+               priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates <<
+                                               IWL_FIRST_OFDM_RATE;
+       }
+
+out:
+       priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+
+       IWL_DEBUG_INFO(priv, "leave\n");
+}
+
+static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+       return hw->priv;
+}
+
+/* rate scale requires free function to be implemented */
+static void iwl3945_rs_free(void *priv)
+{
+       return;
+}
+
+static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
+{
+       struct iwl3945_rs_sta *rs_sta;
+       struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
+       struct iwl_priv *priv __maybe_unused = iwl_priv;
+
+       IWL_DEBUG_RATE(priv, "enter\n");
+
+       rs_sta = &psta->rs_sta;
+
+       spin_lock_init(&rs_sta->lock);
+       init_timer(&rs_sta->rate_scale_flush);
+
+       IWL_DEBUG_RATE(priv, "leave\n");
+
+       return rs_sta;
+}
+
+static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
+                       void *priv_sta)
+{
+       struct iwl3945_rs_sta *rs_sta = priv_sta;
+
+       /*
+        * Be careful not to use any members of iwl3945_rs_sta (like trying
+        * to use iwl_priv to print out debugging) since it may not be fully
+        * initialized at this point.
+        */
+       del_timer_sync(&rs_sta->rate_scale_flush);
+}
+
+
+/**
+ * iwl3945_rs_tx_status - Update rate control values based on Tx results
+ *
+ * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
+ * the hardware for each rate.
+ */
+static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
+                        struct ieee80211_sta *sta, void *priv_sta,
+                        struct sk_buff *skb)
+{
+       s8 retries = 0, current_count;
+       int scale_rate_index, first_index, last_index;
+       unsigned long flags;
+       struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
+       struct iwl3945_rs_sta *rs_sta = priv_sta;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+       IWL_DEBUG_RATE(priv, "enter\n");
+
+       retries = info->status.rates[0].count;
+       /* Sanity Check for retries */
+       if (retries > IWL_RATE_RETRY_TH)
+               retries = IWL_RATE_RETRY_TH;
+
+       first_index = sband->bitrates[info->status.rates[0].idx].hw_value;
+       if ((first_index < 0) || (first_index >= IWL_RATE_COUNT_3945)) {
+               IWL_DEBUG_RATE(priv, "leave: Rate out of bounds: %d\n", first_index);
+               return;
+       }
+
+       if (!priv_sta) {
+               IWL_DEBUG_RATE(priv, "leave: No STA priv data to update!\n");
+               return;
+       }
+
+       /* Treat uninitialized rate scaling data same as non-existing. */
+       if (!rs_sta->priv) {
+               IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n");
+               return;
+       }
+
+
+       rs_sta->tx_packets++;
+
+       scale_rate_index = first_index;
+       last_index = first_index;
+
+       /*
+        * Update the window for each rate.  We determine which rates
+        * were Tx'd based on the total number of retries vs. the number
+        * of retries configured for each rate -- currently set to the
+        * priv value 'retry_rate' rather than a rate-specific value.
+        *
+        * On exit from this while loop last_index indicates the rate
+        * at which the frame was finally transmitted (or failed if no
+        * ACK)
+        */
+       while (retries > 1) {
+               if ((retries - 1) < priv->retry_rate) {
+                       current_count = (retries - 1);
+                       last_index = scale_rate_index;
+               } else {
+                       current_count = priv->retry_rate;
+                       last_index = iwl3945_rs_next_rate(priv,
+                                                        scale_rate_index);
+               }
+
+               /* Update this rate accounting for as many retries
+                * as was used for it (per current_count) */
+               iwl3945_collect_tx_data(rs_sta,
+                                   &rs_sta->win[scale_rate_index],
+                                   0, current_count, scale_rate_index);
+               IWL_DEBUG_RATE(priv, "Update rate %d for %d retries.\n",
+                              scale_rate_index, current_count);
+
+               retries -= current_count;
+
+               scale_rate_index = last_index;
+       }
+
+
+       /* Update the last index window with success/failure based on ACK */
+       IWL_DEBUG_RATE(priv, "Update rate %d with %s.\n",
+                      last_index,
+                      (info->flags & IEEE80211_TX_STAT_ACK) ?
+                      "success" : "failure");
+       iwl3945_collect_tx_data(rs_sta,
+                           &rs_sta->win[last_index],
+                           info->flags & IEEE80211_TX_STAT_ACK, 1, last_index);
+
+       /* We updated the rate scale window -- if it's been more than
+        * flush_time since the last run, schedule the flush
+        * again */
+       spin_lock_irqsave(&rs_sta->lock, flags);
+
+       if (!rs_sta->flush_pending &&
+           time_after(jiffies, rs_sta->last_flush +
+                      rs_sta->flush_time)) {
+
+               rs_sta->last_partial_flush = jiffies;
+               rs_sta->flush_pending = 1;
+               mod_timer(&rs_sta->rate_scale_flush,
+                         jiffies + rs_sta->flush_time);
+       }
+
+       spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+       IWL_DEBUG_RATE(priv, "leave\n");
+}
+
+static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
+                                u8 index, u16 rate_mask, enum ieee80211_band band)
+{
+       u8 high = IWL_RATE_INVALID;
+       u8 low = IWL_RATE_INVALID;
+       struct iwl_priv *priv __maybe_unused = rs_sta->priv;
+
+       /* 802.11A walks to the next literal adjacent rate in
+        * the rate table */
+       if (unlikely(band == IEEE80211_BAND_5GHZ)) {
+               int i;
+               u32 mask;
+
+               /* Find the previous rate that is in the rate mask */
+               i = index - 1;
+               for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+                       if (rate_mask & mask) {
+                               low = i;
+                               break;
+                       }
+               }
+
+               /* Find the next rate that is in the rate mask */
+               i = index + 1;
+               for (mask = (1 << i); i < IWL_RATE_COUNT_3945;
+                    i++, mask <<= 1) {
+                       if (rate_mask & mask) {
+                               high = i;
+                               break;
+                       }
+               }
+
+               return (high << 8) | low;
+       }
+
+       low = index;
+       while (low != IWL_RATE_INVALID) {
+               if (rs_sta->tgg)
+                       low = iwl3945_rates[low].prev_rs_tgg;
+               else
+                       low = iwl3945_rates[low].prev_rs;
+               if (low == IWL_RATE_INVALID)
+                       break;
+               if (rate_mask & (1 << low))
+                       break;
+               IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
+       }
+
+       high = index;
+       while (high != IWL_RATE_INVALID) {
+               if (rs_sta->tgg)
+                       high = iwl3945_rates[high].next_rs_tgg;
+               else
+                       high = iwl3945_rates[high].next_rs;
+               if (high == IWL_RATE_INVALID)
+                       break;
+               if (rate_mask & (1 << high))
+                       break;
+               IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
+       }
+
+       return (high << 8) | low;
+}
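+
+/*
+ * The packed return value is meant to be split by the caller, e.g.:
+ *
+ *   high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask, band);
+ *   low  = high_low & 0xff;
+ *   high = (high_low >> 8) & 0xff;
+ *
+ * with IWL_RATE_INVALID in either byte meaning "no usable adjacent rate".
+ */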
+
+/**
+ * iwl3945_rs_get_rate - find the rate for the requested packet
+ *
+ * Selects a rate index from the driver-allocated rate table for the
+ * requested packet.
+ *
+ * The rate control algorithm has no internal mapping between hw_mode's
+ * rate ordering and the rate ordering used by the rate control algorithm.
+ *
+ * The rate control algorithm uses a single table of rates that goes across
+ * the entire A/B/G spectrum vs. being limited to just one particular
+ * hw_mode.
+ *
+ * As such, we can't convert the index obtained below into the hw_mode's
+ * rate table and must reference the driver allocated rate table
+ *
+ */
+static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
+                       void *priv_sta, struct ieee80211_tx_rate_control *txrc)
+{
+       struct ieee80211_supported_band *sband = txrc->sband;
+       struct sk_buff *skb = txrc->skb;
+       u8 low = IWL_RATE_INVALID;
+       u8 high = IWL_RATE_INVALID;
+       u16 high_low;
+       int index;
+       struct iwl3945_rs_sta *rs_sta = priv_sta;
+       struct iwl3945_rate_scale_data *window = NULL;
+       int current_tpt = IWL_INVALID_VALUE;
+       int low_tpt = IWL_INVALID_VALUE;
+       int high_tpt = IWL_INVALID_VALUE;
+       u32 fail_count;
+       s8 scale_action = 0;
+       unsigned long flags;
+       u16 rate_mask;
+       s8 max_rate_idx = -1;
+       struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+       IWL_DEBUG_RATE(priv, "enter\n");
+
+       /* Treat uninitialized rate scaling data same as non-existing. */
+       if (rs_sta && !rs_sta->priv) {
+               IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n");
+               priv_sta = NULL;
+       }
+
+       if (rate_control_send_low(sta, priv_sta, txrc))
+               return;
+
+       rate_mask = sta->supp_rates[sband->band];
+
+       /* get user max rate if set */
+       max_rate_idx = txrc->max_rate_idx;
+       if ((sband->band == IEEE80211_BAND_5GHZ) && (max_rate_idx != -1))
+               max_rate_idx += IWL_FIRST_OFDM_RATE;
+       if ((max_rate_idx < 0) || (max_rate_idx >= IWL_RATE_COUNT))
+               max_rate_idx = -1;
+
+       index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT_3945 - 1);
+
+       if (sband->band == IEEE80211_BAND_5GHZ)
+               rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
+
+       spin_lock_irqsave(&rs_sta->lock, flags);
+
+       /* for a recent association, choose the best rate
+        * according to the rssi value
+        */
+       if (rs_sta->start_rate != IWL_RATE_INVALID) {
+               if (rs_sta->start_rate < index &&
+                  (rate_mask & (1 << rs_sta->start_rate)))
+                       index = rs_sta->start_rate;
+               rs_sta->start_rate = IWL_RATE_INVALID;
+       }
+
+       /* force user max rate if set by user */
+       if ((max_rate_idx != -1) && (max_rate_idx < index)) {
+               if (rate_mask & (1 << max_rate_idx))
+                       index = max_rate_idx;
+       }
+
+       window = &(rs_sta->win[index]);
+
+       fail_count = window->counter - window->success_counter;
+
+       if (((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
+            (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) {
+               spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+               IWL_DEBUG_RATE(priv, "Invalid average_tpt on rate %d: "
+                              "counter: %d, success_counter: %d, "
+                              "expected_tpt is %sNULL\n",
+                              index,
+                              window->counter,
+                              window->success_counter,
+                              rs_sta->expected_tpt ? "not " : "");
+
+               /* Can't calculate this yet; not enough history */
+               window->average_tpt = IWL_INVALID_VALUE;
+               goto out;
+
+       }
+
+       current_tpt = window->average_tpt;
+
+       high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask,
+                                            sband->band);
+       low = high_low & 0xff;
+       high = (high_low >> 8) & 0xff;
+
+       /* If the user set a max rate, don't allow rates above that constraint */
+       if ((max_rate_idx != -1) && (max_rate_idx < high))
+               high = IWL_RATE_INVALID;
+
+       /* Collect Measured throughputs of adjacent rates */
+       if (low != IWL_RATE_INVALID)
+               low_tpt = rs_sta->win[low].average_tpt;
+
+       if (high != IWL_RATE_INVALID)
+               high_tpt = rs_sta->win[high].average_tpt;
+
+       spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+       scale_action = 0;
+
+       /* Low success ratio, need to drop the rate */
+       if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) {
+               IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
+               scale_action = -1;
+       /* No throughput measured yet for adjacent rates,
+        * try to increase */
+       } else if ((low_tpt == IWL_INVALID_VALUE) &&
+                  (high_tpt == IWL_INVALID_VALUE)) {
+
+               if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH)
+                       scale_action = 1;
+               else if (low != IWL_RATE_INVALID)
+                       scale_action = 0;
+
+       /* Both adjacent throughputs are measured, but neither one has
+        * better throughput; we're using the best rate, don't change
+        * it! */
+       } else if ((low_tpt != IWL_INVALID_VALUE) &&
+                (high_tpt != IWL_INVALID_VALUE) &&
+                (low_tpt < current_tpt) && (high_tpt < current_tpt)) {
+
+               IWL_DEBUG_RATE(priv, "No action -- low [%d] & high [%d] < "
+                              "current_tpt [%d]\n",
+                              low_tpt, high_tpt, current_tpt);
+               scale_action = 0;
+
+       /* At least one of the rates has better throughput */
+       } else {
+               if (high_tpt != IWL_INVALID_VALUE) {
+
+                       /* High rate has better throughput, increase
+                        * rate */
+                       if (high_tpt > current_tpt &&
+                               window->success_ratio >= IWL_RATE_INCREASE_TH)
+                               scale_action = 1;
+                       else {
+                               IWL_DEBUG_RATE(priv,
+                                   "not increasing rate despite high tpt\n");
+                               scale_action = 0;
+                       }
+               } else if (low_tpt != IWL_INVALID_VALUE) {
+                       if (low_tpt > current_tpt) {
+                               IWL_DEBUG_RATE(priv,
+                                   "decrease rate because of low tpt\n");
+                               scale_action = -1;
+                       } else if (window->success_ratio >= IWL_RATE_INCREASE_TH) {
+                               /* Lower rate is not better and success
+                                * ratio is good enough, try a higher rate */
+                               scale_action = 1;
+                       }
+               }
+       }
+
+       /* Sanity check; asked for decrease, but success rate or throughput
+        * has been good at old rate.  Don't change it. */
+       if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
+                   ((window->success_ratio > IWL_RATE_HIGH_TH) ||
+                    (current_tpt > (100 * rs_sta->expected_tpt[low]))))
+               scale_action = 0;
+
+       switch (scale_action) {
+       case -1:
+
+               /* Decrease rate */
+               if (low != IWL_RATE_INVALID)
+                       index = low;
+               break;
+
+       case 1:
+               /* Increase rate */
+               if (high != IWL_RATE_INVALID)
+                       index = high;
+
+               break;
+
+       case 0:
+       default:
+               /* No change */
+               break;
+       }
+
+       IWL_DEBUG_RATE(priv, "Selected %d (action %d) - low %d high %d\n",
+                      index, scale_action, low, high);
+
+ out:
+
+       rs_sta->last_txrate_idx = index;
+       if (sband->band == IEEE80211_BAND_5GHZ)
+               info->control.rates[0].idx = rs_sta->last_txrate_idx -
+                               IWL_FIRST_OFDM_RATE;
+       else
+               info->control.rates[0].idx = rs_sta->last_txrate_idx;
+
+       IWL_DEBUG_RATE(priv, "leave: %d\n", index);
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static int iwl3945_open_file_generic(struct inode *inode, struct file *file)
+{
+       file->private_data = inode->i_private;
+       return 0;
+}
+
+static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file,
+                                                 char __user *user_buf,
+                                                 size_t count, loff_t *ppos)
+{
+       char *buff;
+       int desc = 0;
+       int j;
+       ssize_t ret;
+       struct iwl3945_rs_sta *lq_sta = file->private_data;
+
+       buff = kmalloc(1024, GFP_KERNEL);
+       if (!buff)
+               return -ENOMEM;
+
+       desc += sprintf(buff + desc, "tx packets=%d last rate index=%d\n"
+                       "rate=0x%X flush time %d\n",
+                       lq_sta->tx_packets,
+                       lq_sta->last_txrate_idx,
+                       lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time));
+       for (j = 0; j < IWL_RATE_COUNT_3945; j++) {
+               desc += sprintf(buff+desc,
+                               "counter=%d success=%d %%=%d\n",
+                               lq_sta->win[j].counter,
+                               lq_sta->win[j].success_counter,
+                               lq_sta->win[j].success_ratio);
+       }
+       ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+       kfree(buff);
+       return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+       .read = iwl3945_sta_dbgfs_stats_table_read,
+       .open = iwl3945_open_file_generic,
+       .llseek = default_llseek,
+};
+
+static void iwl3945_add_debugfs(void *priv, void *priv_sta,
+                               struct dentry *dir)
+{
+       struct iwl3945_rs_sta *lq_sta = priv_sta;
+
+       lq_sta->rs_sta_dbgfs_stats_table_file =
+               debugfs_create_file("rate_stats_table", 0600, dir,
+               lq_sta, &rs_sta_dbgfs_stats_table_ops);
+
+}
+
+static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
+{
+       struct iwl3945_rs_sta *lq_sta = priv_sta;
+       debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added we ignore it.
+ */
+static void iwl3945_rs_rate_init_stub(void *priv_r,
+                               struct ieee80211_supported_band *sband,
+                             struct ieee80211_sta *sta, void *priv_sta)
+{
+}
+
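+/* mac80211 rate-control hooks for the 3945 rate-scaling algorithm */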
+static struct rate_control_ops rs_ops = {
+       .module = NULL,
+       .name = RS_NAME,
+       .tx_status = iwl3945_rs_tx_status,
+       .get_rate = iwl3945_rs_get_rate,
+       .rate_init = iwl3945_rs_rate_init_stub,
+       .alloc = iwl3945_rs_alloc,
+       .free = iwl3945_rs_free,
+       .alloc_sta = iwl3945_rs_alloc_sta,
+       .free_sta = iwl3945_rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+       .add_sta_debugfs = iwl3945_add_debugfs,
+       .remove_sta_debugfs = iwl3945_remove_debugfs,
+#endif
+
+};
+void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
+{
+       struct iwl_priv *priv = hw->priv;
+       s32 rssi = 0;
+       unsigned long flags;
+       struct iwl3945_rs_sta *rs_sta;
+       struct ieee80211_sta *sta;
+       struct iwl3945_sta_priv *psta;
+
+       IWL_DEBUG_RATE(priv, "enter\n");
+
+       rcu_read_lock();
+
+       sta = ieee80211_find_sta(priv->contexts[IWL_RXON_CTX_BSS].vif,
+                                priv->stations[sta_id].sta.sta.addr);
+       if (!sta) {
+               IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
+               rcu_read_unlock();
+               return;
+       }
+
+       psta = (void *) sta->drv_priv;
+       rs_sta = &psta->rs_sta;
+
+       spin_lock_irqsave(&rs_sta->lock, flags);
+
+       rs_sta->tgg = 0;
+       switch (priv->band) {
+       case IEEE80211_BAND_2GHZ:
+               /* TODO: this always does G, not a regression */
+               if (priv->contexts[IWL_RXON_CTX_BSS].active.flags &
+                                               RXON_FLG_TGG_PROTECT_MSK) {
+                       rs_sta->tgg = 1;
+                       rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot;
+               } else
+                       rs_sta->expected_tpt = iwl3945_expected_tpt_g;
+               break;
+
+       case IEEE80211_BAND_5GHZ:
+               rs_sta->expected_tpt = iwl3945_expected_tpt_a;
+               break;
+       case IEEE80211_NUM_BANDS:
+               BUG();
+               break;
+       }
+
+       spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+       rssi = priv->_3945.last_rx_rssi;
+       if (rssi == 0)
+               rssi = IWL_MIN_RSSI_VAL;
+
+       IWL_DEBUG_RATE(priv, "Network RSSI: %d\n", rssi);
+
+       rs_sta->start_rate = iwl3945_get_rate_index_by_rssi(rssi, priv->band);
+
+       IWL_DEBUG_RATE(priv, "leave: rssi %d assign rate index: "
+                      "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate,
+                      iwl3945_rates[rs_sta->start_rate].plcp);
+       rcu_read_unlock();
+}
+
+int iwl3945_rate_control_register(void)
+{
+       return ieee80211_rate_control_register(&rs_ops);
+}
+
+void iwl3945_rate_control_unregister(void)
+{
+       ieee80211_rate_control_unregister(&rs_ops);
+}
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
new file mode 100644 (file)
index 0000000..d096dc2
--- /dev/null
@@ -0,0 +1,2742 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include <net/mac80211.h>
+
+#include "iwl-fh.h"
+#include "iwl-3945-fh.h"
+#include "iwl-commands.h"
+#include "iwl-sta.h"
+#include "iwl-3945.h"
+#include "iwl-eeprom.h"
+#include "iwl-core.h"
+#include "iwl-helpers.h"
+#include "iwl-led.h"
+#include "iwl-3945-led.h"
+#include "iwl-3945-debugfs.h"
+
+#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np)    \
+       [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,   \
+                                   IWL_RATE_##r##M_IEEE,   \
+                                   IWL_RATE_##ip##M_INDEX, \
+                                   IWL_RATE_##in##M_INDEX, \
+                                   IWL_RATE_##rp##M_INDEX, \
+                                   IWL_RATE_##rn##M_INDEX, \
+                                   IWL_RATE_##pp##M_INDEX, \
+                                   IWL_RATE_##np##M_INDEX, \
+                                   IWL_RATE_##r##M_INDEX_TABLE, \
+                                   IWL_RATE_##ip##M_INDEX_TABLE }
+
+/*
+ * Parameter order:
+ *   rate, prev rate, next rate, prev tgg rate, next tgg rate
+ *
+ * If there isn't a valid next or previous rate then INV is used which
+ * maps to IWL_RATE_INVALID
+ *
+ */
+const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = {
+       IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2),    /*  1mbps */
+       IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5),          /*  2mbps */
+       IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
+       IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18),      /* 11mbps */
+       IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11),        /*  6mbps */
+       IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11),       /*  9mbps */
+       IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
+       IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
+       IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
+       IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
+       IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
+       IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */
+};
+
+static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
+{
+       u8 rate = iwl3945_rates[rate_index].prev_ieee;
+
+       if (rate == IWL_RATE_INVALID)
+               rate = rate_index;
+       return rate;
+}
+
+/* 1 = enable the iwl3945_disable_events() function */
+#define IWL_EVT_DISABLE (0)
+#define IWL_EVT_DISABLE_SIZE (1532/32)
+
+/**
+ * iwl3945_disable_events - Disable selected events in uCode event log
+ *
+ * Disable an event by writing "1"s into "disable"
+ *   bitmap in SRAM.  Bit position corresponds to Event # (id/type).
+ *   Default values of 0 enable uCode events to be logged.
+ * Use only for special debugging.  This function is just a placeholder as-is,
+ *   you'll need to provide the special bits! ...
+ *   ... and set IWL_EVT_DISABLE to 1. */
+void iwl3945_disable_events(struct iwl_priv *priv)
+{
+       int i;
+       u32 base;               /* SRAM address of event log header */
+       u32 disable_ptr;        /* SRAM address of event-disable bitmap array */
+       u32 array_size;         /* # of u32 entries in array */
+       static const u32 evt_disable[IWL_EVT_DISABLE_SIZE] = {
+               0x00000000,     /*   31 -    0  Event id numbers */
+               0x00000000,     /*   63 -   32 */
+               0x00000000,     /*   95 -   64 */
+               0x00000000,     /*  127 -   96 */
+               0x00000000,     /*  159 -  128 */
+               0x00000000,     /*  191 -  160 */
+               0x00000000,     /*  223 -  192 */
+               0x00000000,     /*  255 -  224 */
+               0x00000000,     /*  287 -  256 */
+               0x00000000,     /*  319 -  288 */
+               0x00000000,     /*  351 -  320 */
+               0x00000000,     /*  383 -  352 */
+               0x00000000,     /*  415 -  384 */
+               0x00000000,     /*  447 -  416 */
+               0x00000000,     /*  479 -  448 */
+               0x00000000,     /*  511 -  480 */
+               0x00000000,     /*  543 -  512 */
+               0x00000000,     /*  575 -  544 */
+               0x00000000,     /*  607 -  576 */
+               0x00000000,     /*  639 -  608 */
+               0x00000000,     /*  671 -  640 */
+               0x00000000,     /*  703 -  672 */
+               0x00000000,     /*  735 -  704 */
+               0x00000000,     /*  767 -  736 */
+               0x00000000,     /*  799 -  768 */
+               0x00000000,     /*  831 -  800 */
+               0x00000000,     /*  863 -  832 */
+               0x00000000,     /*  895 -  864 */
+               0x00000000,     /*  927 -  896 */
+               0x00000000,     /*  959 -  928 */
+               0x00000000,     /*  991 -  960 */
+               0x00000000,     /* 1023 -  992 */
+               0x00000000,     /* 1055 - 1024 */
+               0x00000000,     /* 1087 - 1056 */
+               0x00000000,     /* 1119 - 1088 */
+               0x00000000,     /* 1151 - 1120 */
+               0x00000000,     /* 1183 - 1152 */
+               0x00000000,     /* 1215 - 1184 */
+               0x00000000,     /* 1247 - 1216 */
+               0x00000000,     /* 1279 - 1248 */
+               0x00000000,     /* 1311 - 1280 */
+               0x00000000,     /* 1343 - 1312 */
+               0x00000000,     /* 1375 - 1344 */
+               0x00000000,     /* 1407 - 1376 */
+               0x00000000,     /* 1439 - 1408 */
+               0x00000000,     /* 1471 - 1440 */
+               0x00000000,     /* 1503 - 1472 */
+       };
+
+       base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+       if (!iwl3945_hw_valid_rtc_data_addr(base)) {
+               IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
+               return;
+       }
+
+       disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32)));
+       array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32)));
+
+       if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
+               IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
+                              disable_ptr);
+               for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
+                       iwl_legacy_write_targ_mem(priv,
+                                          disable_ptr + (i * sizeof(u32)),
+                                          evt_disable[i]);
+
+       } else {
+               IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n");
+               IWL_DEBUG_INFO(priv, "  by writing \"1\"s into disable bitmap\n");
+               IWL_DEBUG_INFO(priv, "  in SRAM at 0x%x, size %d u32s\n",
+                              disable_ptr, array_size);
+       }
+
+}
+
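+/* Map a hardware PLCP rate value to its index in iwl3945_rates[];
+ * returns -1 if the PLCP value is not found. */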
+static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
+{
+       int idx;
+
+       for (idx = 0; idx < IWL_RATE_COUNT_3945; idx++)
+               if (iwl3945_rates[idx].plcp == plcp)
+                       return idx;
+       return -1;
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
+
+static const char *iwl3945_get_tx_fail_reason(u32 status)
+{
+       switch (status & TX_STATUS_MSK) {
+       case TX_3945_STATUS_SUCCESS:
+               return "SUCCESS";
+               TX_STATUS_ENTRY(SHORT_LIMIT);
+               TX_STATUS_ENTRY(LONG_LIMIT);
+               TX_STATUS_ENTRY(FIFO_UNDERRUN);
+               TX_STATUS_ENTRY(MGMNT_ABORT);
+               TX_STATUS_ENTRY(NEXT_FRAG);
+               TX_STATUS_ENTRY(LIFE_EXPIRE);
+               TX_STATUS_ENTRY(DEST_PS);
+               TX_STATUS_ENTRY(ABORTED);
+               TX_STATUS_ENTRY(BT_RETRY);
+               TX_STATUS_ENTRY(STA_INVALID);
+               TX_STATUS_ENTRY(FRAG_DROPPED);
+               TX_STATUS_ENTRY(TID_DISABLE);
+               TX_STATUS_ENTRY(FRAME_FLUSHED);
+               TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
+               TX_STATUS_ENTRY(TX_LOCKED);
+               TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
+       }
+
+       return "UNKNOWN";
+}
+#else
+static inline const char *iwl3945_get_tx_fail_reason(u32 status)
+{
+       return "";
+}
+#endif
+
+/*
+ * Get the previous IEEE rate from the rate-scale table.
+ * For A and B mode we need to override the previous
+ * value.
+ */
+int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
+{
+       int next_rate = iwl3945_get_prev_ieee_rate(rate);
+
+       switch (priv->band) {
+       case IEEE80211_BAND_5GHZ:
+               if (rate == IWL_RATE_12M_INDEX)
+                       next_rate = IWL_RATE_9M_INDEX;
+               else if (rate == IWL_RATE_6M_INDEX)
+                       next_rate = IWL_RATE_6M_INDEX;
+               break;
+       case IEEE80211_BAND_2GHZ:
+               if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
+                   iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
+                       if (rate == IWL_RATE_11M_INDEX)
+                               next_rate = IWL_RATE_5M_INDEX;
+               }
+               break;
+
+       default:
+               break;
+       }
+
+       return next_rate;
+}
+
+
+/**
+ * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
+ *
+ * When FW advances 'R' index, all entries between old and new 'R' index
+ * need to be reclaimed. As a result, some free space becomes available. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
+                                    int txq_id, int index)
+{
+       struct iwl_tx_queue *txq = &priv->txq[txq_id];
+       struct iwl_queue *q = &txq->q;
+       struct iwl_tx_info *tx_info;
+
+       BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);
+
+       for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
+               q->read_ptr != index;
+               q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+               tx_info = &txq->txb[txq->q.read_ptr];
+               ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
+               tx_info->skb = NULL;
+               priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+       }
+
+       if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) &&
+                       (txq_id != IWL39_CMD_QUEUE_NUM) &&
+                       priv->mac80211_registered)
+               iwl_legacy_wake_queue(priv, txq);
+}
+
+/**
+ * iwl3945_rx_reply_tx - Handle Tx response
+ */
+static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
+                               struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+       int txq_id = SEQ_TO_QUEUE(sequence);
+       int index = SEQ_TO_INDEX(sequence);
+       struct iwl_tx_queue *txq = &priv->txq[txq_id];
+       struct ieee80211_tx_info *info;
+       struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+       u32  status = le32_to_cpu(tx_resp->status);
+       int rate_idx;
+       int fail;
+
+       if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
+               IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
+                         "is out of range [0-%d] %d %d\n", txq_id,
+                         index, txq->q.n_bd, txq->q.write_ptr,
+                         txq->q.read_ptr);
+               return;
+       }
+
+       txq->time_stamp = jiffies;
+       info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
+       ieee80211_tx_info_clear_status(info);
+
+       /* Fill the MRR chain with some info about on-chip retransmissions */
+       rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate);
+       if (info->band == IEEE80211_BAND_5GHZ)
+               rate_idx -= IWL_FIRST_OFDM_RATE;
+
+       fail = tx_resp->failure_frame;
+
+       info->status.rates[0].idx = rate_idx;
+       info->status.rates[0].count = fail + 1; /* add final attempt */
+
+       /* tx_status->rts_retry_count = tx_resp->failure_rts; */
+       info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
+                               IEEE80211_TX_STAT_ACK : 0;
+
+       IWL_DEBUG_TX(priv, "Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
+                       txq_id, iwl3945_get_tx_fail_reason(status), status,
+                       tx_resp->rate, tx_resp->failure_frame);
+
+       IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index);
+       iwl3945_tx_queue_reclaim(priv, txq_id, index);
+
+       if (status & TX_ABORT_REQUIRED_MSK)
+               IWL_ERR(priv, "TODO:  Implement Tx ABORT REQUIRED!!!\n");
+}
+
+
+
+/*****************************************************************************
+ *
+ * Intel PRO/Wireless 3945ABG/BG Network Connection
+ *
+ *  RX handler implementations
+ *
+ *****************************************************************************/
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
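+/* Accumulate deltas of the monotonically increasing statistics counters and
+ * track the largest per-interval delta of each field, for debugfs reporting. */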
+static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
+                                           __le32 *stats)
+{
+       int i;
+       __le32 *prev_stats;
+       u32 *accum_stats;
+       u32 *delta, *max_delta;
+
+       prev_stats = (__le32 *)&priv->_3945.statistics;
+       accum_stats = (u32 *)&priv->_3945.accum_statistics;
+       delta = (u32 *)&priv->_3945.delta_statistics;
+       max_delta = (u32 *)&priv->_3945.max_delta;
+
+       for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics);
+            i += sizeof(__le32), stats++, prev_stats++, delta++,
+            max_delta++, accum_stats++) {
+               if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+                       *delta = (le32_to_cpu(*stats) -
+                               le32_to_cpu(*prev_stats));
+                       *accum_stats += *delta;
+                       if (*delta > *max_delta)
+                               *max_delta = *delta;
+               }
+       }
+
+       /* reset accumulative statistics for "no-counter" type statistics */
+       priv->_3945.accum_statistics.general.temperature =
+               priv->_3945.statistics.general.temperature;
+       priv->_3945.accum_statistics.general.ttl_timestamp =
+               priv->_3945.statistics.general.ttl_timestamp;
+}
+#endif
+
+void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
+               struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+       IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
+                    (int)sizeof(struct iwl3945_notif_statistics),
+                    le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+       iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
+#endif
+       iwl_legacy_recover_from_statistics(priv, pkt);
+
+       memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
+}
+
+void iwl3945_reply_statistics(struct iwl_priv *priv,
+                             struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       __le32 *flag = (__le32 *)&pkt->u.raw;
+
+       if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+               memset(&priv->_3945.accum_statistics, 0,
+                       sizeof(struct iwl3945_notif_statistics));
+               memset(&priv->_3945.delta_statistics, 0,
+                       sizeof(struct iwl3945_notif_statistics));
+               memset(&priv->_3945.max_delta, 0,
+                       sizeof(struct iwl3945_notif_statistics));
+#endif
+               IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
+       }
+       iwl3945_hw_rx_statistics(priv, rxb);
+}
+
+
+/******************************************************************************
+ *
+ * Misc. internal state and helper functions
+ *
+ ******************************************************************************/
+
+/* This is necessary only for a number of statistics, see the caller. */
+static int iwl3945_is_network_packet(struct iwl_priv *priv,
+               struct ieee80211_hdr *header)
+{
+       /* Filter incoming packets to determine if they are targeted toward
+        * this network, discarding packets coming from ourselves */
+       switch (priv->iw_mode) {
+       case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source    | BSSID */
+               /* packets to our IBSS update information */
+               return !compare_ether_addr(header->addr3, priv->bssid);
+       case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
+               /* packets from our AP update information */
+               return !compare_ether_addr(header->addr2, priv->bssid);
+       default:
+               return 1;
+       }
+}
+
+static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
+                                  struct iwl_rx_mem_buffer *rxb,
+                                  struct ieee80211_rx_status *stats)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
+       struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
+       struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
+       u16 len = le16_to_cpu(rx_hdr->len);
+       struct sk_buff *skb;
+       __le16 fc = hdr->frame_control;
+
+       /* Sanity check: the frame must fit within the Rx buffer */
+       if (unlikely(len + IWL39_RX_FRAME_SIZE >
+                    PAGE_SIZE << priv->hw_params.rx_page_order)) {
+               IWL_DEBUG_DROP(priv, "Corruption detected!\n");
+               return;
+       }
+
+       /* We only process data packets if the interface is open */
+       if (unlikely(!priv->is_open)) {
+               IWL_DEBUG_DROP_LIMIT(priv,
+                       "Dropping packet while interface is not open.\n");
+               return;
+       }
+
+       skb = dev_alloc_skb(128);
+       if (!skb) {
+               IWL_ERR(priv, "dev_alloc_skb failed\n");
+               return;
+       }
+
+       if (!iwl3945_mod_params.sw_crypto)
+               iwl_legacy_set_decrypted_flag(priv,
+                                      (struct ieee80211_hdr *)rxb_addr(rxb),
+                                      le32_to_cpu(rx_end->status), stats);
+
+       skb_add_rx_frag(skb, 0, rxb->page,
+                       (void *)rx_hdr->payload - (void *)pkt, len);
+
+       iwl_legacy_update_stats(priv, false, fc, len);
+       memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+       ieee80211_rx(priv->hw, skb);
+       priv->alloc_rxb_page--;
+       rxb->page = NULL;
+}
+
+#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
+
+static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
+                               struct iwl_rx_mem_buffer *rxb)
+{
+       struct ieee80211_hdr *header;
+       struct ieee80211_rx_status rx_status;
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
+       struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
+       struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
+       u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
+       u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff);
+       u8 network_packet;
+
+       rx_status.flag = 0;
+       rx_status.mactime = le64_to_cpu(rx_end->timestamp);
+       rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+                               IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+       rx_status.freq =
+               ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
+                                              rx_status.band);
+
+       rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
+       if (rx_status.band == IEEE80211_BAND_5GHZ)
+               rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
+
+       rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) &
+                                       RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
+
+       /* set the preamble flag if appropriate */
+       if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+               rx_status.flag |= RX_FLAG_SHORTPRE;
+
+       if ((unlikely(rx_stats->phy_count > 20))) {
+               IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
+                               rx_stats->phy_count);
+               return;
+       }
+
+       if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR)
+           || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+               IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
+               return;
+       }
+
+
+
+       /* Convert 3945's rssi indicator to dBm */
+       rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;
+
+       IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n",
+                       rx_status.signal, rx_stats_sig_avg,
+                       rx_stats_noise_diff);
+
+       header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
+
+       network_packet = iwl3945_is_network_packet(priv, header);
+
+       IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
+                             network_packet ? '*' : ' ',
+                             le16_to_cpu(rx_hdr->channel),
+                             rx_status.signal, rx_status.signal,
+                             rx_status.rate_idx);
+
+       iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len),
+                                               header);
+
+       if (network_packet) {
+               priv->_3945.last_beacon_time =
+                       le32_to_cpu(rx_end->beacon_timestamp);
+               priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
+               priv->_3945.last_rx_rssi = rx_status.signal;
+       }
+
+       iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
+}
+
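+/**
+ * iwl3945_hw_txq_attach_buf_to_tfd - Attach one DMA buffer to a TFD
+ *
+ * Appends the (addr, len) pair to the TFD at the queue's write pointer;
+ * a TFD holds at most NUM_TFD_CHUNKS buffers.
+ */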
+int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+                                    struct iwl_tx_queue *txq,
+                                    dma_addr_t addr, u16 len, u8 reset, u8 pad)
+{
+       int count;
+       struct iwl_queue *q;
+       struct iwl3945_tfd *tfd, *tfd_tmp;
+
+       q = &txq->q;
+       tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
+       tfd = &tfd_tmp[q->write_ptr];
+
+       if (reset)
+               memset(tfd, 0, sizeof(*tfd));
+
+       count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
+
+       if ((count >= NUM_TFD_CHUNKS) || (count < 0)) {
+               IWL_ERR(priv, "Error can not send more than %d chunks\n",
+                         NUM_TFD_CHUNKS);
+               return -EINVAL;
+       }
+
+       tfd->tbs[count].addr = cpu_to_le32(addr);
+       tfd->tbs[count].len = cpu_to_le32(len);
+
+       count++;
+
+       tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) |
+                                        TFD_CTL_PAD_SET(pad));
+
+       return 0;
+}
+
+/**
+ * iwl3945_hw_txq_free_tfd - Free one TFD, those at index [txq->q.read_ptr]
+ *
+ * Does NOT advance any indexes
+ */
+void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+       struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
+       int index = txq->q.read_ptr;
+       struct iwl3945_tfd *tfd = &tfd_tmp[index];
+       struct pci_dev *dev = priv->pci_dev;
+       int i;
+       int counter;
+
+       /* sanity check */
+       counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
+       if (counter > NUM_TFD_CHUNKS) {
+               IWL_ERR(priv, "Too many chunks: %i\n", counter);
+               /* @todo issue fatal error, it is quite serious situation */
+               return;
+       }
+
+       /* Unmap tx_cmd */
+       if (counter)
+               pci_unmap_single(dev,
+                               dma_unmap_addr(&txq->meta[index], mapping),
+                               dma_unmap_len(&txq->meta[index], len),
+                               PCI_DMA_TODEVICE);
+
+       /* unmap chunks if any */
+
+       for (i = 1; i < counter; i++)
+               pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
+                        le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE);
+
+       /* free SKB */
+       if (txq->txb) {
+               struct sk_buff *skb;
+
+               skb = txq->txb[txq->q.read_ptr].skb;
+
+               /* can be called from irqs-disabled context */
+               if (skb) {
+                       dev_kfree_skb_any(skb);
+                       txq->txb[txq->q.read_ptr].skb = NULL;
+               }
+       }
+}
+
+/**
+ * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD
+ */
+void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
+                                 struct iwl_device_cmd *cmd,
+                                 struct ieee80211_tx_info *info,
+                                 struct ieee80211_hdr *hdr,
+                                 int sta_id, int tx_id)
+{
+       u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value;
+       u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT_3945);
+       u16 rate_mask;
+       int rate;
+       u8 rts_retry_limit;
+       u8 data_retry_limit;
+       __le32 tx_flags;
+       __le16 fc = hdr->frame_control;
+       struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
+
+       rate = iwl3945_rates[rate_index].plcp;
+       tx_flags = tx_cmd->tx_flags;
+
+       /* We need to figure out how to get the sta->supp_rates while
+        * in this running context */
+       rate_mask = IWL_RATES_MASK_3945;
+
+       /* Set retry limit on DATA packets and Probe Responses */
+       if (ieee80211_is_probe_resp(fc))
+               data_retry_limit = 3;
+       else
+               data_retry_limit = IWL_DEFAULT_TX_RETRY;
+       tx_cmd->data_retry_limit = data_retry_limit;
+
+       if (tx_id >= IWL39_CMD_QUEUE_NUM)
+               rts_retry_limit = 3;
+       else
+               rts_retry_limit = 7;
+
+       if (data_retry_limit < rts_retry_limit)
+               rts_retry_limit = data_retry_limit;
+       tx_cmd->rts_retry_limit = rts_retry_limit;
+
+       tx_cmd->rate = rate;
+       tx_cmd->tx_flags = tx_flags;
+
+       /* OFDM */
+       tx_cmd->supp_rates[0] =
+          ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;
+
+       /* CCK */
+       tx_cmd->supp_rates[1] = (rate_mask & 0xF);
+
+       IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
+                      "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
+                      tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags),
+                      tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
+}
+
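+/* Asynchronously tell the uCode to use @tx_rate for this station's entry in
+ * the station table. */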
+static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
+{
+       unsigned long flags_spin;
+       struct iwl_station_entry *station;
+
+       if (sta_id == IWL_INVALID_STATION)
+               return IWL_INVALID_STATION;
+
+       spin_lock_irqsave(&priv->sta_lock, flags_spin);
+       station = &priv->stations[sta_id];
+
+       station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
+       station->sta.rate_n_flags = cpu_to_le16(tx_rate);
+       station->sta.mode = STA_CONTROL_MODIFY_MSK;
+       iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC);
+       spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+
+       IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
+                       sta_id, tx_rate);
+       return sta_id;
+}
+
+static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do
+
+               if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
+                       iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+                                       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+                                       ~APMG_PS_CTRL_MSK_PWR_SRC);
+
+                       iwl_poll_bit(priv, CSR_GPIO_IN,
+                                    CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
+                                    CSR_GPIO_IN_BIT_AUX_POWER, 5000);
+               }
+ */
+
+       iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+                       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+                       ~APMG_PS_CTRL_MSK_PWR_SRC);
+
+       iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
+                    CSR_GPIO_IN_BIT_AUX_POWER, 5000);  /* uS */
+}
+
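+/* Program Rx DMA channel 0: RBD ring base, status write-back address, and
+ * the Rx config register (DMA enabled, 128-byte fragments, IRQ to host). */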
+static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+       iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
+       iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0),
+                                       rxq->rb_stts_dma);
+       iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
+       iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0),
+               FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
+               FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
+               FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
+               FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
+               (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
+               FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
+               (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
+               FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
+
+       /* fake read to flush all prev I/O */
+       iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL);
+
+       return 0;
+}
+
+static int iwl3945_tx_reset(struct iwl_priv *priv)
+{
+
+       /* bypass mode */
+       iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
+
+       /* RA 0 is active */
+       iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);
+
+       /* all 6 fifo are active */
+       iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);
+
+       iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
+       iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
+       iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
+       iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
+
+       iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE,
+                            priv->_3945.shared_phys);
+
+       iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
+               FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
+               FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
+               FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
+               FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
+               FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
+               FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
+               FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
+
+
+       return 0;
+}
+
+/**
+ * iwl3945_txq_ctx_reset - Reset TX queue context
+ *
+ * Destroys all DMA structures and initializes them again
+ */
+static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
+{
+       int rc;
+       int txq_id, slots_num;
+
+       iwl3945_hw_txq_ctx_free(priv);
+
+       /* allocate tx queue structure */
+       rc = iwl_legacy_alloc_txq_mem(priv);
+       if (rc)
+               return rc;
+
+       /* Tx CMD queue */
+       rc = iwl3945_tx_reset(priv);
+       if (rc)
+               goto error;
+
+       /* Tx queue(s) */
+       for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+               slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
+                               TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+               rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id],
+                                               slots_num, txq_id);
+               if (rc) {
+                       IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
+                       goto error;
+               }
+       }
+
+       return rc;
+
+ error:
+       iwl3945_hw_txq_ctx_free(priv);
+       return rc;
+}
+
+
+/*
+ * Start up 3945's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
+ * NOTE:  This does not load uCode nor start the embedded processor
+ */
+static int iwl3945_apm_init(struct iwl_priv *priv)
+{
+       int ret = iwl_legacy_apm_init(priv);
+
+       /* Clear APMG (NIC's internal power management) interrupts */
+       iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
+       iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
+
+       /* Reset radio chip */
+       iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG,
+                               APMG_PS_CTRL_VAL_RESET_REQ);
+       udelay(5);
+       iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG,
+                               APMG_PS_CTRL_VAL_RESET_REQ);
+
+       return ret;
+}
+
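+/* Latch board/radio type information from the PCI revision ID and EEPROM
+ * into CSR_HW_IF_CONFIG_REG. */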
+static void iwl3945_nic_config(struct iwl_priv *priv)
+{
+       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
+       unsigned long flags;
+       u8 rev_id = priv->pci_dev->revision;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* Determine HW type */
+       IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
+
+       if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
+               IWL_DEBUG_INFO(priv, "RTP type\n");
+       else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
+               IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
+               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+                           CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
+       } else {
+               IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n");
+               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+                           CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
+       }
+
+       if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
+               IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n");
+               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+                           CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
+       } else
+               IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n");
+
+       if ((eeprom->board_revision & 0xF0) == 0xD0) {
+               IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
+                              eeprom->board_revision);
+               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+                           CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
+       } else {
+               IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
+                              eeprom->board_revision);
+               iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
+                             CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
+       }
+
+       if (eeprom->almgor_m_version <= 1) {
+               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+                           CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
+               IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n",
+                              eeprom->almgor_m_version);
+       } else {
+               IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n",
+                              eeprom->almgor_m_version);
+               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+                           CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
+       }
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
+               IWL_DEBUG_RF_KILL(priv, "SW RF KILL supported in EEPROM.\n");
+
+       if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
+               IWL_DEBUG_RF_KILL(priv, "HW RF KILL supported in EEPROM.\n");
+}
+
+int iwl3945_hw_nic_init(struct iwl_priv *priv)
+{
+       int rc;
+       unsigned long flags;
+       struct iwl_rx_queue *rxq = &priv->rxq;
+
+       spin_lock_irqsave(&priv->lock, flags);
+       priv->cfg->ops->lib->apm_ops.init(priv);
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       iwl3945_set_pwr_vmain(priv);
+
+       priv->cfg->ops->lib->apm_ops.config(priv);
+
+       /* Allocate the RX queue, or reset if it is already allocated */
+       if (!rxq->bd) {
+               rc = iwl_legacy_rx_queue_alloc(priv);
+               if (rc) {
+                       IWL_ERR(priv, "Unable to initialize Rx queue\n");
+                       return -ENOMEM;
+               }
+       } else
+               iwl3945_rx_queue_reset(priv, rxq);
+
+       iwl3945_rx_replenish(priv);
+
+       iwl3945_rx_init(priv, rxq);
+
+
+       /* Look at using this instead:
+       rxq->need_update = 1;
+       iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
+       */
+
+       iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
+
+       rc = iwl3945_txq_ctx_reset(priv);
+       if (rc)
+               return rc;
+
+       set_bit(STATUS_INIT, &priv->status);
+
+       return 0;
+}
+
+/**
+ * iwl3945_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
+{
+       int txq_id;
+
+       /* Tx queues */
+       if (priv->txq)
+               for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
+                    txq_id++)
+                       if (txq_id == IWL39_CMD_QUEUE_NUM)
+                               iwl_legacy_cmd_queue_free(priv);
+                       else
+                               iwl_legacy_tx_queue_free(priv, txq_id);
+
+       /* free tx queue structure */
+       iwl_legacy_txq_mem(priv);
+}
+
+void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
+{
+       int txq_id;
+
+       /* stop SCD */
+       iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0);
+       iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
+
+       /* reset TFD queues */
+       for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+               iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
+               iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
+                               FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
+                               1000);
+       }
+
+       iwl3945_hw_txq_ctx_free(priv);
+}
+
+/**
+ * iwl3945_hw_reg_adjust_power_by_temp - Return index delta into power gain
+ * settings table
+ */
+static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
+{
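+       /* roughly -11 gain-table steps per 100 units of temperature
+        * increase (integer arithmetic) */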
+       return (new_reading - old_reading) * (-11) / 100;
+}
+
+/**
+ * iwl3945_hw_reg_temp_out_of_range - Keep temperature in sane range
+ */
+static inline int iwl3945_hw_reg_temp_out_of_range(int temperature)
+{
+       return ((temperature < -260) || (temperature > 25)) ? 1 : 0;
+}
+
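+/* Raw (uncalibrated) temperature reading from the uCode GP2 register */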
+int iwl3945_hw_get_temperature(struct iwl_priv *priv)
+{
+       return iwl_read32(priv, CSR_UCODE_DRV_GP2);
+}
+
+/**
+ * iwl3945_hw_reg_txpower_get_temperature - Get the current temperature by
+ * reading from the NIC
+ */
+static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
+{
+       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
+       int temperature;
+
+       temperature = iwl3945_hw_get_temperature(priv);
+
+       /* driver's okay range is -260 to +25.
+        *   human readable okay range is 0 to +285 */
+       IWL_DEBUG_INFO(priv, "Temperature: %d\n", temperature + IWL_TEMP_CONVERT);
+
+       /* handle insane temp reading */
+       if (iwl3945_hw_reg_temp_out_of_range(temperature)) {
+               IWL_ERR(priv, "Error bad temperature value  %d\n", temperature);
+
+               /* if really really hot(?),
+                *   substitute the 3rd band/group's temp measured at factory */
+               if (priv->last_temperature > 100)
+                       temperature = eeprom->groups[2].temperature;
+               else /* else use most recent "sane" value from driver */
+                       temperature = priv->last_temperature;
+       }
+
+       return temperature;     /* raw, not "human readable" */
+}
+
+/* Adjust Txpower only if temperature variance is greater than threshold.
+ *
+ * This is lower than older versions' 9 degrees */
+#define IWL_TEMPERATURE_LIMIT_TIMER   6
+
+/**
+ * iwl3945_is_temp_calib_needed - determines if new calibration is needed
+ *
+ * records new temperature in tx_mgr->temperature.
+ * replaces tx_mgr->last_temperature *only* if calib needed
+ *    (assumes caller will actually do the calibration!). */
+static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv)
+{
+       int temp_diff;
+
+       priv->temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
+       temp_diff = priv->temperature - priv->last_temperature;
+
+       /* get absolute value */
+       if (temp_diff < 0) {
+               IWL_DEBUG_POWER(priv, "Getting cooler, delta %d,\n", temp_diff);
+               temp_diff = -temp_diff;
+       } else if (temp_diff == 0)
+               IWL_DEBUG_POWER(priv, "Same temp,\n");
+       else
+               IWL_DEBUG_POWER(priv, "Getting warmer, delta %d,\n", temp_diff);
+
+       /* if we don't need calibration, *don't* update last_temperature */
+       if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) {
+               IWL_DEBUG_POWER(priv, "Timed thermal calib not needed\n");
+               return 0;
+       }
+
+       IWL_DEBUG_POWER(priv, "Timed thermal calib needed\n");
+
+       /* assume that caller will actually do calib ...
+        *   update the "last temperature" value */
+       priv->last_temperature = priv->temperature;
+       return 1;
+}
+
+#define IWL_MAX_GAIN_ENTRIES 78
+#define IWL_CCK_FROM_OFDM_POWER_DIFF  -5
+#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10)
+
+/* radio and DSP power table, each step is 1/2 dB.
+ * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */
+static struct iwl3945_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = {
+       {
+        {251, 127},            /* 2.4 GHz, highest power */
+        {251, 127},
+        {251, 127},
+        {251, 127},
+        {251, 125},
+        {251, 110},
+        {251, 105},
+        {251, 98},
+        {187, 125},
+        {187, 115},
+        {187, 108},
+        {187, 99},
+        {243, 119},
+        {243, 111},
+        {243, 105},
+        {243, 97},
+        {243, 92},
+        {211, 106},
+        {211, 100},
+        {179, 120},
+        {179, 113},
+        {179, 107},
+        {147, 125},
+        {147, 119},
+        {147, 112},
+        {147, 106},
+        {147, 101},
+        {147, 97},
+        {147, 91},
+        {115, 107},
+        {235, 121},
+        {235, 115},
+        {235, 109},
+        {203, 127},
+        {203, 121},
+        {203, 115},
+        {203, 108},
+        {203, 102},
+        {203, 96},
+        {203, 92},
+        {171, 110},
+        {171, 104},
+        {171, 98},
+        {139, 116},
+        {227, 125},
+        {227, 119},
+        {227, 113},
+        {227, 107},
+        {227, 101},
+        {227, 96},
+        {195, 113},
+        {195, 106},
+        {195, 102},
+        {195, 95},
+        {163, 113},
+        {163, 106},
+        {163, 102},
+        {163, 95},
+        {131, 113},
+        {131, 106},
+        {131, 102},
+        {131, 95},
+        {99, 113},
+        {99, 106},
+        {99, 102},
+        {99, 95},
+        {67, 113},
+        {67, 106},
+        {67, 102},
+        {67, 95},
+        {35, 113},
+        {35, 106},
+        {35, 102},
+        {35, 95},
+        {3, 113},
+        {3, 106},
+        {3, 102},
+        {3, 95} },             /* 2.4 GHz, lowest power */
+       {
+        {251, 127},            /* 5.x GHz, highest power */
+        {251, 120},
+        {251, 114},
+        {219, 119},
+        {219, 101},
+        {187, 113},
+        {187, 102},
+        {155, 114},
+        {155, 103},
+        {123, 117},
+        {123, 107},
+        {123, 99},
+        {123, 92},
+        {91, 108},
+        {59, 125},
+        {59, 118},
+        {59, 109},
+        {59, 102},
+        {59, 96},
+        {59, 90},
+        {27, 104},
+        {27, 98},
+        {27, 92},
+        {115, 118},
+        {115, 111},
+        {115, 104},
+        {83, 126},
+        {83, 121},
+        {83, 113},
+        {83, 105},
+        {83, 99},
+        {51, 118},
+        {51, 111},
+        {51, 104},
+        {51, 98},
+        {19, 116},
+        {19, 109},
+        {19, 102},
+        {19, 98},
+        {19, 93},
+        {171, 113},
+        {171, 107},
+        {171, 99},
+        {139, 120},
+        {139, 113},
+        {139, 107},
+        {139, 99},
+        {107, 120},
+        {107, 113},
+        {107, 107},
+        {107, 99},
+        {75, 120},
+        {75, 113},
+        {75, 107},
+        {75, 99},
+        {43, 120},
+        {43, 113},
+        {43, 107},
+        {43, 99},
+        {11, 120},
+        {11, 113},
+        {11, 107},
+        {11, 99},
+        {131, 107},
+        {131, 99},
+        {99, 120},
+        {99, 113},
+        {99, 107},
+        {99, 99},
+        {67, 120},
+        {67, 113},
+        {67, 107},
+        {67, 99},
+        {35, 120},
+        {35, 113},
+        {35, 107},
+        {35, 99},
+        {3, 120} }             /* 5.x GHz, lowest power */
+};
+
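+/* Clamp a (possibly negative) gain-table index into [0, IWL_MAX_GAIN_ENTRIES - 1] */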
+static inline u8 iwl3945_hw_reg_fix_power_index(int index)
+{
+       if (index < 0)
+               return 0;
+       if (index >= IWL_MAX_GAIN_ENTRIES)
+               return IWL_MAX_GAIN_ENTRIES - 1;
+       return (u8) index;
+}
+
+/* Kick off thermal recalibration check every 60 seconds */
+#define REG_RECALIB_PERIOD (60)
+
+/**
+ * iwl3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
+ *
+ * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
+ * or 6 Mbit (OFDM) rates.
+ */
+static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index,
+                              s32 rate_index, const s8 *clip_pwrs,
+                              struct iwl_channel_info *ch_info,
+                              int band_index)
+{
+       struct iwl3945_scan_power_info *scan_power_info;
+       s8 power;
+       u8 power_index;
+
+       scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index];
+
+       /* use this channel group's 6Mbit clipping/saturation pwr,
+        *   but cap at regulatory scan power restriction (set during init
+        *   based on eeprom channel data) for this channel.  */
+       power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);
+
+       power = min(power, priv->tx_power_user_lmt);
+       scan_power_info->requested_power = power;
+
+       /* find difference between new scan *power* and current "normal"
+        *   Tx *power* for 6Mb.  Use this difference (x2) to adjust the
+        *   current "normal" temperature-compensated Tx power *index* for
+        *   this rate (1Mb or 6Mb) to yield new temp-compensated scan power
+        *   *index*. */
+       power_index = ch_info->power_info[rate_index].power_table_index
+           - (power - ch_info->power_info
+              [IWL_RATE_6M_INDEX_TABLE].requested_power) * 2;
+
+       /* store reference index that we use when adjusting *all* scan
+        *   powers.  So we can accommodate user (all channel) or spectrum
+        *   management (single channel) power changes "between" temperature
+        *   feedback compensation procedures.
+        * don't force fit this reference index into gain table; it may be a
+        *   negative number.  This will help avoid errors when we're at
+        *   the lower bounds (highest gains, for warmest temperatures)
+        *   of the table. */
+
+       /* don't exceed table bounds for "real" setting */
+       power_index = iwl3945_hw_reg_fix_power_index(power_index);
+
+       scan_power_info->power_table_index = power_index;
+       scan_power_info->tpc.tx_gain =
+           power_gain_table[band_index][power_index].tx_gain;
+       scan_power_info->tpc.dsp_atten =
+           power_gain_table[band_index][power_index].dsp_atten;
+}
+
+/**
+ * iwl3945_send_tx_power - fill in Tx Power command with gain settings
+ *
+ * Configures power settings for all rates for the current channel,
+ * using values from the channel info struct, and sends them to the NIC.
+ */
+static int iwl3945_send_tx_power(struct iwl_priv *priv)
+{
+       int rate_idx, i;
+       const struct iwl_channel_info *ch_info = NULL;
+       struct iwl3945_txpowertable_cmd txpower = {
+               .channel = priv->contexts[IWL_RXON_CTX_BSS].active.channel,
+       };
+       u16 chan;
+
+       if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
+                     "TX Power requested while scanning!\n"))
+               return -EAGAIN;
+
+       chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
+
+       txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
+       ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan);
+       if (!ch_info) {
+               IWL_ERR(priv,
+                       "Failed to get channel info for channel %d [%d]\n",
+                       chan, priv->band);
+               return -EINVAL;
+       }
+
+       if (!iwl_legacy_is_channel_valid(ch_info)) {
+               IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on "
+                               "non-Tx channel.\n");
+               return 0;
+       }
+
+       /* fill cmd with power settings for all rates for current channel */
+       /* Fill OFDM rate */
+       for (rate_idx = IWL_FIRST_OFDM_RATE, i = 0;
+            rate_idx <= IWL39_LAST_OFDM_RATE; rate_idx++, i++) {
+
+               txpower.power[i].tpc = ch_info->power_info[i].tpc;
+               txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
+
+               IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
+                               le16_to_cpu(txpower.channel),
+                               txpower.band,
+                               txpower.power[i].tpc.tx_gain,
+                               txpower.power[i].tpc.dsp_atten,
+                               txpower.power[i].rate);
+       }
+       /* Fill CCK rates */
+       for (rate_idx = IWL_FIRST_CCK_RATE;
+            rate_idx <= IWL_LAST_CCK_RATE; rate_idx++, i++) {
+               txpower.power[i].tpc = ch_info->power_info[i].tpc;
+               txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
+
+               IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
+                               le16_to_cpu(txpower.channel),
+                               txpower.band,
+                               txpower.power[i].tpc.tx_gain,
+                               txpower.power[i].tpc.dsp_atten,
+                               txpower.power[i].rate);
+       }
+
+       return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
+                               sizeof(struct iwl3945_txpowertable_cmd),
+                               &txpower);
+
+}
+
+/**
+ * iwl3945_hw_reg_set_new_power - Configures power tables at new levels
+ * @ch_info: Channel to update.  Uses power_info.requested_power.
+ *
+ * Replace requested_power and base_power_index ch_info fields for
+ * one channel.
+ *
+ * Called if user or spectrum management changes power preferences.
+ * Takes into account h/w and modulation limitations (clip power).
+ *
+ * This does *not* send anything to NIC, just sets up ch_info for one channel.
+ *
+ * NOTE: iwl3945_hw_reg_comp_txpower_temp() *must* be run after this to
+ *      properly fill out the scan powers and actual h/w gain settings,
+ *      and send the changes to the NIC.
+ */
+static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv,
+                            struct iwl_channel_info *ch_info)
+{
+       struct iwl3945_channel_power_info *power_info;
+       int power_changed = 0;
+       int i;
+       const s8 *clip_pwrs;
+       int power;
+
+       /* Get this chnlgrp's rate-to-max/clip-powers table */
+       clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
+
+       /* Get this channel's rate-to-current-power settings table */
+       power_info = ch_info->power_info;
+
+       /* update OFDM Txpower settings */
+       for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE;
+            i++, ++power_info) {
+               int delta_idx;
+
+               /* limit new power to be no more than h/w capability */
+               power = min(ch_info->curr_txpow, clip_pwrs[i]);
+               if (power == power_info->requested_power)
+                       continue;
+
+               /* find difference between old and new requested powers,
+                *    update base (non-temp-compensated) power index */
+               delta_idx = (power - power_info->requested_power) * 2;
+               power_info->base_power_index -= delta_idx;
+
+               /* save new requested power value */
+               power_info->requested_power = power;
+
+               power_changed = 1;
+       }
+
+       /* update CCK Txpower settings, based on OFDM 12M setting ...
+        *    ... all CCK power settings for a given channel are the *same*. */
+       if (power_changed) {
+               power =
+                   ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
+                   requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF;
+
+               /* do all CCK rates' iwl3945_channel_power_info structures */
+               for (i = IWL_RATE_1M_INDEX_TABLE; i <= IWL_RATE_11M_INDEX_TABLE; i++) {
+                       power_info->requested_power = power;
+                       power_info->base_power_index =
+                           ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
+                           base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF;
+                       ++power_info;
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * iwl3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
+ *
+ * NOTE: Returned power limit may be less (but not more) than requested,
+ *      based strictly on regulatory (eeprom and spectrum mgt) limitations
+ *      (no consideration for h/w clipping limitations).
+ */
+static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info)
+{
+       s8 max_power;
+
+#if 0
+       /* if we're using TGd limits, use lower of TGd or EEPROM */
+       if (ch_info->tgd_data.max_power != 0)
+               max_power = min(ch_info->tgd_data.max_power,
+                               ch_info->eeprom.max_power_avg);
+
+       /* else just use EEPROM limits */
+       else
+#endif
+               max_power = ch_info->eeprom.max_power_avg;
+
+       return min(max_power, ch_info->max_power_avg);
+}
+
+/**
+ * iwl3945_hw_reg_comp_txpower_temp - Compensate for temperature
+ *
+ * Compensate txpower settings of *all* channels for temperature.
+ * This only accounts for the difference between current temperature
+ *   and the factory calibration temperatures, and bases the new settings
+ *   on the channel's base_power_index.
+ *
+ * If RxOn is "associated", this sends the new Txpower to NIC!
+ */
+static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
+{
+       struct iwl_channel_info *ch_info = NULL;
+       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
+       int delta_index;
+       const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
+       u8 a_band;
+       u8 rate_index;
+       u8 scan_tbl_index;
+       u8 i;
+       int ref_temp;
+       int temperature = priv->temperature;
+
+       if (priv->disable_tx_power_cal ||
+           test_bit(STATUS_SCANNING, &priv->status)) {
+               /* do not perform tx power calibration */
+               return 0;
+       }
+       /* set up new Tx power info for each and every channel, 2.4 and 5.x */
+       for (i = 0; i < priv->channel_count; i++) {
+               ch_info = &priv->channel_info[i];
+               a_band = iwl_legacy_is_channel_a_band(ch_info);
+
+               /* Get this chnlgrp's factory calibration temperature */
+               ref_temp = (s16)eeprom->groups[ch_info->group_index].
+                   temperature;
+
+               /* get power index adjustment based on current and factory
+                * temps */
+               delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
+                                                             ref_temp);
+
+               /* set tx power value for all rates, OFDM and CCK */
+               for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945;
+                    rate_index++) {
+                       int power_idx =
+                           ch_info->power_info[rate_index].base_power_index;
+
+                       /* temperature compensate */
+                       power_idx += delta_index;
+
+                       /* stay within table range */
+                       power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
+                       ch_info->power_info[rate_index].
+                           power_table_index = (u8) power_idx;
+                       ch_info->power_info[rate_index].tpc =
+                           power_gain_table[a_band][power_idx];
+               }
+
+               /* Get this chnlgrp's rate-to-max/clip-powers table */
+               clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
+
+               /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
+               for (scan_tbl_index = 0;
+                    scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
+                       s32 actual_index = (scan_tbl_index == 0) ?
+                           IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
+                       iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
+                                          actual_index, clip_pwrs,
+                                          ch_info, a_band);
+               }
+       }
+
+       /* send Txpower command for current channel to ucode */
+       return priv->cfg->ops->lib->send_tx_power(priv);
+}
+
+int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
+{
+       struct iwl_channel_info *ch_info;
+       s8 max_power;
+       u8 a_band;
+       u8 i;
+
+       if (priv->tx_power_user_lmt == power) {
+               IWL_DEBUG_POWER(priv, "Requested Tx power same as current "
+                               "limit: %ddBm.\n", power);
+               return 0;
+       }
+
+       IWL_DEBUG_POWER(priv, "Setting upper limit clamp to %ddBm.\n", power);
+       priv->tx_power_user_lmt = power;
+
+       /* set up new Tx powers for each and every channel, 2.4 and 5.x */
+
+       for (i = 0; i < priv->channel_count; i++) {
+               ch_info = &priv->channel_info[i];
+               a_band = iwl_legacy_is_channel_a_band(ch_info);
+
+               /* find minimum power of all user and regulatory constraints
+                *    (does not consider h/w clipping limitations) */
+               max_power = iwl3945_hw_reg_get_ch_txpower_limit(ch_info);
+               max_power = min(power, max_power);
+               if (max_power != ch_info->curr_txpow) {
+                       ch_info->curr_txpow = max_power;
+
+                       /* this considers the h/w clipping limitations */
+                       iwl3945_hw_reg_set_new_power(priv, ch_info);
+               }
+       }
+
+       /* update txpower settings for all channels,
+        *   send to NIC if associated. */
+       iwl3945_is_temp_calib_needed(priv);
+       iwl3945_hw_reg_comp_txpower_temp(priv);
+
+       return 0;
+}
+
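+/*
+ * iwl3945_send_rxon_assoc - send only the RXON_ASSOC subset of RXON
+ *
+ * Used when only the flags, filter flags or basic rates have changed, so a
+ * full RXON (and the HW tune it implies) can be avoided.  Returns 0 without
+ * sending anything if the staging values already match the active ones.
+ */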
+static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
+                                  struct iwl_rxon_context *ctx)
+{
+       int rc = 0;
+       struct iwl_rx_packet *pkt;
+       struct iwl3945_rxon_assoc_cmd rxon_assoc;
+       struct iwl_host_cmd cmd = {
+               .id = REPLY_RXON_ASSOC,
+               .len = sizeof(rxon_assoc),
+               .flags = CMD_WANT_SKB,
+               .data = &rxon_assoc,
+       };
+       const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
+       const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
+
+       if ((rxon1->flags == rxon2->flags) &&
+           (rxon1->filter_flags == rxon2->filter_flags) &&
+           (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
+           (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
+               IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC.  Not resending.\n");
+               return 0;
+       }
+
+       rxon_assoc.flags = ctx->staging.flags;
+       rxon_assoc.filter_flags = ctx->staging.filter_flags;
+       rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
+       rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
+       rxon_assoc.reserved = 0;
+
+       rc = iwl_legacy_send_cmd_sync(priv, &cmd);
+       if (rc)
+               return rc;
+
+       pkt = (struct iwl_rx_packet *)cmd.reply_page;
+       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+               IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
+               rc = -EIO;
+       }
+
+       iwl_legacy_free_pages(priv, cmd.reply_page);
+
+       return rc;
+}
+
+/**
+ * iwl3945_commit_rxon - commit staging_rxon to hardware
+ *
+ * The RXON command in staging_rxon is committed to the hardware and
+ * the active_rxon structure is updated with the new data.  This
+ * function correctly transitions out of the RXON_ASSOC_MSK state if
+ * a HW tune is required based on the RXON structure changes.
+ */
+int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+       /* cast away the const for active_rxon in this function */
+       struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active;
+       struct iwl3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
+       int rc = 0;
+       bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return -EINVAL;
+
+       if (!iwl_legacy_is_alive(priv))
+               return -1;
+
+       /* always get timestamp with Rx frame */
+       staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
+
+       /* select antenna */
+       staging_rxon->flags &=
+           ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
+       staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
+
+       rc = iwl_legacy_check_rxon_cmd(priv, ctx);
+       if (rc) {
+               IWL_ERR(priv, "Invalid RXON configuration.  Not committing.\n");
+               return -EINVAL;
+       }
+
+       /* If we don't need to send a full RXON, we can use
+        * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
+        * and other flags for the current radio configuration. */
+       if (!iwl_legacy_full_rxon_required(priv,
+                       &priv->contexts[IWL_RXON_CTX_BSS])) {
+               rc = iwl_legacy_send_rxon_assoc(priv,
+                                        &priv->contexts[IWL_RXON_CTX_BSS]);
+               if (rc) {
+                       IWL_ERR(priv, "Error setting RXON_ASSOC "
+                                 "configuration (%d).\n", rc);
+                       return rc;
+               }
+
+               memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
+
+               return 0;
+       }
+
+       /* If we are currently associated and the new config also requests
+        * association, we must clear the ASSOC bit from the active
+        * configuration before we apply the new config */
+       if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
+               IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
+               active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+
+               /*
+                * reserved4 and 5 could have been filled by the iwlcore code.
+                * Let's clear them before pushing to the 3945.
+                */
+               active_rxon->reserved4 = 0;
+               active_rxon->reserved5 = 0;
+               rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
+                                     sizeof(struct iwl3945_rxon_cmd),
+                                     &priv->contexts[IWL_RXON_CTX_BSS].active);
+
+               /* If the mask clearing failed then we set
+                * active_rxon back to what it was previously */
+               if (rc) {
+                       active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
+                       IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
+                                 "configuration (%d).\n", rc);
+                       return rc;
+               }
+               iwl_legacy_clear_ucode_stations(priv,
+                                        &priv->contexts[IWL_RXON_CTX_BSS]);
+               iwl_legacy_restore_stations(priv,
+                                        &priv->contexts[IWL_RXON_CTX_BSS]);
+       }
+
+       IWL_DEBUG_INFO(priv, "Sending RXON\n"
+                      "* with%s RXON_FILTER_ASSOC_MSK\n"
+                      "* channel = %d\n"
+                      "* bssid = %pM\n",
+                      (new_assoc ? "" : "out"),
+                      le16_to_cpu(staging_rxon->channel),
+                      staging_rxon->bssid_addr);
+
+       /*
+        * reserved4 and 5 could have been filled by the iwlcore code.
+        * Let's clear them before pushing to the 3945.
+        */
+       staging_rxon->reserved4 = 0;
+       staging_rxon->reserved5 = 0;
+
+       iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
+
+       /* Apply the new configuration */
+       rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
+                             sizeof(struct iwl3945_rxon_cmd),
+                             staging_rxon);
+       if (rc) {
+               IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
+               return rc;
+       }
+
+       memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
+
+       if (!new_assoc) {
+               iwl_legacy_clear_ucode_stations(priv,
+                                        &priv->contexts[IWL_RXON_CTX_BSS]);
+               iwl_legacy_restore_stations(priv,
+                                       &priv->contexts[IWL_RXON_CTX_BSS]);
+       }
+
+       /* If we issued a new RXON command that required a tune, we must
+        * send a new TXPOWER command or we won't be able to Tx any frames */
+       rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
+       if (rc) {
+               IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
+               return rc;
+       }
+
+       /* Init the hardware's rate fallback order based on the band */
+       rc = iwl3945_init_hw_rate_table(priv);
+       if (rc) {
+               IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/**
+ * iwl3945_reg_txpower_periodic - called when it is time to check our temperature.
+ *
+ * -- reset periodic timer
+ * -- see if temp has changed enough to warrant re-calibration ... if so:
+ *     -- correct coeffs for temp (can reset temp timer)
+ *     -- save this temp as "last",
+ *     -- send new set of gain settings to NIC
+ * NOTE:  This should continue working, even when we're not associated,
+ *   so we can keep our internal table of scan powers current. */
+void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
+{
+       /* This will kick in the "brute force"
+        * iwl3945_hw_reg_comp_txpower_temp() below */
+       if (!iwl3945_is_temp_calib_needed(priv))
+               goto reschedule;
+
+       /* Set up a new set of temp-adjusted TxPowers, send to NIC.
+        * This is based *only* on current temperature,
+        * ignoring any previous power measurements */
+       iwl3945_hw_reg_comp_txpower_temp(priv);
+
+ reschedule:
+       queue_delayed_work(priv->workqueue,
+                          &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ);
+}
+
+static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
+{
+       struct iwl_priv *priv = container_of(work, struct iwl_priv,
+                                            _3945.thermal_periodic.work);
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       mutex_lock(&priv->mutex);
+       iwl3945_reg_txpower_periodic(priv);
+       mutex_unlock(&priv->mutex);
+}
+
+/**
+ * iwl3945_hw_reg_get_ch_grp_index - find the channel-group index (0-4)
+ *                                for the channel.
+ *
+ * This function is used when initializing channel-info structs.
+ *
+ * NOTE: These channel groups do *NOT* match the bands above!
+ *      These channel groups are based on factory-tested channels;
+ *      on A-band, EEPROM's "group frequency" entries represent the top
+ *      channel in each of groups 1-4.  All B/G channels are in group 0.
+ */
+static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
+                                      const struct iwl_channel_info *ch_info)
+{
+       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
+       struct iwl3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
+       u8 group;
+       u16 group_index = 0;    /* based on factory calib frequencies */
+       u8 grp_channel;
+
+       /* Find the group index for the channel (A-band uses groups 1-4) */
+       if (iwl_legacy_is_channel_a_band(ch_info)) {
+               for (group = 1; group < 5; group++) {
+                       grp_channel = ch_grp[group].group_channel;
+                       if (ch_info->channel <= grp_channel) {
+                               group_index = group;
+                               break;
+                       }
+               }
+               /* group 4 has a few channels *above* its factory cal freq */
+               if (group == 5)
+                       group_index = 4;
+       } else
+               group_index = 0;        /* 2.4 GHz, group 0 */
+
+       IWL_DEBUG_POWER(priv, "Chnl %d mapped to grp %d\n", ch_info->channel,
+                       group_index);
+       return group_index;
+}
+
+/**
+ * iwl3945_hw_reg_get_matched_power_index - Interpolate to get nominal index
+ *
+ * Interpolate to get nominal (i.e. at factory calibration temperature) index
+ *   into radio/DSP gain settings table for requested power.
+ */
+static int iwl3945_hw_reg_get_matched_power_index(struct iwl_priv *priv,
+                                      s8 requested_power,
+                                      s32 setting_index, s32 *new_index)
+{
+       const struct iwl3945_eeprom_txpower_group *chnl_grp = NULL;
+       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
+       s32 index0, index1;
+       s32 power = 2 * requested_power;
+       s32 i;
+       const struct iwl3945_eeprom_txpower_sample *samples;
+       s32 gains0, gains1;
+       s32 res;
+       s32 denominator;
+
+       chnl_grp = &eeprom->groups[setting_index];
+       samples = chnl_grp->samples;
+       for (i = 0; i < 5; i++) {
+               if (power == samples[i].power) {
+                       *new_index = samples[i].gain_index;
+                       return 0;
+               }
+       }
+
+       if (power > samples[1].power) {
+               index0 = 0;
+               index1 = 1;
+       } else if (power > samples[2].power) {
+               index0 = 1;
+               index1 = 2;
+       } else if (power > samples[3].power) {
+               index0 = 2;
+               index1 = 3;
+       } else {
+               index0 = 3;
+               index1 = 4;
+       }
+
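+       /* Interpolate linearly, in fixed point with 19 fractional bits,
+        * between the two factory samples that bracket the requested power;
+        * adding (1 << 18) before the final shift rounds to the nearest gain
+        * index.  For example, samples (power 20, gain index 20) and
+        * (power 10, gain index 40) yield index 30 for a requested power
+        * of 15. */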
+       denominator = (s32) samples[index1].power - (s32) samples[index0].power;
+       if (denominator == 0)
+               return -EINVAL;
+       gains0 = (s32) samples[index0].gain_index * (1 << 19);
+       gains1 = (s32) samples[index1].gain_index * (1 << 19);
+       res = gains0 + (gains1 - gains0) *
+           ((s32) power - (s32) samples[index0].power) / denominator +
+           (1 << 18);
+       *new_index = res >> 19;
+       return 0;
+}
+
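+/*
+ * iwl3945_hw_reg_init_channel_groups - build per-group clip-power tables
+ *
+ * For each factory-calibrated channel group, derive the maximum ("clip")
+ * power for every rate from the group's EEPROM saturation power, with extra
+ * backoff for the 36/48/54 Mbit OFDM rates to leave headroom for modulation
+ * peaks.
+ */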
+static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
+{
+       u32 i;
+       s32 rate_index;
+       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
+       const struct iwl3945_eeprom_txpower_group *group;
+
+       IWL_DEBUG_POWER(priv, "Initializing factory calib info from EEPROM\n");
+
+       for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) {
+               s8 *clip_pwrs;  /* table of power levels for each rate */
+               s8 satur_pwr;   /* saturation power for each chnl group */
+               group = &eeprom->groups[i];
+
+               /* sanity check on factory saturation power value */
+               if (group->saturation_power < 40) {
+                       IWL_WARN(priv, "Error: saturation power is %d, "
+                                   "less than minimum expected 40\n",
+                                   group->saturation_power);
+                       return;
+               }
+
+               /*
+                * Derive requested power levels for each rate, based on
+                *   hardware capabilities (saturation power for band).
+                * Basic value is 3dB down from saturation, with further
+                *   power reductions for highest 3 data rates.  These
+                *   backoffs provide headroom for high rate modulation
+                *   power peaks, without too much distortion (clipping).
+                */
+               /* we'll fill in this array with h/w max power levels */
+               clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers;
+
+               /* divide factory saturation power by 2 to find -3dB level */
+               satur_pwr = (s8) (group->saturation_power >> 1);
+
+               /* fill in channel group's nominal powers for each rate */
+               for (rate_index = 0;
+                    rate_index < IWL_RATE_COUNT_3945; rate_index++, clip_pwrs++) {
+                       switch (rate_index) {
+                       case IWL_RATE_36M_INDEX_TABLE:
+                               if (i == 0)     /* B/G */
+                                       *clip_pwrs = satur_pwr;
+                               else    /* A */
+                                       *clip_pwrs = satur_pwr - 5;
+                               break;
+                       case IWL_RATE_48M_INDEX_TABLE:
+                               if (i == 0)
+                                       *clip_pwrs = satur_pwr - 7;
+                               else
+                                       *clip_pwrs = satur_pwr - 10;
+                               break;
+                       case IWL_RATE_54M_INDEX_TABLE:
+                               if (i == 0)
+                                       *clip_pwrs = satur_pwr - 9;
+                               else
+                                       *clip_pwrs = satur_pwr - 12;
+                               break;
+                       default:
+                               *clip_pwrs = satur_pwr;
+                               break;
+                       }
+               }
+       }
+}
+
+/**
+ * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
+ *
+ * Second pass (during init) to set up priv->channel_info
+ *
+ * Set up Tx-power settings in our channel info database for each VALID
+ * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
+ * and current temperature.
+ *
+ * Since this is based on current temperature (at init time), these values may
+ * not be valid for very long, but it gives us a starting/default point,
+ * and allows us to perform active (i.e. Tx-enabled) scans.
+ *
+ * This does *not* write values to NIC, just sets up our internal table.
+ */
+int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
+{
+       struct iwl_channel_info *ch_info = NULL;
+       struct iwl3945_channel_power_info *pwr_info;
+       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
+       int delta_index;
+       u8 rate_index;
+       u8 scan_tbl_index;
+       const s8 *clip_pwrs;    /* array of power levels for each rate */
+       u8 gain, dsp_atten;
+       s8 power;
+       u8 pwr_index, base_pwr_index, a_band;
+       u8 i;
+       int temperature;
+
+       /* save temperature reference,
+        *   so we can determine next time to calibrate */
+       temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
+       priv->last_temperature = temperature;
+
+       iwl3945_hw_reg_init_channel_groups(priv);
+
+       /* initialize Tx power info for each and every channel, 2.4 and 5.x */
+       for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
+            i++, ch_info++) {
+               a_band = iwl_legacy_is_channel_a_band(ch_info);
+               if (!iwl_legacy_is_channel_valid(ch_info))
+                       continue;
+
+               /* find this channel's channel group (*not* "band") index */
+               ch_info->group_index =
+                       iwl3945_hw_reg_get_ch_grp_index(priv, ch_info);
+
+               /* Get this chnlgrp's rate->max/clip-powers table */
+               clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
+
+               /* calculate power index *adjustment* value according to
+                *  diff between current temperature and factory temperature */
+               delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
+                               eeprom->groups[ch_info->group_index].
+                               temperature);
+
+               IWL_DEBUG_POWER(priv, "Delta index for channel %d: %d [%d]\n",
+                               ch_info->channel, delta_index, temperature +
+                               IWL_TEMP_CONVERT);
+
+               /* set tx power value for all OFDM rates */
+               for (rate_index = 0; rate_index < IWL_OFDM_RATES;
+                    rate_index++) {
+                       s32 uninitialized_var(power_idx);
+                       int rc;
+
+                       /* use channel group's clip-power table,
+                        *   but don't exceed channel's max power */
+                       s8 pwr = min(ch_info->max_power_avg,
+                                    clip_pwrs[rate_index]);
+
+                       pwr_info = &ch_info->power_info[rate_index];
+
+                       /* get base (i.e. at factory-measured temperature)
+                        *    power table index for this rate's power */
+                       rc = iwl3945_hw_reg_get_matched_power_index(priv, pwr,
+                                                        ch_info->group_index,
+                                                        &power_idx);
+                       if (rc) {
+                               IWL_ERR(priv, "Invalid power index\n");
+                               return rc;
+                       }
+                       pwr_info->base_power_index = (u8) power_idx;
+
+                       /* temperature compensate */
+                       power_idx += delta_index;
+
+                       /* stay within range of gain table */
+                       power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
+
+                       /* fill 1 OFDM rate's iwl3945_channel_power_info struct */
+                       pwr_info->requested_power = pwr;
+                       pwr_info->power_table_index = (u8) power_idx;
+                       pwr_info->tpc.tx_gain =
+                           power_gain_table[a_band][power_idx].tx_gain;
+                       pwr_info->tpc.dsp_atten =
+                           power_gain_table[a_band][power_idx].dsp_atten;
+               }
+
+               /* set tx power for CCK rates, based on OFDM 12 Mbit settings*/
+               pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX_TABLE];
+               power = pwr_info->requested_power +
+                       IWL_CCK_FROM_OFDM_POWER_DIFF;
+               pwr_index = pwr_info->power_table_index +
+                       IWL_CCK_FROM_OFDM_INDEX_DIFF;
+               base_pwr_index = pwr_info->base_power_index +
+                       IWL_CCK_FROM_OFDM_INDEX_DIFF;
+
+               /* stay within table range */
+               pwr_index = iwl3945_hw_reg_fix_power_index(pwr_index);
+               gain = power_gain_table[a_band][pwr_index].tx_gain;
+               dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten;
+
+               /* fill each CCK rate's iwl3945_channel_power_info structure
+                * NOTE:  All CCK-rate Txpwrs are the same for a given chnl!
+                * NOTE:  CCK rates start at end of OFDM rates! */
+               for (rate_index = 0;
+                    rate_index < IWL_CCK_RATES; rate_index++) {
+                       pwr_info = &ch_info->power_info[rate_index+IWL_OFDM_RATES];
+                       pwr_info->requested_power = power;
+                       pwr_info->power_table_index = pwr_index;
+                       pwr_info->base_power_index = base_pwr_index;
+                       pwr_info->tpc.tx_gain = gain;
+                       pwr_info->tpc.dsp_atten = dsp_atten;
+               }
+
+               /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
+               for (scan_tbl_index = 0;
+                    scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
+                       s32 actual_index = (scan_tbl_index == 0) ?
+                               IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
+                       iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
+                               actual_index, clip_pwrs, ch_info, a_band);
+               }
+       }
+
+       return 0;
+}
+
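+/* Disable the Rx DMA channel and poll for it to report idle */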
+int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
+{
+       int rc;
+
+       iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
+       rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
+                       FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+       if (rc < 0)
+               IWL_ERR(priv, "Can't stop Rx DMA.\n");
+
+       return 0;
+}
+
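+/*
+ * iwl3945_hw_tx_queue_init - attach a Tx queue to the flow handler
+ *
+ * Points the flow handler at the queue's TFD circular buffer in host DRAM
+ * and enables the corresponding Tx DMA channel.
+ */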
+int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+       int txq_id = txq->q.id;
+
+       struct iwl3945_shared *shared_data = priv->_3945.shared_virt;
+
+       shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
+
+       iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
+       iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
+
+       iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
+               FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
+               FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
+               FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
+               FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
+               FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
+
+       /* fake read to flush all prev. writes */
+       iwl_read32(priv, FH39_TSSR_CBB_BASE);
+
+       return 0;
+}
+
+/*
+ * HCMD utils
+ */
+static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
+{
+       switch (cmd_id) {
+       case REPLY_RXON:
+               return sizeof(struct iwl3945_rxon_cmd);
+       case POWER_TABLE_CMD:
+               return sizeof(struct iwl3945_powertable_cmd);
+       default:
+               return len;
+       }
+}
+
+
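+/* Repack the generic legacy ADD_STA command into the 3945-specific layout */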
+static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
+                                                               u8 *data)
+{
+       struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
+       addsta->mode = cmd->mode;
+       memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
+       memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
+       addsta->station_flags = cmd->station_flags;
+       addsta->station_flags_msk = cmd->station_flags_msk;
+       addsta->tid_disable_tx = cpu_to_le16(0);
+       addsta->rate_n_flags = cmd->rate_n_flags;
+       addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
+       addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
+       addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
+
+       return (u16)sizeof(struct iwl3945_addsta_cmd);
+}
+
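+/*
+ * Add a driver-local station entry for the given BSSID; on success the new
+ * station id is returned through sta_id_r (if non-NULL).
+ */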
+static int iwl3945_add_bssid_station(struct iwl_priv *priv,
+                                    const u8 *addr, u8 *sta_id_r)
+{
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       int ret;
+       u8 sta_id;
+       unsigned long flags;
+
+       if (sta_id_r)
+               *sta_id_r = IWL_INVALID_STATION;
+
+       ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
+       if (ret) {
+               IWL_ERR(priv, "Unable to add station %pM\n", addr);
+               return ret;
+       }
+
+       if (sta_id_r)
+               *sta_id_r = sta_id;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       priv->stations[sta_id].used |= IWL_STA_LOCAL;
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return 0;
+}
+
+static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
+                                      struct ieee80211_vif *vif, bool add)
+{
+       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+       int ret;
+
+       if (add) {
+               ret = iwl3945_add_bssid_station(priv, vif->bss_conf.bssid,
+                                               &vif_priv->ibss_bssid_sta_id);
+               if (ret)
+                       return ret;
+
+               iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
+                                (priv->band == IEEE80211_BAND_5GHZ) ?
+                                IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP);
+               iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);
+
+               return 0;
+       }
+
+       return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
+                                 vif->bss_conf.bssid);
+}
+
+/**
+ * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
+ */
+int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
+{
+       int rc, i, index, prev_index;
+       struct iwl3945_rate_scaling_cmd rate_cmd = {
+               .reserved = {0, 0, 0},
+       };
+       struct iwl3945_rate_scaling_info *table = rate_cmd.table;
+
+       for (i = 0; i < ARRAY_SIZE(iwl3945_rates); i++) {
+               index = iwl3945_rates[i].table_rs_index;
+
+               table[index].rate_n_flags =
+                       iwl3945_hw_set_rate_n_flags(iwl3945_rates[i].plcp, 0);
+               table[index].try_cnt = priv->retry_rate;
+               prev_index = iwl3945_get_prev_ieee_rate(i);
+               table[index].next_rate_index =
+                               iwl3945_rates[prev_index].table_rs_index;
+       }
+
+       switch (priv->band) {
+       case IEEE80211_BAND_5GHZ:
+               IWL_DEBUG_RATE(priv, "Select A mode rate scale\n");
+               /* If one of the following CCK rates is used,
+                * have it fall back to the 6M OFDM rate */
+               for (i = IWL_RATE_1M_INDEX_TABLE;
+                       i <= IWL_RATE_11M_INDEX_TABLE; i++)
+                       table[i].next_rate_index =
+                         iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
+
+               /* Don't fall back to CCK rates */
+               table[IWL_RATE_12M_INDEX_TABLE].next_rate_index =
+                                               IWL_RATE_9M_INDEX_TABLE;
+
+               /* Don't drop out of OFDM rates */
+               table[IWL_RATE_6M_INDEX_TABLE].next_rate_index =
+                   iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
+               break;
+
+       case IEEE80211_BAND_2GHZ:
+               IWL_DEBUG_RATE(priv, "Select B/G mode rate scale\n");
+               /* If an OFDM rate is used, have it fall back to the
+                * 1M CCK rate */
+
+               if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
+                   iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
+
+                       index = IWL_FIRST_CCK_RATE;
+                       for (i = IWL_RATE_6M_INDEX_TABLE;
+                            i <= IWL_RATE_54M_INDEX_TABLE; i++)
+                               table[i].next_rate_index =
+                                       iwl3945_rates[index].table_rs_index;
+
+                       index = IWL_RATE_11M_INDEX_TABLE;
+                       /* CCK shouldn't fall back to OFDM... */
+                       table[index].next_rate_index = IWL_RATE_5M_INDEX_TABLE;
+               }
+               break;
+
+       default:
+               WARN_ON(1);
+               break;
+       }
+
+       /* Update the rate scaling for control frame Tx */
+       rate_cmd.table_id = 0;
+       rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
+                             &rate_cmd);
+       if (rc)
+               return rc;
+
+       /* Update the rate scaling for data frame Tx */
+       rate_cmd.table_id = 1;
+       return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
+                               &rate_cmd);
+}
+
+/* Called when initializing driver */
+int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
+{
+       memset((void *)&priv->hw_params, 0,
+              sizeof(struct iwl_hw_params));
+
+       priv->_3945.shared_virt =
+               dma_alloc_coherent(&priv->pci_dev->dev,
+                                  sizeof(struct iwl3945_shared),
+                                  &priv->_3945.shared_phys, GFP_KERNEL);
+       if (!priv->_3945.shared_virt) {
+               IWL_ERR(priv, "failed to allocate pci memory\n");
+               return -ENOMEM;
+       }
+
+       /* Assign number of Usable TX queues */
+       priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
+
+       priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
+       priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K);
+       priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
+       priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
+       priv->hw_params.max_stations = IWL3945_STATION_COUNT;
+       priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL3945_BROADCAST_ID;
+
+       priv->sta_key_max_num = STA_KEY_MAX_NUM;
+
+       priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
+       priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL;
+       priv->hw_params.beacon_time_tsf_bits = IWL3945_EXT_BEACON_TIME_POS;
+
+       return 0;
+}
+
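+/*
+ * iwl3945_hw_get_beacon_cmd - build the beacon Tx command in *frame
+ *
+ * Fills a struct iwl3945_tx_beacon_cmd (broadcast station, infinite life
+ * time, requested rate) followed by the beacon frame itself, and returns
+ * the total size of command plus frame.
+ */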
+unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
+                         struct iwl3945_frame *frame, u8 rate)
+{
+       struct iwl3945_tx_beacon_cmd *tx_beacon_cmd;
+       unsigned int frame_size;
+
+       tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u;
+       memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
+
+       tx_beacon_cmd->tx.sta_id =
+               priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
+       tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+       frame_size = iwl3945_fill_beacon_frame(priv,
+                               tx_beacon_cmd->frame,
+                               sizeof(frame->u) - sizeof(*tx_beacon_cmd));
+
+       BUG_ON(frame_size > MAX_MPDU_SIZE);
+       tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
+
+       tx_beacon_cmd->tx.rate = rate;
+       tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
+                                     TX_CMD_FLG_TSF_MSK);
+
+       /* supp_rates[0] == OFDM start at IWL_FIRST_OFDM_RATE*/
+       tx_beacon_cmd->tx.supp_rates[0] =
+               (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
+
+       tx_beacon_cmd->tx.supp_rates[1] =
+               (IWL_CCK_BASIC_RATES_MASK & 0xF);
+
+       return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size;
+}
+
+void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv)
+{
+       priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx;
+       priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx;
+}
+
+void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv)
+{
+       INIT_DELAYED_WORK(&priv->_3945.thermal_periodic,
+                         iwl3945_bg_reg_txpower_periodic);
+}
+
+void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv)
+{
+       cancel_delayed_work(&priv->_3945.thermal_periodic);
+}
+
+/* check contents of special bootstrap uCode SRAM */
+static int iwl3945_verify_bsm(struct iwl_priv *priv)
+{
+       __le32 *image = priv->ucode_boot.v_addr;
+       u32 len = priv->ucode_boot.len;
+       u32 reg;
+       u32 val;
+
+       IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
+
+       /* verify BSM SRAM contents */
+       val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
+       for (reg = BSM_SRAM_LOWER_BOUND;
+            reg < BSM_SRAM_LOWER_BOUND + len;
+            reg += sizeof(u32), image++) {
+               val = iwl_legacy_read_prph(priv, reg);
+               if (val != le32_to_cpu(*image)) {
+                       IWL_ERR(priv, "BSM uCode verification failed at "
+                                 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
+                                 BSM_SRAM_LOWER_BOUND,
+                                 reg - BSM_SRAM_LOWER_BOUND, len,
+                                 val, le32_to_cpu(*image));
+                       return -EIO;
+               }
+       }
+
+       IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
+
+       return 0;
+}
+
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+ ******************************************************************************/
+
+/*
+ * Clear the OWNER_MSK, to establish driver (instead of uCode running on
+ * embedded controller) as EEPROM reader; each read is a series of pulses
+ * to/from the EEPROM chip, not a single event, so even reads could conflict
+ * if they weren't arbitrated by some ownership mechanism.  Here, the driver
+ * simply claims ownership, which should be safe when this function is called
+ * (i.e. before loading uCode!).
+ */
+static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
+{
+       _iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
+       return 0;
+}
+
+
+static void iwl3945_eeprom_release_semaphore(struct iwl_priv *priv)
+{
+}
+
+/**
+ * iwl3945_load_bsm - Load bootstrap instructions
+ *
+ * BSM operation:
+ *
+ * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
+ * in special SRAM that does not power down during RFKILL.  When powering back
+ * up after power-saving sleeps (or during initial uCode load), the BSM loads
+ * the bootstrap program into the on-board processor, and starts it.
+ *
+ * The bootstrap program loads (via DMA) instructions and data for a new
+ * program from host DRAM locations indicated by the host driver in the
+ * BSM_DRAM_* registers.  Once the new program is loaded, it starts
+ * automatically.
+ *
+ * When initializing the NIC, the host driver points the BSM to the
+ * "initialize" uCode image.  This uCode sets up some internal data, then
+ * notifies host via "initialize alive" that it is complete.
+ *
+ * The host then replaces the BSM_DRAM_* pointer values to point to the
+ * normal runtime uCode instructions and a backup uCode data cache buffer
+ * (filled initially with starting data values for the on-board processor),
+ * then triggers the "initialize" uCode to load and launch the runtime uCode,
+ * which begins normal operation.
+ *
+ * When doing a power-save shutdown, runtime uCode saves data SRAM into
+ * the backup data cache in DRAM before SRAM is powered down.
+ *
+ * When powering back up, the BSM loads the bootstrap program.  This reloads
+ * the runtime uCode instructions and the backup data cache into SRAM,
+ * and re-launches the runtime uCode from where it left off.
+ */
+static int iwl3945_load_bsm(struct iwl_priv *priv)
+{
+       __le32 *image = priv->ucode_boot.v_addr;
+       u32 len = priv->ucode_boot.len;
+       dma_addr_t pinst;
+       dma_addr_t pdata;
+       u32 inst_len;
+       u32 data_len;
+       int rc;
+       int i;
+       u32 done;
+       u32 reg_offset;
+
+       IWL_DEBUG_INFO(priv, "Begin load bsm\n");
+
+       /* make sure bootstrap program is no larger than BSM's SRAM size */
+       if (len > IWL39_MAX_BSM_SIZE)
+               return -EINVAL;
+
+       /* Tell bootstrap uCode where to find the "Initialize" uCode
+       *   in host DRAM ... host DRAM physical address bits 31:0 for 3945.
+       * NOTE:  iwl3945_initialize_alive_start() will replace these values,
+       *        after the "initialize" uCode has run, to point to
+       *        runtime/protocol instructions and backup data cache. */
+       pinst = priv->ucode_init.p_addr;
+       pdata = priv->ucode_init_data.p_addr;
+       inst_len = priv->ucode_init.len;
+       data_len = priv->ucode_init_data.len;
+
+       iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
+       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
+       iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+
+       /* Fill BSM memory with bootstrap instructions */
+       for (reg_offset = BSM_SRAM_LOWER_BOUND;
+            reg_offset < BSM_SRAM_LOWER_BOUND + len;
+            reg_offset += sizeof(u32), image++)
+               _iwl_legacy_write_prph(priv, reg_offset,
+                                         le32_to_cpu(*image));
+
+       rc = iwl3945_verify_bsm(priv);
+       if (rc)
+               return rc;
+
+       /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
+       iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
+       iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG,
+                                IWL39_RTC_INST_LOWER_BOUND);
+       iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+
+       /* Load bootstrap code into instruction SRAM now,
+        *   to prepare to load "initialize" uCode */
+       iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
+               BSM_WR_CTRL_REG_BIT_START);
+
+       /* Wait for load of bootstrap uCode to finish */
+       for (i = 0; i < 100; i++) {
+               done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
+               if (!(done & BSM_WR_CTRL_REG_BIT_START))
+                       break;
+               udelay(10);
+       }
+       if (i < 100)
+               IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
+       else {
+               IWL_ERR(priv, "BSM write did not complete!\n");
+               return -EIO;
+       }
+
+       /* Enable future boot loads whenever power management unit triggers it
+        *   (e.g. when powering back up after power-save shutdown) */
+       iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
+               BSM_WR_CTRL_REG_BIT_START_EN);
+
+       return 0;
+}
+
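+/*
+ * Op tables binding the 3945-specific implementations above into the
+ * iwlegacy core.
+ */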
+static struct iwl_hcmd_ops iwl3945_hcmd = {
+       .rxon_assoc = iwl3945_send_rxon_assoc,
+       .commit_rxon = iwl3945_commit_rxon,
+};
+
+static struct iwl_lib_ops iwl3945_lib = {
+       .txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd,
+       .txq_free_tfd = iwl3945_hw_txq_free_tfd,
+       .txq_init = iwl3945_hw_tx_queue_init,
+       .load_ucode = iwl3945_load_bsm,
+       .dump_nic_event_log = iwl3945_dump_nic_event_log,
+       .dump_nic_error_log = iwl3945_dump_nic_error_log,
+       .apm_ops = {
+               .init = iwl3945_apm_init,
+               .config = iwl3945_nic_config,
+       },
+       .eeprom_ops = {
+               .regulatory_bands = {
+                       EEPROM_REGULATORY_BAND_1_CHANNELS,
+                       EEPROM_REGULATORY_BAND_2_CHANNELS,
+                       EEPROM_REGULATORY_BAND_3_CHANNELS,
+                       EEPROM_REGULATORY_BAND_4_CHANNELS,
+                       EEPROM_REGULATORY_BAND_5_CHANNELS,
+                       EEPROM_REGULATORY_BAND_NO_HT40,
+                       EEPROM_REGULATORY_BAND_NO_HT40,
+               },
+               .acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
+               .release_semaphore = iwl3945_eeprom_release_semaphore,
+       },
+       .send_tx_power  = iwl3945_send_tx_power,
+       .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,
+
+       .debugfs_ops = {
+               .rx_stats_read = iwl3945_ucode_rx_stats_read,
+               .tx_stats_read = iwl3945_ucode_tx_stats_read,
+               .general_stats_read = iwl3945_ucode_general_stats_read,
+       },
+};
+
+static const struct iwl_legacy_ops iwl3945_legacy_ops = {
+       .post_associate = iwl3945_post_associate,
+       .config_ap = iwl3945_config_ap,
+       .manage_ibss_station = iwl3945_manage_ibss_station,
+};
+
+static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
+       .get_hcmd_size = iwl3945_get_hcmd_size,
+       .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
+       .request_scan = iwl3945_request_scan,
+       .post_scan = iwl3945_post_scan,
+};
+
+static const struct iwl_ops iwl3945_ops = {
+       .lib = &iwl3945_lib,
+       .hcmd = &iwl3945_hcmd,
+       .utils = &iwl3945_hcmd_utils,
+       .led = &iwl3945_led_ops,
+       .legacy = &iwl3945_legacy_ops,
+       .ieee80211_ops = &iwl3945_hw_ops,
+};
+
+static struct iwl_base_params iwl3945_base_params = {
+       .eeprom_size = IWL3945_EEPROM_IMG_SIZE,
+       .num_of_queues = IWL39_NUM_QUEUES,
+       .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
+       .set_l0s = false,
+       .use_bsm = true,
+       .led_compensation = 64,
+       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+       .wd_timeout = IWL_DEF_WD_TIMEOUT,
+       .max_event_log_size = 512,
+};
+
+static struct iwl_cfg iwl3945_bg_cfg = {
+       .name = "3945BG",
+       .fw_name_pre = IWL3945_FW_PRE,
+       .ucode_api_max = IWL3945_UCODE_API_MAX,
+       .ucode_api_min = IWL3945_UCODE_API_MIN,
+       .sku = IWL_SKU_G,
+       .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
+       .ops = &iwl3945_ops,
+       .mod_params = &iwl3945_mod_params,
+       .base_params = &iwl3945_base_params,
+       .led_mode = IWL_LED_BLINK,
+};
+
+static struct iwl_cfg iwl3945_abg_cfg = {
+       .name = "3945ABG",
+       .fw_name_pre = IWL3945_FW_PRE,
+       .ucode_api_max = IWL3945_UCODE_API_MAX,
+       .ucode_api_min = IWL3945_UCODE_API_MIN,
+       .sku = IWL_SKU_A|IWL_SKU_G,
+       .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
+       .ops = &iwl3945_ops,
+       .mod_params = &iwl3945_mod_params,
+       .base_params = &iwl3945_base_params,
+       .led_mode = IWL_LED_BLINK,
+};
+
+DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
+       {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
+       {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
+       {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
+       {IWL_PCI_DEVICE(0x4227, 0x1014, iwl3945_bg_cfg)},
+       {IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_abg_cfg)},
+       {IWL_PCI_DEVICE(0x4227, PCI_ANY_ID, iwl3945_abg_cfg)},
+       {0}
+};
+
+MODULE_DEVICE_TABLE(pci, iwl3945_hw_card_ids);
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.h b/drivers/net/wireless/iwlegacy/iwl-3945.h
new file mode 100644 (file)
index 0000000..b118b59
--- /dev/null
@@ -0,0 +1,308 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+/*
+ * Please use this file (iwl-3945.h) for driver implementation definitions.
+ * Please use iwl-3945-commands.h for uCode API definitions.
+ * Please use iwl-3945-hw.h for hardware-related definitions.
+ */
+
+#ifndef __iwl_3945_h__
+#define __iwl_3945_h__
+
+#include <linux/pci.h> /* for struct pci_device_id */
+#include <linux/kernel.h>
+#include <net/ieee80211_radiotap.h>
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+extern const struct pci_device_id iwl3945_hw_card_ids[];
+
+#include "iwl-csr.h"
+#include "iwl-prph.h"
+#include "iwl-fh.h"
+#include "iwl-3945-hw.h"
+#include "iwl-debug.h"
+#include "iwl-power.h"
+#include "iwl-dev.h"
+#include "iwl-led.h"
+
+/* Highest firmware API version supported */
+#define IWL3945_UCODE_API_MAX 2
+
+/* Lowest firmware API version supported */
+#define IWL3945_UCODE_API_MIN 1
+
+#define IWL3945_FW_PRE "iwlwifi-3945-"
+#define _IWL3945_MODULE_FIRMWARE(api) IWL3945_FW_PRE #api ".ucode"
+#define IWL3945_MODULE_FIRMWARE(api) _IWL3945_MODULE_FIRMWARE(api)
+
+/* Default noise level to report when noise measurement is not available.
+ *   This may be because we're:
+ *   1)  Not associated (4965, no beacon statistics being sent to driver)
+ *   2)  Scanning (noise measurement does not apply to associated channel)
+ *   3)  Receiving CCK (3945 delivers noise info only for OFDM frames)
+ * Use default noise value of -127 ... this is below the range of measurable
+ *   Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ *   Also, -127 works better than 0 when averaging frames with/without
+ *   noise info (e.g. averaging might be done in app); measured dBm values are
+ *   always negative ... using a negative value as the default keeps all
+ *   averages within an s8's (used in some apps) range of negative values. */
+#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
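To make the fallback described above concrete, a minimal sketch (the helper name and the boolean caller-side flag are assumptions, not driver code) of how the default would typically be applied:

/* Illustrative only: report the measured noise when the firmware provided
 * one, otherwise fall back to the "unmeasurable" marker defined above. */
static inline s8 iwl3945_noise_or_default(bool measured, s8 noise_dbm)
{
        return measured ? noise_dbm : IWL_NOISE_MEAS_NOT_AVAILABLE;
}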
+
+/* Module parameters accessible from iwl-*.c */
+extern struct iwl_mod_params iwl3945_mod_params;
+
+struct iwl3945_rate_scale_data {
+       u64 data;
+       s32 success_counter;
+       s32 success_ratio;
+       s32 counter;
+       s32 average_tpt;
+       unsigned long stamp;
+};
+
+struct iwl3945_rs_sta {
+       spinlock_t lock;
+       struct iwl_priv *priv;
+       s32 *expected_tpt;
+       unsigned long last_partial_flush;
+       unsigned long last_flush;
+       u32 flush_time;
+       u32 last_tx_packets;
+       u32 tx_packets;
+       u8 tgg;
+       u8 flush_pending;
+       u8 start_rate;
+       struct timer_list rate_scale_flush;
+       struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
+#ifdef CONFIG_MAC80211_DEBUGFS
+       struct dentry *rs_sta_dbgfs_stats_table_file;
+#endif
+
+       /* used to be in sta_info */
+       int last_txrate_idx;
+};
+
+
+/*
+ * The common struct MUST be first because it is shared between
+ * 3945 and 4965!
+ */
+struct iwl3945_sta_priv {
+       struct iwl_station_priv_common common;
+       struct iwl3945_rs_sta rs_sta;
+};
+
+enum iwl3945_antenna {
+       IWL_ANTENNA_DIVERSITY,
+       IWL_ANTENNA_MAIN,
+       IWL_ANTENNA_AUX
+};
+
+/*
+ * RTS threshold here is total size [2347] minus 4 FCS bytes
+ * Per spec:
+ *   a value of 0 means RTS on all data/management packets
+ *   a value > max MSDU size means no RTS
+ * else RTS for data/management frames where MPDU is larger
+ *   than RTS value.
+ */
+#define DEFAULT_RTS_THRESHOLD     2347U
+#define MIN_RTS_THRESHOLD         0U
+#define MAX_RTS_THRESHOLD         2347U
+#define MAX_MSDU_SIZE            2304U
+#define MAX_MPDU_SIZE            2346U
+#define DEFAULT_BEACON_INTERVAL   100U
+#define DEFAULT_SHORT_RETRY_LIMIT 7U
+#define DEFAULT_LONG_RETRY_LIMIT  4U
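The rule spelled out in the comment above reduces to a small predicate over these constants. A hedged sketch, not code from the driver:

/* Sketch only: should an RTS exchange precede this data/management frame? */
static inline bool iwl3945_needs_rts_sketch(u32 rts_threshold, u32 mpdu_len)
{
        if (rts_threshold == MIN_RTS_THRESHOLD)
                return true;                    /* 0: RTS for every frame */
        if (rts_threshold > MAX_MSDU_SIZE)
                return false;                   /* above max MSDU: no RTS */
        return mpdu_len > rts_threshold;        /* RTS only for larger MPDUs */
}

Note that DEFAULT_RTS_THRESHOLD (2347) is larger than MAX_MSDU_SIZE, so by this rule RTS/CTS is effectively disabled by default.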
+
+#define IWL_TX_FIFO_AC0        0
+#define IWL_TX_FIFO_AC1        1
+#define IWL_TX_FIFO_AC2        2
+#define IWL_TX_FIFO_AC3        3
+#define IWL_TX_FIFO_HCCA_1     5
+#define IWL_TX_FIFO_HCCA_2     6
+#define IWL_TX_FIFO_NONE       7
+
+#define IEEE80211_DATA_LEN              2304
+#define IEEE80211_4ADDR_LEN             30
+#define IEEE80211_HLEN                  (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN             (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+struct iwl3945_frame {
+       union {
+               struct ieee80211_hdr frame;
+               struct iwl3945_tx_beacon_cmd beacon;
+               u8 raw[IEEE80211_FRAME_LEN];
+               u8 cmd[360];
+       } u;
+       struct list_head list;
+};
+
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
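A quick worked example, assuming mac80211's IEEE80211_SCTL_SEQ mask of 0xFFF0 (an assumption about the included headers, not stated in this file):

/*
 * SEQ_TO_SN(0x1234) == 0x123   -- top 12 bits are the sequence number
 * SN_TO_SEQ(0x123)  == 0x1230  -- fragment-number bits come back as zero
 * MAX_SN            == 0xfff
 */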
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS  12
+
+#define IWL_SUPPORTED_RATES_IE_LEN         8
+
+#define SCAN_INTERVAL 100
+
+#define MAX_TID_COUNT        9
+
+#define IWL_INVALID_RATE     0xFF
+#define IWL_INVALID_VALUE    -1
+
+#define STA_PS_STATUS_WAKE             0
+#define STA_PS_STATUS_SLEEP            1
+
+struct iwl3945_ibss_seq {
+       u8 mac[ETH_ALEN];
+       u16 seq_num;
+       u16 frag_num;
+       unsigned long packet_time;
+       struct list_head list;
+};
+
+#define IWL_RX_HDR(x) ((struct iwl3945_rx_frame_hdr *)(\
+                      x->u.rx_frame.stats.payload + \
+                      x->u.rx_frame.stats.phy_count))
+#define IWL_RX_END(x) ((struct iwl3945_rx_frame_end *)(\
+                      IWL_RX_HDR(x)->payload + \
+                      le16_to_cpu(IWL_RX_HDR(x)->len)))
+#define IWL_RX_STATS(x) (&x->u.rx_frame.stats)
+#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload)
+
+
+/******************************************************************************
+ *
+ * Functions implemented in iwl3945-base.c which are forward declared here
+ * for use by iwl-*.c
+ *
+ *****************************************************************************/
+extern int iwl3945_calc_db_from_ratio(int sig_ratio);
+extern void iwl3945_rx_replenish(void *data);
+extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
+                                       struct ieee80211_hdr *hdr, int left);
+extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+                                      char **buf, bool display);
+extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
+
+/******************************************************************************
+ *
+ * Functions implemented in iwl-[34]*.c which are forward declared here
+ * for use by iwl3945-base.c
+ *
+ * NOTE:  The implementations of these functions are hardware specific,
+ * which is why they are in the hardware-specific files (vs. iwl-base.c).
+ *
+ * Naming convention --
+ * iwl3945_         <-- It's part of iwlwifi (the common/shared driver code)
+ * iwl3945_hw_      <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
+ * iwlXXXX_     <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
+ * iwl3945_bg_      <-- Called from work queue context
+ * iwl3945_mac_     <-- mac80211 callback
+ *
+ ****************************************************************************/
+extern void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv);
+extern void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv);
+extern void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv);
+extern int iwl3945_hw_rxq_stop(struct iwl_priv *priv);
+extern int iwl3945_hw_set_hw_params(struct iwl_priv *priv);
+extern int iwl3945_hw_nic_init(struct iwl_priv *priv);
+extern int iwl3945_hw_nic_stop_master(struct iwl_priv *priv);
+extern void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv);
+extern void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv);
+extern int iwl3945_hw_nic_reset(struct iwl_priv *priv);
+extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+                                           struct iwl_tx_queue *txq,
+                                           dma_addr_t addr, u16 len,
+                                           u8 reset, u8 pad);
+extern void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv,
+                                   struct iwl_tx_queue *txq);
+extern int iwl3945_hw_get_temperature(struct iwl_priv *priv);
+extern int iwl3945_hw_tx_queue_init(struct iwl_priv *priv,
+                               struct iwl_tx_queue *txq);
+extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
+                                struct iwl3945_frame *frame, u8 rate);
+void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
+                                 struct iwl_device_cmd *cmd,
+                                 struct ieee80211_tx_info *info,
+                                 struct ieee80211_hdr *hdr,
+                                 int sta_id, int tx_id);
+extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv);
+extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
+extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
+                                struct iwl_rx_mem_buffer *rxb);
+void iwl3945_reply_statistics(struct iwl_priv *priv,
+                             struct iwl_rx_mem_buffer *rxb);
+extern void iwl3945_disable_events(struct iwl_priv *priv);
+extern int iwl4965_get_temperature(const struct iwl_priv *priv);
+extern void iwl3945_post_associate(struct iwl_priv *priv);
+extern void iwl3945_config_ap(struct iwl_priv *priv);
+
+extern int iwl3945_commit_rxon(struct iwl_priv *priv,
+                              struct iwl_rxon_context *ctx);
+
+/**
+ * iwl3945_hw_find_station - Find station id for a given BSSID
+ * @bssid: MAC address of the station to find
+ *
+ * NOTE:  This should not be hardware specific but the code has
+ * not yet been merged into a single common layer for managing the
+ * station tables.
+ */
+extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
+
+extern struct ieee80211_ops iwl3945_hw_ops;
+
+/*
+ * Forward declare iwl-3945.c functions for iwl3945-base.c
+ */
+extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
+extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
+extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv);
+extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv);
+
+extern const struct iwl_channel_info *iwl3945_get_channel_info(
+       const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
+
+extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
+
+/* scanning */
+int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
+void iwl3945_post_scan(struct iwl_priv *priv);
+
+/* rates */
+extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945];
+
+/* Requires full declaration of iwl_priv before including */
+#include "iwl-io.h"
+
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c b/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
new file mode 100644 (file)
index 0000000..81d6a25
--- /dev/null
@@ -0,0 +1,967 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-4965-calib.h"
+
+/*****************************************************************************
+ * INIT calibrations framework
+ *****************************************************************************/
+
+struct statistics_general_data {
+       u32 beacon_silence_rssi_a;
+       u32 beacon_silence_rssi_b;
+       u32 beacon_silence_rssi_c;
+       u32 beacon_energy_a;
+       u32 beacon_energy_b;
+       u32 beacon_energy_c;
+};
+
+void iwl4965_calib_free_results(struct iwl_priv *priv)
+{
+       int i;
+
+       for (i = 0; i < IWL_CALIB_MAX; i++) {
+               kfree(priv->calib_results[i].buf);
+               priv->calib_results[i].buf = NULL;
+               priv->calib_results[i].buf_len = 0;
+       }
+}
+
+/*****************************************************************************
+ * RUNTIME calibrations framework
+ *****************************************************************************/
+
+/* "false alarms" are signals that our DSP tries to lock onto,
+ *   but then determines that they are either noise, or transmissions
+ *   from a distant wireless network (also "noise", really) that get
+ *   "stepped on" by stronger transmissions within our own network.
+ * This algorithm attempts to set a sensitivity level that is high
+ *   enough to receive all of our own network traffic, but not so
+ *   high that our DSP gets too busy trying to lock onto non-network
+ *   activity/noise. */
+static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
+                                  u32 norm_fa,
+                                  u32 rx_enable_time,
+                                  struct statistics_general_data *rx_info)
+{
+       u32 max_nrg_cck = 0;
+       int i = 0;
+       u8 max_silence_rssi = 0;
+       u32 silence_ref = 0;
+       u8 silence_rssi_a = 0;
+       u8 silence_rssi_b = 0;
+       u8 silence_rssi_c = 0;
+       u32 val;
+
+       /* "false_alarms" values below are cross-multiplications to assess the
+        *   numbers of false alarms within the measured period of actual Rx
+        *   (Rx is off when we're txing), vs the min/max expected false alarms
+        *   (some should be expected if rx is sensitive enough) in a
+        *   hypothetical listening period of 200 time units (TU), 204.8 msec:
+        *
+        * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
+        *
+        * */
+       u32 false_alarms = norm_fa * 200 * 1024;
+       u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
+       u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
+       struct iwl_sensitivity_data *data = NULL;
+       const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+
+       data = &(priv->sensitivity_data);
+
+       data->nrg_auto_corr_silence_diff = 0;
+
+       /* Find max silence rssi among all 3 receivers.
+        * This is background noise, which may include transmissions from other
+        *    networks, measured during silence before our network's beacon */
+       silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
+                           ALL_BAND_FILTER) >> 8);
+       silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
+                           ALL_BAND_FILTER) >> 8);
+       silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
+                           ALL_BAND_FILTER) >> 8);
+
+       val = max(silence_rssi_b, silence_rssi_c);
+       max_silence_rssi = max(silence_rssi_a, (u8) val);
+
+       /* Store silence rssi in 20-beacon history table */
+       data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
+       data->nrg_silence_idx++;
+       if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
+               data->nrg_silence_idx = 0;
+
+       /* Find max silence rssi across 20 beacon history */
+       for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
+               val = data->nrg_silence_rssi[i];
+               silence_ref = max(silence_ref, val);
+       }
+       IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n",
+                       silence_rssi_a, silence_rssi_b, silence_rssi_c,
+                       silence_ref);
+
+       /* Find max rx energy (min value!) among all 3 receivers,
+        *   measured during beacon frame.
+        * Save it in 10-beacon history table. */
+       i = data->nrg_energy_idx;
+       val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
+       data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
+
+       data->nrg_energy_idx++;
+       if (data->nrg_energy_idx >= 10)
+               data->nrg_energy_idx = 0;
+
+       /* Find min rx energy (max value) across 10 beacon history.
+        * This is the minimum signal level that we want to receive well.
+        * Add backoff (margin so we don't miss slightly lower energy frames).
+        * This establishes an upper bound (min value) for energy threshold. */
+       max_nrg_cck = data->nrg_value[0];
+       for (i = 1; i < 10; i++)
+               max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
+       max_nrg_cck += 6;
+
+       IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
+                       rx_info->beacon_energy_a, rx_info->beacon_energy_b,
+                       rx_info->beacon_energy_c, max_nrg_cck - 6);
+
+       /* Count number of consecutive beacons with fewer-than-desired
+        *   false alarms. */
+       if (false_alarms < min_false_alarms)
+               data->num_in_cck_no_fa++;
+       else
+               data->num_in_cck_no_fa = 0;
+       IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n",
+                       data->num_in_cck_no_fa);
+
+       /* If we got too many false alarms this time, reduce sensitivity */
+       if ((false_alarms > max_false_alarms) &&
+               (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
+               IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
+                    false_alarms, max_false_alarms);
+               IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n");
+               data->nrg_curr_state = IWL_FA_TOO_MANY;
+               /* Store for "fewer than desired" on later beacon */
+               data->nrg_silence_ref = silence_ref;
+
+               /* increase energy threshold (reduce nrg value)
+                *   to decrease sensitivity */
+               data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
+       /* Else if we got fewer than desired, increase sensitivity */
+       } else if (false_alarms < min_false_alarms) {
+               data->nrg_curr_state = IWL_FA_TOO_FEW;
+
+               /* Compare silence level with silence level for most recent
+                *   healthy number or too many false alarms */
+               data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
+                                                  (s32)silence_ref;
+
+               IWL_DEBUG_CALIB(priv,
+                        "norm FA %u < min FA %u, silence diff %d\n",
+                        false_alarms, min_false_alarms,
+                        data->nrg_auto_corr_silence_diff);
+
+               /* Increase value to increase sensitivity, but only if:
+                * 1a) previous beacon did *not* have *too many* false alarms
+                * 1b) AND there's a significant difference in Rx levels
+                *      from a previous beacon with too many, or healthy # FAs
+                * OR 2) We've seen a lot of beacons (100) with too few
+                *       false alarms */
+               if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
+                       ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
+                       (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
+
+                       IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n");
+                       /* Increase nrg value to increase sensitivity */
+                       val = data->nrg_th_cck + NRG_STEP_CCK;
+                       data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
+               } else {
+                       IWL_DEBUG_CALIB(priv,
+                                        "... but not changing sensitivity\n");
+               }
+
+       /* Else we got a healthy number of false alarms, keep status quo */
+       } else {
+               IWL_DEBUG_CALIB(priv, " FA in safe zone\n");
+               data->nrg_curr_state = IWL_FA_GOOD_RANGE;
+
+               /* Store for use in "fewer than desired" with later beacon */
+               data->nrg_silence_ref = silence_ref;
+
+               /* If previous beacon had too many false alarms,
+                *   give it some extra margin by reducing sensitivity again
+                *   (but don't go below measured energy of desired Rx) */
+               if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
+                       IWL_DEBUG_CALIB(priv, "... increasing margin\n");
+                       if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
+                               data->nrg_th_cck -= NRG_MARGIN;
+                       else
+                               data->nrg_th_cck = max_nrg_cck;
+               }
+       }
+
+       /* Make sure the energy threshold does not go above the measured
+        * energy of the desired Rx signals (reduced by backoff margin),
+        * or else we might start missing Rx frames.
+        * Lower value is higher energy, so we use max()!
+        */
+       data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
+       IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck);
+
+       data->nrg_prev_state = data->nrg_curr_state;
+
+       /* Auto-correlation CCK algorithm */
+       if (false_alarms > min_false_alarms) {
+
+               /* increase auto_corr values to decrease sensitivity
+                * so the DSP won't be disturbed by the noise
+                */
+               if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
+                       data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
+               else {
+                       val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
+                       data->auto_corr_cck =
+                               min((u32)ranges->auto_corr_max_cck, val);
+               }
+               val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
+               data->auto_corr_cck_mrc =
+                       min((u32)ranges->auto_corr_max_cck_mrc, val);
+       } else if ((false_alarms < min_false_alarms) &&
+          ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
+          (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
+
+               /* Decrease auto_corr values to increase sensitivity */
+               val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
+               data->auto_corr_cck =
+                       max((u32)ranges->auto_corr_min_cck, val);
+               val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
+               data->auto_corr_cck_mrc =
+                       max((u32)ranges->auto_corr_min_cck_mrc, val);
+       }
+
+       return 0;
+}
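Stripped of the history tables and threshold updates, the heart of this function (and of the OFDM variant below) is the comparison sketched in the comment near its top: scale the observed false-alarm count up to a fixed 200 TU (200 * 1024 usec) window and compare it against budgets scaled by the actual rx-enable time, so only multiplications are needed. A standalone sketch with hypothetical budget values (the real MIN_FA_CCK/MAX_FA_CCK constants live in the driver headers):

#include <stdint.h>

#define SKETCH_MIN_FA_CCK  5    /* hypothetical budgets per 200 TU window */
#define SKETCH_MAX_FA_CCK 50

enum fa_verdict { FA_TOO_FEW, FA_GOOD_RANGE, FA_TOO_MANY };

/* Cross-multiplication from the comment above:
 *   MIN_FA / (200*1024) < false_alarms / actual-rx-time < MAX_FA / (200*1024)
 * rearranged to avoid any division. */
static enum fa_verdict classify_false_alarms(uint32_t norm_fa,
                                             uint32_t rx_enable_time_usec)
{
        uint32_t false_alarms = norm_fa * 200 * 1024;
        uint32_t max_fa = SKETCH_MAX_FA_CCK * rx_enable_time_usec;
        uint32_t min_fa = SKETCH_MIN_FA_CCK * rx_enable_time_usec;

        if (false_alarms > max_fa)
                return FA_TOO_MANY;     /* reduce sensitivity */
        if (false_alarms < min_fa)
                return FA_TOO_FEW;      /* increase sensitivity */
        return FA_GOOD_RANGE;           /* healthy range: keep status quo */
}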
+
+
+static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
+                                      u32 norm_fa,
+                                      u32 rx_enable_time)
+{
+       u32 val;
+       u32 false_alarms = norm_fa * 200 * 1024;
+       u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
+       u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
+       struct iwl_sensitivity_data *data = NULL;
+       const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+
+       data = &(priv->sensitivity_data);
+
+       /* If we got too many false alarms this time, reduce sensitivity */
+       if (false_alarms > max_false_alarms) {
+
+               IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
+                            false_alarms, max_false_alarms);
+
+               val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
+               data->auto_corr_ofdm =
+                       min((u32)ranges->auto_corr_max_ofdm, val);
+
+               val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
+               data->auto_corr_ofdm_mrc =
+                       min((u32)ranges->auto_corr_max_ofdm_mrc, val);
+
+               val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
+               data->auto_corr_ofdm_x1 =
+                       min((u32)ranges->auto_corr_max_ofdm_x1, val);
+
+               val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
+               data->auto_corr_ofdm_mrc_x1 =
+                       min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
+       }
+
+       /* Else if we got fewer than desired, increase sensitivity */
+       else if (false_alarms < min_false_alarms) {
+
+               IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n",
+                            false_alarms, min_false_alarms);
+
+               val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
+               data->auto_corr_ofdm =
+                       max((u32)ranges->auto_corr_min_ofdm, val);
+
+               val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
+               data->auto_corr_ofdm_mrc =
+                       max((u32)ranges->auto_corr_min_ofdm_mrc, val);
+
+               val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
+               data->auto_corr_ofdm_x1 =
+                       max((u32)ranges->auto_corr_min_ofdm_x1, val);
+
+               val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
+               data->auto_corr_ofdm_mrc_x1 =
+                       max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
+       } else {
+               IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n",
+                        min_false_alarms, false_alarms, max_false_alarms);
+       }
+       return 0;
+}
+
+static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv,
+                               struct iwl_sensitivity_data *data,
+                               __le16 *tbl)
+{
+       tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
+                               cpu_to_le16((u16)data->auto_corr_ofdm);
+       tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
+                               cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
+       tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
+                               cpu_to_le16((u16)data->auto_corr_ofdm_x1);
+       tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
+                               cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
+
+       tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
+                               cpu_to_le16((u16)data->auto_corr_cck);
+       tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
+                               cpu_to_le16((u16)data->auto_corr_cck_mrc);
+
+       tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
+                               cpu_to_le16((u16)data->nrg_th_cck);
+       tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] =
+                               cpu_to_le16((u16)data->nrg_th_ofdm);
+
+       tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
+                               cpu_to_le16(data->barker_corr_th_min);
+       tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
+                               cpu_to_le16(data->barker_corr_th_min_mrc);
+       tbl[HD_OFDM_ENERGY_TH_IN_INDEX] =
+                               cpu_to_le16(data->nrg_th_cca);
+
+       IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
+                       data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
+                       data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
+                       data->nrg_th_ofdm);
+
+       IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
+                       data->auto_corr_cck, data->auto_corr_cck_mrc,
+                       data->nrg_th_cck);
+}
+
+/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
+static int iwl4965_sensitivity_write(struct iwl_priv *priv)
+{
+       struct iwl_sensitivity_cmd cmd;
+       struct iwl_sensitivity_data *data = NULL;
+       struct iwl_host_cmd cmd_out = {
+               .id = SENSITIVITY_CMD,
+               .len = sizeof(struct iwl_sensitivity_cmd),
+               .flags = CMD_ASYNC,
+               .data = &cmd,
+       };
+
+       data = &(priv->sensitivity_data);
+
+       memset(&cmd, 0, sizeof(cmd));
+
+       iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]);
+
+       /* Update uCode's "work" table, and copy it to DSP */
+       cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
+
+       /* Don't send command to uCode if nothing has changed */
+       if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
+                   sizeof(u16)*HD_TABLE_SIZE)) {
+               IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
+               return 0;
+       }
+
+       /* Copy table for comparison next time */
+       memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
+              sizeof(u16)*HD_TABLE_SIZE);
+
+       return iwl_legacy_send_cmd(priv, &cmd_out);
+}
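The memcmp()/memcpy() pair above is a small "send only on change" cache: the last table handed to the uCode is kept around, and a new SENSITIVITY_CMD goes out only when the freshly prepared table differs. A self-contained sketch of the same idea, with a hypothetical send callback standing in for iwl_legacy_send_cmd() and a placeholder table size:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define SKETCH_TABLE_WORDS 11           /* stand-in for HD_TABLE_SIZE */

struct sketch_sens_cache {
        uint16_t last_sent[SKETCH_TABLE_WORDS];
};

/* Returns true when the table changed and was pushed out. */
static bool send_if_changed(struct sketch_sens_cache *cache,
                            const uint16_t *table,
                            void (*send)(const uint16_t *tbl, size_t n))
{
        if (!memcmp(cache->last_sent, table, sizeof(cache->last_sent)))
                return false;                   /* no change: skip the command */

        memcpy(cache->last_sent, table, sizeof(cache->last_sent));
        send(table, SKETCH_TABLE_WORDS);        /* hypothetical transport hook */
        return true;
}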
+
+void iwl4965_init_sensitivity(struct iwl_priv *priv)
+{
+       int ret = 0;
+       int i;
+       struct iwl_sensitivity_data *data = NULL;
+       const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+
+       if (priv->disable_sens_cal)
+               return;
+
+       IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n");
+
+       /* Clear driver's sensitivity algo data */
+       data = &(priv->sensitivity_data);
+
+       if (ranges == NULL)
+               return;
+
+       memset(data, 0, sizeof(struct iwl_sensitivity_data));
+
+       data->num_in_cck_no_fa = 0;
+       data->nrg_curr_state = IWL_FA_TOO_MANY;
+       data->nrg_prev_state = IWL_FA_TOO_MANY;
+       data->nrg_silence_ref = 0;
+       data->nrg_silence_idx = 0;
+       data->nrg_energy_idx = 0;
+
+       for (i = 0; i < 10; i++)
+               data->nrg_value[i] = 0;
+
+       for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
+               data->nrg_silence_rssi[i] = 0;
+
+       data->auto_corr_ofdm =  ranges->auto_corr_min_ofdm;
+       data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
+       data->auto_corr_ofdm_x1  = ranges->auto_corr_min_ofdm_x1;
+       data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
+       data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
+       data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
+       data->nrg_th_cck = ranges->nrg_th_cck;
+       data->nrg_th_ofdm = ranges->nrg_th_ofdm;
+       data->barker_corr_th_min = ranges->barker_corr_th_min;
+       data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
+       data->nrg_th_cca = ranges->nrg_th_cca;
+
+       data->last_bad_plcp_cnt_ofdm = 0;
+       data->last_fa_cnt_ofdm = 0;
+       data->last_bad_plcp_cnt_cck = 0;
+       data->last_fa_cnt_cck = 0;
+
+       ret |= iwl4965_sensitivity_write(priv);
+       IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
+}
+
+void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
+{
+       u32 rx_enable_time;
+       u32 fa_cck;
+       u32 fa_ofdm;
+       u32 bad_plcp_cck;
+       u32 bad_plcp_ofdm;
+       u32 norm_fa_ofdm;
+       u32 norm_fa_cck;
+       struct iwl_sensitivity_data *data = NULL;
+       struct statistics_rx_non_phy *rx_info;
+       struct statistics_rx_phy *ofdm, *cck;
+       unsigned long flags;
+       struct statistics_general_data statis;
+
+       if (priv->disable_sens_cal)
+               return;
+
+       data = &(priv->sensitivity_data);
+
+       if (!iwl_legacy_is_any_associated(priv)) {
+               IWL_DEBUG_CALIB(priv, "<< - not associated\n");
+               return;
+       }
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general);
+       ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm);
+       cck = &(((struct iwl_notif_statistics *)resp)->rx.cck);
+
+       if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
+               IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
+               spin_unlock_irqrestore(&priv->lock, flags);
+               return;
+       }
+
+       /* Extract Statistics: */
+       rx_enable_time = le32_to_cpu(rx_info->channel_load);
+       fa_cck = le32_to_cpu(cck->false_alarm_cnt);
+       fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt);
+       bad_plcp_cck = le32_to_cpu(cck->plcp_err);
+       bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
+
+       statis.beacon_silence_rssi_a =
+                       le32_to_cpu(rx_info->beacon_silence_rssi_a);
+       statis.beacon_silence_rssi_b =
+                       le32_to_cpu(rx_info->beacon_silence_rssi_b);
+       statis.beacon_silence_rssi_c =
+                       le32_to_cpu(rx_info->beacon_silence_rssi_c);
+       statis.beacon_energy_a =
+                       le32_to_cpu(rx_info->beacon_energy_a);
+       statis.beacon_energy_b =
+                       le32_to_cpu(rx_info->beacon_energy_b);
+       statis.beacon_energy_c =
+                       le32_to_cpu(rx_info->beacon_energy_c);
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
+
+       if (!rx_enable_time) {
+               IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
+               return;
+       }
+
+       /* These statistics increase monotonically, and do not reset
+        *   at each beacon.  Calculate difference from last value, or just
+        *   use the new statistics value if it has reset or wrapped around. */
+       if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
+               data->last_bad_plcp_cnt_cck = bad_plcp_cck;
+       else {
+               bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
+               data->last_bad_plcp_cnt_cck += bad_plcp_cck;
+       }
+
+       if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
+               data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
+       else {
+               bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
+               data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
+       }
+
+       if (data->last_fa_cnt_ofdm > fa_ofdm)
+               data->last_fa_cnt_ofdm = fa_ofdm;
+       else {
+               fa_ofdm -= data->last_fa_cnt_ofdm;
+               data->last_fa_cnt_ofdm += fa_ofdm;
+       }
+
+       if (data->last_fa_cnt_cck > fa_cck)
+               data->last_fa_cnt_cck = fa_cck;
+       else {
+               fa_cck -= data->last_fa_cnt_cck;
+               data->last_fa_cnt_cck += fa_cck;
+       }
+
+       /* Total aborted signal locks */
+       norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
+       norm_fa_cck = fa_cck + bad_plcp_cck;
+
+       IWL_DEBUG_CALIB(priv,
+                        "cck: fa %u badp %u  ofdm: fa %u badp %u\n", fa_cck,
+                       bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
+
+       iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
+       iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
+
+       iwl4965_sensitivity_write(priv);
+}
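The four near-identical blocks above all apply one pattern: the firmware counters only ever grow, so the driver remembers the last value it consumed, reports the difference, and simply resynchronizes when the counter appears to have reset or wrapped (new value smaller than the stored one). A compact sketch of that pattern:

#include <stdint.h>

/*
 * Return the increase in a monotonic counter since the previous call.
 * If the counter went backwards (firmware reset or wrap-around), resync
 * to the new value and report no progress for this interval.
 */
static uint32_t counter_delta(uint32_t *last, uint32_t current)
{
        uint32_t delta;

        if (*last > current) {
                *last = current;
                return 0;
        }

        delta = current - *last;
        *last = current;
        return delta;
}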
+
+static inline u8 iwl4965_find_first_chain(u8 mask)
+{
+       if (mask & ANT_A)
+               return CHAIN_A;
+       if (mask & ANT_B)
+               return CHAIN_B;
+       return CHAIN_C;
+}
+
+/**
+ * iwl4965_find_disconn_antenna - run the disconnected-antenna algorithm
+ *
+ * Figures out which of the receive antennas are disconnected.
+ */
+static void
+iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
+                                    struct iwl_chain_noise_data *data)
+{
+       u32 active_chains = 0;
+       u32 max_average_sig;
+       u16 max_average_sig_antenna_i;
+       u8 num_tx_chains;
+       u8 first_chain;
+       u16 i = 0;
+
+       average_sig[0] = data->chain_signal_a /
+                        priv->cfg->base_params->chain_noise_num_beacons;
+       average_sig[1] = data->chain_signal_b /
+                        priv->cfg->base_params->chain_noise_num_beacons;
+       average_sig[2] = data->chain_signal_c /
+                        priv->cfg->base_params->chain_noise_num_beacons;
+
+       if (average_sig[0] >= average_sig[1]) {
+               max_average_sig = average_sig[0];
+               max_average_sig_antenna_i = 0;
+               active_chains = (1 << max_average_sig_antenna_i);
+       } else {
+               max_average_sig = average_sig[1];
+               max_average_sig_antenna_i = 1;
+               active_chains = (1 << max_average_sig_antenna_i);
+       }
+
+       if (average_sig[2] >= max_average_sig) {
+               max_average_sig = average_sig[2];
+               max_average_sig_antenna_i = 2;
+               active_chains = (1 << max_average_sig_antenna_i);
+       }
+
+       IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
+                    average_sig[0], average_sig[1], average_sig[2]);
+       IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
+                    max_average_sig, max_average_sig_antenna_i);
+
+       /* Compare signal strengths for all 3 receivers. */
+       for (i = 0; i < NUM_RX_CHAINS; i++) {
+               if (i != max_average_sig_antenna_i) {
+                       s32 rssi_delta = (max_average_sig - average_sig[i]);
+
+                       /* If signal is very weak, compared with
+                        * strongest, mark it as disconnected. */
+                       if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
+                               data->disconn_array[i] = 1;
+                       else
+                               active_chains |= (1 << i);
+                       IWL_DEBUG_CALIB(priv, "i = %d  rssiDelta = %d  "
+                            "disconn_array[i] = %d\n",
+                            i, rssi_delta, data->disconn_array[i]);
+               }
+       }
+
+       /*
+        * The above algorithm sometimes fails when the ucode
+        * reports 0 for all chains. It's not clear why that
+        * happens to start with, but it is then causing trouble
+        * because this can make us enable more chains than the
+        * hardware really has.
+        *
+        * To be safe, simply mask out any chains that we know
+        * are not on the device.
+        */
+       active_chains &= priv->hw_params.valid_rx_ant;
+
+       num_tx_chains = 0;
+       for (i = 0; i < NUM_RX_CHAINS; i++) {
+               /* loop over all the bits of
+                * priv->hw_params.valid_tx_ant */
+               u8 ant_msk = (1 << i);
+               if (!(priv->hw_params.valid_tx_ant & ant_msk))
+                       continue;
+
+               num_tx_chains++;
+               if (data->disconn_array[i] == 0)
+                       /* there is a Tx antenna connected */
+                       break;
+               if (num_tx_chains == priv->hw_params.tx_chains_num &&
+                   data->disconn_array[i]) {
+                       /*
+                        * If all chains are disconnected
+                        * connect the first valid tx chain
+                        */
+                       first_chain =
+                       iwl4965_find_first_chain(priv->cfg->valid_tx_ant);
+                       data->disconn_array[first_chain] = 0;
+                       active_chains |= BIT(first_chain);
+                       IWL_DEBUG_CALIB(priv,
+                                       "All Tx chains are disconnected "
+                                       "W/A - declare %d as connected\n",
+                                       first_chain);
+                       break;
+               }
+       }
+
+       if (active_chains != priv->hw_params.valid_rx_ant &&
+           active_chains != priv->chain_noise_data.active_chains)
+               IWL_DEBUG_CALIB(priv,
+                               "Detected that not all antennas are connected! "
+                               "Connected: %#x, valid: %#x.\n",
+                               active_chains, priv->hw_params.valid_rx_ant);
+
+       /* Save for use within RXON, TX, SCAN commands, etc. */
+       data->active_chains = active_chains;
+       IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
+                       active_chains);
+}
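Stripped of the Tx-chain work-around and the debug output, the detection step above boils down to: pick the chain with the strongest averaged beacon signal and flag every chain that falls more than a fixed pathloss budget below it. A hedged sketch with a hypothetical budget standing in for MAXIMUM_ALLOWED_PATHLOSS:

#include <stdint.h>

#define SKETCH_NUM_CHAINS   3
#define SKETCH_MAX_PATHLOSS 15          /* hypothetical budget value */

/* Set disconn[i] = 1 for every chain whose averaged signal is more than
 * SKETCH_MAX_PATHLOSS below the strongest chain; return the strongest index. */
static int flag_disconnected_chains(const uint32_t avg_sig[SKETCH_NUM_CHAINS],
                                    uint8_t disconn[SKETCH_NUM_CHAINS])
{
        int i, strongest = 0;

        for (i = 1; i < SKETCH_NUM_CHAINS; i++)
                if (avg_sig[i] >= avg_sig[strongest])
                        strongest = i;

        for (i = 0; i < SKETCH_NUM_CHAINS; i++)
                disconn[i] = (i != strongest) &&
                             (avg_sig[strongest] - avg_sig[i] >
                              SKETCH_MAX_PATHLOSS);

        return strongest;
}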
+
+static void iwl4965_gain_computation(struct iwl_priv *priv,
+               u32 *average_noise,
+               u16 min_average_noise_antenna_i,
+               u32 min_average_noise,
+               u8 default_chain)
+{
+       int i, ret;
+       struct iwl_chain_noise_data *data = &priv->chain_noise_data;
+
+       data->delta_gain_code[min_average_noise_antenna_i] = 0;
+
+       for (i = default_chain; i < NUM_RX_CHAINS; i++) {
+               s32 delta_g = 0;
+
+               if (!(data->disconn_array[i]) &&
+                   (data->delta_gain_code[i] ==
+                            CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
+                       delta_g = average_noise[i] - min_average_noise;
+                       data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
+                       data->delta_gain_code[i] =
+                               min(data->delta_gain_code[i],
+                               (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
+
+                       data->delta_gain_code[i] =
+                               (data->delta_gain_code[i] | (1 << 2));
+               } else {
+                       data->delta_gain_code[i] = 0;
+               }
+       }
+       IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
+                    data->delta_gain_code[0],
+                    data->delta_gain_code[1],
+                    data->delta_gain_code[2]);
+
+       /* Differential gain gets sent to uCode only once */
+       if (!data->radio_write) {
+               struct iwl_calib_diff_gain_cmd cmd;
+               data->radio_write = 1;
+
+               memset(&cmd, 0, sizeof(cmd));
+               cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
+               cmd.diff_gain_a = data->delta_gain_code[0];
+               cmd.diff_gain_b = data->delta_gain_code[1];
+               cmd.diff_gain_c = data->delta_gain_code[2];
+               ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
+                                     sizeof(cmd), &cmd);
+               if (ret)
+                       IWL_DEBUG_CALIB(priv, "fail sending cmd "
+                                    "REPLY_PHY_CALIBRATION_CMD\n");
+
+               /* TODO: we might want to recalculate
+                * rx_chain in the RXON command */
+
+               /* Mark so we run this algo only once! */
+               data->state = IWL_CHAIN_NOISE_CALIBRATED;
+       }
+}
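Leaving aside the "disconnected" and "already initialized" guards, the per-chain value written above is just the chain's excess noise relative to the quietest chain, scaled by 10/15 (i.e. 2/3), clamped, and then OR'd with bit 2 exactly as the code does. A standalone sketch (the clamp constant is a placeholder for CHAIN_NOISE_MAX_DELTA_GAIN_CODE):

#include <stdint.h>

#define SKETCH_MAX_DELTA_GAIN_CODE 15   /* placeholder clamp value */

/* Scale a chain's excess noise (vs. the quietest chain) into a delta-gain
 * code: multiply by 10/15, clamp, then set bit 2 as the driver code above does. */
static uint8_t delta_gain_code(uint32_t chain_noise, uint32_t min_noise)
{
        uint32_t delta = chain_noise - min_noise;
        uint8_t code = (uint8_t)((delta * 10) / 15);

        if (code > SKETCH_MAX_DELTA_GAIN_CODE)
                code = SKETCH_MAX_DELTA_GAIN_CODE;

        return code | (1 << 2);
}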
+
+/*
+ * Accumulate 16 beacons of signal and noise statistics for each of
+ *   3 receivers/antennas/rx-chains, then figure out:
+ * 1)  Which antennas are connected.
+ * 2)  Differential rx gain settings to balance the 3 receivers.
+ */
+void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
+{
+       struct iwl_chain_noise_data *data = NULL;
+
+       u32 chain_noise_a;
+       u32 chain_noise_b;
+       u32 chain_noise_c;
+       u32 chain_sig_a;
+       u32 chain_sig_b;
+       u32 chain_sig_c;
+       u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
+       u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
+       u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
+       u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
+       u16 i = 0;
+       u16 rxon_chnum = INITIALIZATION_VALUE;
+       u16 stat_chnum = INITIALIZATION_VALUE;
+       u8 rxon_band24;
+       u8 stat_band24;
+       unsigned long flags;
+       struct statistics_rx_non_phy *rx_info;
+
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+       if (priv->disable_chain_noise_cal)
+               return;
+
+       data = &(priv->chain_noise_data);
+
+       /*
+        * Accumulate just the first "chain_noise_num_beacons" after
+        * the first association, then we're done forever.
+        */
+       if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
+               if (data->state == IWL_CHAIN_NOISE_ALIVE)
+                       IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
+               return;
+       }
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       rx_info = &(((struct iwl_notif_statistics *)stat_resp)->
+                     rx.general);
+
+       if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
+               IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
+               spin_unlock_irqrestore(&priv->lock, flags);
+               return;
+       }
+
+       rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
+       rxon_chnum = le16_to_cpu(ctx->staging.channel);
+
+       stat_band24 = !!(((struct iwl_notif_statistics *)
+                        stat_resp)->flag &
+                        STATISTICS_REPLY_FLG_BAND_24G_MSK);
+       stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *)
+                                stat_resp)->flag) >> 16;
+
+       /* Make sure we accumulate data for just the associated channel
+        *   (even if scanning). */
+       if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
+               IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
+                               rxon_chnum, rxon_band24);
+               spin_unlock_irqrestore(&priv->lock, flags);
+               return;
+       }
+
+       /*
+        *  Accumulate beacon statistics values across
+        * "chain_noise_num_beacons"
+        */
+       chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
+                               IN_BAND_FILTER;
+       chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
+                               IN_BAND_FILTER;
+       chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
+                               IN_BAND_FILTER;
+
+       chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
+       chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
+       chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       data->beacon_count++;
+
+       data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
+       data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
+       data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
+
+       data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
+       data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
+       data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
+
+       IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n",
+                       rxon_chnum, rxon_band24, data->beacon_count);
+       IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n",
+                       chain_sig_a, chain_sig_b, chain_sig_c);
+       IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
+                       chain_noise_a, chain_noise_b, chain_noise_c);
+
+       /* If this is the "chain_noise_num_beacons", determine:
+        * 1)  Disconnected antennas (using signal strengths)
+        * 2)  Differential gain (using silence noise) to balance receivers */
+       if (data->beacon_count !=
+               priv->cfg->base_params->chain_noise_num_beacons)
+               return;
+
+       /* Analyze signal for disconnected antenna */
+       iwl4965_find_disconn_antenna(priv, average_sig, data);
+
+       /* Analyze noise for rx balance */
+       average_noise[0] = data->chain_noise_a /
+                          priv->cfg->base_params->chain_noise_num_beacons;
+       average_noise[1] = data->chain_noise_b /
+                          priv->cfg->base_params->chain_noise_num_beacons;
+       average_noise[2] = data->chain_noise_c /
+                          priv->cfg->base_params->chain_noise_num_beacons;
+
+       for (i = 0; i < NUM_RX_CHAINS; i++) {
+               if (!(data->disconn_array[i]) &&
+                  (average_noise[i] <= min_average_noise)) {
+                       /* This means that chain i is active and has
+                        * lower noise values so far: */
+                       min_average_noise = average_noise[i];
+                       min_average_noise_antenna_i = i;
+               }
+       }
+
+       IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n",
+                       average_noise[0], average_noise[1],
+                       average_noise[2]);
+
+       IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
+                       min_average_noise, min_average_noise_antenna_i);
+
+       iwl4965_gain_computation(priv, average_noise,
+                       min_average_noise_antenna_i, min_average_noise,
+                       iwl4965_find_first_chain(priv->cfg->valid_rx_ant));
+
+       /* Some power changes may have been made during the calibration.
+        * Update and commit the RXON
+        */
+       if (priv->cfg->ops->lib->update_chain_flags)
+               priv->cfg->ops->lib->update_chain_flags(priv);
+
+       data->state = IWL_CHAIN_NOISE_DONE;
+       iwl_legacy_power_update_mode(priv, false);
+}
+
+void iwl4965_reset_run_time_calib(struct iwl_priv *priv)
+{
+       int i;
+       memset(&(priv->sensitivity_data), 0,
+              sizeof(struct iwl_sensitivity_data));
+       memset(&(priv->chain_noise_data), 0,
+              sizeof(struct iwl_chain_noise_data));
+       for (i = 0; i < NUM_RX_CHAINS; i++)
+               priv->chain_noise_data.delta_gain_code[i] =
+                               CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
+
+       /* Ask for statistics now; the uCode will send notifications
+        * periodically after association. */
+       iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true);
+}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
new file mode 100644 (file)
index 0000000..f46c80e
--- /dev/null
@@ -0,0 +1,75 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#ifndef __iwl_4965_calib_h__
+#define __iwl_4965_calib_h__
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-commands.h"
+
+void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
+void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp);
+void iwl4965_init_sensitivity(struct iwl_priv *priv);
+void iwl4965_reset_run_time_calib(struct iwl_priv *priv);
+void iwl4965_calib_free_results(struct iwl_priv *priv);
+
+#endif /* __iwl_4965_calib_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
new file mode 100644 (file)
index 0000000..1c93665
--- /dev/null
@@ -0,0 +1,774 @@
+/******************************************************************************
+*
+* GPL LICENSE SUMMARY
+*
+* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+* USA
+*
+* The full GNU General Public License is included in this distribution
+* in the file called LICENSE.GPL.
+*
+* Contact Information:
+*  Intel Linux Wireless <ilw@linux.intel.com>
+* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*****************************************************************************/
+#include "iwl-4965.h"
+#include "iwl-4965-debugfs.h"
+
+static const char *fmt_value = "  %-30s %10u\n";
+static const char *fmt_table = "  %-30s %10u  %10u  %10u  %10u\n";
+static const char *fmt_header =
+       "%-32s    current  cumulative       delta         max\n";
+
+static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
+{
+       int p = 0;
+       u32 flag;
+
+       flag = le32_to_cpu(priv->_4965.statistics.flag);
+
+       p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
+       if (flag & UCODE_STATISTICS_CLEAR_MSK)
+               p += scnprintf(buf + p, bufsz - p,
+               "\tStatistics have been cleared\n");
+       p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+               (flag & UCODE_STATISTICS_FREQUENCY_MSK)
+               ? "2.4 GHz" : "5.2 GHz");
+       p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+               (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
+                ? "enabled" : "disabled");
+
+       return p;
+}
+
+ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+                               size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0;
+       char *buf;
+       int bufsz = sizeof(struct statistics_rx_phy) * 40 +
+                   sizeof(struct statistics_rx_non_phy) * 40 +
+                   sizeof(struct statistics_rx_ht_phy) * 40 + 400;
+       ssize_t ret;
+       struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+       struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+       struct statistics_rx_non_phy *general, *accum_general;
+       struct statistics_rx_non_phy *delta_general, *max_general;
+       struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
+
+       if (!iwl_legacy_is_alive(priv))
+               return -EAGAIN;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IWL_ERR(priv, "Can not allocate Buffer\n");
+               return -ENOMEM;
+       }
+
+       /*
+        * The statistics information displayed here is based on
+        * the last statistics notification from uCode and
+        * might not reflect the current uCode activity.
+        */
+       ofdm = &priv->_4965.statistics.rx.ofdm;
+       cck = &priv->_4965.statistics.rx.cck;
+       general = &priv->_4965.statistics.rx.general;
+       ht = &priv->_4965.statistics.rx.ofdm_ht;
+       accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm;
+       accum_cck = &priv->_4965.accum_statistics.rx.cck;
+       accum_general = &priv->_4965.accum_statistics.rx.general;
+       accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht;
+       delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm;
+       delta_cck = &priv->_4965.delta_statistics.rx.cck;
+       delta_general = &priv->_4965.delta_statistics.rx.general;
+       delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht;
+       max_ofdm = &priv->_4965.max_delta.rx.ofdm;
+       max_cck = &priv->_4965.max_delta.rx.cck;
+       max_general = &priv->_4965.max_delta.rx.general;
+       max_ht = &priv->_4965.max_delta.rx.ofdm_ht;
+
+       pos += iwl4965_statistics_flag(priv, buf, bufsz);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_header, "Statistics_Rx - OFDM:");
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "ina_cnt:",
+                        le32_to_cpu(ofdm->ina_cnt),
+                        accum_ofdm->ina_cnt,
+                        delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "fina_cnt:",
+                        le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+                        delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "plcp_err:",
+                        le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+                        delta_ofdm->plcp_err, max_ofdm->plcp_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "crc32_err:",
+                        le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+                        delta_ofdm->crc32_err, max_ofdm->crc32_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "overrun_err:",
+                        le32_to_cpu(ofdm->overrun_err),
+                        accum_ofdm->overrun_err, delta_ofdm->overrun_err,
+                        max_ofdm->overrun_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "early_overrun_err:",
+                        le32_to_cpu(ofdm->early_overrun_err),
+                        accum_ofdm->early_overrun_err,
+                        delta_ofdm->early_overrun_err,
+                        max_ofdm->early_overrun_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "crc32_good:",
+                        le32_to_cpu(ofdm->crc32_good),
+                        accum_ofdm->crc32_good, delta_ofdm->crc32_good,
+                        max_ofdm->crc32_good);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "false_alarm_cnt:",
+                        le32_to_cpu(ofdm->false_alarm_cnt),
+                        accum_ofdm->false_alarm_cnt,
+                        delta_ofdm->false_alarm_cnt,
+                        max_ofdm->false_alarm_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "fina_sync_err_cnt:",
+                        le32_to_cpu(ofdm->fina_sync_err_cnt),
+                        accum_ofdm->fina_sync_err_cnt,
+                        delta_ofdm->fina_sync_err_cnt,
+                        max_ofdm->fina_sync_err_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "sfd_timeout:",
+                        le32_to_cpu(ofdm->sfd_timeout),
+                        accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
+                        max_ofdm->sfd_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "fina_timeout:",
+                        le32_to_cpu(ofdm->fina_timeout),
+                        accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
+                        max_ofdm->fina_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "unresponded_rts:",
+                        le32_to_cpu(ofdm->unresponded_rts),
+                        accum_ofdm->unresponded_rts,
+                        delta_ofdm->unresponded_rts,
+                        max_ofdm->unresponded_rts);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "rxe_frame_lmt_ovrun:",
+                        le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+                        accum_ofdm->rxe_frame_limit_overrun,
+                        delta_ofdm->rxe_frame_limit_overrun,
+                        max_ofdm->rxe_frame_limit_overrun);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "sent_ack_cnt:",
+                        le32_to_cpu(ofdm->sent_ack_cnt),
+                        accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
+                        max_ofdm->sent_ack_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "sent_cts_cnt:",
+                        le32_to_cpu(ofdm->sent_cts_cnt),
+                        accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
+                        max_ofdm->sent_cts_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "sent_ba_rsp_cnt:",
+                        le32_to_cpu(ofdm->sent_ba_rsp_cnt),
+                        accum_ofdm->sent_ba_rsp_cnt,
+                        delta_ofdm->sent_ba_rsp_cnt,
+                        max_ofdm->sent_ba_rsp_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "dsp_self_kill:",
+                        le32_to_cpu(ofdm->dsp_self_kill),
+                        accum_ofdm->dsp_self_kill,
+                        delta_ofdm->dsp_self_kill,
+                        max_ofdm->dsp_self_kill);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "mh_format_err:",
+                        le32_to_cpu(ofdm->mh_format_err),
+                        accum_ofdm->mh_format_err,
+                        delta_ofdm->mh_format_err,
+                        max_ofdm->mh_format_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "re_acq_main_rssi_sum:",
+                        le32_to_cpu(ofdm->re_acq_main_rssi_sum),
+                        accum_ofdm->re_acq_main_rssi_sum,
+                        delta_ofdm->re_acq_main_rssi_sum,
+                        max_ofdm->re_acq_main_rssi_sum);
+
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_header, "Statistics_Rx - CCK:");
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "ina_cnt:",
+                        le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+                        delta_cck->ina_cnt, max_cck->ina_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "fina_cnt:",
+                        le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+                        delta_cck->fina_cnt, max_cck->fina_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "plcp_err:",
+                        le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+                        delta_cck->plcp_err, max_cck->plcp_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "crc32_err:",
+                        le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+                        delta_cck->crc32_err, max_cck->crc32_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "overrun_err:",
+                        le32_to_cpu(cck->overrun_err),
+                        accum_cck->overrun_err, delta_cck->overrun_err,
+                        max_cck->overrun_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "early_overrun_err:",
+                        le32_to_cpu(cck->early_overrun_err),
+                        accum_cck->early_overrun_err,
+                        delta_cck->early_overrun_err,
+                        max_cck->early_overrun_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "crc32_good:",
+                        le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+                        delta_cck->crc32_good, max_cck->crc32_good);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "false_alarm_cnt:",
+                        le32_to_cpu(cck->false_alarm_cnt),
+                        accum_cck->false_alarm_cnt,
+                        delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "fina_sync_err_cnt:",
+                        le32_to_cpu(cck->fina_sync_err_cnt),
+                        accum_cck->fina_sync_err_cnt,
+                        delta_cck->fina_sync_err_cnt,
+                        max_cck->fina_sync_err_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "sfd_timeout:",
+                        le32_to_cpu(cck->sfd_timeout),
+                        accum_cck->sfd_timeout, delta_cck->sfd_timeout,
+                        max_cck->sfd_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "fina_timeout:",
+                        le32_to_cpu(cck->fina_timeout),
+                        accum_cck->fina_timeout, delta_cck->fina_timeout,
+                        max_cck->fina_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "unresponded_rts:",
+                        le32_to_cpu(cck->unresponded_rts),
+                        accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+                        max_cck->unresponded_rts);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "rxe_frame_lmt_ovrun:",
+                        le32_to_cpu(cck->rxe_frame_limit_overrun),
+                        accum_cck->rxe_frame_limit_overrun,
+                        delta_cck->rxe_frame_limit_overrun,
+                        max_cck->rxe_frame_limit_overrun);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "sent_ack_cnt:",
+                        le32_to_cpu(cck->sent_ack_cnt),
+                        accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
+                        max_cck->sent_ack_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "sent_cts_cnt:",
+                        le32_to_cpu(cck->sent_cts_cnt),
+                        accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
+                        max_cck->sent_cts_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "sent_ba_rsp_cnt:",
+                        le32_to_cpu(cck->sent_ba_rsp_cnt),
+                        accum_cck->sent_ba_rsp_cnt,
+                        delta_cck->sent_ba_rsp_cnt,
+                        max_cck->sent_ba_rsp_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "dsp_self_kill:",
+                        le32_to_cpu(cck->dsp_self_kill),
+                        accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
+                        max_cck->dsp_self_kill);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "mh_format_err:",
+                        le32_to_cpu(cck->mh_format_err),
+                        accum_cck->mh_format_err, delta_cck->mh_format_err,
+                        max_cck->mh_format_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "re_acq_main_rssi_sum:",
+                        le32_to_cpu(cck->re_acq_main_rssi_sum),
+                        accum_cck->re_acq_main_rssi_sum,
+                        delta_cck->re_acq_main_rssi_sum,
+                        max_cck->re_acq_main_rssi_sum);
+
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_header, "Statistics_Rx - GENERAL:");
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "bogus_cts:",
+                        le32_to_cpu(general->bogus_cts),
+                        accum_general->bogus_cts, delta_general->bogus_cts,
+                        max_general->bogus_cts);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "bogus_ack:",
+                        le32_to_cpu(general->bogus_ack),
+                        accum_general->bogus_ack, delta_general->bogus_ack,
+                        max_general->bogus_ack);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "non_bssid_frames:",
+                        le32_to_cpu(general->non_bssid_frames),
+                        accum_general->non_bssid_frames,
+                        delta_general->non_bssid_frames,
+                        max_general->non_bssid_frames);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "filtered_frames:",
+                        le32_to_cpu(general->filtered_frames),
+                        accum_general->filtered_frames,
+                        delta_general->filtered_frames,
+                        max_general->filtered_frames);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "non_channel_beacons:",
+                        le32_to_cpu(general->non_channel_beacons),
+                        accum_general->non_channel_beacons,
+                        delta_general->non_channel_beacons,
+                        max_general->non_channel_beacons);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "channel_beacons:",
+                        le32_to_cpu(general->channel_beacons),
+                        accum_general->channel_beacons,
+                        delta_general->channel_beacons,
+                        max_general->channel_beacons);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "num_missed_bcon:",
+                        le32_to_cpu(general->num_missed_bcon),
+                        accum_general->num_missed_bcon,
+                        delta_general->num_missed_bcon,
+                        max_general->num_missed_bcon);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "adc_rx_saturation_time:",
+                        le32_to_cpu(general->adc_rx_saturation_time),
+                        accum_general->adc_rx_saturation_time,
+                        delta_general->adc_rx_saturation_time,
+                        max_general->adc_rx_saturation_time);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "ina_detect_search_tm:",
+                        le32_to_cpu(general->ina_detection_search_time),
+                        accum_general->ina_detection_search_time,
+                        delta_general->ina_detection_search_time,
+                        max_general->ina_detection_search_time);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "beacon_silence_rssi_a:",
+                        le32_to_cpu(general->beacon_silence_rssi_a),
+                        accum_general->beacon_silence_rssi_a,
+                        delta_general->beacon_silence_rssi_a,
+                        max_general->beacon_silence_rssi_a);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "beacon_silence_rssi_b:",
+                        le32_to_cpu(general->beacon_silence_rssi_b),
+                        accum_general->beacon_silence_rssi_b,
+                        delta_general->beacon_silence_rssi_b,
+                        max_general->beacon_silence_rssi_b);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "beacon_silence_rssi_c:",
+                        le32_to_cpu(general->beacon_silence_rssi_c),
+                        accum_general->beacon_silence_rssi_c,
+                        delta_general->beacon_silence_rssi_c,
+                        max_general->beacon_silence_rssi_c);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "interference_data_flag:",
+                        le32_to_cpu(general->interference_data_flag),
+                        accum_general->interference_data_flag,
+                        delta_general->interference_data_flag,
+                        max_general->interference_data_flag);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "channel_load:",
+                        le32_to_cpu(general->channel_load),
+                        accum_general->channel_load,
+                        delta_general->channel_load,
+                        max_general->channel_load);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "dsp_false_alarms:",
+                        le32_to_cpu(general->dsp_false_alarms),
+                        accum_general->dsp_false_alarms,
+                        delta_general->dsp_false_alarms,
+                        max_general->dsp_false_alarms);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "beacon_rssi_a:",
+                        le32_to_cpu(general->beacon_rssi_a),
+                        accum_general->beacon_rssi_a,
+                        delta_general->beacon_rssi_a,
+                        max_general->beacon_rssi_a);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "beacon_rssi_b:",
+                        le32_to_cpu(general->beacon_rssi_b),
+                        accum_general->beacon_rssi_b,
+                        delta_general->beacon_rssi_b,
+                        max_general->beacon_rssi_b);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "beacon_rssi_c:",
+                        le32_to_cpu(general->beacon_rssi_c),
+                        accum_general->beacon_rssi_c,
+                        delta_general->beacon_rssi_c,
+                        max_general->beacon_rssi_c);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "beacon_energy_a:",
+                        le32_to_cpu(general->beacon_energy_a),
+                        accum_general->beacon_energy_a,
+                        delta_general->beacon_energy_a,
+                        max_general->beacon_energy_a);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "beacon_energy_b:",
+                        le32_to_cpu(general->beacon_energy_b),
+                        accum_general->beacon_energy_b,
+                        delta_general->beacon_energy_b,
+                        max_general->beacon_energy_b);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "beacon_energy_c:",
+                        le32_to_cpu(general->beacon_energy_c),
+                        accum_general->beacon_energy_c,
+                        delta_general->beacon_energy_c,
+                        max_general->beacon_energy_c);
+
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_header, "Statistics_Rx - OFDM_HT:");
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "plcp_err:",
+                        le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
+                        delta_ht->plcp_err, max_ht->plcp_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "overrun_err:",
+                        le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
+                        delta_ht->overrun_err, max_ht->overrun_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "early_overrun_err:",
+                        le32_to_cpu(ht->early_overrun_err),
+                        accum_ht->early_overrun_err,
+                        delta_ht->early_overrun_err,
+                        max_ht->early_overrun_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "crc32_good:",
+                        le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
+                        delta_ht->crc32_good, max_ht->crc32_good);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "crc32_err:",
+                        le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
+                        delta_ht->crc32_err, max_ht->crc32_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "mh_format_err:",
+                        le32_to_cpu(ht->mh_format_err),
+                        accum_ht->mh_format_err,
+                        delta_ht->mh_format_err, max_ht->mh_format_err);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg_crc32_good:",
+                        le32_to_cpu(ht->agg_crc32_good),
+                        accum_ht->agg_crc32_good,
+                        delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg_mpdu_cnt:",
+                        le32_to_cpu(ht->agg_mpdu_cnt),
+                        accum_ht->agg_mpdu_cnt,
+                        delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg_cnt:",
+                        le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
+                        delta_ht->agg_cnt, max_ht->agg_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "unsupport_mcs:",
+                        le32_to_cpu(ht->unsupport_mcs),
+                        accum_ht->unsupport_mcs,
+                        delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
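Reviewer note: all three read handlers in this file follow the same debugfs
idiom: kzalloc() a bounded buffer, accumulate lines with
pos += scnprintf(buf + pos, bufsz - pos, ...) — scnprintf() returns the number
of characters actually stored, so pos can never run past bufsz — then copy out
with simple_read_from_buffer() and kfree(). Stripped to its skeleton (the
function name below is illustrative, not from this patch):

	static ssize_t example_stats_read(struct file *file, char __user *user_buf,
					  size_t count, loff_t *ppos)
	{
		struct iwl_priv *priv = file->private_data;
		const int bufsz = 4096;
		int pos = 0;
		ssize_t ret;
		char *buf;

		buf = kzalloc(bufsz, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Each scnprintf() stores at most bufsz - pos - 1 characters. */
		pos += scnprintf(buf + pos, bufsz - pos, fmt_value,
				 "example ina_cnt:",
				 le32_to_cpu(priv->_4965.statistics.rx.ofdm.ina_cnt));

		ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
		kfree(buf);
		return ret;
	}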
+
+ssize_t iwl4965_ucode_tx_stats_read(struct file *file,
+                               char __user *user_buf,
+                               size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0;
+       char *buf;
+       int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
+       ssize_t ret;
+       struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+       if (!iwl_legacy_is_alive(priv))
+               return -EAGAIN;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IWL_ERR(priv, "Can not allocate Buffer\n");
+               return -ENOMEM;
+       }
+
+       /* The statistics information displayed here is based on
+        * the last statistics notification from uCode and
+        * might not reflect the current uCode activity.
+        */
+       tx = &priv->_4965.statistics.tx;
+       accum_tx = &priv->_4965.accum_statistics.tx;
+       delta_tx = &priv->_4965.delta_statistics.tx;
+       max_tx = &priv->_4965.max_delta.tx;
+
+       pos += iwl4965_statistics_flag(priv, buf, bufsz);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_header, "Statistics_Tx:");
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "preamble:",
+                        le32_to_cpu(tx->preamble_cnt),
+                        accum_tx->preamble_cnt,
+                        delta_tx->preamble_cnt, max_tx->preamble_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "rx_detected_cnt:",
+                        le32_to_cpu(tx->rx_detected_cnt),
+                        accum_tx->rx_detected_cnt,
+                        delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "bt_prio_defer_cnt:",
+                        le32_to_cpu(tx->bt_prio_defer_cnt),
+                        accum_tx->bt_prio_defer_cnt,
+                        delta_tx->bt_prio_defer_cnt,
+                        max_tx->bt_prio_defer_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "bt_prio_kill_cnt:",
+                        le32_to_cpu(tx->bt_prio_kill_cnt),
+                        accum_tx->bt_prio_kill_cnt,
+                        delta_tx->bt_prio_kill_cnt,
+                        max_tx->bt_prio_kill_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "few_bytes_cnt:",
+                        le32_to_cpu(tx->few_bytes_cnt),
+                        accum_tx->few_bytes_cnt,
+                        delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "cts_timeout:",
+                        le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+                        delta_tx->cts_timeout, max_tx->cts_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "ack_timeout:",
+                        le32_to_cpu(tx->ack_timeout),
+                        accum_tx->ack_timeout,
+                        delta_tx->ack_timeout, max_tx->ack_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "expected_ack_cnt:",
+                        le32_to_cpu(tx->expected_ack_cnt),
+                        accum_tx->expected_ack_cnt,
+                        delta_tx->expected_ack_cnt,
+                        max_tx->expected_ack_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "actual_ack_cnt:",
+                        le32_to_cpu(tx->actual_ack_cnt),
+                        accum_tx->actual_ack_cnt,
+                        delta_tx->actual_ack_cnt,
+                        max_tx->actual_ack_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "dump_msdu_cnt:",
+                        le32_to_cpu(tx->dump_msdu_cnt),
+                        accum_tx->dump_msdu_cnt,
+                        delta_tx->dump_msdu_cnt,
+                        max_tx->dump_msdu_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "abort_nxt_frame_mismatch:",
+                        le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
+                        accum_tx->burst_abort_next_frame_mismatch_cnt,
+                        delta_tx->burst_abort_next_frame_mismatch_cnt,
+                        max_tx->burst_abort_next_frame_mismatch_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "abort_missing_nxt_frame:",
+                        le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
+                        accum_tx->burst_abort_missing_next_frame_cnt,
+                        delta_tx->burst_abort_missing_next_frame_cnt,
+                        max_tx->burst_abort_missing_next_frame_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "cts_timeout_collision:",
+                        le32_to_cpu(tx->cts_timeout_collision),
+                        accum_tx->cts_timeout_collision,
+                        delta_tx->cts_timeout_collision,
+                        max_tx->cts_timeout_collision);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "ack_ba_timeout_collision:",
+                        le32_to_cpu(tx->ack_or_ba_timeout_collision),
+                        accum_tx->ack_or_ba_timeout_collision,
+                        delta_tx->ack_or_ba_timeout_collision,
+                        max_tx->ack_or_ba_timeout_collision);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg ba_timeout:",
+                        le32_to_cpu(tx->agg.ba_timeout),
+                        accum_tx->agg.ba_timeout,
+                        delta_tx->agg.ba_timeout,
+                        max_tx->agg.ba_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg ba_resched_frames:",
+                        le32_to_cpu(tx->agg.ba_reschedule_frames),
+                        accum_tx->agg.ba_reschedule_frames,
+                        delta_tx->agg.ba_reschedule_frames,
+                        max_tx->agg.ba_reschedule_frames);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg scd_query_agg_frame:",
+                        le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
+                        accum_tx->agg.scd_query_agg_frame_cnt,
+                        delta_tx->agg.scd_query_agg_frame_cnt,
+                        max_tx->agg.scd_query_agg_frame_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg scd_query_no_agg:",
+                        le32_to_cpu(tx->agg.scd_query_no_agg),
+                        accum_tx->agg.scd_query_no_agg,
+                        delta_tx->agg.scd_query_no_agg,
+                        max_tx->agg.scd_query_no_agg);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg scd_query_agg:",
+                        le32_to_cpu(tx->agg.scd_query_agg),
+                        accum_tx->agg.scd_query_agg,
+                        delta_tx->agg.scd_query_agg,
+                        max_tx->agg.scd_query_agg);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg scd_query_mismatch:",
+                        le32_to_cpu(tx->agg.scd_query_mismatch),
+                        accum_tx->agg.scd_query_mismatch,
+                        delta_tx->agg.scd_query_mismatch,
+                        max_tx->agg.scd_query_mismatch);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg frame_not_ready:",
+                        le32_to_cpu(tx->agg.frame_not_ready),
+                        accum_tx->agg.frame_not_ready,
+                        delta_tx->agg.frame_not_ready,
+                        max_tx->agg.frame_not_ready);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg underrun:",
+                        le32_to_cpu(tx->agg.underrun),
+                        accum_tx->agg.underrun,
+                        delta_tx->agg.underrun, max_tx->agg.underrun);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg bt_prio_kill:",
+                        le32_to_cpu(tx->agg.bt_prio_kill),
+                        accum_tx->agg.bt_prio_kill,
+                        delta_tx->agg.bt_prio_kill,
+                        max_tx->agg.bt_prio_kill);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "agg rx_ba_rsp_cnt:",
+                        le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
+                        accum_tx->agg.rx_ba_rsp_cnt,
+                        delta_tx->agg.rx_ba_rsp_cnt,
+                        max_tx->agg.rx_ba_rsp_cnt);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+ssize_t
+iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
+                                    size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0;
+       char *buf;
+       int bufsz = sizeof(struct statistics_general) * 10 + 300;
+       ssize_t ret;
+       struct statistics_general_common *general, *accum_general;
+       struct statistics_general_common *delta_general, *max_general;
+       struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+       struct statistics_div *div, *accum_div, *delta_div, *max_div;
+
+       if (!iwl_legacy_is_alive(priv))
+               return -EAGAIN;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IWL_ERR(priv, "Can not allocate Buffer\n");
+               return -ENOMEM;
+       }
+
+       /* The statistics information displayed here is based on
+        * the last statistics notification from uCode and
+        * might not reflect the current uCode activity.
+        */
+       general = &priv->_4965.statistics.general.common;
+       dbg = &priv->_4965.statistics.general.common.dbg;
+       div = &priv->_4965.statistics.general.common.div;
+       accum_general = &priv->_4965.accum_statistics.general.common;
+       accum_dbg = &priv->_4965.accum_statistics.general.common.dbg;
+       accum_div = &priv->_4965.accum_statistics.general.common.div;
+       delta_general = &priv->_4965.delta_statistics.general.common;
+       max_general = &priv->_4965.max_delta.general.common;
+       delta_dbg = &priv->_4965.delta_statistics.general.common.dbg;
+       max_dbg = &priv->_4965.max_delta.general.common.dbg;
+       delta_div = &priv->_4965.delta_statistics.general.common.div;
+       max_div = &priv->_4965.max_delta.general.common.div;
+
+       pos += iwl4965_statistics_flag(priv, buf, bufsz);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_header, "Statistics_General:");
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_value, "temperature:",
+                        le32_to_cpu(general->temperature));
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_value, "ttl_timestamp:",
+                        le32_to_cpu(general->ttl_timestamp));
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "burst_check:",
+                        le32_to_cpu(dbg->burst_check),
+                        accum_dbg->burst_check,
+                        delta_dbg->burst_check, max_dbg->burst_check);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "burst_count:",
+                        le32_to_cpu(dbg->burst_count),
+                        accum_dbg->burst_count,
+                        delta_dbg->burst_count, max_dbg->burst_count);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "wait_for_silence_timeout_count:",
+                        le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
+                        accum_dbg->wait_for_silence_timeout_cnt,
+                        delta_dbg->wait_for_silence_timeout_cnt,
+                        max_dbg->wait_for_silence_timeout_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "sleep_time:",
+                        le32_to_cpu(general->sleep_time),
+                        accum_general->sleep_time,
+                        delta_general->sleep_time, max_general->sleep_time);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "slots_out:",
+                        le32_to_cpu(general->slots_out),
+                        accum_general->slots_out,
+                        delta_general->slots_out, max_general->slots_out);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "slots_idle:",
+                        le32_to_cpu(general->slots_idle),
+                        accum_general->slots_idle,
+                        delta_general->slots_idle, max_general->slots_idle);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "tx_on_a:",
+                        le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+                        delta_div->tx_on_a, max_div->tx_on_a);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "tx_on_b:",
+                        le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+                        delta_div->tx_on_b, max_div->tx_on_b);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "exec_time:",
+                        le32_to_cpu(div->exec_time), accum_div->exec_time,
+                        delta_div->exec_time, max_div->exec_time);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "probe_time:",
+                        le32_to_cpu(div->probe_time), accum_div->probe_time,
+                        delta_div->probe_time, max_div->probe_time);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "rx_enable_counter:",
+                        le32_to_cpu(general->rx_enable_counter),
+                        accum_general->rx_enable_counter,
+                        delta_general->rx_enable_counter,
+                        max_general->rx_enable_counter);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        fmt_table, "num_of_sos_states:",
+                        le32_to_cpu(general->num_of_sos_states),
+                        accum_general->num_of_sos_states,
+                        delta_general->num_of_sos_states,
+                        max_general->num_of_sos_states);
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
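Reviewer note: nothing in this file registers the three handlers with debugfs;
that wiring lives in the shared iwlegacy debugfs code. For readers tracing the
data flow, a registration of the usual shape would look roughly like the
hypothetical sketch below (the names and the dentry handling are illustrative,
not part of this patch):

	static int example_open(struct inode *inode, struct file *file)
	{
		file->private_data = inode->i_private;	/* struct iwl_priv * */
		return 0;
	}

	static const struct file_operations example_ucode_rx_stats_ops = {
		.read	= iwl4965_ucode_rx_stats_read,
		.open	= example_open,
		.llseek	= default_llseek,
		.owner	= THIS_MODULE,
	};

	static void example_register(struct iwl_priv *priv, struct dentry *dir)
	{
		debugfs_create_file("ucode_rx_stats", S_IRUSR, dir, priv,
				    &example_ucode_rx_stats_ops);
	}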
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
new file mode 100644 (file)
index 0000000..6c8e353
--- /dev/null
@@ -0,0 +1,59 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-debug.h"
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+                               size_t count, loff_t *ppos);
+ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+                               size_t count, loff_t *ppos);
+ssize_t iwl4965_ucode_general_stats_read(struct file *file,
+                       char __user *user_buf, size_t count, loff_t *ppos);
+#else
+static ssize_t
+iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+                                      size_t count, loff_t *ppos)
+{
+       return 0;
+}
+static ssize_t
+iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+                                      size_t count, loff_t *ppos)
+{
+       return 0;
+}
+static ssize_t
+iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
+                                           size_t count, loff_t *ppos)
+{
+       return 0;
+}
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
new file mode 100644 (file)
index 0000000..cb9baab
--- /dev/null
@@ -0,0 +1,154 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-commands.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-debug.h"
+#include "iwl-4965.h"
+#include "iwl-io.h"
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+******************************************************************************/
+
+/*
+ * The device's EEPROM semaphore prevents conflicts between driver and uCode
+ * when accessing the EEPROM; each access is a series of pulses to/from the
+ * EEPROM chip, not a single event, so even reads could conflict if they
+ * weren't arbitrated by the semaphore.
+ */
+int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv)
+{
+       u16 count;
+       int ret;
+
+       for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
+               /* Request semaphore */
+               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+                           CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+               /* See if we got it */
+               ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
+                               CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+                               CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+                               EEPROM_SEM_TIMEOUT);
+               if (ret >= 0) {
+                       IWL_DEBUG_IO(priv,
+                               "Acquired semaphore after %d tries.\n",
+                               count+1);
+                       return ret;
+               }
+       }
+
+       return ret;
+}
+
+void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv)
+{
+       iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
+               CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+}
+
+int iwl4965_eeprom_check_version(struct iwl_priv *priv)
+{
+       u16 eeprom_ver;
+       u16 calib_ver;
+
+       eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
+       calib_ver = iwl_legacy_eeprom_query16(priv,
+                       EEPROM_4965_CALIB_VERSION_OFFSET);
+
+       if (eeprom_ver < priv->cfg->eeprom_ver ||
+           calib_ver < priv->cfg->eeprom_calib_ver)
+               goto err;
+
+       IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
+                eeprom_ver, calib_ver);
+
+       return 0;
+err:
+       IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
+                 "CALIB=0x%x < 0x%x\n",
+                 eeprom_ver, priv->cfg->eeprom_ver,
+                 calib_ver,  priv->cfg->eeprom_calib_ver);
+       return -EINVAL;
+
+}
+
+void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
+{
+       const u8 *addr = iwl_legacy_eeprom_query_addr(priv,
+                                       EEPROM_MAC_ADDRESS);
+       memcpy(mac, addr, ETH_ALEN);
+}
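Reviewer note: the semaphore helpers above only arbitrate ownership between
driver and uCode; the actual EEPROM reads happen between the two calls in the
shared EEPROM init path. The intended discipline, sketched with hypothetical
names (none of this is in the patch), is simply acquire, do the raw reads,
release:

	static int example_eeprom_access(struct iwl_priv *priv)
	{
		int ret;

		ret = iwl4965_eeprom_acquire_semaphore(priv);
		if (ret < 0)
			return ret;	/* uCode still owns the EEPROM */

		/* ... raw EEPROM register reads go here ... */

		iwl4965_eeprom_release_semaphore(priv);
		return 0;
	}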
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
new file mode 100644 (file)
index 0000000..08b189c
--- /dev/null
@@ -0,0 +1,814 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+/*
+ * Please use this file (iwl-4965-hw.h) only for hardware-related definitions.
+ * Use iwl-commands.h for uCode API definitions.
+ * Use iwl-dev.h for driver implementation definitions.
+ */
+
+#ifndef __iwl_4965_hw_h__
+#define __iwl_4965_hw_h__
+
+#include "iwl-fh.h"
+
+/* EEPROM */
+#define IWL4965_EEPROM_IMG_SIZE                        1024
+
+/*
+ * uCode queue management definitions ...
+ * The first queue used for block-ack aggregation is #7 (4965 only).
+ * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
+ */
+#define IWL49_FIRST_AMPDU_QUEUE        7
+
+/* Sizes and addresses for instruction and data memory (SRAM) in
+ * 4965's embedded processor.  Driver access is via HBUS_TARG_MEM_* regs. */
+#define IWL49_RTC_INST_LOWER_BOUND             (0x000000)
+#define IWL49_RTC_INST_UPPER_BOUND             (0x018000)
+
+#define IWL49_RTC_DATA_LOWER_BOUND             (0x800000)
+#define IWL49_RTC_DATA_UPPER_BOUND             (0x80A000)
+
+#define IWL49_RTC_INST_SIZE  (IWL49_RTC_INST_UPPER_BOUND - \
+                               IWL49_RTC_INST_LOWER_BOUND)
+#define IWL49_RTC_DATA_SIZE  (IWL49_RTC_DATA_UPPER_BOUND - \
+                               IWL49_RTC_DATA_LOWER_BOUND)
+
+#define IWL49_MAX_INST_SIZE IWL49_RTC_INST_SIZE
+#define IWL49_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE
+
+/* Size of uCode instruction memory in bootstrap state machine */
+#define IWL49_MAX_BSM_SIZE BSM_SRAM_SIZE
+
+static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
+{
+       return (addr >= IWL49_RTC_DATA_LOWER_BOUND) &&
+              (addr < IWL49_RTC_DATA_UPPER_BOUND);
+}
+
+/********************* START TEMPERATURE *************************************/
+
+/**
+ * 4965 temperature calculation.
+ *
+ * The driver must calculate the device temperature before calculating
+ * a txpower setting (amplifier gain is temperature dependent).  The
+ * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
+ * values used for the life of the driver, and one of which (R4) is the
+ * real-time temperature indicator.
+ *
+ * uCode provides all 4 values to the driver via the "initialize alive"
+ * notification (see struct iwl4965_init_alive_resp).  After the runtime uCode
+ * image loads, uCode updates the R4 value via statistics notifications
+ * (see STATISTICS_NOTIFICATION), which occur after each received beacon
+ * when associated, or can be requested via REPLY_STATISTICS_CMD.
+ *
+ * NOTE:  uCode provides the R4 value as a 23-bit signed value.  Driver
+ *        must sign-extend to 32 bits before applying formula below.
+ *
+ * Formula:
+ *
+ * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
+ *
+ * NOTE:  The basic formula is 259 * (R4-R2) / (R3-R1).  The 97/100 is
+ * an additional correction, which should be centered around 0 degrees
+ * Celsius (273 Kelvin).  The added 8 (3 percent of 273) compensates for
+ * applying the 97/100 correction to absolute (Kelvin) values near 273,
+ * i.e. it re-centers the correction around 0 degrees Celsius.
+ *
+ * Subtract 273 from the Kelvin value to find degrees Celsius, for comparing
+ * current temperature with factory-measured temperatures when calculating
+ * txpower settings.
+ */
+#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
+#define TEMPERATURE_CALIB_A_VAL 259
+
+/* Limit range of calculated temperature to be between these Kelvin values */
+#define IWL_TX_POWER_TEMPERATURE_MIN  (263)
+#define IWL_TX_POWER_TEMPERATURE_MAX  (410)
+
+#define IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
+       (((t) < IWL_TX_POWER_TEMPERATURE_MIN) || \
+        ((t) > IWL_TX_POWER_TEMPERATURE_MAX))
+
+/********************* END TEMPERATURE ***************************************/
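Reviewer note: written out as code, the formula documented above reduces to the
small helper below. This is only a worked example of the arithmetic using the
constants defined in this header (the helper name is hypothetical); the sketch
assumes R4 has already been sign-extended to 32 bits, as the NOTE above
requires, and that R3 != R1, and it leaves the range check with
IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE() to the caller.

	/* Illustrative only: degrees Kelvin from the four calibration values. */
	static inline s32 example_4965_temp_kelvin(s32 R1, s32 R2, s32 R3, s32 R4)
	{
		s32 temp;

		/* Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8 */
		temp = TEMPERATURE_CALIB_A_VAL * (R4 - R2);
		temp /= (R3 - R1);
		temp = (temp * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;

		return temp;
	}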
+
+/********************* START TXPOWER *****************************************/
+
+/**
+ * 4965 txpower calculations rely on information from three sources:
+ *
+ *     1) EEPROM
+ *     2) "initialize" alive notification
+ *     3) statistics notifications
+ *
+ * EEPROM data consists of:
+ *
+ * 1)  Regulatory information (max txpower and channel usage flags) is provided
+ *     separately for each channel that can possibly be supported by 4965.
+ *     40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
+ *     (legacy) channels.
+ *
+ *     See struct iwl4965_eeprom_channel for format, and struct iwl4965_eeprom
+ *     for locations in EEPROM.
+ *
+ * 2)  Factory txpower calibration information is provided separately for
+ *     sub-bands of contiguous channels.  2.4 GHz has just one sub-band,
+ *     but 5 GHz has several sub-bands.
+ *
+ *     In addition, per-band (2.4 and 5 GHz) saturation txpowers are provided.
+ *
+ *     See struct iwl4965_eeprom_calib_info (and the tree of structures
+ *     contained within it) for format, and struct iwl4965_eeprom for
+ *     locations in EEPROM.
+ *
+ * "Initialization alive" notification (see struct iwl4965_init_alive_resp)
+ * consists of:
+ *
+ * 1)  Temperature calculation parameters.
+ *
+ * 2)  Power supply voltage measurement.
+ *
+ * 3)  Tx gain compensation to balance 2 transmitters for MIMO use.
+ *
+ * Statistics notifications deliver:
+ *
+ * 1)  Current values for temperature param R4.
+ */
+
+/**
+ * To calculate a txpower setting for a given desired target txpower, channel,
+ * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
+ * support MIMO and transmit diversity), driver must do the following:
+ *
+ * 1)  Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
+ *     Do not exceed regulatory limit; reduce target txpower if necessary.
+ *
+ *     If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
+ *     2 transmitters will be used simultaneously; driver must reduce the
+ *     regulatory limit by 3 dB (half-power) for each transmitter, so the
+ *     combined total output of the 2 transmitters is within regulatory limits.
+ *
+ *
+ * 2)  Compare target txpower vs. (EEPROM) saturation txpower *reduced by
+ *     backoff for this bit rate*.  Do not exceed (saturation - backoff[rate]);
+ *     reduce target txpower if necessary.
+ *
+ *     Backoff values below are in 1/2 dB units (equivalent to steps in
+ *     txpower gain tables):
+ *
+ *     OFDM 6 - 36 MBit:  10 steps (5 dB)
+ *     OFDM 48 MBit:      15 steps (7.5 dB)
+ *     OFDM 54 MBit:      17 steps (8.5 dB)
+ *     OFDM 60 MBit:      20 steps (10 dB)
+ *     CCK all rates:     10 steps (5 dB)
+ *
+ *     Backoff values apply to saturation txpower on a per-transmitter basis;
+ *     when using MIMO (2 transmitters), each transmitter uses the same
+ *     saturation level provided in EEPROM, and the same backoff values;
+ *     no reduction (such as with regulatory txpower limits) is required.
+ *
+ *     Saturation and backoff values apply equally to 20 MHz (legacy) channel
+ *     widths and 40 MHz (.11n HT40) channel widths; there is no separate
+ *     factory measurement for HT40 channels.
+ *
+ *     The result of this step is the final target txpower.  The rest of
+ *     the steps figure out the proper settings for the device to achieve
+ *     that target txpower.
+ *
+ *
+ * 3)  Determine (EEPROM) calibration sub band for the target channel, by
+ *     comparing against first and last channels in each sub band
+ *     (see struct iwl4965_eeprom_calib_subband_info).
+ *
+ *
+ * 4)  Linearly interpolate (EEPROM) factory calibration measurement sets,
+ *     referencing the 2 factory-measured (sample) channels within the sub band.
+ *
+ *     Interpolation is based on difference between target channel's frequency
+ *     and the sample channels' frequencies.  Since channel numbers are based
+ *     on frequency (5 MHz between each channel number), this is equivalent
+ *     to interpolating based on channel number differences.
+ *
+ *     Note that the sample channels may or may not be the channels at the
+ *     edges of the sub band.  The target channel may be "outside" of the
+ *     span of the sampled channels.
+ *
+ *     Driver may choose the pair (for 2 Tx chains) of measurements (see
+ *     struct iwl4965_eeprom_calib_ch_info) for which the actual measured
+ *     txpower comes closest to the desired txpower.  Usually, though,
+ *     the middle set of measurements is closest to the regulatory limits,
+ *     and is therefore a good choice for all txpower calculations (this
+ *     assumes that high accuracy is needed for maximizing legal txpower,
+ *     while lower txpower configurations do not need as much accuracy).
+ *
+ *     Driver should interpolate both members of the chosen measurement pair,
+ *     i.e. for both Tx chains (radio transmitters), unless the driver knows
+ *     that only one of the chains will be used (e.g. only one tx antenna
+ *     connected, but this should be unusual).  The rate scaling algorithm
+ *     switches antennas to find best performance, so both Tx chains will
+ *     be used (although only one at a time) even for non-MIMO transmissions.
+ *
+ *     Driver should interpolate factory values for temperature, gain table
+ *     index, and actual power.  The power amplifier detector values are
+ *     not used by the driver.
+ *
+ *     Sanity check:  If the target channel happens to be one of the sample
+ *     channels, the results should agree with the sample channel's
+ *     measurements!
+ *
+ *
+ * 5)  Find difference between desired txpower and (interpolated)
+ *     factory-measured txpower.  Using (interpolated) factory gain table index
+ *     (shown elsewhere) as a starting point, adjust this index lower to
+ *     increase txpower, or higher to decrease txpower, until the target
+ *     txpower is reached.  Each step in the gain table is 1/2 dB.
+ *
+ *     For example, if the factory-measured txpower is 16 dBm and the target
+ *     txpower is 13 dBm, add 6 steps to the factory gain index to reduce
+ *     txpower by 3 dB (see the sketch following this comment block).
+ *
+ *
+ * 6)  Find difference between current device temperature and (interpolated)
+ *     factory-measured temperature for sub-band.  Factory values are in
+ *     degrees Celsius.  To calculate current temperature, see comments for
+ *     "4965 temperature calculation".
+ *
+ *     If current temperature is higher than factory temperature, driver must
+ *     increase gain (lower gain table index), and vice versa.
+ *
+ *     Temperature affects gain differently for different channels:
+ *
+ *     2.4 GHz all channels:  3.5 degrees per half-dB step
+ *     5 GHz channels 34-43:  4.5 degrees per half-dB step
+ *     5 GHz channels >= 44:  4.0 degrees per half-dB step
+ *
+ *     NOTE:  Temperature can increase rapidly when transmitting, especially
+ *            with heavy traffic at high txpowers.  Driver should update
+ *            temperature calculations often under these conditions to
+ *            maintain strong txpower in the face of rising temperature.
+ *
+ *
+ * 7)  Find difference between current power supply voltage indicator
+ *     (from "initialize alive") and factory-measured power supply voltage
+ *     indicator (EEPROM).
+ *
+ *     If the current voltage is higher (indicator is lower) than factory
+ *     voltage, gain should be reduced (gain table index increased) by:
+ *
+ *     (eeprom - current) / 7
+ *
+ *     If the current voltage is lower (indicator is higher) than factory
+ *     voltage, gain should be increased (gain table index decreased) by:
+ *
+ *     2 * (current - eeprom) / 7
+ *
+ *     If number of index steps in either direction turns out to be > 2,
+ *     something is wrong ... just use 0.
+ *
+ *     NOTE:  Voltage compensation is independent of band/channel.
+ *
+ *     NOTE:  "Initialize" uCode measures current voltage, which is assumed
+ *            to be constant after this initial measurement.  Voltage
+ *            compensation for txpower (number of steps in gain table)
+ *            may be calculated once and used until the next uCode bootload.
+ *
+ *
+ * 8)  If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
+ *     adjust txpower for each transmitter chain, so txpower is balanced
+ *     between the two chains.  There are 5 pairs of tx_atten[group][chain]
+ *     values in "initialize alive", one pair for each of 5 channel ranges:
+ *
+ *     Group 0:  5 GHz channel 34-43
+ *     Group 1:  5 GHz channel 44-70
+ *     Group 2:  5 GHz channel 71-124
+ *     Group 3:  5 GHz channel 125-200
+ *     Group 4:  2.4 GHz all channels
+ *
+ *     Add the tx_atten[group][chain] value to the index for the target chain.
+ *     The values are signed, but are in pairs of 0 and a non-negative number,
+ *     so as to reduce gain (if necessary) of the "hotter" channel.  This
+ *     avoids any need to double-check for regulatory compliance after
+ *     this step.
+ *
+ *
+ * 9)  If setting up for a CCK rate, lower the gain by adding a CCK compensation
+ *     value to the index:
+ *
+ *     Hardware rev B:  9 steps (4.5 dB)
+ *     Hardware rev C:  5 steps (2.5 dB)
+ *
+ *     Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
+ *     bits [3:2], 1 = B, 2 = C.
+ *
+ *     NOTE:  This compensation is in addition to any saturation backoff that
+ *            might have been applied in an earlier step.
+ *
+ *
+ * 10) Select the gain table, based on band (2.4 vs 5 GHz).
+ *
+ *     Limit the adjusted index to stay within the table!
+ *
+ *
+ * 11) Read gain table entries for DSP and radio gain, place into appropriate
+ *     location(s) in command (struct iwl4965_txpowertable_cmd).
+ */
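+
+/*
+ * Illustration only, not from the driver: the step-5 arithmetic above,
+ * expressed in half-dB units (one gain table step each).  The helper name
+ * and its parameters are assumptions.
+ */
+#if 0
+static int example_gain_index_for_target(int factory_gain_index,
+                                        int factory_pwr_half_dbm,
+                                        int target_pwr_half_dbm)
+{
+       /* Lower index = more gain.  Each index step is 1/2 dB, so the index
+        * moves by the half-dB difference between factory-measured and target
+        * txpower (e.g. 16 dBm -> 13 dBm is 6 half-dB, i.e. +6 steps). */
+       return factory_gain_index +
+              (factory_pwr_half_dbm - target_pwr_half_dbm);
+}
+#endif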
+
+/**
+ * When MIMO is used (2 transmitters operating simultaneously), driver should
+ * limit each transmitter to deliver a max of 3 dB below the regulatory limit
+ * for the device.  That is, use half power for each transmitter, so total
+ * txpower is within regulatory limits.
+ *
+ * The value "6" is the number of gain table steps needed to reduce power by
+ * 3 dB; each step is 1/2 dB.
+ */
+#define IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
+
+/**
+ * CCK gain compensation.
+ *
+ * When calculating txpowers for CCK, after making sure that the target power
+ * is within regulatory and saturation limits, driver must additionally
+ * back off gain by adding these values to the gain table index.
+ *
+ * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
+ * bits [3:2], 1 = B, 2 = C.
+ */
+#define IWL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
+#define IWL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
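+
+/*
+ * Illustration only, not from the driver: selecting the CCK backoff from the
+ * hardware rev bits described above.  The helper name is an assumption;
+ * iwl_read32() and CSR_HW_REV_WA_REG are used elsewhere in this driver.
+ */
+#if 0
+static int example_cck_backoff_steps(struct iwl_priv *priv)
+{
+       /* CSR_HW_REV_WA_REG bits [3:2]: 1 = rev B, 2 = rev C */
+       u32 rev = (iwl_read32(priv, CSR_HW_REV_WA_REG) >> 2) & 0x3;
+
+       return (rev == 1) ? IWL_TX_POWER_CCK_COMPENSATION_B_STEP :
+                           IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
+}
+#endif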
+
+/*
+ * 4965 power supply voltage compensation for txpower
+ */
+#define TX_POWER_IWL_VOLTAGE_CODES_PER_03V   (7)
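+
+/*
+ * Illustration only, not from the driver: the step-7 voltage compensation
+ * described earlier, returning the number of gain table index steps to add
+ * (positive = less gain).  The helper name is an assumption; both arguments
+ * are voltage *indicators* (a lower indicator means a higher voltage).
+ */
+#if 0
+static s32 example_voltage_gain_steps(s32 eeprom_ind, s32 current_ind)
+{
+       s32 steps;
+
+       if (current_ind < eeprom_ind)
+               /* current voltage above factory voltage: reduce gain */
+               steps = (eeprom_ind - current_ind) /
+                       TX_POWER_IWL_VOLTAGE_CODES_PER_03V;
+       else
+               /* current voltage below factory voltage: increase gain */
+               steps = -(2 * (current_ind - eeprom_ind) /
+                         TX_POWER_IWL_VOLTAGE_CODES_PER_03V);
+
+       /* more than 2 steps in either direction means something is wrong */
+       if (steps < -2 || steps > 2)
+               steps = 0;
+       return steps;
+}
+#endif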
+
+/**
+ * Gain tables.
+ *
+ * The following tables contain pairs of values for setting txpower, i.e.
+ * gain settings for the output of the device's digital signal processor (DSP),
+ * and for the analog gain structure of the transmitter.
+ *
+ * Each entry in the gain tables represents a step of 1/2 dB.  Note that these
+ * are *relative* steps, not indications of absolute output power.  Output
+ * power varies with temperature, voltage, and channel frequency, and also
+ * requires consideration of average power (to satisfy regulatory constraints),
+ * and peak power (to avoid distortion of the output signal).
+ *
+ * Each entry contains two values:
+ * 1)  DSP gain (sometimes called DSP attenuation).  This is a fine-grained
+ *     linear value that multiplies the output of the digital signal processor,
+ *     before being sent to the analog radio.
+ * 2)  Radio gain.  This sets the analog gain of the radio Tx path.
+ *     It is a coarser setting, and behaves in a logarithmic (dB) fashion.
+ *
+ * EEPROM contains factory calibration data for txpower.  This maps actual
+ * measured txpower levels to gain settings in the "well known" tables
+ * below ("well-known" means here that both factory calibration *and* the
+ * driver work with the same table).
+ *
+ * There are separate tables for 2.4 GHz and 5 GHz bands.  The 5 GHz table
+ * has an extension (into negative indexes), in case the driver needs to
+ * boost power setting for high device temperatures (higher than would be
+ * present during factory calibration).  A 5 GHz EEPROM index of "40"
+ * corresponds to the 49th entry in the table used by the driver.
+ */
+#define MIN_TX_GAIN_INDEX              (0)  /* highest gain, lowest idx, 2.4 */
+#define MIN_TX_GAIN_INDEX_52GHZ_EXT    (-9) /* highest gain, lowest idx, 5 */
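+
+/*
+ * Illustration only, not from the driver: mapping a 5 GHz EEPROM gain index
+ * onto the extended driver table described above (EEPROM index 40 -> array
+ * offset 49).  The helper name is an assumption.
+ */
+#if 0
+static int example_5ghz_gain_table_offset(int eeprom_index)
+{
+       /* the 5 GHz table starts at index -9, so shift by 9 */
+       return eeprom_index - MIN_TX_GAIN_INDEX_52GHZ_EXT;
+}
+#endif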
+
+/**
+ * 2.4 GHz gain table
+ *
+ * Index    Dsp gain   Radio gain
+ *   0        110         0x3f      (highest gain)
+ *   1        104         0x3f
+ *   2         98         0x3f
+ *   3        110         0x3e
+ *   4        104         0x3e
+ *   5         98         0x3e
+ *   6        110         0x3d
+ *   7        104         0x3d
+ *   8         98         0x3d
+ *   9        110         0x3c
+ *  10        104         0x3c
+ *  11         98         0x3c
+ *  12        110         0x3b
+ *  13        104         0x3b
+ *  14         98         0x3b
+ *  15        110         0x3a
+ *  16        104         0x3a
+ *  17         98         0x3a
+ *  18        110         0x39
+ *  19        104         0x39
+ *  20         98         0x39
+ *  21        110         0x38
+ *  22        104         0x38
+ *  23         98         0x38
+ *  24        110         0x37
+ *  25        104         0x37
+ *  26         98         0x37
+ *  27        110         0x36
+ *  28        104         0x36
+ *  29         98         0x36
+ *  30        110         0x35
+ *  31        104         0x35
+ *  32         98         0x35
+ *  33        110         0x34
+ *  34        104         0x34
+ *  35         98         0x34
+ *  36        110         0x33
+ *  37        104         0x33
+ *  38         98         0x33
+ *  39        110         0x32
+ *  40        104         0x32
+ *  41         98         0x32
+ *  42        110         0x31
+ *  43        104         0x31
+ *  44         98         0x31
+ *  45        110         0x30
+ *  46        104         0x30
+ *  47         98         0x30
+ *  48        110          0x6
+ *  49        104          0x6
+ *  50         98          0x6
+ *  51        110          0x5
+ *  52        104          0x5
+ *  53         98          0x5
+ *  54        110          0x4
+ *  55        104          0x4
+ *  56         98          0x4
+ *  57        110          0x3
+ *  58        104          0x3
+ *  59         98          0x3
+ *  60        110          0x2
+ *  61        104          0x2
+ *  62         98          0x2
+ *  63        110          0x1
+ *  64        104          0x1
+ *  65         98          0x1
+ *  66        110          0x0
+ *  67        104          0x0
+ *  68         98          0x0
+ *  69         97            0
+ *  70         96            0
+ *  71         95            0
+ *  72         94            0
+ *  73         93            0
+ *  74         92            0
+ *  75         91            0
+ *  76         90            0
+ *  77         89            0
+ *  78         88            0
+ *  79         87            0
+ *  80         86            0
+ *  81         85            0
+ *  82         84            0
+ *  83         83            0
+ *  84         82            0
+ *  85         81            0
+ *  86         80            0
+ *  87         79            0
+ *  88         78            0
+ *  89         77            0
+ *  90         76            0
+ *  91         75            0
+ *  92         74            0
+ *  93         73            0
+ *  94         72            0
+ *  95         71            0
+ *  96         70            0
+ *  97         69            0
+ *  98         68            0
+ */
+
+/**
+ * 5 GHz gain table
+ *
+ * Index    Dsp gain   Radio gain
+ *  -9        123         0x3F      (highest gain)
+ *  -8        117         0x3F
+ *  -7        110         0x3F
+ *  -6        104         0x3F
+ *  -5         98         0x3F
+ *  -4        110         0x3E
+ *  -3        104         0x3E
+ *  -2         98         0x3E
+ *  -1        110         0x3D
+ *   0        104         0x3D
+ *   1         98         0x3D
+ *   2        110         0x3C
+ *   3        104         0x3C
+ *   4         98         0x3C
+ *   5        110         0x3B
+ *   6        104         0x3B
+ *   7         98         0x3B
+ *   8        110         0x3A
+ *   9        104         0x3A
+ *  10         98         0x3A
+ *  11        110         0x39
+ *  12        104         0x39
+ *  13         98         0x39
+ *  14        110         0x38
+ *  15        104         0x38
+ *  16         98         0x38
+ *  17        110         0x37
+ *  18        104         0x37
+ *  19         98         0x37
+ *  20        110         0x36
+ *  21        104         0x36
+ *  22         98         0x36
+ *  23        110         0x35
+ *  24        104         0x35
+ *  25         98         0x35
+ *  26        110         0x34
+ *  27        104         0x34
+ *  28         98         0x34
+ *  29        110         0x33
+ *  30        104         0x33
+ *  31         98         0x33
+ *  32        110         0x32
+ *  33        104         0x32
+ *  34         98         0x32
+ *  35        110         0x31
+ *  36        104         0x31
+ *  37         98         0x31
+ *  38        110         0x30
+ *  39        104         0x30
+ *  40         98         0x30
+ *  41        110         0x25
+ *  42        104         0x25
+ *  43         98         0x25
+ *  44        110         0x24
+ *  45        104         0x24
+ *  46         98         0x24
+ *  47        110         0x23
+ *  48        104         0x23
+ *  49         98         0x23
+ *  50        110         0x22
+ *  51        104         0x18
+ *  52         98         0x18
+ *  53        110         0x17
+ *  54        104         0x17
+ *  55         98         0x17
+ *  56        110         0x16
+ *  57        104         0x16
+ *  58         98         0x16
+ *  59        110         0x15
+ *  60        104         0x15
+ *  61         98         0x15
+ *  62        110         0x14
+ *  63        104         0x14
+ *  64         98         0x14
+ *  65        110         0x13
+ *  66        104         0x13
+ *  67         98         0x13
+ *  68        110         0x12
+ *  69        104         0x08
+ *  70         98         0x08
+ *  71        110         0x07
+ *  72        104         0x07
+ *  73         98         0x07
+ *  74        110         0x06
+ *  75        104         0x06
+ *  76         98         0x06
+ *  77        110         0x05
+ *  78        104         0x05
+ *  79         98         0x05
+ *  80        110         0x04
+ *  81        104         0x04
+ *  82         98         0x04
+ *  83        110         0x03
+ *  84        104         0x03
+ *  85         98         0x03
+ *  86        110         0x02
+ *  87        104         0x02
+ *  88         98         0x02
+ *  89        110         0x01
+ *  90        104         0x01
+ *  91         98         0x01
+ *  92        110         0x00
+ *  93        104         0x00
+ *  94         98         0x00
+ *  95         93         0x00
+ *  96         88         0x00
+ *  97         83         0x00
+ *  98         78         0x00
+ */
+
+
+/**
+ * Sanity checks and default values for EEPROM regulatory levels.
+ * If EEPROM values fall outside MIN/MAX range, use default values.
+ *
+ * Regulatory limits refer to the maximum average txpower allowed by
+ * regulatory agencies in the geographies in which the device is meant
+ * to be operated.  These limits are SKU-specific (i.e. geography-specific),
+ * and channel-specific; each channel has an individual regulatory limit
+ * listed in the EEPROM.
+ *
+ * Units are in half-dBm (i.e. "34" means 17 dBm).
+ */
+#define IWL_TX_POWER_DEFAULT_REGULATORY_24   (34)
+#define IWL_TX_POWER_DEFAULT_REGULATORY_52   (34)
+#define IWL_TX_POWER_REGULATORY_MIN          (0)
+#define IWL_TX_POWER_REGULATORY_MAX          (34)
+
+/**
+ * Sanity checks and default values for EEPROM saturation levels.
+ * If EEPROM values fall outside MIN/MAX range, use default values.
+ *
+ * Saturation is the highest level that the output power amplifier can produce
+ * without significant clipping distortion.  This is a "peak" power level.
+ * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
+ * require differing amounts of backoff, relative to their average power output,
+ * in order to avoid clipping distortion.
+ *
+ * Driver must make sure that it is violating neither the saturation limit,
+ * nor the regulatory limit, when calculating Tx power settings for various
+ * rates.
+ *
+ * Units are in half-dBm (i.e. "38" means 19 dBm).
+ */
+#define IWL_TX_POWER_DEFAULT_SATURATION_24   (38)
+#define IWL_TX_POWER_DEFAULT_SATURATION_52   (38)
+#define IWL_TX_POWER_SATURATION_MIN          (20)
+#define IWL_TX_POWER_SATURATION_MAX          (50)
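+
+/*
+ * Illustration only, not from the driver: applying the saturation sanity
+ * check above to an EEPROM value (half-dBm).  The helper name and its
+ * parameters are assumptions.
+ */
+#if 0
+static u16 example_checked_saturation(u16 eeprom_sat, int is_52ghz)
+{
+       if (eeprom_sat < IWL_TX_POWER_SATURATION_MIN ||
+           eeprom_sat > IWL_TX_POWER_SATURATION_MAX)
+               return is_52ghz ? IWL_TX_POWER_DEFAULT_SATURATION_52 :
+                                 IWL_TX_POWER_DEFAULT_SATURATION_24;
+       return eeprom_sat;
+}
+#endif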
+
+/**
+ * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
+ * and thermal Txpower calibration.
+ *
+ * When calculating txpower, driver must compensate for current device
+ * temperature; higher temperature requires higher gain.  Driver must calculate
+ * current temperature (see "4965 temperature calculation"), then compare vs.
+ * factory calibration temperature in EEPROM; if current temperature is higher
+ * than factory temperature, driver must *increase* gain by proportions shown
+ * in table below.  If current temperature is lower than factory, driver must
+ * *decrease* gain.
+ *
+ * Different frequency ranges require different compensation, as shown below.
+ */
+/* Group 0, 5.2 GHz ch 34-43:  4.5 degrees per 1/2 dB. */
+#define CALIB_IWL_TX_ATTEN_GR1_FCH 34
+#define CALIB_IWL_TX_ATTEN_GR1_LCH 43
+
+/* Group 1, 5.3 GHz ch 44-70:  4.0 degrees per 1/2 dB. */
+#define CALIB_IWL_TX_ATTEN_GR2_FCH 44
+#define CALIB_IWL_TX_ATTEN_GR2_LCH 70
+
+/* Group 2, 5.5 GHz ch 71-124:  4.0 degrees per 1/2 dB. */
+#define CALIB_IWL_TX_ATTEN_GR3_FCH 71
+#define CALIB_IWL_TX_ATTEN_GR3_LCH 124
+
+/* Group 3, 5.7 GHz ch 125-200:  4.0 degrees per 1/2 dB. */
+#define CALIB_IWL_TX_ATTEN_GR4_FCH 125
+#define CALIB_IWL_TX_ATTEN_GR4_LCH 200
+
+/* Group 4, 2.4 GHz all channels:  3.5 degrees per 1/2 dB. */
+#define CALIB_IWL_TX_ATTEN_GR5_FCH 1
+#define CALIB_IWL_TX_ATTEN_GR5_LCH 20
+
+enum {
+       CALIB_CH_GROUP_1 = 0,
+       CALIB_CH_GROUP_2 = 1,
+       CALIB_CH_GROUP_3 = 2,
+       CALIB_CH_GROUP_4 = 3,
+       CALIB_CH_GROUP_5 = 4,
+       CALIB_CH_GROUP_MAX
+};
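+
+/*
+ * Illustration only, not from the driver: mapping a channel number to the
+ * calibration channel groups defined above.  The helper name is an
+ * assumption; 2.4 GHz channels are identified purely by number (1-20).
+ */
+#if 0
+static int example_tx_atten_group(u16 channel)
+{
+       if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
+           channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
+               return CALIB_CH_GROUP_5;
+       if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
+           channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
+               return CALIB_CH_GROUP_1;
+       if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
+           channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
+               return CALIB_CH_GROUP_2;
+       if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
+           channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
+               return CALIB_CH_GROUP_3;
+       if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
+           channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
+               return CALIB_CH_GROUP_4;
+       return -1;
+}
+#endif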
+
+/********************* END TXPOWER *****************************************/
+
+
+/**
+ * Tx/Rx Queues
+ *
+ * Most communication between driver and 4965 is via queues of data buffers.
+ * For example, all commands that the driver issues to device's embedded
+ * controller (uCode) are via the command queue (one of the Tx queues).  All
+ * uCode command responses/replies/notifications, including Rx frames, are
+ * conveyed from uCode to driver via the Rx queue.
+ *
+ * Most support for these queues, including handshake support, resides in
+ * structures in host DRAM, shared between the driver and the device.  When
+ * allocating this memory, the driver must make sure that data written by
+ * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
+ * cache memory), so DRAM and cache are consistent, and the device can
+ * immediately see changes made by the driver.
+ *
+ * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
+ * up to 7 DMA channels (FIFOs).  Each Tx queue is supported by a circular array
+ * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
+ */
+#define IWL49_NUM_FIFOS        7
+#define IWL49_CMD_FIFO_NUM     4
+#define IWL49_NUM_QUEUES       16
+#define IWL49_NUM_AMPDU_QUEUES 8
+
+
+/**
+ * struct iwl4965_schedq_bc_tbl
+ *
+ * Byte Count table
+ *
+ * Each Tx queue uses a byte-count table containing 320 entries:
+ * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
+ * duplicate the first 64 entries (to avoid wrap-around within a Tx window;
+ * max Tx window is 64 TFDs).
+ *
+ * When driver sets up a new TFD, it must also enter the total byte count
+ * of the frame to be transmitted into the corresponding entry in the byte
+ * count table for the chosen Tx queue.  If the TFD index is 0-63, the driver
+ * must duplicate the byte count entry in corresponding index 256-319.
+ *
+ * padding puts each byte count table on a 1024-byte boundary;
+ * 4965 assumes tables are separated by 1024 bytes.
+ */
+struct iwl4965_scd_bc_tbl {
+       __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
+       u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
+} __packed;
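+
+/*
+ * Illustration only, not from the driver: filling the byte count table for
+ * one TFD, mirroring the low 64 entries as described above.  The helper name
+ * is an assumption; 256 is the number of TFDs per queue and 64 is the max
+ * Tx window.
+ */
+#if 0
+static void example_set_byte_count(struct iwl4965_scd_bc_tbl *bc_tbl,
+                                  int tfd_idx, u16 byte_cnt)
+{
+       bc_tbl->tfd_offset[tfd_idx] = cpu_to_le16(byte_cnt);
+
+       /* entries 0-63 are duplicated at 256-319 to cover Tx window wrap */
+       if (tfd_idx < 64)
+               bc_tbl->tfd_offset[tfd_idx + 256] = cpu_to_le16(byte_cnt);
+}
+#endif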
+
+
+#define IWL4965_RTC_INST_LOWER_BOUND           (0x000000)
+
+/* RSSI to dBm */
+#define IWL4965_RSSI_OFFSET    44
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT  0x041
+
+/* PCI register values */
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN   0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN    0x02
+
+#define IWL4965_DEFAULT_TX_RETRY  15
+
+/* Limit range of txpower output target to be between these values */
+#define IWL4965_TX_POWER_TARGET_POWER_MIN      (0)     /* 0 dBm: 1 milliwatt */
+
+/* EEPROM */
+#define IWL4965_FIRST_AMPDU_QUEUE      10
+
+
+#endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
new file mode 100644 (file)
index 0000000..26d324e
--- /dev/null
@@ -0,0 +1,74 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "iwl-commands.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-4965-led.h"
+
+/* Send led command */
+static int
+iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
+{
+       struct iwl_host_cmd cmd = {
+               .id = REPLY_LEDS_CMD,
+               .len = sizeof(struct iwl_led_cmd),
+               .data = led_cmd,
+               .flags = CMD_ASYNC,
+               .callback = NULL,
+       };
+       u32 reg;
+
+       reg = iwl_read32(priv, CSR_LED_REG);
+       if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
+               iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
+
+       return iwl_legacy_send_cmd(priv, &cmd);
+}
+
+/* Set led register off */
+void iwl4965_led_enable(struct iwl_priv *priv)
+{
+       iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
+}
+
+const struct iwl_led_ops iwl4965_led_ops = {
+       .cmd = iwl4965_send_led_cmd,
+};
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.h b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
new file mode 100644 (file)
index 0000000..5ed3615
--- /dev/null
@@ -0,0 +1,33 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_4965_led_h__
+#define __iwl_4965_led_h__
+
+extern const struct iwl_led_ops iwl4965_led_ops;
+void iwl4965_led_enable(struct iwl_priv *priv);
+
+#endif /* __iwl_4965_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
new file mode 100644 (file)
index 0000000..5a8a3cc
--- /dev/null
@@ -0,0 +1,1260 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-4965-hw.h"
+#include "iwl-4965.h"
+#include "iwl-sta.h"
+
+void iwl4965_check_abort_status(struct iwl_priv *priv,
+                           u8 frame_count, u32 status)
+{
+       if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
+               IWL_ERR(priv, "Tx flush command to flush out all frames\n");
+               if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
+                       queue_work(priv->workqueue, &priv->tx_flush);
+       }
+}
+
+/*
+ * EEPROM
+ */
+struct iwl_mod_params iwl4965_mod_params = {
+       .amsdu_size_8K = 1,
+       .restart_fw = 1,
+       /* the rest are 0 by default */
+};
+
+void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+       unsigned long flags;
+       int i;
+       spin_lock_irqsave(&rxq->lock, flags);
+       INIT_LIST_HEAD(&rxq->rx_free);
+       INIT_LIST_HEAD(&rxq->rx_used);
+       /* Fill the rx_used queue with _all_ of the Rx buffers */
+       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+               /* In the reset function, these buffers may have been allocated
+                * to an SKB, so we need to unmap and free potential storage */
+               if (rxq->pool[i].page != NULL) {
+                       pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+                               PAGE_SIZE << priv->hw_params.rx_page_order,
+                               PCI_DMA_FROMDEVICE);
+                       __iwl_legacy_free_pages(priv, rxq->pool[i].page);
+                       rxq->pool[i].page = NULL;
+               }
+               list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+       }
+
+       for (i = 0; i < RX_QUEUE_SIZE; i++)
+               rxq->queue[i] = NULL;
+
+       /* Set us so that we have processed and used all buffers, but have
+        * not restocked the Rx queue with fresh buffers */
+       rxq->read = rxq->write = 0;
+       rxq->write_actual = 0;
+       rxq->free_count = 0;
+       spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+       u32 rb_size;
+       const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+       u32 rb_timeout = 0;
+
+       if (priv->cfg->mod_params->amsdu_size_8K)
+               rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+       else
+               rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+       /* Stop Rx DMA */
+       iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+       /* Reset driver's Rx queue write index */
+       iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+       /* Tell device where to find RBD circular buffer in DRAM */
+       iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+                          (u32)(rxq->bd_dma >> 8));
+
+       /* Tell device where in DRAM to update its Rx status */
+       iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+                          rxq->rb_stts_dma >> 4);
+
+       /* Enable Rx DMA
+        * Direct rx interrupts to hosts
+        * Rx buffer size 4 or 8k
+        * RB timeout 0x10
+        * 256 RBDs
+        */
+       iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+                          FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+                          FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+                          FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
+                          rb_size|
+                          (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
+                          (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+       /* Set interrupt coalescing timer to default (2048 usecs) */
+       iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
+       return 0;
+}
+
+static void iwl4965_set_pwr_vmain(struct iwl_priv *priv)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do:
+
+               if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
+                       iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+                                              APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+                                              ~APMG_PS_CTRL_MSK_PWR_SRC);
+ */
+
+       iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+                              APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+                              ~APMG_PS_CTRL_MSK_PWR_SRC);
+}
+
+int iwl4965_hw_nic_init(struct iwl_priv *priv)
+{
+       unsigned long flags;
+       struct iwl_rx_queue *rxq = &priv->rxq;
+       int ret;
+
+       /* nic_init */
+       spin_lock_irqsave(&priv->lock, flags);
+       priv->cfg->ops->lib->apm_ops.init(priv);
+
+       /* Set interrupt coalescing calibration timer to default (512 usecs) */
+       iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       iwl4965_set_pwr_vmain(priv);
+
+       priv->cfg->ops->lib->apm_ops.config(priv);
+
+       /* Allocate the RX queue, or reset if it is already allocated */
+       if (!rxq->bd) {
+               ret = iwl_legacy_rx_queue_alloc(priv);
+               if (ret) {
+                       IWL_ERR(priv, "Unable to initialize Rx queue\n");
+                       return -ENOMEM;
+               }
+       } else
+               iwl4965_rx_queue_reset(priv, rxq);
+
+       iwl4965_rx_replenish(priv);
+
+       iwl4965_rx_init(priv, rxq);
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       rxq->need_update = 1;
+       iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       /* Allocate or reset and init all Tx and Command queues */
+       if (!priv->txq) {
+               ret = iwl4965_txq_ctx_alloc(priv);
+               if (ret)
+                       return ret;
+       } else
+               iwl4965_txq_ctx_reset(priv);
+
+       set_bit(STATUS_INIT, &priv->status);
+
+       return 0;
+}
+
+/**
+ * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
+                                         dma_addr_t dma_addr)
+{
+       return cpu_to_le32((u32)(dma_addr >> 8));
+}
+
+/**
+ * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+void iwl4965_rx_queue_restock(struct iwl_priv *priv)
+{
+       struct iwl_rx_queue *rxq = &priv->rxq;
+       struct list_head *element;
+       struct iwl_rx_mem_buffer *rxb;
+       unsigned long flags;
+
+       spin_lock_irqsave(&rxq->lock, flags);
+       while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+               /* The overwritten rxb must be a used one */
+               rxb = rxq->queue[rxq->write];
+               BUG_ON(rxb && rxb->page);
+
+               /* Get next free Rx buffer, remove from free list */
+               element = rxq->rx_free.next;
+               rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+               list_del(element);
+
+               /* Point to Rx buffer via next RBD in circular buffer */
+               rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv,
+                                                             rxb->page_dma);
+               rxq->queue[rxq->write] = rxb;
+               rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+               rxq->free_count--;
+       }
+       spin_unlock_irqrestore(&rxq->lock, flags);
+       /* If the pre-allocated buffer pool is dropping low, schedule to
+        * refill it */
+       if (rxq->free_count <= RX_LOW_WATERMARK)
+               queue_work(priv->workqueue, &priv->rx_replenish);
+
+
+       /* If we've added more space for the firmware to place data, tell it.
+        * Increment device's write pointer in multiples of 8. */
+       if (rxq->write_actual != (rxq->write & ~0x7)) {
+               spin_lock_irqsave(&rxq->lock, flags);
+               rxq->need_update = 1;
+               spin_unlock_irqrestore(&rxq->lock, flags);
+               iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
+       }
+}
+
+/**
+ * iwl4965_rx_replenish - Move all used packets from rx_used to rx_free
+ *
+ * When moving to rx_free an SKB is allocated for the slot.
+ *
+ * Also restock the Rx queue via iwl_rx_queue_restock.
+ * This is called as a scheduled work item (except during initialization).
+ */
+static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority)
+{
+       struct iwl_rx_queue *rxq = &priv->rxq;
+       struct list_head *element;
+       struct iwl_rx_mem_buffer *rxb;
+       struct page *page;
+       unsigned long flags;
+       gfp_t gfp_mask = priority;
+
+       while (1) {
+               spin_lock_irqsave(&rxq->lock, flags);
+               if (list_empty(&rxq->rx_used)) {
+                       spin_unlock_irqrestore(&rxq->lock, flags);
+                       return;
+               }
+               spin_unlock_irqrestore(&rxq->lock, flags);
+
+               if (rxq->free_count > RX_LOW_WATERMARK)
+                       gfp_mask |= __GFP_NOWARN;
+
+               if (priv->hw_params.rx_page_order > 0)
+                       gfp_mask |= __GFP_COMP;
+
+               /* Alloc a new receive buffer */
+               page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
+               if (!page) {
+                       if (net_ratelimit())
+                               IWL_DEBUG_INFO(priv, "alloc_pages failed, "
+                                              "order: %d\n",
+                                              priv->hw_params.rx_page_order);
+
+                       if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+                           net_ratelimit())
+                               IWL_CRIT(priv,
+                                       "Failed to alloc_pages with %s. "
+                                       "Only %u free buffers remaining.\n",
+                                        priority == GFP_ATOMIC ?
+                                                "GFP_ATOMIC" : "GFP_KERNEL",
+                                        rxq->free_count);
+                       /* We don't reschedule replenish work here -- we will
+                        * call the restock method and if it still needs
+                        * more buffers it will schedule replenish */
+                       return;
+               }
+
+               spin_lock_irqsave(&rxq->lock, flags);
+
+               if (list_empty(&rxq->rx_used)) {
+                       spin_unlock_irqrestore(&rxq->lock, flags);
+                       __free_pages(page, priv->hw_params.rx_page_order);
+                       return;
+               }
+               element = rxq->rx_used.next;
+               rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+               list_del(element);
+
+               spin_unlock_irqrestore(&rxq->lock, flags);
+
+               BUG_ON(rxb->page);
+               rxb->page = page;
+               /* Get physical address of the RB */
+               rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
+                               PAGE_SIZE << priv->hw_params.rx_page_order,
+                               PCI_DMA_FROMDEVICE);
+               /* dma address must be no more than 36 bits */
+               BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+               /* and also 256 byte aligned! */
+               BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+               spin_lock_irqsave(&rxq->lock, flags);
+
+               list_add_tail(&rxb->list, &rxq->rx_free);
+               rxq->free_count++;
+               priv->alloc_rxb_page++;
+
+               spin_unlock_irqrestore(&rxq->lock, flags);
+       }
+}
+
+void iwl4965_rx_replenish(struct iwl_priv *priv)
+{
+       unsigned long flags;
+
+       iwl4965_rx_allocate(priv, GFP_KERNEL);
+
+       spin_lock_irqsave(&priv->lock, flags);
+       iwl4965_rx_queue_restock(priv);
+       spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+void iwl4965_rx_replenish_now(struct iwl_priv *priv)
+{
+       iwl4965_rx_allocate(priv, GFP_ATOMIC);
+
+       iwl4965_rx_queue_restock(priv);
+}
+
+/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
+ * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
+ * This free routine walks the list of POOL entries; if an SKB is set to
+ * non-NULL, it is unmapped and freed.
+ */
+void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+       int i;
+       for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+               if (rxq->pool[i].page != NULL) {
+                       pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+                               PAGE_SIZE << priv->hw_params.rx_page_order,
+                               PCI_DMA_FROMDEVICE);
+                       __iwl_legacy_free_pages(priv, rxq->pool[i].page);
+                       rxq->pool[i].page = NULL;
+               }
+       }
+
+       dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+                         rxq->bd_dma);
+       dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
+                         rxq->rb_stts, rxq->rb_stts_dma);
+       rxq->bd = NULL;
+       rxq->rb_stts  = NULL;
+}
+
+int iwl4965_rxq_stop(struct iwl_priv *priv)
+{
+
+       /* stop Rx DMA */
+       iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+       iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
+                           FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+
+       return 0;
+}
+
+int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+{
+       int idx = 0;
+       int band_offset = 0;
+
+       /* HT rate format: mac80211 wants an MCS number, which is just LSB */
+       if (rate_n_flags & RATE_MCS_HT_MSK) {
+               idx = (rate_n_flags & 0xff);
+               return idx;
+       /* Legacy rate format, search for match in table */
+       } else {
+               if (band == IEEE80211_BAND_5GHZ)
+                       band_offset = IWL_FIRST_OFDM_RATE;
+               for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
+                       if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
+                               return idx - band_offset;
+       }
+
+       return -1;
+}
+
+static int iwl4965_calc_rssi(struct iwl_priv *priv,
+                            struct iwl_rx_phy_res *rx_resp)
+{
+       /* data from PHY/DSP regarding signal strength, etc.,
+        *   contents are always there, not configurable by host.  */
+       struct iwl4965_rx_non_cfg_phy *ncphy =
+           (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+       u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
+                       >> IWL49_AGC_DB_POS;
+
+       u32 valid_antennae =
+           (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
+                       >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
+       u8 max_rssi = 0;
+       u32 i;
+
+       /* Find max rssi among 3 possible receivers.
+        * These values are measured by the digital signal processor (DSP).
+        * They should stay fairly constant even as the signal strength varies,
+        *   if the radio's automatic gain control (AGC) is working right.
+        * AGC value (see below) will provide the "interesting" info. */
+       for (i = 0; i < 3; i++)
+               if (valid_antennae & (1 << i))
+                       max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
+
+       IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+               ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
+               max_rssi, agc);
+
+       /* dBm = max_rssi dB - agc dB - constant.
+        * Higher AGC (higher radio gain) means lower signal. */
+       return max_rssi - agc - IWL4965_RSSI_OFFSET;
+}
+
+
+static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
+{
+       u32 decrypt_out = 0;
+
+       if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
+                                       RX_RES_STATUS_STATION_FOUND)
+               decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
+                               RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
+
+       decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
+
+       /* packet was not encrypted */
+       if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+                                       RX_RES_STATUS_SEC_TYPE_NONE)
+               return decrypt_out;
+
+       /* packet was encrypted with unknown alg */
+       if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+                                       RX_RES_STATUS_SEC_TYPE_ERR)
+               return decrypt_out;
+
+       /* decryption was not done in HW */
+       if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
+                                       RX_MPDU_RES_STATUS_DEC_DONE_MSK)
+               return decrypt_out;
+
+       switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
+
+       case RX_RES_STATUS_SEC_TYPE_CCMP:
+               /* alg is CCM: check MIC only */
+               if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
+                       /* Bad MIC */
+                       decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+               else
+                       decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+
+               break;
+
+       case RX_RES_STATUS_SEC_TYPE_TKIP:
+               if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
+                       /* Bad TTAK */
+                       decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
+                       break;
+               }
+               /* fall through if TTAK OK */
+       default:
+               if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
+                       decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+               else
+                       decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+               break;
+       }
+
+       IWL_DEBUG_RX(priv, "decrypt_in:0x%x  decrypt_out = 0x%x\n",
+                                       decrypt_in, decrypt_out);
+
+       return decrypt_out;
+}
+
+static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv,
+                                       struct ieee80211_hdr *hdr,
+                                       u16 len,
+                                       u32 ampdu_status,
+                                       struct iwl_rx_mem_buffer *rxb,
+                                       struct ieee80211_rx_status *stats)
+{
+       struct sk_buff *skb;
+       __le16 fc = hdr->frame_control;
+
+       /* We only process data packets if the interface is open */
+       if (unlikely(!priv->is_open)) {
+               IWL_DEBUG_DROP_LIMIT(priv,
+                   "Dropping packet while interface is not open.\n");
+               return;
+       }
+
+       /* In case of HW accelerated crypto and bad decryption, drop */
+       if (!priv->cfg->mod_params->sw_crypto &&
+           iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats))
+               return;
+
+       skb = dev_alloc_skb(128);
+       if (!skb) {
+               IWL_ERR(priv, "dev_alloc_skb failed\n");
+               return;
+       }
+
+       skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+       iwl_legacy_update_stats(priv, false, fc, len);
+       memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+       ieee80211_rx(priv->hw, skb);
+       priv->alloc_rxb_page--;
+       rxb->page = NULL;
+}
+
+/* Called for REPLY_RX (legacy ABG frames), or
+ * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
+void iwl4965_rx_reply_rx(struct iwl_priv *priv,
+                               struct iwl_rx_mem_buffer *rxb)
+{
+       struct ieee80211_hdr *header;
+       struct ieee80211_rx_status rx_status;
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_rx_phy_res *phy_res;
+       __le32 rx_pkt_status;
+       struct iwl_rx_mpdu_res_start *amsdu;
+       u32 len;
+       u32 ampdu_status;
+       u32 rate_n_flags;
+
+       /**
+        * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
+        *      REPLY_RX: physical layer info is in this buffer
+        *      REPLY_RX_MPDU_CMD: physical layer info was sent in separate
+        *              command and cached in priv->last_phy_res
+        *
+        * Here we set up local variables depending on which command is
+        * received.
+        */
+       if (pkt->hdr.cmd == REPLY_RX) {
+               phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
+               header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
+                               + phy_res->cfg_phy_cnt);
+
+               len = le16_to_cpu(phy_res->byte_count);
+               rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
+                               phy_res->cfg_phy_cnt + len);
+               ampdu_status = le32_to_cpu(rx_pkt_status);
+       } else {
+               if (!priv->_4965.last_phy_res_valid) {
+                       IWL_ERR(priv, "MPDU frame without cached PHY data\n");
+                       return;
+               }
+               phy_res = &priv->_4965.last_phy_res;
+               amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
+               header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
+               len = le16_to_cpu(amsdu->byte_count);
+               rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
+               ampdu_status = iwl4965_translate_rx_status(priv,
+                               le32_to_cpu(rx_pkt_status));
+       }
+
+       if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
+               IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
+                               phy_res->cfg_phy_cnt);
+               return;
+       }
+
+       if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+           !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+               IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
+                               le32_to_cpu(rx_pkt_status));
+               return;
+       }
+
+       /* This will be used in several places later */
+       rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
+
+       /* rx_status carries information about the packet to mac80211 */
+       rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+       rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+                               IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+       rx_status.freq =
+               ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
+                                                       rx_status.band);
+       rx_status.rate_idx =
+               iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
+       rx_status.flag = 0;
+
+       /* TSF isn't reliable.  In order to allow a smooth user experience,
+        * this W/A doesn't propagate it to mac80211 */
+       /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
+
+       priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
+
+       /* Find max signal strength (dBm) among 3 antenna/receiver chains */
+       rx_status.signal = iwl4965_calc_rssi(priv, phy_res);
+
+       iwl_legacy_dbg_log_rx_data_frame(priv, len, header);
+       IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
+               rx_status.signal, (unsigned long long)rx_status.mactime);
+
+       /*
+        * "antenna number"
+        *
+        * It seems that the antenna field in the phy flags value
+        * is actually a bit field. This is undefined by radiotap,
+        * it wants an actual antenna number but I always get "7"
+        * for most legacy frames I receive indicating that the
+        * same frame was received on all three RX chains.
+        *
+        * I think this field should be removed in favor of a
+        * new 802.11n radiotap field "RX chains" that is defined
+        * as a bitmask.
+        */
+       rx_status.antenna =
+               (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
+               >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+
+       /* set the preamble flag if appropriate */
+       if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+               rx_status.flag |= RX_FLAG_SHORTPRE;
+
+       /* Set up the HT phy flags */
+       if (rate_n_flags & RATE_MCS_HT_MSK)
+               rx_status.flag |= RX_FLAG_HT;
+       if (rate_n_flags & RATE_MCS_HT40_MSK)
+               rx_status.flag |= RX_FLAG_40MHZ;
+       if (rate_n_flags & RATE_MCS_SGI_MSK)
+               rx_status.flag |= RX_FLAG_SHORT_GI;
+
+       iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status,
+                                   rxb, &rx_status);
+}
+
+/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
+ * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
+void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
+                           struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       priv->_4965.last_phy_res_valid = true;
+       memcpy(&priv->_4965.last_phy_res, pkt->u.raw,
+              sizeof(struct iwl_rx_phy_res));
+}
+
+static int iwl4965_get_single_channel_for_scan(struct iwl_priv *priv,
+                                          struct ieee80211_vif *vif,
+                                          enum ieee80211_band band,
+                                          struct iwl_scan_channel *scan_ch)
+{
+       const struct ieee80211_supported_band *sband;
+       u16 passive_dwell = 0;
+       u16 active_dwell = 0;
+       int added = 0;
+       u16 channel = 0;
+
+       sband = iwl_get_hw_mode(priv, band);
+       if (!sband) {
+               IWL_ERR(priv, "invalid band\n");
+               return added;
+       }
+
+       active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0);
+       passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
+
+       if (passive_dwell <= active_dwell)
+               passive_dwell = active_dwell + 1;
+
+       channel = iwl_legacy_get_single_channel_number(priv, band);
+       if (channel) {
+               scan_ch->channel = cpu_to_le16(channel);
+               scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+               scan_ch->active_dwell = cpu_to_le16(active_dwell);
+               scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+               /* Set txpower levels to defaults */
+               scan_ch->dsp_atten = 110;
+               if (band == IEEE80211_BAND_5GHZ)
+                       scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+               else
+                       scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+               added++;
+       } else
+               IWL_ERR(priv, "no valid channel found\n");
+       return added;
+}
+
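+/*
+ * Fill scan_ch[] with one entry per requested channel on this band:
+ * mark each channel active or passive, set dwell times and default
+ * tx power, and return the number of channels added.
+ */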
+static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
+                                    struct ieee80211_vif *vif,
+                                    enum ieee80211_band band,
+                                    u8 is_active, u8 n_probes,
+                                    struct iwl_scan_channel *scan_ch)
+{
+       struct ieee80211_channel *chan;
+       const struct ieee80211_supported_band *sband;
+       const struct iwl_channel_info *ch_info;
+       u16 passive_dwell = 0;
+       u16 active_dwell = 0;
+       int added, i;
+       u16 channel;
+
+       sband = iwl_get_hw_mode(priv, band);
+       if (!sband)
+               return 0;
+
+       active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
+       passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
+
+       if (passive_dwell <= active_dwell)
+               passive_dwell = active_dwell + 1;
+
+       for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
+               chan = priv->scan_request->channels[i];
+
+               if (chan->band != band)
+                       continue;
+
+               channel = chan->hw_value;
+               scan_ch->channel = cpu_to_le16(channel);
+
+               ch_info = iwl_legacy_get_channel_info(priv, band, channel);
+               if (!iwl_legacy_is_channel_valid(ch_info)) {
+                       IWL_DEBUG_SCAN(priv,
+                                "Channel %d is INVALID for this band.\n",
+                                       channel);
+                       continue;
+               }
+
+               if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
+                   (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+                       scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+               else
+                       scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
+
+               if (n_probes)
+                       scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
+
+               scan_ch->active_dwell = cpu_to_le16(active_dwell);
+               scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+
+               /* Set txpower levels to defaults */
+               scan_ch->dsp_atten = 110;
+
+               /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+                * power level:
+                * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+                */
+               if (band == IEEE80211_BAND_5GHZ)
+                       scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+               else
+                       scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+
+               IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
+                              channel, le32_to_cpu(scan_ch->type),
+                              (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+                               "ACTIVE" : "PASSIVE",
+                              (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+                              active_dwell : passive_dwell);
+
+               scan_ch++;
+               added++;
+       }
+
+       IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
+       return added;
+}
+
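+/*
+ * Build and send the REPLY_SCAN_CMD scan command; priv->scan_cmd is
+ * allocated on first use.  The caller must hold priv->mutex (enforced
+ * by the lockdep assertion below).
+ */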
+int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
+{
+       struct iwl_host_cmd cmd = {
+               .id = REPLY_SCAN_CMD,
+               .len = sizeof(struct iwl_scan_cmd),
+               .flags = CMD_SIZE_HUGE,
+       };
+       struct iwl_scan_cmd *scan;
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       u32 rate_flags = 0;
+       u16 cmd_len;
+       u16 rx_chain = 0;
+       enum ieee80211_band band;
+       u8 n_probes = 0;
+       u8 rx_ant = priv->hw_params.valid_rx_ant;
+       u8 rate;
+       bool is_active = false;
+       int  chan_mod;
+       u8 active_chains;
+       u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
+       int ret;
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (vif)
+               ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+
+       if (!priv->scan_cmd) {
+               priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
+                                        IWL_MAX_SCAN_SIZE, GFP_KERNEL);
+               if (!priv->scan_cmd) {
+                       IWL_DEBUG_SCAN(priv,
+                                      "fail to allocate memory for scan\n");
+                       return -ENOMEM;
+               }
+       }
+       scan = priv->scan_cmd;
+       memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
+
+       scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
+       scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
+
+       if (iwl_legacy_is_any_associated(priv)) {
+               u16 interval = 0;
+               u32 extra;
+               u32 suspend_time = 100;
+               u32 scan_suspend_time = 100;
+
+               IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
+               if (priv->is_internal_short_scan)
+                       interval = 0;
+               else
+                       interval = vif->bss_conf.beacon_int;
+
+               scan->suspend_time = 0;
+               scan->max_out_time = cpu_to_le32(200 * 1024);
+               if (!interval)
+                       interval = suspend_time;
+
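+               /*
+                * Pack suspend_time for the uCode: the quotient
+                * (suspend_time / beacon interval) goes in bits 22 and
+                * up, the remainder scaled by 1024 (presumably TU ->
+                * usec) in the low bits.  E.g. suspend_time = 100,
+                * interval = 100 gives (1 << 22) | 0 = 0x400000.
+                */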
+               extra = (suspend_time / interval) << 22;
+               scan_suspend_time = (extra |
+                   ((suspend_time % interval) * 1024));
+               scan->suspend_time = cpu_to_le32(scan_suspend_time);
+               IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
+                              scan_suspend_time, interval);
+       }
+
+       if (priv->is_internal_short_scan) {
+               IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
+       } else if (priv->scan_request->n_ssids) {
+               int i, p = 0;
+               IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
+               for (i = 0; i < priv->scan_request->n_ssids; i++) {
+                       /* always does wildcard anyway */
+                       if (!priv->scan_request->ssids[i].ssid_len)
+                               continue;
+                       scan->direct_scan[p].id = WLAN_EID_SSID;
+                       scan->direct_scan[p].len =
+                               priv->scan_request->ssids[i].ssid_len;
+                       memcpy(scan->direct_scan[p].ssid,
+                              priv->scan_request->ssids[i].ssid,
+                              priv->scan_request->ssids[i].ssid_len);
+                       n_probes++;
+                       p++;
+               }
+               is_active = true;
+       } else
+               IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
+
+       scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+       scan->tx_cmd.sta_id = ctx->bcast_sta_id;
+       scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+       switch (priv->scan_band) {
+       case IEEE80211_BAND_2GHZ:
+               scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+               chan_mod = le32_to_cpu(
+                       priv->contexts[IWL_RXON_CTX_BSS].active.flags &
+                                               RXON_FLG_CHANNEL_MODE_MSK)
+                                      >> RXON_FLG_CHANNEL_MODE_POS;
+               if (chan_mod == CHANNEL_MODE_PURE_40) {
+                       rate = IWL_RATE_6M_PLCP;
+               } else {
+                       rate = IWL_RATE_1M_PLCP;
+                       rate_flags = RATE_MCS_CCK_MSK;
+               }
+               break;
+       case IEEE80211_BAND_5GHZ:
+               rate = IWL_RATE_6M_PLCP;
+               break;
+       default:
+               IWL_WARN(priv, "Invalid scan band\n");
+               return -EIO;
+       }
+
+       /*
+        * If active scanning is requested but a certain channel is
+        * marked passive, we can do active scanning if we detect
+        * transmissions.
+        *
+        * There is an issue with some firmware versions that triggers
+        * a sysassert on a "good CRC threshold" of zero (== disabled),
+        * on a radar channel even though this means that we should NOT
+        * send probes.
+        *
+        * The "good CRC threshold" is the number of frames that we
+        * need to receive during our dwell time on a channel before
+        * sending out probes -- setting this to a huge value will
+        * mean we never reach it, but at the same time work around
+        * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
+        * here instead of IWL_GOOD_CRC_TH_DISABLED.
+        */
+       scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
+                                       IWL_GOOD_CRC_TH_NEVER;
+
+       band = priv->scan_band;
+
+       if (priv->cfg->scan_rx_antennas[band])
+               rx_ant = priv->cfg->scan_rx_antennas[band];
+
+       if (priv->cfg->scan_tx_antennas[band])
+               scan_tx_antennas = priv->cfg->scan_tx_antennas[band];
+
+       priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv,
+                                               priv->scan_tx_ant[band],
+                                                   scan_tx_antennas);
+       rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]);
+       scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags);
+
+       /* In power save mode use one chain, otherwise use all chains */
+       if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+               /* rx_ant has been set to all valid chains previously */
+               active_chains = rx_ant &
+                               ((u8)(priv->chain_noise_data.active_chains));
+               if (!active_chains)
+                       active_chains = rx_ant;
+
+               IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
+                               priv->chain_noise_data.active_chains);
+
+               rx_ant = iwl4965_first_antenna(active_chains);
+       }
+
+       /* MIMO is not used here, but value is required */
+       rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+       rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
+       rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
+       rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
+       scan->rx_chain = cpu_to_le16(rx_chain);
+       if (!priv->is_internal_short_scan) {
+               cmd_len = iwl_legacy_fill_probe_req(priv,
+                                       (struct ieee80211_mgmt *)scan->data,
+                                       vif->addr,
+                                       priv->scan_request->ie,
+                                       priv->scan_request->ie_len,
+                                       IWL_MAX_SCAN_SIZE - sizeof(*scan));
+       } else {
+               /* use bcast addr, will not be transmitted but must be valid */
+               cmd_len = iwl_legacy_fill_probe_req(priv,
+                                       (struct ieee80211_mgmt *)scan->data,
+                                       iwlegacy_bcast_addr, NULL, 0,
+                                       IWL_MAX_SCAN_SIZE - sizeof(*scan));
+
+       }
+       scan->tx_cmd.len = cpu_to_le16(cmd_len);
+
+       scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
+                              RXON_FILTER_BCON_AWARE_MSK);
+
+       if (priv->is_internal_short_scan) {
+               scan->channel_count =
+                       iwl4965_get_single_channel_for_scan(priv, vif, band,
+                               (void *)&scan->data[le16_to_cpu(
+                               scan->tx_cmd.len)]);
+       } else {
+               scan->channel_count =
+                       iwl4965_get_channels_for_scan(priv, vif, band,
+                               is_active, n_probes,
+                               (void *)&scan->data[le16_to_cpu(
+                               scan->tx_cmd.len)]);
+       }
+       if (scan->channel_count == 0) {
+               IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
+               return -EIO;
+       }
+
+       cmd.len += le16_to_cpu(scan->tx_cmd.len) +
+           scan->channel_count * sizeof(struct iwl_scan_channel);
+       cmd.data = scan;
+       scan->len = cpu_to_le16(cmd.len);
+
+       set_bit(STATUS_SCAN_HW, &priv->status);
+
+       ret = iwl_legacy_send_cmd_sync(priv, &cmd);
+       if (ret)
+               clear_bit(STATUS_SCAN_HW, &priv->status);
+
+       return ret;
+}
+
+int iwl4965_manage_ibss_station(struct iwl_priv *priv,
+                              struct ieee80211_vif *vif, bool add)
+{
+       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+       if (add)
+               return iwl4965_add_bssid_station(priv, vif_priv->ctx,
+                                               vif->bss_conf.bssid,
+                                               &vif_priv->ibss_bssid_sta_id);
+       return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
+                                 vif->bss_conf.bssid);
+}
+
+void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
+                           int sta_id, int tid, int freed)
+{
+       lockdep_assert_held(&priv->sta_lock);
+
+       if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+               priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+       else {
+               IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
+                       priv->stations[sta_id].tid[tid].tfds_in_queue,
+                       freed);
+               priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
+       }
+}
+
+#define IWL_TX_QUEUE_MSK       0xfffff
+
+static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv)
+{
+       return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
+              priv->current_ht_config.single_chain_sufficient;
+}
+
+#define IWL_NUM_RX_CHAINS_MULTIPLE     3
+#define IWL_NUM_RX_CHAINS_SINGLE       2
+#define IWL_NUM_IDLE_CHAINS_DUAL       2
+#define IWL_NUM_IDLE_CHAINS_SINGLE     1
+
+/*
+ * Determine how many receiver/antenna chains to use.
+ *
+ * More provides better reception via diversity.  Fewer saves power
+ * at the expense of throughput, but only when not in powersave to
+ * start with.
+ *
+ * MIMO (dual stream) requires at least 2, but works better with 3.
+ * This does not determine *which* chains to use, just how many.
+ */
+static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv)
+{
+       /* # of Rx chains to use when expecting MIMO. */
+       if (iwl4965_is_single_rx_stream(priv))
+               return IWL_NUM_RX_CHAINS_SINGLE;
+       else
+               return IWL_NUM_RX_CHAINS_MULTIPLE;
+}
+
+/*
+ * When we are in power saving mode, unless the device supports spatial
+ * multiplexing power save, use the active count for the rx chain count.
+ */
+static int
+iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
+{
+       /* # Rx chains when idling, depending on SMPS mode */
+       switch (priv->current_ht_config.smps) {
+       case IEEE80211_SMPS_STATIC:
+       case IEEE80211_SMPS_DYNAMIC:
+               return IWL_NUM_IDLE_CHAINS_SINGLE;
+       case IEEE80211_SMPS_OFF:
+               return active_cnt;
+       default:
+               WARN(1, "invalid SMPS mode %d",
+                    priv->current_ht_config.smps);
+               return active_cnt;
+       }
+}
+
+/* up to 4 chains */
+static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap)
+{
+       u8 res;
+       res = (chain_bitmap & BIT(0)) >> 0;
+       res += (chain_bitmap & BIT(1)) >> 1;
+       res += (chain_bitmap & BIT(2)) >> 2;
+       res += (chain_bitmap & BIT(3)) >> 3;
+       return res;
+}
+
+/**
+ * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
+ *
+ * Selects how many and which Rx receivers/antennas/chains to use.
+ * This should not be used for the scan command; it puts data in the
+ * wrong place.
+ */
+void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+       bool is_single = iwl4965_is_single_rx_stream(priv);
+       bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
+       u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
+       u32 active_chains;
+       u16 rx_chain;
+
+       /* Tell uCode which antennas are actually connected.
+        * Before first association, we assume all antennas are connected.
+        * Just after first association, iwl4965_chain_noise_calibration()
+        *    checks which antennas actually *are* connected. */
+       if (priv->chain_noise_data.active_chains)
+               active_chains = priv->chain_noise_data.active_chains;
+       else
+               active_chains = priv->hw_params.valid_rx_ant;
+
+       rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
+
+       /* How many receivers should we use? */
+       active_rx_cnt = iwl4965_get_active_rx_chain_count(priv);
+       idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt);
+
+
+       /* correct rx chain count according to hw settings
+        * and chain noise calibration
+        */
+       valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains);
+       if (valid_rx_cnt < active_rx_cnt)
+               active_rx_cnt = valid_rx_cnt;
+
+       if (valid_rx_cnt < idle_rx_cnt)
+               idle_rx_cnt = valid_rx_cnt;
+
+       rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
+       rx_chain |= idle_rx_cnt  << RXON_RX_CHAIN_CNT_POS;
+
+       ctx->staging.rx_chain = cpu_to_le16(rx_chain);
+
+       if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
+               ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
+       else
+               ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
+
+       IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
+                       ctx->staging.rx_chain,
+                       active_rx_cnt, idle_rx_cnt);
+
+       WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
+               active_rx_cnt < idle_rx_cnt);
+}
+
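+/*
+ * Round-robin to the next antenna in "valid" after "ant"; if no other
+ * valid antenna is found, return "ant" unchanged.
+ */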
+u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
+{
+       int i;
+       u8 ind = ant;
+
+       for (i = 0; i < RATE_ANT_NUM - 1; i++) {
+               ind = (ind + 1) < RATE_ANT_NUM ?  ind + 1 : 0;
+               if (valid & BIT(ind))
+                       return ind;
+       }
+       return ant;
+}
+
+static const char *iwl4965_get_fh_string(int cmd)
+{
+       switch (cmd) {
+       IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
+       IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
+       IWL_CMD(FH_RSCSR_CHNL0_WPTR);
+       IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
+       IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
+       IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
+       IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+       IWL_CMD(FH_TSSR_TX_STATUS_REG);
+       IWL_CMD(FH_TSSR_TX_ERROR_REG);
+       default:
+               return "UNKNOWN";
+       }
+}
+
+int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display)
+{
+       int i;
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       int pos = 0;
+       size_t bufsz = 0;
+#endif
+       static const u32 fh_tbl[] = {
+               FH_RSCSR_CHNL0_STTS_WPTR_REG,
+               FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+               FH_RSCSR_CHNL0_WPTR,
+               FH_MEM_RCSR_CHNL0_CONFIG_REG,
+               FH_MEM_RSSR_SHARED_CTRL_REG,
+               FH_MEM_RSSR_RX_STATUS_REG,
+               FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+               FH_TSSR_TX_STATUS_REG,
+               FH_TSSR_TX_ERROR_REG
+       };
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (display) {
+               bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+               *buf = kmalloc(bufsz, GFP_KERNEL);
+               if (!*buf)
+                       return -ENOMEM;
+               pos += scnprintf(*buf + pos, bufsz - pos,
+                               "FH register values:\n");
+               for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+                       pos += scnprintf(*buf + pos, bufsz - pos,
+                               "  %34s: 0X%08x\n",
+                               iwl4965_get_fh_string(fh_tbl[i]),
+                               iwl_legacy_read_direct32(priv, fh_tbl[i]));
+               }
+               return pos;
+       }
+#endif
+       IWL_ERR(priv, "FH register values:\n");
+       for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++) {
+               IWL_ERR(priv, "  %34s: 0X%08x\n",
+                       iwl4965_get_fh_string(fh_tbl[i]),
+                       iwl_legacy_read_direct32(priv, fh_tbl[i]));
+       }
+       return 0;
+}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
new file mode 100644 (file)
index 0000000..31ac672
--- /dev/null
@@ -0,0 +1,2870 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/wireless.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+
+#include "iwl-dev.h"
+#include "iwl-sta.h"
+#include "iwl-core.h"
+#include "iwl-4965.h"
+
+#define IWL4965_RS_NAME "iwl-4965-rs"
+
+#define NUM_TRY_BEFORE_ANT_TOGGLE 1
+#define IWL_NUMBER_TRY      1
+#define IWL_HT_NUMBER_TRY   3
+
+#define IWL_RATE_MAX_WINDOW            62      /* # tx in history window */
+#define IWL_RATE_MIN_FAILURE_TH                6       /* min failures to calc tpt */
+#define IWL_RATE_MIN_SUCCESS_TH                8       /* min successes to calc tpt */
+
+/* max allowed rate miss before sync LQ cmd */
+#define IWL_MISSED_RATE_MAX            15
+/* max time to accumulate history, 3*HZ = 3 seconds */
+#define IWL_RATE_SCALE_FLUSH_INTVL   (3*HZ)
+
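+/*
+ * Map from an HT rate-scale index to a legacy rate index, used when
+ * dropping from an HT mode back to legacy rates
+ * (see iwl4965_rs_get_lower_rate()).
+ */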
+static u8 rs_ht_to_legacy[] = {
+       IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
+       IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
+       IWL_RATE_6M_INDEX,
+       IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
+       IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
+       IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
+       IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
+};
+
+static const u8 ant_toggle_lookup[] = {
+       /*ANT_NONE -> */ ANT_NONE,
+       /*ANT_A    -> */ ANT_B,
+       /*ANT_B    -> */ ANT_C,
+       /*ANT_AB   -> */ ANT_BC,
+       /*ANT_C    -> */ ANT_A,
+       /*ANT_AC   -> */ ANT_AB,
+       /*ANT_BC   -> */ ANT_AC,
+       /*ANT_ABC  -> */ ANT_ABC,
+};
+
+#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
+       [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
+                                   IWL_RATE_SISO_##s##M_PLCP, \
+                                   IWL_RATE_MIMO2_##s##M_PLCP,\
+                                   IWL_RATE_##r##M_IEEE,      \
+                                   IWL_RATE_##ip##M_INDEX,    \
+                                   IWL_RATE_##in##M_INDEX,    \
+                                   IWL_RATE_##rp##M_INDEX,    \
+                                   IWL_RATE_##rn##M_INDEX,    \
+                                   IWL_RATE_##pp##M_INDEX,    \
+                                   IWL_RATE_##np##M_INDEX }
+
+/*
+ * Parameter order:
+ *   rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
+ *
+ * If there isn't a valid next or previous rate then INV is used which
+ * maps to IWL_RATE_INVALID
+ *
+ */
+const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT] = {
+       IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),    /*  1mbps */
+       IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),          /*  2mbps */
+       IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
+       IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),      /* 11mbps */
+       IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),        /*  6mbps */
+       IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),       /*  9mbps */
+       IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
+       IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
+       IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
+       IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
+       IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
+       IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
+       IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
+};
+
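+/*
+ * Convert a uCode rate_n_flags value into an index into iwlegacy_rates[];
+ * returns -1 if no matching rate is found.
+ */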
+static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
+{
+       int idx = 0;
+
+       /* HT rate format */
+       if (rate_n_flags & RATE_MCS_HT_MSK) {
+               idx = (rate_n_flags & 0xff);
+
+               if (idx >= IWL_RATE_MIMO2_6M_PLCP)
+                       idx = idx - IWL_RATE_MIMO2_6M_PLCP;
+
+               idx += IWL_FIRST_OFDM_RATE;
+               /* skip 9M not supported in ht*/
+               if (idx >= IWL_RATE_9M_INDEX)
+                       idx += 1;
+               if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
+                       return idx;
+
+       /* legacy rate format, search for match in table */
+       } else {
+               for (idx = 0; idx < ARRAY_SIZE(iwlegacy_rates); idx++)
+                       if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
+                               return idx;
+       }
+
+       return -1;
+}
+
+static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
+                                  struct sk_buff *skb,
+                                  struct ieee80211_sta *sta,
+                                  struct iwl_lq_sta *lq_sta);
+static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
+                            struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
+static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta,
+                                       bool force_search);
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+                            u32 *rate_n_flags, int index);
+#else
+static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+                            u32 *rate_n_flags, int index)
+{}
+#endif
+
+/**
+ * The following tables contain the expected throughput metrics for all rates
+ *
+ *     1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
+ *
+ * where invalid entries are zeros.
+ *
+ * CCK rates are only valid in the legacy table and will only be used
+ * in the 2.4 GHz (G) band.
+ */
+
+static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
+       7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
+};
+
+static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
+       {0, 0, 0, 0, 42, 0,  76, 102, 124, 158, 183, 193, 202}, /* Norm */
+       {0, 0, 0, 0, 46, 0,  82, 110, 132, 167, 192, 202, 210}, /* SGI */
+       {0, 0, 0, 0, 48, 0,  93, 135, 176, 251, 319, 351, 381}, /* AGG */
+       {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
+       {0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
+       {0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
+       {0, 0, 0, 0,  96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
+       {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
+       {0, 0, 0, 0,  74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
+       {0, 0, 0, 0,  81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
+       {0, 0, 0, 0,  92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
+       {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
+};
+
+static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
+       {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
+       {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
+       {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
+       {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
+};
+
+/* mbps, mcs */
+static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
+       {  "1", "BPSK DSSS"},
+       {  "2", "QPSK DSSS"},
+       {"5.5", "BPSK CCK"},
+       { "11", "QPSK CCK"},
+       {  "6", "BPSK 1/2"},
+       {  "9", "BPSK 1/2"},
+       { "12", "QPSK 1/2"},
+       { "18", "QPSK 3/4"},
+       { "24", "16QAM 1/2"},
+       { "36", "16QAM 3/4"},
+       { "48", "64QAM 2/3"},
+       { "54", "64QAM 3/4"},
+       { "60", "64QAM 5/6"},
+};
+
+#define MCS_INDEX_PER_STREAM   (8)
+
+static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags)
+{
+       return (u8)(rate_n_flags & 0xFF);
+}
+
+static void
+iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
+{
+       window->data = 0;
+       window->success_counter = 0;
+       window->success_ratio = IWL_INVALID_VALUE;
+       window->counter = 0;
+       window->average_tpt = IWL_INVALID_VALUE;
+       window->stamp = 0;
+}
+
+static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
+{
+       return (ant_type & valid_antenna) == ant_type;
+}
+
+/*
+ *     Removes old data from the statistics: all data older than
+ *     TID_MAX_TIME_DIFF will be deleted.
+ */
+static void
+iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
+{
+       /* The oldest age we want to keep */
+       u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
+
+       while (tl->queue_count &&
+              (tl->time_stamp < oldest_time)) {
+               tl->total -= tl->packet_count[tl->head];
+               tl->packet_count[tl->head] = 0;
+               tl->time_stamp += TID_QUEUE_CELL_SPACING;
+               tl->queue_count--;
+               tl->head++;
+               if (tl->head >= TID_QUEUE_MAX_SIZE)
+                       tl->head = 0;
+       }
+}
+
+/*
+ *     Increment the traffic load value for this tid and also remove
+ *     any old values that have passed a certain time period.
+ */
+static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data,
+                          struct ieee80211_hdr *hdr)
+{
+       u32 curr_time = jiffies_to_msecs(jiffies);
+       u32 time_diff;
+       s32 index;
+       struct iwl_traffic_load *tl = NULL;
+       u8 tid;
+
+       if (ieee80211_is_data_qos(hdr->frame_control)) {
+               u8 *qc = ieee80211_get_qos_ctl(hdr);
+               tid = qc[0] & 0xf;
+       } else
+               return MAX_TID_COUNT;
+
+       if (unlikely(tid >= TID_MAX_LOAD_COUNT))
+               return MAX_TID_COUNT;
+
+       tl = &lq_data->load[tid];
+
+       curr_time -= curr_time % TID_ROUND_VALUE;
+
+       /* Happens only for the first packet. Initialize the data */
+       if (!(tl->queue_count)) {
+               tl->total = 1;
+               tl->time_stamp = curr_time;
+               tl->queue_count = 1;
+               tl->head = 0;
+               tl->packet_count[0] = 1;
+               return MAX_TID_COUNT;
+       }
+
+       time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+       index = time_diff / TID_QUEUE_CELL_SPACING;
+
+       /* The history is too long: remove data that is older than */
+       /* TID_MAX_TIME_DIFF */
+       if (index >= TID_QUEUE_MAX_SIZE)
+               iwl4965_rs_tl_rm_old_stats(tl, curr_time);
+
+       index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
+       tl->packet_count[index] = tl->packet_count[index] + 1;
+       tl->total = tl->total + 1;
+
+       if ((index + 1) > tl->queue_count)
+               tl->queue_count = index + 1;
+
+       return tid;
+}
+
+/*
+ *     get the traffic load value for tid
+ */
+static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
+{
+       u32 curr_time = jiffies_to_msecs(jiffies);
+       u32 time_diff;
+       s32 index;
+       struct iwl_traffic_load *tl = NULL;
+
+       if (tid >= TID_MAX_LOAD_COUNT)
+               return 0;
+
+       tl = &(lq_data->load[tid]);
+
+       curr_time -= curr_time % TID_ROUND_VALUE;
+
+       if (!(tl->queue_count))
+               return 0;
+
+       time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+       index = time_diff / TID_QUEUE_CELL_SPACING;
+
+       /* The history is too long: remove data that is older than */
+       /* TID_MAX_TIME_DIFF */
+       if (index >= TID_QUEUE_MAX_SIZE)
+               iwl4965_rs_tl_rm_old_stats(tl, curr_time);
+
+       return tl->total;
+}
+
+static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
+                                     struct iwl_lq_sta *lq_data, u8 tid,
+                                     struct ieee80211_sta *sta)
+{
+       int ret = -EAGAIN;
+       u32 load;
+
+       load = iwl4965_rs_tl_get_load(lq_data, tid);
+
+       if (load > IWL_AGG_LOAD_THRESHOLD) {
+               IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
+                               sta->addr, tid);
+               ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
+               if (ret == -EAGAIN) {
+                       /*
+                        * driver and mac80211 are out of sync;
+                        * this might be caused by reloading firmware.
+                        * Stop the tx BA session here.
+                        */
+                       IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
+                               tid);
+                       ieee80211_stop_tx_ba_session(sta, tid);
+               }
+       } else {
+               IWL_ERR(priv, "Aggregation not enabled for tid %d "
+                       "because load = %u\n", tid, load);
+       }
+       return ret;
+}
+
+static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
+                             struct iwl_lq_sta *lq_data,
+                             struct ieee80211_sta *sta)
+{
+       if (tid < TID_MAX_LOAD_COUNT)
+               iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
+       else
+               IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
+                       tid, TID_MAX_LOAD_COUNT);
+}
+
+static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags)
+{
+       return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
+              !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
+              !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
+}
+
+/*
+ * Get the expected throughput entry from an iwl_scale_tbl_info, with
+ * a NULL pointer check on the table's expected_tpt array.
+ */
+static s32
+iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
+{
+       if (tbl->expected_tpt)
+               return tbl->expected_tpt[rs_index];
+       return 0;
+}
+
+/**
+ * iwl4965_rs_collect_tx_data - Update the success/failure sliding window
+ *
+ * We keep a sliding window of the last 62 packets transmitted
+ * at this rate.  window->data contains the bitmask of successful
+ * packets.
+ */
+static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
+                             int scale_index, int attempts, int successes)
+{
+       struct iwl_rate_scale_data *window = NULL;
+       static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
+       s32 fail_count, tpt;
+
+       if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
+               return -EINVAL;
+
+       /* Select window for current tx bit rate */
+       window = &(tbl->win[scale_index]);
+
+       /* Get expected throughput */
+       tpt = iwl4965_get_expected_tpt(tbl, scale_index);
+
+       /*
+        * Keep track of only the latest 62 tx frame attempts in this rate's
+        * history window; anything older isn't really relevant any more.
+        * If we have filled up the sliding window, drop the oldest attempt;
+        * if the oldest attempt (highest bit in bitmap) shows "success",
+        * subtract "1" from the success counter (this is the main reason
+        * we keep these bitmaps!).
+        */
+       while (attempts > 0) {
+               if (window->counter >= IWL_RATE_MAX_WINDOW) {
+
+                       /* remove earliest */
+                       window->counter = IWL_RATE_MAX_WINDOW - 1;
+
+                       if (window->data & mask) {
+                               window->data &= ~mask;
+                               window->success_counter--;
+                       }
+               }
+
+               /* Increment frames-attempted counter */
+               window->counter++;
+
+               /* Shift bitmap by one frame to throw away oldest history */
+               window->data <<= 1;
+
+               /* Mark the most recent #successes attempts as successful */
+               if (successes > 0) {
+                       window->success_counter++;
+                       window->data |= 0x1;
+                       successes--;
+               }
+
+               attempts--;
+       }
+
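+       /*
+        * The success ratio computed below is kept scaled by 128
+        * (so 100% == 12800); the average_tpt calculation further
+        * down divides the 128 back out, with "+ 64" providing
+        * rounding.
+        */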
+       /* Calculate current success ratio, avoid divide-by-0! */
+       if (window->counter > 0)
+               window->success_ratio = 128 * (100 * window->success_counter)
+                                       / window->counter;
+       else
+               window->success_ratio = IWL_INVALID_VALUE;
+
+       fail_count = window->counter - window->success_counter;
+
+       /* Calculate average throughput, if we have enough history. */
+       if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
+           (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
+               window->average_tpt = (window->success_ratio * tpt + 64) / 128;
+       else
+               window->average_tpt = IWL_INVALID_VALUE;
+
+       /* Tag this window as having been updated */
+       window->stamp = jiffies;
+
+       return 0;
+}
+
+/*
+ * Fill uCode API rate_n_flags field, based on "search" or "active" table.
+ */
+static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv,
+                                struct iwl_scale_tbl_info *tbl,
+                                int index, u8 use_green)
+{
+       u32 rate_n_flags = 0;
+
+       if (is_legacy(tbl->lq_type)) {
+               rate_n_flags = iwlegacy_rates[index].plcp;
+               if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
+                       rate_n_flags |= RATE_MCS_CCK_MSK;
+
+       } else if (is_Ht(tbl->lq_type)) {
+               if (index > IWL_LAST_OFDM_RATE) {
+                       IWL_ERR(priv, "Invalid HT rate index %d\n", index);
+                       index = IWL_LAST_OFDM_RATE;
+               }
+               rate_n_flags = RATE_MCS_HT_MSK;
+
+               if (is_siso(tbl->lq_type))
+                       rate_n_flags |= iwlegacy_rates[index].plcp_siso;
+               else
+                       rate_n_flags |= iwlegacy_rates[index].plcp_mimo2;
+       } else {
+               IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
+       }
+
+       rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
+                                                    RATE_MCS_ANT_ABC_MSK);
+
+       if (is_Ht(tbl->lq_type)) {
+               if (tbl->is_ht40) {
+                       if (tbl->is_dup)
+                               rate_n_flags |= RATE_MCS_DUP_MSK;
+                       else
+                               rate_n_flags |= RATE_MCS_HT40_MSK;
+               }
+               if (tbl->is_SGI)
+                       rate_n_flags |= RATE_MCS_SGI_MSK;
+
+               if (use_green) {
+                       rate_n_flags |= RATE_MCS_GF_MSK;
+                       if (is_siso(tbl->lq_type) && tbl->is_SGI) {
+                               rate_n_flags &= ~RATE_MCS_SGI_MSK;
+                               IWL_ERR(priv, "GF was set with SGI:SISO\n");
+                       }
+               }
+       }
+       return rate_n_flags;
+}
+
+/*
+ * Interpret uCode API's rate_n_flags format,
+ * fill "search" or "active" tx mode table.
+ */
+static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
+                                   enum ieee80211_band band,
+                                   struct iwl_scale_tbl_info *tbl,
+                                   int *rate_idx)
+{
+       u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
+       u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags);
+       u8 mcs;
+
+       memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
+       *rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
+
+       if (*rate_idx  == IWL_RATE_INVALID) {
+               *rate_idx = -1;
+               return -EINVAL;
+       }
+       tbl->is_SGI = 0;        /* default legacy setup */
+       tbl->is_ht40 = 0;
+       tbl->is_dup = 0;
+       tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
+       tbl->lq_type = LQ_NONE;
+       tbl->max_search = IWL_MAX_SEARCH;
+
+       /* legacy rate format */
+       if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
+               if (iwl4965_num_of_ant == 1) {
+                       if (band == IEEE80211_BAND_5GHZ)
+                               tbl->lq_type = LQ_A;
+                       else
+                               tbl->lq_type = LQ_G;
+               }
+       /* HT rate format */
+       } else {
+               if (rate_n_flags & RATE_MCS_SGI_MSK)
+                       tbl->is_SGI = 1;
+
+               if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
+                   (rate_n_flags & RATE_MCS_DUP_MSK))
+                       tbl->is_ht40 = 1;
+
+               if (rate_n_flags & RATE_MCS_DUP_MSK)
+                       tbl->is_dup = 1;
+
+               mcs = iwl4965_rs_extract_rate(rate_n_flags);
+
+               /* SISO */
+               if (mcs <= IWL_RATE_SISO_60M_PLCP) {
+                       if (iwl4965_num_of_ant == 1)
+                               tbl->lq_type = LQ_SISO; /*else NONE*/
+               /* MIMO2 */
+               } else {
+                       if (iwl4965_num_of_ant == 2)
+                               tbl->lq_type = LQ_MIMO2;
+               }
+       }
+       return 0;
+}
+
+/*
+ * Switch to another antenna/antennas and return 1;
+ * if no other valid antenna is found, return 0.
+ */
+static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
+                            struct iwl_scale_tbl_info *tbl)
+{
+       u8 new_ant_type;
+
+       if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
+               return 0;
+
+       if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
+               return 0;
+
+       new_ant_type = ant_toggle_lookup[tbl->ant_type];
+
+       while ((new_ant_type != tbl->ant_type) &&
+              !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type))
+               new_ant_type = ant_toggle_lookup[new_ant_type];
+
+       if (new_ant_type == tbl->ant_type)
+               return 0;
+
+       tbl->ant_type = new_ant_type;
+       *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
+       *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
+       return 1;
+}
+
+/**
+ * Green-field mode is valid if the station supports it and
+ * there are no non-GF stations present in the BSS.
+ */
+static bool iwl4965_rs_use_green(struct ieee80211_sta *sta)
+{
+       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+       struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+       return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
+               !(ctx->ht.non_gf_sta_present);
+}
+
+/**
+ * iwl4965_rs_get_supported_rates - get the available rates
+ *
+ * If this is a management or broadcast frame, only return the
+ * basic available rates.
+ *
+ */
+static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
+                                 struct ieee80211_hdr *hdr,
+                                 enum iwl_table_type rate_type)
+{
+       if (is_legacy(rate_type)) {
+               return lq_sta->active_legacy_rate;
+       } else {
+               if (is_siso(rate_type))
+                       return lq_sta->active_siso_rate;
+               else
+                       return lq_sta->active_mimo2_rate;
+       }
+}
+
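+/*
+ * Find the nearest lower and higher rates allowed by rate_mask around
+ * "index".  The result packs the higher rate index in bits 8-15 and the
+ * lower rate index in bits 0-7; IWL_RATE_INVALID marks a missing rate.
+ */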
+static u16
+iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
+                               int rate_type)
+{
+       u8 high = IWL_RATE_INVALID;
+       u8 low = IWL_RATE_INVALID;
+
+       /* 802.11A or ht walks to the next literal adjacent rate in
+        * the rate table */
+       if (is_a_band(rate_type) || !is_legacy(rate_type)) {
+               int i;
+               u32 mask;
+
+               /* Find the previous rate that is in the rate mask */
+               i = index - 1;
+               for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+                       if (rate_mask & mask) {
+                               low = i;
+                               break;
+                       }
+               }
+
+               /* Find the next rate that is in the rate mask */
+               i = index + 1;
+               for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
+                       if (rate_mask & mask) {
+                               high = i;
+                               break;
+                       }
+               }
+
+               return (high << 8) | low;
+       }
+
+       low = index;
+       while (low != IWL_RATE_INVALID) {
+               low = iwlegacy_rates[low].prev_rs;
+               if (low == IWL_RATE_INVALID)
+                       break;
+               if (rate_mask & (1 << low))
+                       break;
+               IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
+       }
+
+       high = index;
+       while (high != IWL_RATE_INVALID) {
+               high = iwlegacy_rates[high].next_rs;
+               if (high == IWL_RATE_INVALID)
+                       break;
+               if (rate_mask & (1 << high))
+                       break;
+               IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
+       }
+
+       return (high << 8) | low;
+}
+
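+/*
+ * Get the rate_n_flags for the next lower supported rate, first falling
+ * back from HT to a legacy table if HT is no longer possible or we are
+ * already at the lowest HT index.
+ */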
+static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
+                            struct iwl_scale_tbl_info *tbl,
+                            u8 scale_index, u8 ht_possible)
+{
+       s32 low;
+       u16 rate_mask;
+       u16 high_low;
+       u8 switch_to_legacy = 0;
+       u8 is_green = lq_sta->is_green;
+       struct iwl_priv *priv = lq_sta->drv;
+
+       /* check if we need to switch from HT to legacy rates.
+        * The assumption is that mandatory rates (1 Mbps or 6 Mbps)
+        * are always supported (required by the spec). */
+       if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
+               switch_to_legacy = 1;
+               scale_index = rs_ht_to_legacy[scale_index];
+               if (lq_sta->band == IEEE80211_BAND_5GHZ)
+                       tbl->lq_type = LQ_A;
+               else
+                       tbl->lq_type = LQ_G;
+
+               if (iwl4965_num_of_ant(tbl->ant_type) > 1)
+                       tbl->ant_type =
+                               iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
+
+               tbl->is_ht40 = 0;
+               tbl->is_SGI = 0;
+               tbl->max_search = IWL_MAX_SEARCH;
+       }
+
+       rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
+
+       /* Mask with station rate restriction */
+       if (is_legacy(tbl->lq_type)) {
+               /* supp_rates has no CCK bits in A mode */
+               if (lq_sta->band == IEEE80211_BAND_5GHZ)
+                       rate_mask  = (u16)(rate_mask &
+                          (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
+               else
+                       rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
+       }
+
+       /* If we switched from HT to legacy, check current rate */
+       if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
+               low = scale_index;
+               goto out;
+       }
+
+       high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv,
+                                       scale_index, rate_mask,
+                                       tbl->lq_type);
+       low = high_low & 0xff;
+
+       if (low == IWL_RATE_INVALID)
+               low = scale_index;
+
+out:
+       return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
+}
+
+/*
+ * Simple function to compare two rate scale table types
+ */
+static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a,
+                              struct iwl_scale_tbl_info *b)
+{
+       return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
+               (a->is_SGI == b->is_SGI);
+}
+
+/*
+ * mac80211 sends us Tx status
+ */
+static void
+iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
+                        struct ieee80211_sta *sta, void *priv_sta,
+                        struct sk_buff *skb)
+{
+       int legacy_success;
+       int retries;
+       int rs_index, mac_index, i;
+       struct iwl_lq_sta *lq_sta = priv_sta;
+       struct iwl_link_quality_cmd *table;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct iwl_priv *priv = (struct iwl_priv *)priv_r;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       enum mac80211_rate_control_flags mac_flags;
+       u32 tx_rate;
+       struct iwl_scale_tbl_info tbl_type;
+       struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+       struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+       IWL_DEBUG_RATE_LIMIT(priv,
+               "get frame ack response, update rate scale window\n");
+
+       /* Treat uninitialized rate scaling data the same as non-existent. */
+       if (!lq_sta) {
+               IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
+               return;
+       } else if (!lq_sta->drv) {
+               IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
+               return;
+       }
+
+       if (!ieee80211_is_data(hdr->frame_control) ||
+           info->flags & IEEE80211_TX_CTL_NO_ACK)
+               return;
+
+       /* This packet was aggregated but doesn't carry status info */
+       if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+           !(info->flags & IEEE80211_TX_STAT_AMPDU))
+               return;
+
+       /*
+        * Ignore this Tx frame response if its initial rate doesn't match
+        * that of latest Link Quality command.  There may be stragglers
+        * from a previous Link Quality command, but we're no longer interested
+        * in those; they're either from the "active" mode while we're trying
+        * to check "search" mode, or a prior "search" mode after we've moved
+        * to a new "search" mode (which might become the new "active" mode).
+        */
+       table = &lq_sta->lq;
+       tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+       iwl4965_rs_get_tbl_info_from_mcs(tx_rate,
+                        priv->band, &tbl_type, &rs_index);
+       if (priv->band == IEEE80211_BAND_5GHZ)
+               rs_index -= IWL_FIRST_OFDM_RATE;
+       mac_flags = info->status.rates[0].flags;
+       mac_index = info->status.rates[0].idx;
+       /* For HT packets, map MCS to PLCP */
+       if (mac_flags & IEEE80211_TX_RC_MCS) {
+               mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */
+               if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
+                       mac_index++;
+               /*
+                * mac80211 HT index is always zero-indexed; we need to move
+                * HT OFDM rates after CCK rates in 2.4 GHz band
+                */
+               if (priv->band == IEEE80211_BAND_2GHZ)
+                       mac_index += IWL_FIRST_OFDM_RATE;
+       }
+       /* Here we actually compare this rate to the latest LQ command */
+       if ((mac_index < 0) ||
+           (tbl_type.is_SGI !=
+                       !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
+           (tbl_type.is_ht40 !=
+                       !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
+           (tbl_type.is_dup !=
+                       !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
+           (tbl_type.ant_type != info->antenna_sel_tx) ||
+           (!!(tx_rate & RATE_MCS_HT_MSK) !=
+                       !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
+           (!!(tx_rate & RATE_MCS_GF_MSK) !=
+                       !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
+           (rs_index != mac_index)) {
+               IWL_DEBUG_RATE(priv,
+               "initial rate %d does not match %d (0x%x)\n",
+                        mac_index, rs_index, tx_rate);
+               /*
+                * Since rates mis-match, the last LQ command may have failed.
+                * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode
+                * with the driver.
+                */
+               lq_sta->missed_rate_counter++;
+               if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
+                       lq_sta->missed_rate_counter = 0;
+                       iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq,
+                                                       CMD_ASYNC, false);
+               }
+               /* Regardless, ignore this status info for outdated rate */
+               return;
+       } else
+               /* Rate did match, so reset the missed_rate_counter */
+               lq_sta->missed_rate_counter = 0;
+
+       /* Figure out if rate scale algorithm is in active or search table */
+       if (iwl4965_table_type_matches(&tbl_type,
+                               &(lq_sta->lq_info[lq_sta->active_tbl]))) {
+               curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+               other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+       } else if (iwl4965_table_type_matches(&tbl_type,
+                               &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
+               curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+               other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+       } else {
+               IWL_DEBUG_RATE(priv,
+                       "Neither active nor search matches tx rate\n");
+               tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+               IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
+                       tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
+               tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+               IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
+                       tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
+               IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
+                       tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
+               /*
+                * No matching table found; bypass the data collection
+                * and continue with rate scaling to find the rate table.
+                */
+               iwl4965_rs_stay_in_table(lq_sta, true);
+               goto done;
+       }
+
+       /*
+        * Updating the frame history depends on whether packets were
+        * aggregated.
+        *
+        * For aggregation, all packets were transmitted at the same rate,
+        * which is the first index into the rate scale table.
+        */
+       if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+               tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+               iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
+                               &rs_index);
+               iwl4965_rs_collect_tx_data(curr_tbl, rs_index,
+                                  info->status.ampdu_len,
+                                  info->status.ampdu_ack_len);
+
+               /* Update success/fail counts if not searching for new mode */
+               if (lq_sta->stay_in_tbl) {
+                       lq_sta->total_success += info->status.ampdu_ack_len;
+                       lq_sta->total_failed += (info->status.ampdu_len -
+                                       info->status.ampdu_ack_len);
+               }
+       } else {
+               /*
+                * For legacy, update the frame history for each Tx retry.
+                */
+               retries = info->status.rates[0].count - 1;
+               /* HW doesn't send more than 15 retries */
+               retries = min(retries, 15);
+
+               /* The last transmission may have been successful */
+               legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
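+               /*
+                * rs_table[] in the last LQ command is the fallback sequence,
+                * so entry i is the rate used on the i-th transmit attempt.
+                */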
+               /* Collect data for each rate used during failed TX attempts */
+               for (i = 0; i <= retries; ++i) {
+                       tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
+                       iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band,
+                                       &tbl_type, &rs_index);
+                       /*
+                        * Only collect stats if retried rate is in the same RS
+                        * table as active/search.
+                        */
+                       if (iwl4965_table_type_matches(&tbl_type, curr_tbl))
+                               tmp_tbl = curr_tbl;
+                       else if (iwl4965_table_type_matches(&tbl_type,
+                                                                other_tbl))
+                               tmp_tbl = other_tbl;
+                       else
+                               continue;
+                       iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1,
+                                          i < retries ? 0 : legacy_success);
+               }
+
+               /* Update success/fail counts if not searching for new mode */
+               if (lq_sta->stay_in_tbl) {
+                       lq_sta->total_success += legacy_success;
+                       lq_sta->total_failed += retries + (1 - legacy_success);
+               }
+       }
+       /* The last TX rate is cached in lq_sta; it's set in if/else above */
+       lq_sta->last_rate_n_flags = tx_rate;
+done:
+       /* See if there's a better rate or modulation mode to try. */
+       if (sta && sta->supp_rates[sband->band])
+               iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta);
+}
+
+/*
+ * Begin a period of staying with a selected modulation mode.
+ * Set "stay_in_tbl" flag to prevent any mode switches.
+ * Set frame tx success limits according to legacy vs. high-throughput,
+ * and reset overall (spanning all rates) tx success history statistics.
+ * These control how long we stay using the same modulation mode before
+ * searching for a new mode.
+ */
+static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
+                                struct iwl_lq_sta *lq_sta)
+{
+       IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
+       lq_sta->stay_in_tbl = 1;        /* only place this gets set */
+       if (is_legacy) {
+               lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
+               lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
+               lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
+       } else {
+               lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
+               lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
+               lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
+       }
+       lq_sta->table_count = 0;
+       lq_sta->total_failed = 0;
+       lq_sta->total_success = 0;
+       lq_sta->flush_timer = jiffies;
+       lq_sta->action_counter = 0;
+}
+
+/*
+ * Find correct throughput table for given mode of modulation
+ */
+static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+                                     struct iwl_scale_tbl_info *tbl)
+{
+       /* Used to choose among HT tables */
+       s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
+
+       /* Check for invalid LQ type */
+       if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
+               tbl->expected_tpt = expected_tpt_legacy;
+               return;
+       }
+
+       /* Legacy rates have only one table */
+       if (is_legacy(tbl->lq_type)) {
+               tbl->expected_tpt = expected_tpt_legacy;
+               return;
+       }
+
+       /* Choose among many HT tables depending on number of streams
+        * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
+        * status */
+       if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+               ht_tbl_pointer = expected_tpt_siso20MHz;
+       else if (is_siso(tbl->lq_type))
+               ht_tbl_pointer = expected_tpt_siso40MHz;
+       else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+               ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+       else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
+               ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+
+       if (!tbl->is_SGI && !lq_sta->is_agg)            /* Normal */
+               tbl->expected_tpt = ht_tbl_pointer[0];
+       else if (tbl->is_SGI && !lq_sta->is_agg)        /* SGI */
+               tbl->expected_tpt = ht_tbl_pointer[1];
+       else if (!tbl->is_SGI && lq_sta->is_agg)        /* AGG */
+               tbl->expected_tpt = ht_tbl_pointer[2];
+       else                                            /* AGG+SGI */
+               tbl->expected_tpt = ht_tbl_pointer[3];
+}
+
+/*
+ * Find starting rate for new "search" high-throughput mode of modulation.
+ * Goal is to find lowest expected rate (under perfect conditions) that is
+ * above the current measured throughput of "active" mode, to give new mode
+ * a fair chance to prove itself without too many challenges.
+ *
+ * This gets called when transitioning to more aggressive modulation
+ * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
+ * (i.e. MIMO to SISO).  When moving to MIMO, bit rate will typically need
+ * to decrease to match "active" throughput.  When moving from MIMO to SISO,
+ * bit rate will typically need to increase, but not if performance was bad.
+ */
+static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv,
+                           struct iwl_lq_sta *lq_sta,
+                           struct iwl_scale_tbl_info *tbl,     /* "search" */
+                           u16 rate_mask, s8 index)
+{
+       /* "active" values */
+       struct iwl_scale_tbl_info *active_tbl =
+           &(lq_sta->lq_info[lq_sta->active_tbl]);
+       s32 active_sr = active_tbl->win[index].success_ratio;
+       s32 active_tpt = active_tbl->expected_tpt[index];
+
+       /* expected "search" throughput */
+       s32 *tpt_tbl = tbl->expected_tpt;
+
+       s32 new_rate, high, low, start_hi;
+       u16 high_low;
+       s8 rate = index;
+
+       new_rate = high = low = start_hi = IWL_RATE_INVALID;
+
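+       /*
+        * iwl4965_rs_get_adjacent_rate() packs the next lower supported
+        * rate index in the low byte and the next higher one in the high
+        * byte of its return value.
+        */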
+       for (; ;) {
+               high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask,
+                                               tbl->lq_type);
+
+               low = high_low & 0xff;
+               high = (high_low >> 8) & 0xff;
+
+               /*
+                * Lower the "search" bit rate, to give new "search" mode
+                * approximately the same throughput as "active" if:
+                *
+                * 1) "Active" mode has been working modestly well (but not
+                *    great), and expected "search" throughput (under perfect
+                *    conditions) at candidate rate is above the actual
+                *    measured "active" throughput (but less than expected
+                *    "active" throughput under perfect conditions).
+                * OR
+                * 2) "Active" mode has been working perfectly or very well
+                *    and expected "search" throughput (under perfect
+                *    conditions) at candidate rate is above expected
+                *    "active" throughput (under perfect conditions).
+                */
+               if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
+                    ((active_sr > IWL_RATE_DECREASE_TH) &&
+                     (active_sr <= IWL_RATE_HIGH_TH) &&
+                     (tpt_tbl[rate] <= active_tpt))) ||
+                   ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
+                    (tpt_tbl[rate] > active_tpt))) {
+
+                       /* (2nd or later pass)
+                        * If we've already tried to raise the rate, and are
+                        * now trying to lower it, use the higher rate. */
+                       if (start_hi != IWL_RATE_INVALID) {
+                               new_rate = start_hi;
+                               break;
+                       }
+
+                       new_rate = rate;
+
+                       /* Loop again with lower rate */
+                       if (low != IWL_RATE_INVALID)
+                               rate = low;
+
+                       /* Lower rate not available, use the original */
+                       else
+                               break;
+
+               /* Else try to raise the "search" rate to match "active" */
+               } else {
+                       /* (2nd or later pass)
+                        * If we've already tried to lower the rate, and are
+                        * now trying to raise it, use the lower rate. */
+                       if (new_rate != IWL_RATE_INVALID)
+                               break;
+
+                       /* Loop again with higher rate */
+                       else if (high != IWL_RATE_INVALID) {
+                               start_hi = high;
+                               rate = high;
+
+                       /* Higher rate not available, use the original */
+                       } else {
+                               new_rate = rate;
+                               break;
+                       }
+               }
+       }
+
+       return new_rate;
+}
+
+/*
+ * Set up search table for MIMO2
+ */
+static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv,
+                            struct iwl_lq_sta *lq_sta,
+                            struct ieee80211_conf *conf,
+                            struct ieee80211_sta *sta,
+                            struct iwl_scale_tbl_info *tbl, int index)
+{
+       u16 rate_mask;
+       s32 rate;
+       s8 is_green = lq_sta->is_green;
+       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+       struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+       if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+               return -1;
+
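+       /*
+        * A peer in static SM power save keeps only one receive chain
+        * active, so MIMO transmissions to it are not allowed.
+        */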
+       if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
+                                               == WLAN_HT_CAP_SM_PS_STATIC)
+               return -1;
+
+       /* Need both Tx chains/antennas to support MIMO */
+       if (priv->hw_params.tx_chains_num < 2)
+               return -1;
+
+       IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
+
+       tbl->lq_type = LQ_MIMO2;
+       tbl->is_dup = lq_sta->is_dup;
+       tbl->action = 0;
+       tbl->max_search = IWL_MAX_SEARCH;
+       rate_mask = lq_sta->active_mimo2_rate;
+
+       if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+               tbl->is_ht40 = 1;
+       else
+               tbl->is_ht40 = 0;
+
+       iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
+
+       rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
+
+       IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n",
+                               rate, rate_mask);
+       if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
+               IWL_DEBUG_RATE(priv,
+                               "Can't switch with index %d rate mask %x\n",
+                                               rate, rate_mask);
+               return -1;
+       }
+       tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
+                                                tbl, rate, is_green);
+
+       IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
+                    tbl->current_rate, is_green);
+       return 0;
+}
+
+/*
+ * Set up search table for SISO
+ */
+static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv,
+                            struct iwl_lq_sta *lq_sta,
+                            struct ieee80211_conf *conf,
+                            struct ieee80211_sta *sta,
+                            struct iwl_scale_tbl_info *tbl, int index)
+{
+       u16 rate_mask;
+       u8 is_green = lq_sta->is_green;
+       s32 rate;
+       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+       struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+       if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+               return -1;
+
+       IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
+
+       tbl->is_dup = lq_sta->is_dup;
+       tbl->lq_type = LQ_SISO;
+       tbl->action = 0;
+       tbl->max_search = IWL_MAX_SEARCH;
+       rate_mask = lq_sta->active_siso_rate;
+
+       if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+               tbl->is_ht40 = 1;
+       else
+               tbl->is_ht40 = 0;
+
+       if (is_green)
+               tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
+
+       iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
+       rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
+
+       IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
+       if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
+               IWL_DEBUG_RATE(priv,
+                       "cannot switch with index %d rate mask %x\n",
+                            rate, rate_mask);
+               return -1;
+       }
+       tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
+                                               tbl, rate, is_green);
+       IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
+                    tbl->current_rate, is_green);
+       return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from legacy
+ */
+static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv,
+                               struct iwl_lq_sta *lq_sta,
+                               struct ieee80211_conf *conf,
+                               struct ieee80211_sta *sta,
+                               int index)
+{
+       struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+       struct iwl_scale_tbl_info *search_tbl =
+                               &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+       struct iwl_rate_scale_data *window = &(tbl->win[index]);
+       u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+                 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+       u8 start_action;
+       u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+       u8 tx_chains_num = priv->hw_params.tx_chains_num;
+       int ret = 0;
+       u8 update_search_tbl_counter = 0;
+
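+       /*
+        * Walk the legacy switch actions round-robin (antenna toggle, SISO,
+        * MIMO2 antenna pairs) until a search table is set up successfully,
+        * or we wrap back to the action we started from.
+        */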
+       tbl->action = IWL_LEGACY_SWITCH_SISO;
+
+       start_action = tbl->action;
+       for (; ;) {
+               lq_sta->action_counter++;
+               switch (tbl->action) {
+               case IWL_LEGACY_SWITCH_ANTENNA1:
+               case IWL_LEGACY_SWITCH_ANTENNA2:
+                       IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
+
+                       if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
+                                                       tx_chains_num <= 1) ||
+                           (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
+                                                       tx_chains_num <= 2))
+                               break;
+
+                       /* Don't change antenna if success has been great */
+                       if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+                               break;
+
+                       /* Set up search table to try other antenna */
+                       memcpy(search_tbl, tbl, sz);
+
+                       if (iwl4965_rs_toggle_antenna(valid_tx_ant,
+                               &search_tbl->current_rate, search_tbl)) {
+                               update_search_tbl_counter = 1;
+                               iwl4965_rs_set_expected_tpt_table(lq_sta,
+                                                               search_tbl);
+                               goto out;
+                       }
+                       break;
+               case IWL_LEGACY_SWITCH_SISO:
+                       IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");
+
+                       /* Set up search table to try SISO */
+                       memcpy(search_tbl, tbl, sz);
+                       search_tbl->is_SGI = 0;
+                       ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta,
+                                                search_tbl, index);
+                       if (!ret) {
+                               lq_sta->action_counter = 0;
+                               goto out;
+                       }
+
+                       break;
+               case IWL_LEGACY_SWITCH_MIMO2_AB:
+               case IWL_LEGACY_SWITCH_MIMO2_AC:
+               case IWL_LEGACY_SWITCH_MIMO2_BC:
+                       IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");
+
+                       /* Set up search table to try MIMO */
+                       memcpy(search_tbl, tbl, sz);
+                       search_tbl->is_SGI = 0;
+
+                       if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
+                               search_tbl->ant_type = ANT_AB;
+                       else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
+                               search_tbl->ant_type = ANT_AC;
+                       else
+                               search_tbl->ant_type = ANT_BC;
+
+                       if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
+                                               search_tbl->ant_type))
+                               break;
+
+                       ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
+                                               conf, sta,
+                                                search_tbl, index);
+                       if (!ret) {
+                               lq_sta->action_counter = 0;
+                               goto out;
+                       }
+                       break;
+               }
+               tbl->action++;
+               if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
+                       tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+
+               if (tbl->action == start_action)
+                       break;
+
+       }
+       search_tbl->lq_type = LQ_NONE;
+       return 0;
+
+out:
+       lq_sta->search_better_tbl = 1;
+       tbl->action++;
+       if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
+               tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+       if (update_search_tbl_counter)
+               search_tbl->action = tbl->action;
+       return 0;
+
+}
+
+/*
+ * Try to switch to new modulation mode from SISO
+ */
+static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv,
+                                struct iwl_lq_sta *lq_sta,
+                                struct ieee80211_conf *conf,
+                                struct ieee80211_sta *sta, int index)
+{
+       u8 is_green = lq_sta->is_green;
+       struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+       struct iwl_scale_tbl_info *search_tbl =
+                               &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+       struct iwl_rate_scale_data *window = &(tbl->win[index]);
+       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+       u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+                 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+       u8 start_action;
+       u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+       u8 tx_chains_num = priv->hw_params.tx_chains_num;
+       u8 update_search_tbl_counter = 0;
+       int ret;
+
+       start_action = tbl->action;
+
+       for (;;) {
+               lq_sta->action_counter++;
+               switch (tbl->action) {
+               case IWL_SISO_SWITCH_ANTENNA1:
+               case IWL_SISO_SWITCH_ANTENNA2:
+                       IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
+                       if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
+                                               tx_chains_num <= 1) ||
+                           (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
+                                               tx_chains_num <= 2))
+                               break;
+
+                       if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+                               break;
+
+                       memcpy(search_tbl, tbl, sz);
+                       if (iwl4965_rs_toggle_antenna(valid_tx_ant,
+                                      &search_tbl->current_rate, search_tbl)) {
+                               update_search_tbl_counter = 1;
+                               goto out;
+                       }
+                       break;
+               case IWL_SISO_SWITCH_MIMO2_AB:
+               case IWL_SISO_SWITCH_MIMO2_AC:
+               case IWL_SISO_SWITCH_MIMO2_BC:
+                       IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
+                       memcpy(search_tbl, tbl, sz);
+                       search_tbl->is_SGI = 0;
+
+                       if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
+                               search_tbl->ant_type = ANT_AB;
+                       else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
+                               search_tbl->ant_type = ANT_AC;
+                       else
+                               search_tbl->ant_type = ANT_BC;
+
+                       if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
+                                                search_tbl->ant_type))
+                               break;
+
+                       ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
+                                               conf, sta,
+                                                search_tbl, index);
+                       if (!ret)
+                               goto out;
+                       break;
+               case IWL_SISO_SWITCH_GI:
+                       if (!tbl->is_ht40 && !(ht_cap->cap &
+                                               IEEE80211_HT_CAP_SGI_20))
+                               break;
+                       if (tbl->is_ht40 && !(ht_cap->cap &
+                                               IEEE80211_HT_CAP_SGI_40))
+                               break;
+
+                       IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");
+
+                       memcpy(search_tbl, tbl, sz);
+                       if (is_green) {
+                               if (!tbl->is_SGI)
+                                       break;
+                               else
+                                       IWL_ERR(priv,
+                                               "SGI was set in GF+SISO\n");
+                       }
+                       search_tbl->is_SGI = !tbl->is_SGI;
+                       iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
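+                       /*
+                        * If we are already using SGI and the measured
+                        * throughput beats what the non-SGI table could
+                        * deliver at this index, keep SGI.
+                        */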
+                       if (tbl->is_SGI) {
+                               s32 tpt = lq_sta->last_tpt / 100;
+                               if (tpt >= search_tbl->expected_tpt[index])
+                                       break;
+                       }
+                       search_tbl->current_rate =
+                               iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
+                                                     index, is_green);
+                       update_search_tbl_counter = 1;
+                       goto out;
+               }
+               tbl->action++;
+               if (tbl->action > IWL_SISO_SWITCH_GI)
+                       tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+
+               if (tbl->action == start_action)
+                       break;
+       }
+       search_tbl->lq_type = LQ_NONE;
+       return 0;
+
+ out:
+       lq_sta->search_better_tbl = 1;
+       tbl->action++;
+       if (tbl->action > IWL_SISO_SWITCH_GI)
+               tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+       if (update_search_tbl_counter)
+               search_tbl->action = tbl->action;
+
+       return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from MIMO2
+ */
+static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv,
+                                struct iwl_lq_sta *lq_sta,
+                                struct ieee80211_conf *conf,
+                                struct ieee80211_sta *sta, int index)
+{
+       s8 is_green = lq_sta->is_green;
+       struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+       struct iwl_scale_tbl_info *search_tbl =
+                               &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+       struct iwl_rate_scale_data *window = &(tbl->win[index]);
+       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+       u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+                 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+       u8 start_action;
+       u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+       u8 tx_chains_num = priv->hw_params.tx_chains_num;
+       u8 update_search_tbl_counter = 0;
+       int ret;
+
+       start_action = tbl->action;
+       for (;;) {
+               lq_sta->action_counter++;
+               switch (tbl->action) {
+               case IWL_MIMO2_SWITCH_ANTENNA1:
+               case IWL_MIMO2_SWITCH_ANTENNA2:
+                       IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");
+
+                       if (tx_chains_num <= 2)
+                               break;
+
+                       if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+                               break;
+
+                       memcpy(search_tbl, tbl, sz);
+                       if (iwl4965_rs_toggle_antenna(valid_tx_ant,
+                                      &search_tbl->current_rate, search_tbl)) {
+                               update_search_tbl_counter = 1;
+                               goto out;
+                       }
+                       break;
+               case IWL_MIMO2_SWITCH_SISO_A:
+               case IWL_MIMO2_SWITCH_SISO_B:
+               case IWL_MIMO2_SWITCH_SISO_C:
+                       IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");
+
+                       /* Set up new search table for SISO */
+                       memcpy(search_tbl, tbl, sz);
+
+                       if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
+                               search_tbl->ant_type = ANT_A;
+                       else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
+                               search_tbl->ant_type = ANT_B;
+                       else
+                               search_tbl->ant_type = ANT_C;
+
+                       if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
+                                               search_tbl->ant_type))
+                               break;
+
+                       ret = iwl4965_rs_switch_to_siso(priv, lq_sta,
+                                               conf, sta,
+                                                search_tbl, index);
+                       if (!ret)
+                               goto out;
+
+                       break;
+
+               case IWL_MIMO2_SWITCH_GI:
+                       if (!tbl->is_ht40 && !(ht_cap->cap &
+                                               IEEE80211_HT_CAP_SGI_20))
+                               break;
+                       if (tbl->is_ht40 && !(ht_cap->cap &
+                                               IEEE80211_HT_CAP_SGI_40))
+                               break;
+
+                       IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");
+
+                       /* Set up new search table for MIMO2 */
+                       memcpy(search_tbl, tbl, sz);
+                       search_tbl->is_SGI = !tbl->is_SGI;
+                       iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
+                       /*
+                        * If active table already uses the fastest possible
+                        * modulation (dual stream with short guard interval),
+                        * and it's working well, there's no need to look
+                        * for a better type of modulation!
+                        */
+                       if (tbl->is_SGI) {
+                               s32 tpt = lq_sta->last_tpt / 100;
+                               if (tpt >= search_tbl->expected_tpt[index])
+                                       break;
+                       }
+                       search_tbl->current_rate =
+                               iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
+                                                     index, is_green);
+                       update_search_tbl_counter = 1;
+                       goto out;
+
+               }
+               tbl->action++;
+               if (tbl->action > IWL_MIMO2_SWITCH_GI)
+                       tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+
+               if (tbl->action == start_action)
+                       break;
+       }
+       search_tbl->lq_type = LQ_NONE;
+       return 0;
+ out:
+       lq_sta->search_better_tbl = 1;
+       tbl->action++;
+       if (tbl->action > IWL_MIMO2_SWITCH_GI)
+               tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+       if (update_search_tbl_counter)
+               search_tbl->action = tbl->action;
+
+       return 0;
+
+}
+
+/*
+ * Check whether we should continue using same modulation mode, or
+ * begin search for a new mode, based on:
+ * 1) # tx successes or failures while using this mode
+ * 2) # times calling this function
+ * 3) elapsed time in this mode (not used, for now)
+ */
+static void
+iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
+{
+       struct iwl_scale_tbl_info *tbl;
+       int i;
+       int active_tbl;
+       int flush_interval_passed = 0;
+       struct iwl_priv *priv;
+
+       priv = lq_sta->drv;
+       active_tbl = lq_sta->active_tbl;
+
+       tbl = &(lq_sta->lq_info[active_tbl]);
+
+       /* If we've been disallowing search, see if we should now allow it */
+       if (lq_sta->stay_in_tbl) {
+
+               /* Elapsed time using current modulation mode */
+               if (lq_sta->flush_timer)
+                       flush_interval_passed =
+                       time_after(jiffies,
+                                       (unsigned long)(lq_sta->flush_timer +
+                                       IWL_RATE_SCALE_FLUSH_INTVL));
+
+               /*
+                * Check if we should allow search for new modulation mode.
+                * If many frames have failed or succeeded, or we've used
+                * this same modulation for a long time, allow search, and
+                * reset history stats that keep track of whether we should
+                * allow a new search.  Also (below) reset all bitmaps and
+                * stats in active history.
+                */
+               if (force_search ||
+                   (lq_sta->total_failed > lq_sta->max_failure_limit) ||
+                   (lq_sta->total_success > lq_sta->max_success_limit) ||
+                   ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
+                    && (flush_interval_passed))) {
+                       IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n",
+                                    lq_sta->total_failed,
+                                    lq_sta->total_success,
+                                    flush_interval_passed);
+
+                       /* Allow search for new mode */
+                       lq_sta->stay_in_tbl = 0;        /* only place reset */
+                       lq_sta->total_failed = 0;
+                       lq_sta->total_success = 0;
+                       lq_sta->flush_timer = 0;
+
+               /*
+                * Else if we've used this modulation mode enough repetitions
+                * (regardless of elapsed time or success/failure), reset
+                * history bitmaps and rate-specific stats for all rates in
+                * active table.
+                */
+               } else {
+                       lq_sta->table_count++;
+                       if (lq_sta->table_count >=
+                           lq_sta->table_count_limit) {
+                               lq_sta->table_count = 0;
+
+                               IWL_DEBUG_RATE(priv,
+                                       "LQ: stay in table clear win\n");
+                               for (i = 0; i < IWL_RATE_COUNT; i++)
+                                       iwl4965_rs_rate_scale_clear_window(
+                                               &(tbl->win[i]));
+                       }
+               }
+
+               /* If transitioning to allow "search", reset all history
+                * bitmaps and stats in active table (this will become the new
+                * "search" table). */
+               if (!lq_sta->stay_in_tbl) {
+                       for (i = 0; i < IWL_RATE_COUNT; i++)
+                               iwl4965_rs_rate_scale_clear_window(
+                                                       &(tbl->win[i]));
+               }
+       }
+}
+
+/*
+ * Set up the rate table in uCode.
+ * Return rate_n_flags as used in the table.
+ */
+static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv,
+                             struct iwl_rxon_context *ctx,
+                               struct iwl_lq_sta *lq_sta,
+                               struct iwl_scale_tbl_info *tbl,
+                               int index, u8 is_green)
+{
+       u32 rate;
+
+       /* Update uCode's rate table. */
+       rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green);
+       iwl4965_rs_fill_link_cmd(priv, lq_sta, rate);
+       iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
+
+       return rate;
+}
+
+/*
+ * Do rate scaling and search for new modulation mode.
+ */
+static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
+                                 struct sk_buff *skb,
+                                 struct ieee80211_sta *sta,
+                                 struct iwl_lq_sta *lq_sta)
+{
+       struct ieee80211_hw *hw = priv->hw;
+       struct ieee80211_conf *conf = &hw->conf;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       int low = IWL_RATE_INVALID;
+       int high = IWL_RATE_INVALID;
+       int index;
+       int i;
+       struct iwl_rate_scale_data *window = NULL;
+       int current_tpt = IWL_INVALID_VALUE;
+       int low_tpt = IWL_INVALID_VALUE;
+       int high_tpt = IWL_INVALID_VALUE;
+       u32 fail_count;
+       s8 scale_action = 0;
+       u16 rate_mask;
+       u8 update_lq = 0;
+       struct iwl_scale_tbl_info *tbl, *tbl1;
+       u16 rate_scale_index_msk = 0;
+       u32 rate;
+       u8 is_green = 0;
+       u8 active_tbl = 0;
+       u8 done_search = 0;
+       u16 high_low;
+       s32 sr;
+       u8 tid = MAX_TID_COUNT;
+       struct iwl_tid_data *tid_data;
+       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+       struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+       IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
+
+       /* Send management frames and NO_ACK data using lowest rate. */
+       /* TODO: this could probably be improved. */
+       if (!ieee80211_is_data(hdr->frame_control) ||
+           info->flags & IEEE80211_TX_CTL_NO_ACK)
+               return;
+
+       if (!sta || !lq_sta)
+               return;
+
+       lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
+
+       tid = iwl4965_rs_tl_add_packet(lq_sta, hdr);
+       if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
+               tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
+               if (tid_data->agg.state == IWL_AGG_OFF)
+                       lq_sta->is_agg = 0;
+               else
+                       lq_sta->is_agg = 1;
+       } else
+               lq_sta->is_agg = 0;
+
+       /*
+        * Select rate-scale / modulation-mode table to work with in
+        * the rest of this function:  "search" if searching for better
+        * modulation mode, or "active" if doing rate scaling within a mode.
+        */
+       if (!lq_sta->search_better_tbl)
+               active_tbl = lq_sta->active_tbl;
+       else
+               active_tbl = 1 - lq_sta->active_tbl;
+
+       tbl = &(lq_sta->lq_info[active_tbl]);
+       if (is_legacy(tbl->lq_type))
+               lq_sta->is_green = 0;
+       else
+               lq_sta->is_green = iwl4965_rs_use_green(sta);
+       is_green = lq_sta->is_green;
+
+       /* current tx rate */
+       index = lq_sta->last_txrate_idx;
+
+       IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
+                      tbl->lq_type);
+
+       /* rates available for this association, and for modulation mode */
+       rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
+
+       IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
+
+       /* mask with station rate restriction */
+       if (is_legacy(tbl->lq_type)) {
+               if (lq_sta->band == IEEE80211_BAND_5GHZ)
+                       /* supp_rates has no CCK bits in A mode */
+                       rate_scale_index_msk = (u16) (rate_mask &
+                               (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
+               else
+                       rate_scale_index_msk = (u16) (rate_mask &
+                                                     lq_sta->supp_rates);
+
+       } else
+               rate_scale_index_msk = rate_mask;
+
+       if (!rate_scale_index_msk)
+               rate_scale_index_msk = rate_mask;
+
+       if (!((1 << index) & rate_scale_index_msk)) {
+               IWL_ERR(priv, "Current Rate is not valid\n");
+               if (lq_sta->search_better_tbl) {
+                       /* revert to active table if search table is not valid */
+                       tbl->lq_type = LQ_NONE;
+                       lq_sta->search_better_tbl = 0;
+                       tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+                       /* get "active" rate info */
+                       index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
+                       rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
+                                                 tbl, index, is_green);
+               }
+               return;
+       }
+
+       /* Get expected throughput table and history window for current rate */
+       if (!tbl->expected_tpt) {
+               IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
+               return;
+       }
+
+       /* force user max rate if set by user */
+       if ((lq_sta->max_rate_idx != -1) &&
+           (lq_sta->max_rate_idx < index)) {
+               index = lq_sta->max_rate_idx;
+               update_lq = 1;
+               window = &(tbl->win[index]);
+               goto lq_update;
+       }
+
+       window = &(tbl->win[index]);
+
+       /*
+        * If there is not enough history to calculate actual average
+        * throughput, keep analyzing results of more tx frames, without
+        * changing rate or mode (bypass most of the rest of this function).
+        * Set up new rate table in uCode only if old rate is not supported
+        * in current association (use new rate found above).
+        */
+       fail_count = window->counter - window->success_counter;
+       if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
+                       (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
+               IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
+                              "for index %d\n",
+                              window->success_counter, window->counter, index);
+
+               /* Can't calculate this yet; not enough history */
+               window->average_tpt = IWL_INVALID_VALUE;
+
+               /* Should we stay with this modulation mode,
+                * or search for a new one? */
+               iwl4965_rs_stay_in_table(lq_sta, false);
+
+               goto out;
+       }
+       /* Else we have enough samples; calculate estimate of
+        * actual average throughput */
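+       /*
+        * success_ratio is kept as a percentage scaled by 128, so this
+        * yields expected_tpt[index] scaled by the success percentage;
+        * the +64 rounds the division by 128 to the nearest value.
+        */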
+       if (window->average_tpt != ((window->success_ratio *
+                       tbl->expected_tpt[index] + 64) / 128)) {
+               IWL_ERR(priv,
+                        "expected_tpt should have been calculated by now\n");
+               window->average_tpt = ((window->success_ratio *
+                                       tbl->expected_tpt[index] + 64) / 128);
+       }
+
+       /* If we are searching for better modulation mode, check success. */
+       if (lq_sta->search_better_tbl) {
+               /* If good success, continue using the "search" mode;
+                * no need to send new link quality command, since we're
+                * continuing to use the setup that we've been trying. */
+               if (window->average_tpt > lq_sta->last_tpt) {
+
+                       IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
+                                       "suc=%d cur-tpt=%d old-tpt=%d\n",
+                                       window->success_ratio,
+                                       window->average_tpt,
+                                       lq_sta->last_tpt);
+
+                       if (!is_legacy(tbl->lq_type))
+                               lq_sta->enable_counter = 1;
+
+                       /* Swap tables; "search" becomes "active" */
+                       lq_sta->active_tbl = active_tbl;
+                       current_tpt = window->average_tpt;
+
+               /* Else poor success; go back to mode in "active" table */
+               } else {
+
+                       IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
+                                       "suc=%d cur-tpt=%d old-tpt=%d\n",
+                                       window->success_ratio,
+                                       window->average_tpt,
+                                       lq_sta->last_tpt);
+
+                       /* Nullify "search" table */
+                       tbl->lq_type = LQ_NONE;
+
+                       /* Revert to "active" table */
+                       active_tbl = lq_sta->active_tbl;
+                       tbl = &(lq_sta->lq_info[active_tbl]);
+
+                       /* Revert to "active" rate and throughput info */
+                       index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
+                       current_tpt = lq_sta->last_tpt;
+
+                       /* Need to set up a new rate table in uCode */
+                       update_lq = 1;
+               }
+
+               /* Either way, we've made a decision; modulation mode
+                * search is done, allow rate adjustment next time. */
+               lq_sta->search_better_tbl = 0;
+               done_search = 1;        /* Don't switch modes below! */
+               goto lq_update;
+       }
+
+       /* (Else) not in search of better modulation mode, try for better
+        * starting rate, while staying in this mode. */
+       high_low = iwl4965_rs_get_adjacent_rate(priv, index,
+                                       rate_scale_index_msk,
+                                       tbl->lq_type);
+       low = high_low & 0xff;
+       high = (high_low >> 8) & 0xff;
+
+       /* If the user set a max rate, don't go above that constraint */
+       if ((lq_sta->max_rate_idx != -1) &&
+           (lq_sta->max_rate_idx < high))
+               high = IWL_RATE_INVALID;
+
+       sr = window->success_ratio;
+
+       /* Collect measured throughputs for current and adjacent rates */
+       current_tpt = window->average_tpt;
+       if (low != IWL_RATE_INVALID)
+               low_tpt = tbl->win[low].average_tpt;
+       if (high != IWL_RATE_INVALID)
+               high_tpt = tbl->win[high].average_tpt;
+
+       scale_action = 0;
+
+       /* Too many failures, decrease rate */
+       if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
+               IWL_DEBUG_RATE(priv,
+                       "decrease rate because of low success_ratio\n");
+               scale_action = -1;
+
+       /* No throughput measured yet for adjacent rates; try increase. */
+       } else if ((low_tpt == IWL_INVALID_VALUE) &&
+                  (high_tpt == IWL_INVALID_VALUE)) {
+
+               if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
+                       scale_action = 1;
+               else if (low != IWL_RATE_INVALID)
+                       scale_action = 0;
+       }
+
+       /* Both adjacent throughputs are measured, but neither one has better
+        * throughput; we're using the best rate, don't change it! */
+       else if ((low_tpt != IWL_INVALID_VALUE) &&
+                (high_tpt != IWL_INVALID_VALUE) &&
+                (low_tpt < current_tpt) &&
+                (high_tpt < current_tpt))
+               scale_action = 0;
+
+       /* At least one adjacent rate's throughput is measured,
+        * and may have better performance. */
+       else {
+               /* Higher adjacent rate's throughput is measured */
+               if (high_tpt != IWL_INVALID_VALUE) {
+                       /* Higher rate has better throughput */
+                       if (high_tpt > current_tpt &&
+                                       sr >= IWL_RATE_INCREASE_TH) {
+                               scale_action = 1;
+                       } else {
+                               scale_action = 0;
+                       }
+
+               /* Lower adjacent rate's throughput is measured */
+               } else if (low_tpt != IWL_INVALID_VALUE) {
+                       /* Lower rate has better throughput */
+                       if (low_tpt > current_tpt) {
+                               IWL_DEBUG_RATE(priv,
+                                   "decrease rate because of low tpt\n");
+                               scale_action = -1;
+                       } else if (sr >= IWL_RATE_INCREASE_TH) {
+                               scale_action = 1;
+                       }
+               }
+       }
+
+       /* Sanity check; asked for decrease, but success rate or throughput
+        * has been good at old rate.  Don't change it. */
+       if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
+                   ((sr > IWL_RATE_HIGH_TH) ||
+                    (current_tpt > (100 * tbl->expected_tpt[low]))))
+               scale_action = 0;
+
+       switch (scale_action) {
+       case -1:
+               /* Decrease starting rate, update uCode's rate table */
+               if (low != IWL_RATE_INVALID) {
+                       update_lq = 1;
+                       index = low;
+               }
+
+               break;
+       case 1:
+               /* Increase starting rate, update uCode's rate table */
+               if (high != IWL_RATE_INVALID) {
+                       update_lq = 1;
+                       index = high;
+               }
+
+               break;
+       case 0:
+               /* No change */
+       default:
+               break;
+       }
+
+       IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
+                   "high %d type %d\n",
+                    index, scale_action, low, high, tbl->lq_type);
+
+lq_update:
+       /* Replace uCode's rate table for the destination station. */
+       if (update_lq)
+               rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
+                                         tbl, index, is_green);
+
+       /* Should we stay with this modulation mode,
+        * or search for a new one? */
+       iwl4965_rs_stay_in_table(lq_sta, false);
+
+       /*
+        * Search for new modulation mode if we're:
+        * 1)  Not changing rates right now
+        * 2)  Not just finishing up a search
+        * 3)  Allowing a new search
+        */
+       if (!update_lq && !done_search &&
+               !lq_sta->stay_in_tbl && window->counter) {
+               /* Save current throughput to compare with "search" throughput */
+               lq_sta->last_tpt = current_tpt;
+
+               /* Select a new "search" modulation mode to try.
+                * If one is found, set up the new "search" table. */
+               if (is_legacy(tbl->lq_type))
+                       iwl4965_rs_move_legacy_other(priv, lq_sta,
+                                                       conf, sta, index);
+               else if (is_siso(tbl->lq_type))
+                       iwl4965_rs_move_siso_to_other(priv, lq_sta,
+                                                       conf, sta, index);
+               else /* (is_mimo2(tbl->lq_type)) */
+                       iwl4965_rs_move_mimo2_to_other(priv, lq_sta,
+                                                       conf, sta, index);
+
+               /* If new "search" mode was selected, set up in uCode table */
+               if (lq_sta->search_better_tbl) {
+                       /* Access the "search" table, clear its history. */
+                       tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+                       for (i = 0; i < IWL_RATE_COUNT; i++)
+                               iwl4965_rs_rate_scale_clear_window(
+                                                       &(tbl->win[i]));
+
+                       /* Use new "search" start rate */
+                       index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
+
+                       IWL_DEBUG_RATE(priv,
+                               "Switch current mcs: %X index: %d\n",
+                                    tbl->current_rate, index);
+                       iwl4965_rs_fill_link_cmd(priv, lq_sta,
+                                               tbl->current_rate);
+                       iwl_legacy_send_lq_cmd(priv, ctx,
+                                               &lq_sta->lq, CMD_ASYNC, false);
+               } else
+                       done_search = 1;
+       }
+
+       if (done_search && !lq_sta->stay_in_tbl) {
+               /* If the "active" (non-search) mode was legacy,
+                * and we've tried switching antennas,
+                * but we haven't been able to try HT modes (not available),
+                * stay with best antenna legacy modulation for a while
+                * before next round of mode comparisons. */
+               tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
+               if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
+                   lq_sta->action_counter > tbl1->max_search) {
+                       IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
+                       iwl4965_rs_set_stay_in_table(priv, 1, lq_sta);
+               }
+
+               /* If we're in an HT mode, and all 3 mode switch actions
+                * have been tried and compared, stay in this best modulation
+                * mode for a while before next round of mode comparisons. */
+               if (lq_sta->enable_counter &&
+                   (lq_sta->action_counter >= tbl1->max_search)) {
+                       if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
+                           (lq_sta->tx_agg_tid_en & (1 << tid)) &&
+                           (tid != MAX_TID_COUNT)) {
+                               tid_data =
+                                  &priv->stations[lq_sta->lq.sta_id].tid[tid];
+                               if (tid_data->agg.state == IWL_AGG_OFF) {
+                                       IWL_DEBUG_RATE(priv,
+                                                      "try to aggregate tid %d\n",
+                                                      tid);
+                                       iwl4965_rs_tl_turn_on_agg(priv, tid,
+                                                         lq_sta, sta);
+                               }
+                       }
+                       iwl4965_rs_set_stay_in_table(priv, 0, lq_sta);
+               }
+       }
+
+out:
+       tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl,
+                                                       index, is_green);
+       lq_sta->last_txrate_idx = index;
+}
+
+/**
+ * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table
+ *
+ * The uCode's station table contains a table of fallback rates
+ * for automatic fallback during transmission.
+ *
+ * NOTE: This sets up a default set of values.  These will be replaced later
+ *       if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
+ *       rc80211_simple.
+ *
+ * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
+ *       calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
+ *       which requires station table entry to exist).
+ */
+static void iwl4965_rs_initialize_lq(struct iwl_priv *priv,
+                            struct ieee80211_conf *conf,
+                            struct ieee80211_sta *sta,
+                            struct iwl_lq_sta *lq_sta)
+{
+       struct iwl_scale_tbl_info *tbl;
+       int rate_idx;
+       int i;
+       u32 rate;
+       u8 use_green = iwl4965_rs_use_green(sta);
+       u8 active_tbl = 0;
+       u8 valid_tx_ant;
+       struct iwl_station_priv *sta_priv;
+       struct iwl_rxon_context *ctx;
+
+       if (!sta || !lq_sta)
+               return;
+
+       sta_priv = (void *)sta->drv_priv;
+       ctx = sta_priv->common.ctx;
+
+       i = lq_sta->last_txrate_idx;
+
+       valid_tx_ant = priv->hw_params.valid_tx_ant;
+
+       if (!lq_sta->search_better_tbl)
+               active_tbl = lq_sta->active_tbl;
+       else
+               active_tbl = 1 - lq_sta->active_tbl;
+
+       tbl = &(lq_sta->lq_info[active_tbl]);
+
+       if ((i < 0) || (i >= IWL_RATE_COUNT))
+               i = 0;
+
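+       /* Build an initial legacy rate: PLCP value for rate i, the first
+        * available antenna, and the CCK flag for CCK rates. */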
+       rate = iwlegacy_rates[i].plcp;
+       tbl->ant_type = iwl4965_first_antenna(valid_tx_ant);
+       rate |= tbl->ant_type << RATE_MCS_ANT_POS;
+
+       if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
+               rate |= RATE_MCS_CCK_MSK;
+
+       iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
+       if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
+               iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
+
+       rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
+       tbl->current_rate = rate;
+       iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
+       iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate);
+       priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
+       iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
+}
+
+static void
+iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
+                       struct ieee80211_tx_rate_control *txrc)
+{
+
+       struct sk_buff *skb = txrc->skb;
+       struct ieee80211_supported_band *sband = txrc->sband;
+       struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct iwl_lq_sta *lq_sta = priv_sta;
+       int rate_idx;
+
+       IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");
+
+       /* Get max rate if user set max rate */
+       if (lq_sta) {
+               lq_sta->max_rate_idx = txrc->max_rate_idx;
+               if ((sband->band == IEEE80211_BAND_5GHZ) &&
+                   (lq_sta->max_rate_idx != -1))
+                       lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
+               if ((lq_sta->max_rate_idx < 0) ||
+                   (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
+                       lq_sta->max_rate_idx = -1;
+       }
+
+       /* Treat uninitialized rate scaling data the same as non-existent. */
+       if (lq_sta && !lq_sta->drv) {
+               IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
+               priv_sta = NULL;
+       }
+
+       /* Send management frames and NO_ACK data using lowest rate. */
+       if (rate_control_send_low(sta, priv_sta, txrc))
+               return;
+
+       rate_idx = lq_sta->last_txrate_idx;
+
+       if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
+               rate_idx -= IWL_FIRST_OFDM_RATE;
+               /* 6M and 9M share the same MCS index */
+               rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
+               if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
+                        IWL_RATE_MIMO2_6M_PLCP)
+                       rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
+               info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
+               if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
+                       info->control.rates[0].flags |=
+                                       IEEE80211_TX_RC_SHORT_GI;
+               if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
+                       info->control.rates[0].flags |=
+                                       IEEE80211_TX_RC_DUP_DATA;
+               if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
+                       info->control.rates[0].flags |=
+                                       IEEE80211_TX_RC_40_MHZ_WIDTH;
+               if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
+                       info->control.rates[0].flags |=
+                                       IEEE80211_TX_RC_GREEN_FIELD;
+       } else {
+               /* Check for invalid rates */
+               if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
+                               ((sband->band == IEEE80211_BAND_5GHZ) &&
+                                (rate_idx < IWL_FIRST_OFDM_RATE)))
+                       rate_idx = rate_lowest_index(sband, sta);
+               /* On valid 5 GHz rate, adjust index */
+               else if (sband->band == IEEE80211_BAND_5GHZ)
+                       rate_idx -= IWL_FIRST_OFDM_RATE;
+               info->control.rates[0].flags = 0;
+       }
+       info->control.rates[0].idx = rate_idx;
+}
+
+static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
+                         gfp_t gfp)
+{
+       struct iwl_lq_sta *lq_sta;
+       struct iwl_station_priv *sta_priv =
+                               (struct iwl_station_priv *) sta->drv_priv;
+       struct iwl_priv *priv;
+
+       priv = (struct iwl_priv *)priv_rate;
+       IWL_DEBUG_RATE(priv, "create station rate scale window\n");
+
+       lq_sta = &sta_priv->lq_sta;
+
+       return lq_sta;
+}
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void
+iwl4965_rs_rate_init(struct iwl_priv *priv,
+                       struct ieee80211_sta *sta,
+                       u8 sta_id)
+{
+       int i, j;
+       struct ieee80211_hw *hw = priv->hw;
+       struct ieee80211_conf *conf = &priv->hw->conf;
+       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+       struct iwl_station_priv *sta_priv;
+       struct iwl_lq_sta *lq_sta;
+       struct ieee80211_supported_band *sband;
+
+       sta_priv = (struct iwl_station_priv *) sta->drv_priv;
+       lq_sta = &sta_priv->lq_sta;
+       sband = hw->wiphy->bands[conf->channel->band];
+
+       lq_sta->lq.sta_id = sta_id;
+
+       for (j = 0; j < LQ_SIZE; j++)
+               for (i = 0; i < IWL_RATE_COUNT; i++)
+                       iwl4965_rs_rate_scale_clear_window(
+                                       &lq_sta->lq_info[j].win[i]);
+
+       lq_sta->flush_timer = 0;
+       lq_sta->supp_rates = sta->supp_rates[sband->band];
+
+       IWL_DEBUG_RATE(priv, "LQ:"
+                       "*** rate scale station global init for station %d ***\n",
+                      sta_id);
+       /* TODO: what is a good starting rate for STA? About middle? Maybe not
+        * the lowest or the highest rate.. Could consider using RSSI from
+        * previous packets? Need to have IEEE 802.1X auth succeed immediately
+        * after assoc.. */
+
+       lq_sta->is_dup = 0;
+       lq_sta->max_rate_idx = -1;
+       lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
+       lq_sta->is_green = iwl4965_rs_use_green(sta);
+       lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
+       lq_sta->band = priv->band;
+       /*
+        * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
+        * supp_rates[] does not; shift to convert format, force 9 MBits off.
+        */
+       lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+       lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+       lq_sta->active_siso_rate &= ~((u16)0x2);
+       lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
+
+       /* Same here */
+       lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+       lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+       lq_sta->active_mimo2_rate &= ~((u16)0x2);
+       lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
+
+       /* These values will be overridden later */
+       lq_sta->lq.general_params.single_stream_ant_msk =
+               iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
+       lq_sta->lq.general_params.dual_stream_ant_msk =
+               priv->hw_params.valid_tx_ant &
+               ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
+       if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
+               lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
+       } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+               lq_sta->lq.general_params.dual_stream_ant_msk =
+                       priv->hw_params.valid_tx_ant;
+       }
+
+       /* as default allow aggregation for all tids */
+       lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
+       lq_sta->drv = priv;
+
+       /* Set last_txrate_idx to lowest rate */
+       lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
+       if (sband->band == IEEE80211_BAND_5GHZ)
+               lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
+       lq_sta->is_agg = 0;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+       lq_sta->dbg_fixed_rate = 0;
+#endif
+
+       iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta);
+}
+
+static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
+                            struct iwl_lq_sta *lq_sta, u32 new_rate)
+{
+       struct iwl_scale_tbl_info tbl_type;
+       int index = 0;
+       int rate_idx;
+       int repeat_rate = 0;
+       u8 ant_toggle_cnt = 0;
+       u8 use_ht_possible = 1;
+       u8 valid_tx_ant = 0;
+       struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
+
+       /* Override starting rate (index 0) if needed for debug purposes */
+       iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+       /* Interpret new_rate (rate_n_flags) */
+       iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
+                                 &tbl_type, &rate_idx);
+
+       /* How many times should we repeat the initial rate? */
+       if (is_legacy(tbl_type.lq_type)) {
+               ant_toggle_cnt = 1;
+               repeat_rate = IWL_NUMBER_TRY;
+       } else {
+               repeat_rate = IWL_HT_NUMBER_TRY;
+       }
+
+       lq_cmd->general_params.mimo_delimiter =
+                       is_mimo(tbl_type.lq_type) ? 1 : 0;
+
+       /* Fill 1st table entry (index 0) */
+       lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
+
+       if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) {
+               lq_cmd->general_params.single_stream_ant_msk =
+                                               tbl_type.ant_type;
+       } else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) {
+               lq_cmd->general_params.dual_stream_ant_msk =
+                                               tbl_type.ant_type;
+       } /* otherwise we don't modify the existing value */
+
+       index++;
+       repeat_rate--;
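+       /* priv may be NULL here (the initial-LQ and debugfs fixed-rate paths
+        * pass NULL); in that case antenna toggling below is skipped. */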
+       if (priv)
+               valid_tx_ant = priv->hw_params.valid_tx_ant;
+
+       /* Fill rest of rate table */
+       while (index < LINK_QUAL_MAX_RETRY_NUM) {
+               /* Repeat initial/next rate.
+                * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
+                * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
+               while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
+                       if (is_legacy(tbl_type.lq_type)) {
+                               if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+                                       ant_toggle_cnt++;
+                               else if (priv &&
+                                        iwl4965_rs_toggle_antenna(valid_tx_ant,
+                                                       &new_rate, &tbl_type))
+                                       ant_toggle_cnt = 1;
+                       }
+
+                       /* Override next rate if needed for debug purposes */
+                       iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+                       /* Fill next table entry */
+                       lq_cmd->rs_table[index].rate_n_flags =
+                                       cpu_to_le32(new_rate);
+                       repeat_rate--;
+                       index++;
+               }
+
+               iwl4965_rs_get_tbl_info_from_mcs(new_rate,
+                                               lq_sta->band, &tbl_type,
+                                               &rate_idx);
+
+               /* Indicate to uCode which entries might be MIMO.
+                * If initial rate was MIMO, this will finally end up
+                * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
+               if (is_mimo(tbl_type.lq_type))
+                       lq_cmd->general_params.mimo_delimiter = index;
+
+               /* Get next rate */
+               new_rate = iwl4965_rs_get_lower_rate(lq_sta,
+                                       &tbl_type, rate_idx,
+                                            use_ht_possible);
+
+               /* How many times should we repeat the next rate? */
+               if (is_legacy(tbl_type.lq_type)) {
+                       if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+                               ant_toggle_cnt++;
+                       else if (priv &&
+                                iwl4965_rs_toggle_antenna(valid_tx_ant,
+                                                  &new_rate, &tbl_type))
+                               ant_toggle_cnt = 1;
+
+                       repeat_rate = IWL_NUMBER_TRY;
+               } else {
+                       repeat_rate = IWL_HT_NUMBER_TRY;
+               }
+
+               /* Don't allow HT rates after next pass.
+                * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
+               use_ht_possible = 0;
+
+               /* Override next rate if needed for debug purposes */
+               iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+               /* Fill next table entry */
+               lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
+
+               index++;
+               repeat_rate--;
+       }
+
+       lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+       lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+
+       lq_cmd->agg_params.agg_time_limit =
+               cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+}
+
+static void *iwl4965_rs_alloc(struct ieee80211_hw *hw,
+                             struct dentry *debugfsdir)
+{
+       return hw->priv;
+}
+
+/* rate scale requires a free function to be implemented */
+static void iwl4965_rs_free(void *priv_rate)
+{
+       return;
+}
+
+static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
+                       void *priv_sta)
+{
+       struct iwl_priv *priv __maybe_unused = priv_r;
+
+       IWL_DEBUG_RATE(priv, "enter\n");
+       IWL_DEBUG_RATE(priv, "leave\n");
+}
+
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static int iwl4965_open_file_generic(struct inode *inode, struct file *file)
+{
+       file->private_data = inode->i_private;
+       return 0;
+}
+
+static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+                            u32 *rate_n_flags, int index)
+{
+       struct iwl_priv *priv;
+       u8 valid_tx_ant;
+       u8 ant_sel_tx;
+
+       priv = lq_sta->drv;
+       valid_tx_ant = priv->hw_params.valid_tx_ant;
+       if (lq_sta->dbg_fixed_rate) {
+               ant_sel_tx =
+                 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
+                 >> RATE_MCS_ANT_POS);
+               if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
+                       *rate_n_flags = lq_sta->dbg_fixed_rate;
+                       IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
+               } else {
+                       lq_sta->dbg_fixed_rate = 0;
+                       IWL_ERR(priv,
+                           "Invalid antenna selection 0x%X, Valid is 0x%X\n",
+                           ant_sel_tx, valid_tx_ant);
+                       IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
+               }
+       } else {
+               IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
+       }
+}
+
+static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file,
+                       const char __user *user_buf, size_t count, loff_t *ppos)
+{
+       struct iwl_lq_sta *lq_sta = file->private_data;
+       struct iwl_priv *priv;
+       char buf[64];
+       int buf_size;
+       u32 parsed_rate;
+       struct iwl_station_priv *sta_priv =
+               container_of(lq_sta, struct iwl_station_priv, lq_sta);
+       struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+       priv = lq_sta->drv;
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
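+       /* Parse a hex rate_n_flags value; writing 0 (or anything unparsable)
+        * clears the fixed rate and re-enables normal rate scaling. */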
+       if (sscanf(buf, "%x", &parsed_rate) == 1)
+               lq_sta->dbg_fixed_rate = parsed_rate;
+       else
+               lq_sta->dbg_fixed_rate = 0;
+
+       lq_sta->active_legacy_rate = 0x0FFF;    /* 1 - 54 MBits, includes CCK */
+       lq_sta->active_siso_rate   = 0x1FD0;    /* 6 - 60 MBits, no 9, no CCK */
+       lq_sta->active_mimo2_rate  = 0x1FD0;    /* 6 - 60 MBits, no 9, no CCK */
+
+       IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
+               lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
+
+       if (lq_sta->dbg_fixed_rate) {
+               iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
+               iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
+                               false);
+       }
+
+       return count;
+}
+
+static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file,
+                       char __user *user_buf, size_t count, loff_t *ppos)
+{
+       char *buff;
+       int desc = 0;
+       int i = 0;
+       int index = 0;
+       ssize_t ret;
+
+       struct iwl_lq_sta *lq_sta = file->private_data;
+       struct iwl_priv *priv;
+       struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+
+       priv = lq_sta->drv;
+       buff = kmalloc(1024, GFP_KERNEL);
+       if (!buff)
+               return -ENOMEM;
+
+       desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
+       desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
+                       lq_sta->total_failed, lq_sta->total_success,
+                       lq_sta->active_legacy_rate);
+       desc += sprintf(buff+desc, "fixed rate 0x%X\n",
+                       lq_sta->dbg_fixed_rate);
+       desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
+           (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
+           (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
+           (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
+       desc += sprintf(buff+desc, "lq type %s\n",
+          (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
+       if (is_Ht(tbl->lq_type)) {
+               desc += sprintf(buff+desc, " %s",
+                       (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
+               desc += sprintf(buff+desc, " %s",
+                       (tbl->is_ht40) ? "40MHz" : "20MHz");
+               desc += sprintf(buff+desc, " %s %s %s\n",
+                       (tbl->is_SGI) ? "SGI" : "",
+                       (lq_sta->is_green) ? "GF enabled" : "",
+                       (lq_sta->is_agg) ? "AGG on" : "");
+       }
+       desc += sprintf(buff+desc, "last tx rate=0x%X\n",
+               lq_sta->last_rate_n_flags);
+       desc += sprintf(buff+desc, "general:"
+               "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
+               lq_sta->lq.general_params.flags,
+               lq_sta->lq.general_params.mimo_delimiter,
+               lq_sta->lq.general_params.single_stream_ant_msk,
+               lq_sta->lq.general_params.dual_stream_ant_msk);
+
+       desc += sprintf(buff+desc, "agg:"
+                       "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
+                       le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
+                       lq_sta->lq.agg_params.agg_dis_start_th,
+                       lq_sta->lq.agg_params.agg_frame_cnt_limit);
+
+       desc += sprintf(buff+desc,
+                       "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
+                       lq_sta->lq.general_params.start_rate_index[0],
+                       lq_sta->lq.general_params.start_rate_index[1],
+                       lq_sta->lq.general_params.start_rate_index[2],
+                       lq_sta->lq.general_params.start_rate_index[3]);
+
+       for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+               index = iwl4965_hwrate_to_plcp_idx(
+                       le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
+               if (is_legacy(tbl->lq_type)) {
+                       desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
+                       i,
+                       le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
+                       iwl_rate_mcs[index].mbps);
+               } else {
+                       desc += sprintf(buff+desc,
+                       " rate[%d] 0x%X %smbps (%s)\n",
+                       i,
+                       le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
+                       iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
+               }
+       }
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+       kfree(buff);
+       return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
+       .write = iwl4965_rs_sta_dbgfs_scale_table_write,
+       .read = iwl4965_rs_sta_dbgfs_scale_table_read,
+       .open = iwl4965_open_file_generic,
+       .llseek = default_llseek,
+};
+
+static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file,
+                       char __user *user_buf, size_t count, loff_t *ppos)
+{
+       char *buff;
+       int desc = 0;
+       int i, j;
+       ssize_t ret;
+
+       struct iwl_lq_sta *lq_sta = file->private_data;
+
+       buff = kmalloc(1024, GFP_KERNEL);
+       if (!buff)
+               return -ENOMEM;
+
+       for (i = 0; i < LQ_SIZE; i++) {
+               desc += sprintf(buff+desc,
+                               "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
+                               "rate=0x%X\n",
+                               lq_sta->active_tbl == i ? "*" : "x",
+                               lq_sta->lq_info[i].lq_type,
+                               lq_sta->lq_info[i].is_SGI,
+                               lq_sta->lq_info[i].is_ht40,
+                               lq_sta->lq_info[i].is_dup,
+                               lq_sta->is_green,
+                               lq_sta->lq_info[i].current_rate);
+               for (j = 0; j < IWL_RATE_COUNT; j++) {
+                       desc += sprintf(buff+desc,
+                               "counter=%d success=%d %%=%d\n",
+                               lq_sta->lq_info[i].win[j].counter,
+                               lq_sta->lq_info[i].win[j].success_counter,
+                               lq_sta->lq_info[i].win[j].success_ratio);
+               }
+       }
+       ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+       kfree(buff);
+       return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+       .read = iwl4965_rs_sta_dbgfs_stats_table_read,
+       .open = iwl4965_open_file_generic,
+       .llseek = default_llseek,
+};
+
+static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
+                       char __user *user_buf, size_t count, loff_t *ppos)
+{
+       char buff[120];
+       int desc = 0;
+       ssize_t ret;
+
+       struct iwl_lq_sta *lq_sta = file->private_data;
+       struct iwl_priv *priv;
+       struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+
+       priv = lq_sta->drv;
+
+       if (is_Ht(tbl->lq_type))
+               desc += sprintf(buff+desc,
+                               "Bit Rate= %d Mb/s\n",
+                               tbl->expected_tpt[lq_sta->last_txrate_idx]);
+       else
+               desc += sprintf(buff+desc,
+                               "Bit Rate= %d Mb/s\n",
+                               iwlegacy_rates[lq_sta->last_txrate_idx].ieee >> 1);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+       return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
+       .read = iwl4965_rs_sta_dbgfs_rate_scale_data_read,
+       .open = iwl4965_open_file_generic,
+       .llseek = default_llseek,
+};
+
+static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta,
+                                       struct dentry *dir)
+{
+       struct iwl_lq_sta *lq_sta = priv_sta;
+       lq_sta->rs_sta_dbgfs_scale_table_file =
+               debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
+                               lq_sta, &rs_sta_dbgfs_scale_table_ops);
+       lq_sta->rs_sta_dbgfs_stats_table_file =
+               debugfs_create_file("rate_stats_table", S_IRUSR, dir,
+                       lq_sta, &rs_sta_dbgfs_stats_table_ops);
+       lq_sta->rs_sta_dbgfs_rate_scale_data_file =
+               debugfs_create_file("rate_scale_data", S_IRUSR, dir,
+                       lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
+       lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
+               debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
+               &lq_sta->tx_agg_tid_en);
+}
+
+static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta)
+{
+       struct iwl_lq_sta *lq_sta = priv_sta;
+       debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
+       debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+       debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
+       debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added we ignore it.
+ */
+static void
+iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
+                        struct ieee80211_sta *sta, void *priv_sta)
+{
+}
+static struct rate_control_ops rs_4965_ops = {
+       .module = NULL,
+       .name = IWL4965_RS_NAME,
+       .tx_status = iwl4965_rs_tx_status,
+       .get_rate = iwl4965_rs_get_rate,
+       .rate_init = iwl4965_rs_rate_init_stub,
+       .alloc = iwl4965_rs_alloc,
+       .free = iwl4965_rs_free,
+       .alloc_sta = iwl4965_rs_alloc_sta,
+       .free_sta = iwl4965_rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+       .add_sta_debugfs = iwl4965_rs_add_debugfs,
+       .remove_sta_debugfs = iwl4965_rs_remove_debugfs,
+#endif
+};
+
+int iwl4965_rate_control_register(void)
+{
+       pr_info("Registering 4965 rate control operations\n");
+       return ieee80211_rate_control_register(&rs_4965_ops);
+}
+
+void iwl4965_rate_control_unregister(void)
+{
+       ieee80211_rate_control_unregister(&rs_4965_ops);
+}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
new file mode 100644 (file)
index 0000000..b9fa2f6
--- /dev/null
@@ -0,0 +1,291 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-4965-calib.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-4965-hw.h"
+#include "iwl-4965.h"
+
+void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
+                               struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_missed_beacon_notif *missed_beacon;
+
+       missed_beacon = &pkt->u.missed_beacon;
+       if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+           priv->missed_beacon_threshold) {
+               IWL_DEBUG_CALIB(priv,
+                   "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
+                   le32_to_cpu(missed_beacon->consecutive_missed_beacons),
+                   le32_to_cpu(missed_beacon->total_missed_becons),
+                   le32_to_cpu(missed_beacon->num_recvd_beacons),
+                   le32_to_cpu(missed_beacon->num_expected_beacons));
+               if (!test_bit(STATUS_SCANNING, &priv->status))
+                       iwl4965_init_sensitivity(priv);
+       }
+}
+
+/* Calculate noise level, based on measurements taken during the network
+ * silence just before the beacon arrives.  This measurement can be done only
+ * if we know exactly when to expect beacons, i.e. only when we're associated.
+ */
+static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
+{
+       struct statistics_rx_non_phy *rx_info;
+       int num_active_rx = 0;
+       int total_silence = 0;
+       int bcn_silence_a, bcn_silence_b, bcn_silence_c;
+       int last_rx_noise;
+
+       rx_info = &(priv->_4965.statistics.rx.general);
+       bcn_silence_a =
+               le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+       bcn_silence_b =
+               le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+       bcn_silence_c =
+               le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+
+       if (bcn_silence_a) {
+               total_silence += bcn_silence_a;
+               num_active_rx++;
+       }
+       if (bcn_silence_b) {
+               total_silence += bcn_silence_b;
+               num_active_rx++;
+       }
+       if (bcn_silence_c) {
+               total_silence += bcn_silence_c;
+               num_active_rx++;
+       }
+
+       /* Average among active antennas */
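+       /* The fixed offset below converts the averaged silence measurement
+        * into an approximate noise level in dBm. */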
+       if (num_active_rx)
+               last_rx_noise = (total_silence / num_active_rx) - 107;
+       else
+               last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
+
+       IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
+                       bcn_silence_a, bcn_silence_b, bcn_silence_c,
+                       last_rx_noise);
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+/*
+ *  Accumulate statistics, based on the assumption that all counters are
+ *  32-bit (DWORD) values.
+ *  FIXME: This function is for debugging only; it does not handle
+ *  counter roll-over.
+ */
+static void iwl4965_accumulative_statistics(struct iwl_priv *priv,
+                                       __le32 *stats)
+{
+       int i, size;
+       __le32 *prev_stats;
+       u32 *accum_stats;
+       u32 *delta, *max_delta;
+       struct statistics_general_common *general, *accum_general;
+       struct statistics_tx *tx, *accum_tx;
+
+       prev_stats = (__le32 *)&priv->_4965.statistics;
+       accum_stats = (u32 *)&priv->_4965.accum_statistics;
+       size = sizeof(struct iwl_notif_statistics);
+       general = &priv->_4965.statistics.general.common;
+       accum_general = &priv->_4965.accum_statistics.general.common;
+       tx = &priv->_4965.statistics.tx;
+       accum_tx = &priv->_4965.accum_statistics.tx;
+       delta = (u32 *)&priv->_4965.delta_statistics;
+       max_delta = (u32 *)&priv->_4965.max_delta;
+
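+       /* Walk the notification one 32-bit word at a time, skipping the first
+        * word, and accumulate only positive deltas (roll-over is not handled,
+        * see the FIXME above). */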
+       for (i = sizeof(__le32); i < size;
+            i += sizeof(__le32), stats++, prev_stats++, delta++,
+            max_delta++, accum_stats++) {
+               if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+                       *delta = (le32_to_cpu(*stats) -
+                               le32_to_cpu(*prev_stats));
+                       *accum_stats += *delta;
+                       if (*delta > *max_delta)
+                               *max_delta = *delta;
+               }
+       }
+
+       /* reset accumulative statistics for "no-counter" type statistics */
+       accum_general->temperature = general->temperature;
+       accum_general->ttl_timestamp = general->ttl_timestamp;
+}
+#endif
+
+#define REG_RECALIB_PERIOD (60)
+
+/**
+ * iwl4965_good_plcp_health - check the PLCP error rate
+ *
+ * When the PLCP error rate exceeds the configured threshold, the radio is
+ * reset to improve throughput.
+ */
+bool iwl4965_good_plcp_health(struct iwl_priv *priv,
+                               struct iwl_rx_packet *pkt)
+{
+       bool rc = true;
+       int combined_plcp_delta;
+       unsigned int plcp_msec;
+       unsigned long plcp_received_jiffies;
+
+       if (priv->cfg->base_params->plcp_delta_threshold ==
+           IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
+               IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
+               return rc;
+       }
+
+       /*
+        * check for plcp_err and trigger radio reset if it exceeds
+        * the plcp error threshold plcp_delta.
+        */
+       plcp_received_jiffies = jiffies;
+       plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
+                                       (long) priv->plcp_jiffies);
+       priv->plcp_jiffies = plcp_received_jiffies;
+       /*
+        * check to make sure plcp_msec is not 0 to prevent division
+        * by zero.
+        */
+       if (plcp_msec) {
+               struct statistics_rx_phy *ofdm;
+               struct statistics_rx_ht_phy *ofdm_ht;
+
+               ofdm = &pkt->u.stats.rx.ofdm;
+               ofdm_ht = &pkt->u.stats.rx.ofdm_ht;
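+               /* Sum the OFDM and OFDM-HT PLCP error increments since the
+                * previous statistics notification. */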
+               combined_plcp_delta =
+                   (le32_to_cpu(ofdm->plcp_err) -
+                   le32_to_cpu(priv->_4965.statistics.
+                               rx.ofdm.plcp_err)) +
+                   (le32_to_cpu(ofdm_ht->plcp_err) -
+                   le32_to_cpu(priv->_4965.statistics.
+                               rx.ofdm_ht.plcp_err));
+
+               if ((combined_plcp_delta > 0) &&
+                   ((combined_plcp_delta * 100) / plcp_msec) >
+                       priv->cfg->base_params->plcp_delta_threshold) {
+                       /*
+                        * if plcp_err exceed the threshold,
+                        * the following data is printed in csv format:
+                        *    Text: plcp_err exceeded %d,
+                        *    Received ofdm.plcp_err,
+                        *    Current ofdm.plcp_err,
+                        *    Received ofdm_ht.plcp_err,
+                        *    Current ofdm_ht.plcp_err,
+                        *    combined_plcp_delta,
+                        *    plcp_msec
+                        */
+                       IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
+                               "%u, %u, %u, %u, %d, %u mSecs\n",
+                               priv->cfg->base_params->plcp_delta_threshold,
+                               le32_to_cpu(ofdm->plcp_err),
+                               le32_to_cpu(priv->_4965.statistics.
+                                               rx.ofdm.plcp_err),
+                               le32_to_cpu(ofdm_ht->plcp_err),
+                               le32_to_cpu(priv->_4965.statistics.
+                                               rx.ofdm_ht.plcp_err),
+                               combined_plcp_delta, plcp_msec);
+
+                       rc = false;
+               }
+       }
+       return rc;
+}
+
+void iwl4965_rx_statistics(struct iwl_priv *priv,
+                             struct iwl_rx_mem_buffer *rxb)
+{
+       int change;
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+       IWL_DEBUG_RX(priv,
+                    "Statistics notification received (%d vs %d).\n",
+                    (int)sizeof(struct iwl_notif_statistics),
+                    le32_to_cpu(pkt->len_n_flags) &
+                    FH_RSCSR_FRAME_SIZE_MSK);
+
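+       /* Detect a temperature or HT40-mode change, so the temperature
+        * handler at the end of this function only runs when needed. */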
+       change = ((priv->_4965.statistics.general.common.temperature !=
+                  pkt->u.stats.general.common.temperature) ||
+                  ((priv->_4965.statistics.flag &
+                  STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+                  (pkt->u.stats.flag &
+                  STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+       iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
+#endif
+
+       iwl_legacy_recover_from_statistics(priv, pkt);
+
+       memcpy(&priv->_4965.statistics, &pkt->u.stats,
+               sizeof(priv->_4965.statistics));
+
+       set_bit(STATUS_STATISTICS, &priv->status);
+
+       /* Reschedule the statistics timer to occur in
+        * REG_RECALIB_PERIOD seconds to ensure we get a
+        * thermal update even if the uCode doesn't give
+        * us one */
+       mod_timer(&priv->statistics_periodic, jiffies +
+                 msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
+
+       if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
+           (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
+               iwl4965_rx_calc_noise(priv);
+               queue_work(priv->workqueue, &priv->run_time_calib_work);
+       }
+       if (priv->cfg->ops->lib->temp_ops.temperature && change)
+               priv->cfg->ops->lib->temp_ops.temperature(priv);
+}
+
+void iwl4965_reply_statistics(struct iwl_priv *priv,
+                             struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+       if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+               memset(&priv->_4965.accum_statistics, 0,
+                       sizeof(struct iwl_notif_statistics));
+               memset(&priv->_4965.delta_statistics, 0,
+                       sizeof(struct iwl_notif_statistics));
+               memset(&priv->_4965.max_delta, 0,
+                       sizeof(struct iwl_notif_statistics));
+#endif
+               IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
+       }
+       iwl4965_rx_statistics(priv, rxb);
+}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
new file mode 100644 (file)
index 0000000..a262c23
--- /dev/null
@@ -0,0 +1,721 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-4965.h"
+
+static struct iwl_link_quality_cmd *
+iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
+{
+       int i, r;
+       struct iwl_link_quality_cmd *link_cmd;
+       u32 rate_flags = 0;
+       __le32 rate_n_flags;
+
+       link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
+       if (!link_cmd) {
+               IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
+               return NULL;
+       }
+       /* Set up the rate scaling to start at selected rate, fall back
+        * all the way down to 1M in IEEE order, and then spin on 1M */
+       if (priv->band == IEEE80211_BAND_5GHZ)
+               r = IWL_RATE_6M_INDEX;
+       else
+               r = IWL_RATE_1M_INDEX;
+
+       if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
+               rate_flags |= RATE_MCS_CCK_MSK;
+
+       rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) <<
+                               RATE_MCS_ANT_POS;
+       rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp,
+                                                  rate_flags);
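+       /* Start with the same rate in every retry slot; rate scaling replaces
+        * this default table once it is initialized for the station. */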
+       for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+               link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
+
+       link_cmd->general_params.single_stream_ant_msk =
+                               iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
+
+       link_cmd->general_params.dual_stream_ant_msk =
+               priv->hw_params.valid_tx_ant &
+               ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
+       if (!link_cmd->general_params.dual_stream_ant_msk) {
+               link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
+       } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+               link_cmd->general_params.dual_stream_ant_msk =
+                       priv->hw_params.valid_tx_ant;
+       }
+
+       link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+       link_cmd->agg_params.agg_time_limit =
+               cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+
+       link_cmd->sta_id = sta_id;
+
+       return link_cmd;
+}
+
+/*
+ * iwl4965_add_bssid_station - Add the special IBSS BSSID station
+ *
+ * Function sleeps.
+ */
+int
+iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+                            const u8 *addr, u8 *sta_id_r)
+{
+       int ret;
+       u8 sta_id;
+       struct iwl_link_quality_cmd *link_cmd;
+       unsigned long flags;
+
+       if (sta_id_r)
+               *sta_id_r = IWL_INVALID_STATION;
+
+       ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
+       if (ret) {
+               IWL_ERR(priv, "Unable to add station %pM\n", addr);
+               return ret;
+       }
+
+       if (sta_id_r)
+               *sta_id_r = sta_id;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       priv->stations[sta_id].used |= IWL_STA_LOCAL;
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       /* Set up default rate scaling table in device's station table */
+       link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
+       if (!link_cmd) {
+               IWL_ERR(priv,
+                       "Unable to initialize rate scaling for station %pM.\n",
+                       addr);
+               return -ENOMEM;
+       }
+
+       ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
+       if (ret)
+               IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       priv->stations[sta_id].lq = link_cmd;
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return 0;
+}
+
+static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv,
+                                     struct iwl_rxon_context *ctx,
+                                     bool send_if_empty)
+{
+       int i, not_empty = 0;
+       u8 buff[sizeof(struct iwl_wep_cmd) +
+               sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
+       struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
+       size_t cmd_size  = sizeof(struct iwl_wep_cmd);
+       struct iwl_host_cmd cmd = {
+               .id = ctx->wep_key_cmd,
+               .data = wep_cmd,
+               .flags = CMD_SYNC,
+       };
+
+       might_sleep();
+
+       memset(wep_cmd, 0, cmd_size +
+                       (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
+
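+       /* Build one command covering all WEP_KEYS_MAX default key slots;
+        * empty slots are marked with WEP_INVALID_OFFSET. */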
+       for (i = 0; i < WEP_KEYS_MAX ; i++) {
+               wep_cmd->key[i].key_index = i;
+               if (ctx->wep_keys[i].key_size) {
+                       wep_cmd->key[i].key_offset = i;
+                       not_empty = 1;
+               } else {
+                       wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
+               }
+
+               wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
+               memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
+                               ctx->wep_keys[i].key_size);
+       }
+
+       wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
+       wep_cmd->num_keys = WEP_KEYS_MAX;
+
+       cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
+
+       cmd.len = cmd_size;
+
+       if (not_empty || send_if_empty)
+               return iwl_legacy_send_cmd(priv, &cmd);
+       else
+               return 0;
+}
+
+int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
+                                struct iwl_rxon_context *ctx)
+{
+       lockdep_assert_held(&priv->mutex);
+
+       return iwl4965_static_wepkey_cmd(priv, ctx, false);
+}
+
+int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
+                              struct iwl_rxon_context *ctx,
+                              struct ieee80211_key_conf *keyconf)
+{
+       int ret;
+
+       lockdep_assert_held(&priv->mutex);
+
+       IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
+                     keyconf->keyidx);
+
+       memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
+       if (iwl_legacy_is_rfkill(priv)) {
+               IWL_DEBUG_WEP(priv,
+               "Not sending REPLY_WEPKEY command due to RFKILL.\n");
+               /* but keys in device are clear anyway so return success */
+               return 0;
+       }
+       ret = iwl4965_static_wepkey_cmd(priv, ctx, true);
+       IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
+                     keyconf->keyidx, ret);
+
+       return ret;
+}
+
+int iwl4965_set_default_wep_key(struct iwl_priv *priv,
+                           struct iwl_rxon_context *ctx,
+                           struct ieee80211_key_conf *keyconf)
+{
+       int ret;
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (keyconf->keylen != WEP_KEY_LEN_128 &&
+           keyconf->keylen != WEP_KEY_LEN_64) {
+               IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
+               return -EINVAL;
+       }
+
+       keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+       keyconf->hw_key_idx = HW_KEY_DEFAULT;
+       priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
+
+       ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
+       memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
+                                                       keyconf->keylen);
+
+       ret = iwl4965_static_wepkey_cmd(priv, ctx, false);
+       IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
+               keyconf->keylen, keyconf->keyidx, ret);
+
+       return ret;
+}
+
+static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv,
+                                       struct iwl_rxon_context *ctx,
+                                       struct ieee80211_key_conf *keyconf,
+                                       u8 sta_id)
+{
+       unsigned long flags;
+       __le16 key_flags = 0;
+       struct iwl_legacy_addsta_cmd sta_cmd;
+
+       lockdep_assert_held(&priv->mutex);
+
+       keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+
+       key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
+       key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+       key_flags &= ~STA_KEY_FLG_INVALID;
+
+       if (keyconf->keylen == WEP_KEY_LEN_128)
+               key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
+
+       if (sta_id == ctx->bcast_sta_id)
+               key_flags |= STA_KEY_MULTICAST_MSK;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+
+       priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+       priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+       priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
+
+       memcpy(priv->stations[sta_id].keyinfo.key,
+                               keyconf->key, keyconf->keylen);
+
+       memcpy(&priv->stations[sta_id].sta.key.key[3],
+                               keyconf->key, keyconf->keylen);
+
+       if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
+                       == STA_KEY_FLG_NO_ENC)
+               priv->stations[sta_id].sta.key.key_offset =
+                                iwl_legacy_get_free_ucode_key_index(priv);
+       /* else, we are overriding an existing key => no need to allocate room
+        * in the uCode. */
+
+       WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+               "no space for a new key");
+
+       priv->stations[sta_id].sta.key.key_flags = key_flags;
+       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+       memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+                       sizeof(struct iwl_legacy_addsta_cmd));
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
+                                        struct iwl_rxon_context *ctx,
+                                        struct ieee80211_key_conf *keyconf,
+                                        u8 sta_id)
+{
+       unsigned long flags;
+       __le16 key_flags = 0;
+       struct iwl_legacy_addsta_cmd sta_cmd;
+
+       lockdep_assert_held(&priv->mutex);
+
+       key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
+       key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+       key_flags &= ~STA_KEY_FLG_INVALID;
+
+       if (sta_id == ctx->bcast_sta_id)
+               key_flags |= STA_KEY_MULTICAST_MSK;
+
+       keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+       priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+
+       memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
+              keyconf->keylen);
+
+       memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
+              keyconf->keylen);
+
+       if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
+                       == STA_KEY_FLG_NO_ENC)
+               priv->stations[sta_id].sta.key.key_offset =
+                                iwl_legacy_get_free_ucode_key_index(priv);
+       /* else, we are overriding an existing key => no need to allocate room
+        * in the uCode. */
+
+       WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+               "no space for a new key");
+
+       priv->stations[sta_id].sta.key.key_flags = key_flags;
+       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+       memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+                        sizeof(struct iwl_legacy_addsta_cmd));
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv,
+                                        struct iwl_rxon_context *ctx,
+                                        struct ieee80211_key_conf *keyconf,
+                                        u8 sta_id)
+{
+       unsigned long flags;
+       int ret = 0;
+       __le16 key_flags = 0;
+
+       key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
+       key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+       key_flags &= ~STA_KEY_FLG_INVALID;
+
+       if (sta_id == ctx->bcast_sta_id)
+               key_flags |= STA_KEY_MULTICAST_MSK;
+
+       keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+       keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+
+       priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+       priv->stations[sta_id].keyinfo.keylen = 16;
+
+       if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
+                       == STA_KEY_FLG_NO_ENC)
+               priv->stations[sta_id].sta.key.key_offset =
+                                iwl_legacy_get_free_ucode_key_index(priv);
+       /* else, we are overriding an existing key => no need to allocate room
+        * in the uCode. */
+
+       WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+               "no space for a new key");
+
+       priv->stations[sta_id].sta.key.key_flags = key_flags;
+
+       /* This copy is actually not needed: we get the key with each TX */
+       memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
+
+       memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
+
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return ret;
+}
+
+void iwl4965_update_tkip_key(struct iwl_priv *priv,
+                        struct iwl_rxon_context *ctx,
+                        struct ieee80211_key_conf *keyconf,
+                        struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
+{
+       u8 sta_id;
+       unsigned long flags;
+       int i;
+
+       if (iwl_legacy_scan_cancel(priv)) {
+               /* cancel scan failed, just live w/ bad key and rely
+                  briefly on SW decryption */
+               return;
+       }
+
+       sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);
+       if (sta_id == IWL_INVALID_STATION)
+               return;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+
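+       /* Update only the IV32 value and the RX phase-1 TKIP key material;
+        * STA_MODIFY_KEY_MASK limits the change to the key portion of the
+        * station entry. */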
+       priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
+
+       for (i = 0; i < 5; i++)
+               priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
+                       cpu_to_le16(phase1key[i]);
+
+       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+       iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+}
+
+int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
+                          struct iwl_rxon_context *ctx,
+                          struct ieee80211_key_conf *keyconf,
+                          u8 sta_id)
+{
+       unsigned long flags;
+       u16 key_flags;
+       u8 keyidx;
+       struct iwl_legacy_addsta_cmd sta_cmd;
+
+       lockdep_assert_held(&priv->mutex);
+
+       ctx->key_mapping_keys--;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
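+       /* Recover the key index currently programmed for this station from
+        * the stored key flags, to compare against the key being removed. */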
+       key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
+       keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
+
+       IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
+                     keyconf->keyidx, sta_id);
+
+       if (keyconf->keyidx != keyidx) {
+               /* We were asked to remove a key whose index differs from the
+                * one in the uCode. This means the key we need to remove has
+                * already been replaced by another one with a different index.
+                * Don't do anything and return success.
+                */
+               spin_unlock_irqrestore(&priv->sta_lock, flags);
+               return 0;
+       }
+
+       if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+               IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
+                           keyconf->keyidx, key_flags);
+               spin_unlock_irqrestore(&priv->sta_lock, flags);
+               return 0;
+       }
+
+       if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
+               &priv->ucode_key_table))
+               IWL_ERR(priv, "index %d not used in uCode key table.\n",
+                       priv->stations[sta_id].sta.key.key_offset);
+       memset(&priv->stations[sta_id].keyinfo, 0,
+                                       sizeof(struct iwl_hw_key));
+       memset(&priv->stations[sta_id].sta.key, 0,
+                                       sizeof(struct iwl4965_keyinfo));
+       priv->stations[sta_id].sta.key.key_flags =
+                       STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
+       priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+       if (iwl_legacy_is_rfkill(priv)) {
+               IWL_DEBUG_WEP(priv,
+                "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
+               spin_unlock_irqrestore(&priv->sta_lock, flags);
+               return 0;
+       }
+       memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+                       sizeof(struct iwl_legacy_addsta_cmd));
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+                       struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+       int ret;
+
+       lockdep_assert_held(&priv->mutex);
+
+       ctx->key_mapping_keys++;
+       keyconf->hw_key_idx = HW_KEY_DYNAMIC;
+
+       switch (keyconf->cipher) {
+       case WLAN_CIPHER_SUITE_CCMP:
+               ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx,
+                                                       keyconf, sta_id);
+               break;
+       case WLAN_CIPHER_SUITE_TKIP:
+               ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx,
+                                                       keyconf, sta_id);
+               break;
+       case WLAN_CIPHER_SUITE_WEP40:
+       case WLAN_CIPHER_SUITE_WEP104:
+               ret = iwl4965_set_wep_dynamic_key_info(priv, ctx,
+                                                       keyconf, sta_id);
+               break;
+       default:
+               IWL_ERR(priv,
+                       "Unknown alg: %s cipher = %x\n", __func__,
+                       keyconf->cipher);
+               ret = -EINVAL;
+       }
+
+       IWL_DEBUG_WEP(priv,
+               "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
+                     keyconf->cipher, keyconf->keylen, keyconf->keyidx,
+                     sta_id, ret);
+
+       return ret;
+}
+
+/**
+ * iwl4965_alloc_bcast_station - add broadcast station into driver's station table.
+ *
+ * This adds the broadcast station into the driver's station table
+ * and marks it driver active, so that it will be restored to the
+ * device at the next best time.
+ */
+int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
+                              struct iwl_rxon_context *ctx)
+{
+       struct iwl_link_quality_cmd *link_cmd;
+       unsigned long flags;
+       u8 sta_id;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr,
+                                                               false, NULL);
+       if (sta_id == IWL_INVALID_STATION) {
+               IWL_ERR(priv, "Unable to prepare broadcast station\n");
+               spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+               return -EINVAL;
+       }
+
+       priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
+       priv->stations[sta_id].used |= IWL_STA_BCAST;
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
+       if (!link_cmd) {
+               IWL_ERR(priv,
+                       "Unable to initialize rate scaling for bcast station.\n");
+               return -ENOMEM;
+       }
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       priv->stations[sta_id].lq = link_cmd;
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return 0;
+}
+
+/**
+ * iwl4965_update_bcast_station - update broadcast station's LQ command
+ *
+ * Only used by iwl4965. Placed here to have all bcast station management
+ * code together.
+ */
+static int iwl4965_update_bcast_station(struct iwl_priv *priv,
+                                   struct iwl_rxon_context *ctx)
+{
+       unsigned long flags;
+       struct iwl_link_quality_cmd *link_cmd;
+       u8 sta_id = ctx->bcast_sta_id;
+
+       link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
+       if (!link_cmd) {
+               IWL_ERR(priv,
+               "Unable to initialize rate scaling for bcast station.\n");
+               return -ENOMEM;
+       }
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       if (priv->stations[sta_id].lq)
+               kfree(priv->stations[sta_id].lq);
+       else
+               IWL_DEBUG_INFO(priv,
+               "Bcast station rate scaling has not been initialized yet.\n");
+       priv->stations[sta_id].lq = link_cmd;
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return 0;
+}
+
+int iwl4965_update_bcast_stations(struct iwl_priv *priv)
+{
+       struct iwl_rxon_context *ctx;
+       int ret = 0;
+
+       for_each_context(priv, ctx) {
+               ret = iwl4965_update_bcast_station(priv, ctx);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+/**
+ * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
+ */
+int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
+{
+       unsigned long flags;
+       struct iwl_legacy_addsta_cmd sta_cmd;
+
+       lockdep_assert_held(&priv->mutex);
+
+       /* Remove "disable" flag, to enable Tx for this TID */
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+       priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
+       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+       memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+                                       sizeof(struct iwl_legacy_addsta_cmd));
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
+                        int tid, u16 ssn)
+{
+       unsigned long flags;
+       int sta_id;
+       struct iwl_legacy_addsta_cmd sta_cmd;
+
+       lockdep_assert_held(&priv->mutex);
+
+       sta_id = iwl_legacy_sta_id(sta);
+       if (sta_id == IWL_INVALID_STATION)
+               return -ENXIO;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       priv->stations[sta_id].sta.station_flags_msk = 0;
+       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
+       priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
+       priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
+       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+       memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+                                       sizeof(struct iwl_legacy_addsta_cmd));
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
+                       int tid)
+{
+       unsigned long flags;
+       int sta_id;
+       struct iwl_legacy_addsta_cmd sta_cmd;
+
+       lockdep_assert_held(&priv->mutex);
+
+       sta_id = iwl_legacy_sta_id(sta);
+       if (sta_id == IWL_INVALID_STATION) {
+               IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
+               return -ENXIO;
+       }
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       priv->stations[sta_id].sta.station_flags_msk = 0;
+       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
+       priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
+       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+       memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+                               sizeof(struct iwl_legacy_addsta_cmd));
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
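+/*
+ * Mark the station as asleep and tell the uCode how many frames it may
+ * still send to it, e.g. a single frame in response to a PS-Poll.
+ */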
+void
+iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
+       priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
+       priv->stations[sta_id].sta.sta.modify_mask =
+                                       STA_MODIFY_SLEEP_TX_COUNT_MSK;
+       priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
+       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+       iwl_legacy_send_add_sta(priv,
+                               &priv->stations[sta_id].sta, CMD_ASYNC);
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
new file mode 100644 (file)
index 0000000..5c40502
--- /dev/null
@@ -0,0 +1,1369 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-4965-hw.h"
+#include "iwl-4965.h"
+
+/*
+ * mac80211 queues, ACs, hardware queues, FIFOs.
+ *
+ * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
+ *
+ * mac80211 uses the following numbers, which we get from it
+ * by way of skb_get_queue_mapping(skb):
+ *
+ *     VO      0
+ *     VI      1
+ *     BE      2
+ *     BK      3
+ *
+ *
+ * Regular (not A-MPDU) frames are put into hardware queues corresponding
+ * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
+ * own queue per aggregation session (RA/TID combination), such queues are
+ * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
+ * order to map frames to the right queue, we also need an AC->hw queue
+ * mapping. This is implemented here.
+ *
+ * Due to the way hw queues are set up (by the hw specific modules like
+ * iwl-4965.c), the AC->hw queue mapping is the identity
+ * mapping.
+ */
+
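+/* 802.11 TID (0-7) to mac80211 AC mapping, per the standard UP->AC rules */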
+static const u8 tid_to_ac[] = {
+       IEEE80211_AC_BE,
+       IEEE80211_AC_BK,
+       IEEE80211_AC_BK,
+       IEEE80211_AC_BE,
+       IEEE80211_AC_VI,
+       IEEE80211_AC_VI,
+       IEEE80211_AC_VO,
+       IEEE80211_AC_VO
+};
+
+static inline int iwl4965_get_ac_from_tid(u16 tid)
+{
+       if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+               return tid_to_ac[tid];
+
+       /* no support for TIDs 8-15 yet */
+       return -EINVAL;
+}
+
+static inline int
+iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
+{
+       if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+               return ctx->ac_to_fifo[tid_to_ac[tid]];
+
+       /* no support for TIDs 8-15 yet */
+       return -EINVAL;
+}
+
+/*
+ * Handle building the REPLY_TX command.
+ */
+static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv,
+                                       struct sk_buff *skb,
+                                       struct iwl_tx_cmd *tx_cmd,
+                                       struct ieee80211_tx_info *info,
+                                       struct ieee80211_hdr *hdr,
+                                       u8 std_id)
+{
+       __le16 fc = hdr->frame_control;
+       __le32 tx_flags = tx_cmd->tx_flags;
+
+       tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+       if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+               tx_flags |= TX_CMD_FLG_ACK_MSK;
+               if (ieee80211_is_mgmt(fc))
+                       tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+               if (ieee80211_is_probe_resp(fc) &&
+                   !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+                       tx_flags |= TX_CMD_FLG_TSF_MSK;
+       } else {
+               tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+               tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+       }
+
+       if (ieee80211_is_back_req(fc))
+               tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
+
+       tx_cmd->sta_id = std_id;
+       if (ieee80211_has_morefrags(fc))
+               tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+       if (ieee80211_is_data_qos(fc)) {
+               u8 *qc = ieee80211_get_qos_ctl(hdr);
+               tx_cmd->tid_tspec = qc[0] & 0xf;
+               tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+       } else {
+               tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+       }
+
+       iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
+
+       tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+       if (ieee80211_is_mgmt(fc)) {
+               if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+                       tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+               else
+                       tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+       } else {
+               tx_cmd->timeout.pm_frame_timeout = 0;
+       }
+
+       tx_cmd->driver_txop = 0;
+       tx_cmd->tx_flags = tx_flags;
+       tx_cmd->next_frame_len = 0;
+}
+
+#define RTS_DFAULT_RETRY_LIMIT         60
+
+static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv,
+                             struct iwl_tx_cmd *tx_cmd,
+                             struct ieee80211_tx_info *info,
+                             __le16 fc)
+{
+       u32 rate_flags;
+       int rate_idx;
+       u8 rts_retry_limit;
+       u8 data_retry_limit;
+       u8 rate_plcp;
+
+       /* Set retry limit on DATA packets and Probe Responses */
+       if (ieee80211_is_probe_resp(fc))
+               data_retry_limit = 3;
+       else
+               data_retry_limit = IWL4965_DEFAULT_TX_RETRY;
+       tx_cmd->data_retry_limit = data_retry_limit;
+
+       /* Set retry limit on RTS packets */
+       rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
+       if (data_retry_limit < rts_retry_limit)
+               rts_retry_limit = data_retry_limit;
+       tx_cmd->rts_retry_limit = rts_retry_limit;
+
+       /* DATA packets will use the uCode station table for rate/antenna
+        * selection */
+       if (ieee80211_is_data(fc)) {
+               tx_cmd->initial_rate_index = 0;
+               tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
+               return;
+       }
+
+       /*
+        * If the current TX rate stored in mac80211 has the MCS bit set, it's
+        * not really a TX rate.  Thus, we use the lowest supported rate for
+        * this band.  Also use the lowest supported rate if the stored rate
+        * index is invalid.
+        */
+       rate_idx = info->control.rates[0].idx;
+       if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
+                       (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
+               rate_idx = rate_lowest_index(&priv->bands[info->band],
+                               info->control.sta);
+       /* For 5 GHz band, remap mac80211 rate indices into driver indices */
+       if (info->band == IEEE80211_BAND_5GHZ)
+               rate_idx += IWL_FIRST_OFDM_RATE;
+       /* Get PLCP rate for tx_cmd->rate_n_flags */
+       rate_plcp = iwlegacy_rates[rate_idx].plcp;
+       /* Zero out flags for this packet */
+       rate_flags = 0;
+
+       /* Set CCK flag as needed */
+       if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+               rate_flags |= RATE_MCS_CCK_MSK;
+
+       /* Set up antennas */
+       priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
+                                     priv->hw_params.valid_tx_ant);
+
+       rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
+
+       /* Set the rate in the TX cmd */
+       tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
+}
+
+static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
+                                     struct ieee80211_tx_info *info,
+                                     struct iwl_tx_cmd *tx_cmd,
+                                     struct sk_buff *skb_frag,
+                                     int sta_id)
+{
+       struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+       switch (keyconf->cipher) {
+       case WLAN_CIPHER_SUITE_CCMP:
+               tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+               memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+               if (info->flags & IEEE80211_TX_CTL_AMPDU)
+                       tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
+               IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
+               break;
+
+       case WLAN_CIPHER_SUITE_TKIP:
+               tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+               ieee80211_get_tkip_key(keyconf, skb_frag,
+                       IEEE80211_TKIP_P2_KEY, tx_cmd->key);
+               IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
+               break;
+
+       case WLAN_CIPHER_SUITE_WEP104:
+               tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+               /* fall through */
+       case WLAN_CIPHER_SUITE_WEP40:
+               tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
+                       (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
+
+               memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+
+               IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
+                            "with key %d\n", keyconf->keyidx);
+               break;
+
+       default:
+               IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
+               break;
+       }
+}
+
+/*
+ * start REPLY_TX command process
+ */
+int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_sta *sta = info->control.sta;
+       struct iwl_station_priv *sta_priv = NULL;
+       struct iwl_tx_queue *txq;
+       struct iwl_queue *q;
+       struct iwl_device_cmd *out_cmd;
+       struct iwl_cmd_meta *out_meta;
+       struct iwl_tx_cmd *tx_cmd;
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       int txq_id;
+       dma_addr_t phys_addr;
+       dma_addr_t txcmd_phys;
+       dma_addr_t scratch_phys;
+       u16 len, firstlen, secondlen;
+       u16 seq_number = 0;
+       __le16 fc;
+       u8 hdr_len;
+       u8 sta_id;
+       u8 wait_write_ptr = 0;
+       u8 tid = 0;
+       u8 *qc = NULL;
+       unsigned long flags;
+       bool is_agg = false;
+
+       if (info->control.vif)
+               ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif);
+
+       spin_lock_irqsave(&priv->lock, flags);
+       if (iwl_legacy_is_rfkill(priv)) {
+               IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
+               goto drop_unlock;
+       }
+
+       fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (ieee80211_is_auth(fc))
+               IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
+       else if (ieee80211_is_assoc_req(fc))
+               IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
+       else if (ieee80211_is_reassoc_req(fc))
+               IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
+#endif
+
+       hdr_len = ieee80211_hdrlen(fc);
+
+       /* Find index into station table for destination station */
+       sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);
+       if (sta_id == IWL_INVALID_STATION) {
+               IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
+                              hdr->addr1);
+               goto drop_unlock;
+       }
+
+       IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
+
+       if (sta)
+               sta_priv = (void *)sta->drv_priv;
+
+       if (sta_priv && sta_priv->asleep &&
+           (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
+               /*
+                * This sends an asynchronous command to the device,
+                * but we can rely on it being processed before the
+                * next frame is processed -- and the next frame to
+                * this station is the one that will consume this
+                * counter.
+                * For now set the counter to just 1 since we do not
+                * support uAPSD yet.
+                */
+               iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1);
+       }
+
+       /*
+        * Send this frame after DTIM -- there's a special queue
+        * reserved for this for contexts that support AP mode.
+        */
+       if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+               txq_id = ctx->mcast_queue;
+               /*
+                * The microcode will clear the more data
+                * bit in the last frame it transmits.
+                */
+               hdr->frame_control |=
+                       cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+       } else
+               txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+
+       /* irqs already disabled/saved above when locking priv->lock */
+       spin_lock(&priv->sta_lock);
+
+       if (ieee80211_is_data_qos(fc)) {
+               qc = ieee80211_get_qos_ctl(hdr);
+               tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+               if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
+                       spin_unlock(&priv->sta_lock);
+                       goto drop_unlock;
+               }
+               seq_number = priv->stations[sta_id].tid[tid].seq_number;
+               seq_number &= IEEE80211_SCTL_SEQ;
+               hdr->seq_ctrl = hdr->seq_ctrl &
+                               cpu_to_le16(IEEE80211_SCTL_FRAG);
+               hdr->seq_ctrl |= cpu_to_le16(seq_number);
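+               /*
+                * The sequence number occupies bits 4-15 of seq_ctrl,
+                * so adding 0x10 advances it by one for the next frame.
+                */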
+               seq_number += 0x10;
+               /* aggregation is on for this <sta,tid> */
+               if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+                   priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
+                       txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
+                       is_agg = true;
+               }
+       }
+
+       txq = &priv->txq[txq_id];
+       q = &txq->q;
+
+       if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) {
+               spin_unlock(&priv->sta_lock);
+               goto drop_unlock;
+       }
+
+       if (ieee80211_is_data_qos(fc)) {
+               priv->stations[sta_id].tid[tid].tfds_in_queue++;
+               if (!ieee80211_has_morefrags(fc))
+                       priv->stations[sta_id].tid[tid].seq_number = seq_number;
+       }
+
+       spin_unlock(&priv->sta_lock);
+
+       /* Set up driver data for this TFD */
+       memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
+       txq->txb[q->write_ptr].skb = skb;
+       txq->txb[q->write_ptr].ctx = ctx;
+
+       /* Set up first empty entry in queue's array of Tx/cmd buffers */
+       out_cmd = txq->cmd[q->write_ptr];
+       out_meta = &txq->meta[q->write_ptr];
+       tx_cmd = &out_cmd->cmd.tx;
+       memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+       memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
+
+       /*
+        * Set up the Tx-command (not MAC!) header.
+        * Store the chosen Tx queue and TFD index within the sequence field;
+        * after Tx, uCode's Tx response will return this value so driver can
+        * locate the frame within the tx queue and do post-tx processing.
+        */
+       out_cmd->hdr.cmd = REPLY_TX;
+       out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+                               INDEX_TO_SEQ(q->write_ptr)));
+
+       /* Copy MAC header from skb into command buffer */
+       memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+
+       /* Total # bytes to be transmitted */
+       len = (u16)skb->len;
+       tx_cmd->len = cpu_to_le16(len);
+
+       if (info->control.hw_key)
+               iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
+
+       /* TODO need this for burst mode later on */
+       iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
+       iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
+
+       iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc);
+
+       iwl_legacy_update_stats(priv, true, fc, len);
+       /*
+        * Use the first empty entry in this queue's command buffer array
+        * to contain the Tx command and MAC header concatenated together
+        * (payload data will be in another buffer).
+        * Size of this varies, due to varying MAC header length.
+        * If end is not dword aligned, we'll have 2 extra bytes at the end
+        * of the MAC header (device reads on dword boundaries).
+        * We'll tell device about this padding later.
+        */
+       len = sizeof(struct iwl_tx_cmd) +
+               sizeof(struct iwl_cmd_header) + hdr_len;
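+       /* Round up to the next dword boundary (the device reads dwords) */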
+       firstlen = (len + 3) & ~3;
+
+       /* Tell NIC about any 2-byte padding after MAC header */
+       if (firstlen != len)
+               tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+       /* Physical address of this Tx command's header (not MAC header!),
+        * within command buffer array. */
+       txcmd_phys = pci_map_single(priv->pci_dev,
+                                   &out_cmd->hdr, firstlen,
+                                   PCI_DMA_BIDIRECTIONAL);
+       dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+       dma_unmap_len_set(out_meta, len, firstlen);
+       /* Add buffer containing Tx command and MAC(!) header to TFD's
+        * first entry */
+       priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+                                                  txcmd_phys, firstlen, 1, 0);
+
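+       /*
+        * For fragmented frames, defer the write-pointer update so the
+        * remaining fragments can be queued first (wait_write_ptr below).
+        */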
+       if (!ieee80211_has_morefrags(hdr->frame_control)) {
+               txq->need_update = 1;
+       } else {
+               wait_write_ptr = 1;
+               txq->need_update = 0;
+       }
+
+       /* Set up TFD's 2nd entry to point directly to remainder of skb,
+        * if any (802.11 null frames have no payload). */
+       secondlen = skb->len - hdr_len;
+       if (secondlen > 0) {
+               phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
+                                          secondlen, PCI_DMA_TODEVICE);
+               priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+                                                          phys_addr, secondlen,
+                                                          0, 0);
+       }
+
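+       /*
+        * Physical address of the "scratch" field inside this Tx command;
+        * it is handed to the device via dram_lsb_ptr/dram_msb_ptr below.
+        */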
+       scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
+                               offsetof(struct iwl_tx_cmd, scratch);
+
+       /* take back ownership of DMA buffer to enable update */
+       pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
+                                   firstlen, PCI_DMA_BIDIRECTIONAL);
+       tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+       tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys);
+
+       IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
+                    le16_to_cpu(out_cmd->hdr.sequence));
+       IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+       iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
+       iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
+
+       /* Set up entry for this TFD in Tx byte-count array */
+       if (info->flags & IEEE80211_TX_CTL_AMPDU)
+               priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
+                                                    le16_to_cpu(tx_cmd->len));
+
+       pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
+                                      firstlen, PCI_DMA_BIDIRECTIONAL);
+
+       trace_iwlwifi_legacy_dev_tx(priv,
+                            &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+                            sizeof(struct iwl_tfd),
+                            &out_cmd->hdr, firstlen,
+                            skb->data + hdr_len, secondlen);
+
+       /* Tell device the write index *just past* this latest filled TFD */
+       q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
+       iwl_legacy_txq_update_write_ptr(priv, txq);
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       /*
+        * At this point the frame is "transmitted" successfully
+        * and we will get a TX status notification eventually;
+        * all that remains below is deciding whether the write
+        * pointer needs to be updated right away.
+        */
+
+       /*
+        * Avoid atomic ops if it isn't an associated client.
+        * Also, if this is a packet for aggregation, don't
+        * increase the counter because the ucode will stop
+        * aggregation queues when their respective station
+        * goes to sleep.
+        */
+       if (sta_priv && sta_priv->client && !is_agg)
+               atomic_inc(&sta_priv->pending_frames);
+
+       if ((iwl_legacy_queue_space(q) < q->high_mark) &&
+                       priv->mac80211_registered) {
+               if (wait_write_ptr) {
+                       spin_lock_irqsave(&priv->lock, flags);
+                       txq->need_update = 1;
+                       iwl_legacy_txq_update_write_ptr(priv, txq);
+                       spin_unlock_irqrestore(&priv->lock, flags);
+               } else {
+                       iwl_legacy_stop_queue(priv, txq);
+               }
+       }
+
+       return 0;
+
+drop_unlock:
+       spin_unlock_irqrestore(&priv->lock, flags);
+       return -1;
+}
+
+static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv,
+                                   struct iwl_dma_ptr *ptr, size_t size)
+{
+       ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
+                                      GFP_KERNEL);
+       if (!ptr->addr)
+               return -ENOMEM;
+       ptr->size = size;
+       return 0;
+}
+
+static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv,
+                                   struct iwl_dma_ptr *ptr)
+{
+       if (unlikely(!ptr->addr))
+               return;
+
+       dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
+       memset(ptr, 0, sizeof(*ptr));
+}
+
+/**
+ * iwl4965_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
+{
+       int txq_id;
+
+       /* Tx queues */
+       if (priv->txq) {
+               for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+                       if (txq_id == priv->cmd_queue)
+                               iwl_legacy_cmd_queue_free(priv);
+                       else
+                               iwl_legacy_tx_queue_free(priv, txq_id);
+       }
+       iwl4965_free_dma_ptr(priv, &priv->kw);
+
+       iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
+
+       /* free tx queue structure */
+       iwl_legacy_txq_mem(priv);
+}
+
+/**
+ * iwl4965_txq_ctx_alloc - allocate TX queue context
+ *
+ * Allocate all Tx DMA structures and initialize them.
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int iwl4965_txq_ctx_alloc(struct iwl_priv *priv)
+{
+       int ret;
+       int txq_id, slots_num;
+       unsigned long flags;
+
+       /* Free all tx/cmd queues and keep-warm buffer */
+       iwl4965_hw_txq_ctx_free(priv);
+
+       ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
+                               priv->hw_params.scd_bc_tbls_size);
+       if (ret) {
+               IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
+               goto error_bc_tbls;
+       }
+       /* Alloc keep-warm buffer */
+       ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
+       if (ret) {
+               IWL_ERR(priv, "Keep Warm allocation failed\n");
+               goto error_kw;
+       }
+
+       /* allocate tx queue structure */
+       ret = iwl_legacy_alloc_txq_mem(priv);
+       if (ret)
+               goto error;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* Turn off all Tx DMA fifos */
+       iwl4965_txq_set_sched(priv, 0);
+
+       /* Tell NIC where to find the "keep warm" buffer */
+       iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       /* Alloc and init all Tx queues, including the command queue (#4) */
+       for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+               slots_num = (txq_id == priv->cmd_queue) ?
+                                       TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+               ret = iwl_legacy_tx_queue_init(priv,
+                                       &priv->txq[txq_id], slots_num,
+                                      txq_id);
+               if (ret) {
+                       IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
+                       goto error;
+               }
+       }
+
+       return ret;
+
+ error:
+       iwl4965_hw_txq_ctx_free(priv);
+       iwl4965_free_dma_ptr(priv, &priv->kw);
+ error_kw:
+       iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
+ error_bc_tbls:
+       return ret;
+}
+
+void iwl4965_txq_ctx_reset(struct iwl_priv *priv)
+{
+       int txq_id, slots_num;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* Turn off all Tx DMA fifos */
+       iwl4965_txq_set_sched(priv, 0);
+
+       /* Tell NIC where to find the "keep warm" buffer */
+       iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       /* Alloc and init all Tx queues, including the command queue (#4) */
+       for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+               slots_num = txq_id == priv->cmd_queue ?
+                           TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+               iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
+                                               slots_num, txq_id);
+       }
+}
+
+/**
+ * iwl4965_txq_ctx_stop - Stop all Tx DMA channels
+ */
+void iwl4965_txq_ctx_stop(struct iwl_priv *priv)
+{
+       int ch, txq_id;
+       unsigned long flags;
+
+       /* Turn off all Tx DMA fifos */
+       spin_lock_irqsave(&priv->lock, flags);
+
+       iwl4965_txq_set_sched(priv, 0);
+
+       /* Stop each Tx DMA channel, and wait for it to be idle */
+       for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
+               iwl_legacy_write_direct32(priv,
+                               FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+               if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
+                                   FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
+                                   1000))
+                       IWL_ERR(priv, "Failing on timeout while stopping"
+                           " DMA channel %d [0x%08x]", ch,
+                           iwl_legacy_read_direct32(priv,
+                                       FH_TSSR_TX_STATUS_REG));
+       }
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       if (!priv->txq)
+               return;
+
+       /* Unmap DMA from host system and free skb's */
+       for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+               if (txq_id == priv->cmd_queue)
+                       iwl_legacy_cmd_queue_unmap(priv);
+               else
+                       iwl_legacy_tx_queue_unmap(priv, txq_id);
+}
+
+/*
+ * Find first available (lowest unused) Tx Queue, mark it "active".
+ * Called only when finding queue for aggregation.
+ * Should never return anything < 7, because they should already
+ * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
+ */
+static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
+{
+       int txq_id;
+
+       for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+               if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
+                       return txq_id;
+       return -1;
+}
+
+/**
+ * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
+ */
+static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
+                                           u16 txq_id)
+{
+       /* Simply stop the queue, but don't change any configuration;
+        * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
+       iwl_legacy_write_prph(priv,
+               IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
+               (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
+               (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
+
+/**
+ * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
+ */
+static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
+                                       u16 txq_id)
+{
+       u32 tbl_dw_addr;
+       u32 tbl_dw;
+       u16 scd_q2ratid;
+
+       scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+       tbl_dw_addr = priv->scd_base_addr +
+                       IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
+
+       tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr);
+
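+       /* Each dword of the translation table holds two 16-bit RA/TID
+        * entries; odd queues use the upper half, even queues the lower. */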
+       if (txq_id & 0x1)
+               tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+       else
+               tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
+
+       iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
+
+       return 0;
+}
+
+/**
+ * iwl4965_txq_agg_enable - Set up & enable aggregation for selected queue
+ *
+ * NOTE:  txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE,
+ *        i.e. it must be one of the higher queues used for aggregation
+ */
+static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
+                                 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
+{
+       unsigned long flags;
+       u16 ra_tid;
+       int ret;
+
+       if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
+           (IWL49_FIRST_AMPDU_QUEUE +
+               priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+               IWL_WARN(priv,
+                       "queue number out of range: %d, must be %d to %d\n",
+                       txq_id, IWL49_FIRST_AMPDU_QUEUE,
+                       IWL49_FIRST_AMPDU_QUEUE +
+                       priv->cfg->base_params->num_of_ampdu_queues - 1);
+               return -EINVAL;
+       }
+
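+       /* Combine station id and TID into the scheduler's RA/TID token */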
+       ra_tid = BUILD_RAxTID(sta_id, tid);
+
+       /* Modify device's station table to Tx this TID */
+       ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid);
+       if (ret)
+               return ret;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* Stop this Tx queue before configuring it */
+       iwl4965_tx_queue_stop_scheduler(priv, txq_id);
+
+       /* Map receiver-address / traffic-ID to this queue */
+       iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
+
+       /* Set this queue as a chain-building queue */
+       iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+       /* Place first TFD at index corresponding to start sequence number.
+        * Assumes that ssn_idx is valid (!= 0xFFF) */
+       priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+       priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+       iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
+
+       /* Set up Tx window size and frame limit for this queue */
+       iwl_legacy_write_targ_mem(priv,
+               priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
+               (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
+               IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+       iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
+               IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+               (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
+               & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+       iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+
+       /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
+       iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       return 0;
+}
+
+
+int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
+                       struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+       int sta_id;
+       int tx_fifo;
+       int txq_id;
+       int ret;
+       unsigned long flags;
+       struct iwl_tid_data *tid_data;
+
+       tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
+       if (unlikely(tx_fifo < 0))
+               return tx_fifo;
+
+       IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
+                       __func__, sta->addr, tid);
+
+       sta_id = iwl_legacy_sta_id(sta);
+       if (sta_id == IWL_INVALID_STATION) {
+               IWL_ERR(priv, "Start AGG on invalid station\n");
+               return -ENXIO;
+       }
+       if (unlikely(tid >= MAX_TID_COUNT))
+               return -EINVAL;
+
+       if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
+               IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
+               return -ENXIO;
+       }
+
+       txq_id = iwl4965_txq_ctx_activate_free(priv);
+       if (txq_id == -1) {
+               IWL_ERR(priv, "No free aggregation queue available\n");
+               return -ENXIO;
+       }
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       tid_data = &priv->stations[sta_id].tid[tid];
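+       /* Report the SSN mac80211 should use as the start of the BA window */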
+       *ssn = SEQ_TO_SN(tid_data->seq_number);
+       tid_data->agg.txq_id = txq_id;
+       iwl_legacy_set_swq_id(&priv->txq[txq_id],
+                               iwl4965_get_ac_from_tid(tid), txq_id);
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo,
+                                                 sta_id, tid, *ssn);
+       if (ret)
+               return ret;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       tid_data = &priv->stations[sta_id].tid[tid];
+       if (tid_data->tfds_in_queue == 0) {
+               IWL_DEBUG_HT(priv, "HW queue is empty\n");
+               tid_data->agg.state = IWL_AGG_ON;
+               ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+       } else {
+               IWL_DEBUG_HT(priv,
+                       "HW queue is NOT empty: %d packets in HW queue\n",
+                            tid_data->tfds_in_queue);
+               tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+       }
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+       return ret;
+}
+
+/**
+ * iwl4965_txq_agg_disable - deactivate a Tx aggregation queue
+ *
+ * txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE;
+ * priv->lock must be held by the caller.
+ */
+static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
+                                  u16 ssn_idx, u8 tx_fifo)
+{
+       if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
+           (IWL49_FIRST_AMPDU_QUEUE +
+               priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+               IWL_WARN(priv,
+                       "queue number out of range: %d, must be %d to %d\n",
+                       txq_id, IWL49_FIRST_AMPDU_QUEUE,
+                       IWL49_FIRST_AMPDU_QUEUE +
+                       priv->cfg->base_params->num_of_ampdu_queues - 1);
+               return -EINVAL;
+       }
+
+       iwl4965_tx_queue_stop_scheduler(priv, txq_id);
+
+       iwl_legacy_clear_bits_prph(priv,
+                       IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+       priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+       priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+       /* supposes that ssn_idx is valid (!= 0xFFF) */
+       iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
+
+       iwl_legacy_clear_bits_prph(priv,
+                        IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+       iwl_txq_ctx_deactivate(priv, txq_id);
+       iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
+
+       return 0;
+}
+
+int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta, u16 tid)
+{
+       int tx_fifo_id, txq_id, sta_id, ssn;
+       struct iwl_tid_data *tid_data;
+       int write_ptr, read_ptr;
+       unsigned long flags;
+
+       tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
+       if (unlikely(tx_fifo_id < 0))
+               return tx_fifo_id;
+
+       sta_id = iwl_legacy_sta_id(sta);
+
+       if (sta_id == IWL_INVALID_STATION) {
+               IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
+               return -ENXIO;
+       }
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+
+       tid_data = &priv->stations[sta_id].tid[tid];
+       ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
+       txq_id = tid_data->agg.txq_id;
+
+       switch (priv->stations[sta_id].tid[tid].agg.state) {
+       case IWL_EMPTYING_HW_QUEUE_ADDBA:
+               /*
+                * This can happen if the peer stops aggregation
+                * again before we've had a chance to drain the
+                * queue we selected previously, i.e. before the
+                * session was really started completely.
+                */
+               IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
+               goto turn_off;
+       case IWL_AGG_ON:
+               break;
+       default:
+               IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
+       }
+
+       write_ptr = priv->txq[txq_id].q.write_ptr;
+       read_ptr = priv->txq[txq_id].q.read_ptr;
+
+       /* The queue is not empty */
+       if (write_ptr != read_ptr) {
+               IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
+               priv->stations[sta_id].tid[tid].agg.state =
+                               IWL_EMPTYING_HW_QUEUE_DELBA;
+               spin_unlock_irqrestore(&priv->sta_lock, flags);
+               return 0;
+       }
+
+       IWL_DEBUG_HT(priv, "HW queue is empty\n");
+ turn_off:
+       priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
+
+       /* do not restore/save irqs */
+       spin_unlock(&priv->sta_lock);
+       spin_lock(&priv->lock);
+
+        * The only reason this call can fail is a queue number out of range,
+        * which can happen if the uCode is reloaded and all the station
+        * information is lost. If it is outside the range, there is no need
+        * to deactivate the uCode queue; just return "success" to allow
+        * mac80211 to clean up its own data.
+        */
+       iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+       return 0;
+}
+
+int iwl4965_txq_check_empty(struct iwl_priv *priv,
+                          int sta_id, u8 tid, int txq_id)
+{
+       struct iwl_queue *q = &priv->txq[txq_id].q;
+       u8 *addr = priv->stations[sta_id].sta.sta.addr;
+       struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
+       struct iwl_rxon_context *ctx;
+
+       ctx = &priv->contexts[priv->stations[sta_id].ctxid];
+
+       lockdep_assert_held(&priv->sta_lock);
+
+       switch (priv->stations[sta_id].tid[tid].agg.state) {
+       case IWL_EMPTYING_HW_QUEUE_DELBA:
+               /* We are reclaiming the last packet of the
+                * aggregated HW queue */
+               if ((txq_id  == tid_data->agg.txq_id) &&
+                   (q->read_ptr == q->write_ptr)) {
+                       u16 ssn = SEQ_TO_SN(tid_data->seq_number);
+                       int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid);
+                       IWL_DEBUG_HT(priv,
+                               "HW queue empty: continue DELBA flow\n");
+                       iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
+                       tid_data->agg.state = IWL_AGG_OFF;
+                       ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+               }
+               break;
+       case IWL_EMPTYING_HW_QUEUE_ADDBA:
+               /* We are reclaiming the last packet of the queue */
+               if (tid_data->tfds_in_queue == 0) {
+                       IWL_DEBUG_HT(priv,
+                               "HW queue empty: continue ADDBA flow\n");
+                       tid_data->agg.state = IWL_AGG_ON;
+                       ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+               }
+               break;
+       }
+
+       return 0;
+}
+
+static void iwl4965_non_agg_tx_status(struct iwl_priv *priv,
+                                    struct iwl_rxon_context *ctx,
+                                    const u8 *addr1)
+{
+       struct ieee80211_sta *sta;
+       struct iwl_station_priv *sta_priv;
+
+       rcu_read_lock();
+       sta = ieee80211_find_sta(ctx->vif, addr1);
+       if (sta) {
+               sta_priv = (void *)sta->drv_priv;
+               /* avoid atomic ops if this isn't a client */
+               if (sta_priv->client &&
+                   atomic_dec_return(&sta_priv->pending_frames) == 0)
+                       ieee80211_sta_block_awake(priv->hw, sta, false);
+       }
+       rcu_read_unlock();
+}
+
+static void
+iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
+                            bool is_agg)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
+
+       if (!is_agg)
+               iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
+
+       ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
+}
+
+int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
+{
+       struct iwl_tx_queue *txq = &priv->txq[txq_id];
+       struct iwl_queue *q = &txq->q;
+       struct iwl_tx_info *tx_info;
+       int nfreed = 0;
+       struct ieee80211_hdr *hdr;
+
+       if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) {
+               IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
+                         "is out of range [0-%d] %d %d.\n", txq_id,
+                         index, q->n_bd, q->write_ptr, q->read_ptr);
+               return 0;
+       }
+
+       for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
+            q->read_ptr != index;
+            q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+               tx_info = &txq->txb[txq->q.read_ptr];
+               iwl4965_tx_status(priv, tx_info,
+                                txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
+
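+               /* Only QoS data frames were counted in tfds_in_queue,
+                * so only those count toward the number freed here. */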
+               hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+               if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+                       nfreed++;
+               tx_info->skb = NULL;
+
+               priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+       }
+       return nfreed;
+}
+
+/**
+ * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
+ *
+ * Go through block-ack's bitmap of ACK'd frames, update driver's record of
+ * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo.
+ */
+static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
+                                struct iwl_ht_agg *agg,
+                                struct iwl_compressed_ba_resp *ba_resp)
+
+{
+       int i, sh, ack;
+       u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
+       u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+       int successes = 0;
+       struct ieee80211_tx_info *info;
+       u64 bitmap, sent_bitmap;
+
+       if (unlikely(!agg->wait_for_ba))  {
+               if (unlikely(ba_resp->bitmap))
+                       IWL_ERR(priv, "Received BA when not expected\n");
+               return -EINVAL;
+       }
+
+       /* Mark that the expected block-ack response arrived */
+       agg->wait_for_ba = 0;
+       IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx,
+                                                       ba_resp->seq_ctl);
+
+       /* Calculate shift to align block-ack bits with our Tx window bits */
+       sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
+       if (sh < 0) /* the index wrapped around (indices are modulo 256) */
+               sh += 0x100;
+
+       if (agg->frame_count > (64 - sh)) {
+               IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
+               return -1;
+       }
+
+       /* Shift the block-ack bitmap to line up with our Tx window */
+       bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
+
+       /* check for success or failure according to the
+        * transmitted bitmap and block-ack bitmap */
+       sent_bitmap = bitmap & agg->bitmap;
+
+       /* For each frame attempted in aggregation,
+        * update driver's record of tx frame's status. */
+       i = 0;
+       while (sent_bitmap) {
+               ack = sent_bitmap & 1ULL;
+               successes += ack;
+               IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
+                       ack ? "ACK" : "NACK", i,
+                       (agg->start_idx + i) & 0xff,
+                       agg->start_idx + i);
+               sent_bitmap >>= 1;
+               ++i;
+       }
+
+       IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
+                                  (unsigned long long)bitmap);
+
+       info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
+       memset(&info->status, 0, sizeof(info->status));
+       info->flags |= IEEE80211_TX_STAT_ACK;
+       info->flags |= IEEE80211_TX_STAT_AMPDU;
+       info->status.ampdu_ack_len = successes;
+       info->status.ampdu_len = agg->frame_count;
+       iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
+
+       return 0;
+}
+
+/**
+ * iwl4965_hwrate_to_tx_control - translate ucode response to mac80211
+ * tx status control values
+ */
+void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
+                                 struct ieee80211_tx_info *info)
+{
+       struct ieee80211_tx_rate *r = &info->control.rates[0];
+
+       info->antenna_sel_tx =
+               ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+       if (rate_n_flags & RATE_MCS_HT_MSK)
+               r->flags |= IEEE80211_TX_RC_MCS;
+       if (rate_n_flags & RATE_MCS_GF_MSK)
+               r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+       if (rate_n_flags & RATE_MCS_HT40_MSK)
+               r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+       if (rate_n_flags & RATE_MCS_DUP_MSK)
+               r->flags |= IEEE80211_TX_RC_DUP_DATA;
+       if (rate_n_flags & RATE_MCS_SGI_MSK)
+               r->flags |= IEEE80211_TX_RC_SHORT_GI;
+       r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
+}
+
+/**
+ * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
+ *
+ * Handles block-acknowledge notification from device, which reports success
+ * of frames sent via aggregation.
+ */
+void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
+                                          struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
+       struct iwl_tx_queue *txq = NULL;
+       struct iwl_ht_agg *agg;
+       int index;
+       int sta_id;
+       int tid;
+       unsigned long flags;
+
+       /* "flow" corresponds to Tx queue */
+       u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+
+       /* "ssn" is start of block-ack Tx window, corresponds to index
+        * (in Tx queue's circular buffer) of first TFD/frame in window */
+       u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
+
+       if (scd_flow >= priv->hw_params.max_txq_num) {
+               IWL_ERR(priv,
+                       "BUG_ON scd_flow is bigger than number of queues\n");
+               return;
+       }
+
+       txq = &priv->txq[scd_flow];
+       sta_id = ba_resp->sta_id;
+       tid = ba_resp->tid;
+       agg = &priv->stations[sta_id].tid[tid].agg;
+       if (unlikely(agg->txq_id != scd_flow)) {
+               /*
+                * FIXME: this is a uCode bug that needs to be addressed;
+                * log the information and return for now.
+                * Since it can happen very often, and in order not to
+                * fill the syslog, don't enable the logging by default.
+                */
+               IWL_DEBUG_TX_REPLY(priv,
+                       "BA scd_flow %d does not match txq_id %d\n",
+                       scd_flow, agg->txq_id);
+               return;
+       }
+
+       /* Find index just before block-ack window */
+       index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+
+       IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
+                          "sta_id = %d\n",
+                          agg->wait_for_ba,
+                          (u8 *) &ba_resp->sta_addr_lo32,
+                          ba_resp->sta_id);
+       IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
+                          "scd_flow = %d, scd_ssn = %d\n",
+                          ba_resp->tid,
+                          ba_resp->seq_ctl,
+                          (unsigned long long)le64_to_cpu(ba_resp->bitmap),
+                          ba_resp->scd_flow,
+                          ba_resp->scd_ssn);
+       IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
+                          agg->start_idx,
+                          (unsigned long long)agg->bitmap);
+
+       /* Update driver's record of ACK vs. not for each frame in window */
+       iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
+
+       /* Release all TFDs before the SSN, i.e. all TFDs in front of
+        * block-ack window (we assume that they've been successfully
+        * transmitted ... if not, it's too late anyway). */
+       if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
+               /* calculate mac80211 ampdu sw queue to wake */
+               int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
+               iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
+
+               if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) &&
+                   priv->mac80211_registered &&
+                   (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
+                       iwl_legacy_wake_queue(priv, txq);
+
+               iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow);
+       }
+
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+const char *iwl4965_get_tx_fail_reason(u32 status)
+{
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+
+       switch (status & TX_STATUS_MSK) {
+       case TX_STATUS_SUCCESS:
+               return "SUCCESS";
+       TX_STATUS_POSTPONE(DELAY);
+       TX_STATUS_POSTPONE(FEW_BYTES);
+       TX_STATUS_POSTPONE(QUIET_PERIOD);
+       TX_STATUS_POSTPONE(CALC_TTAK);
+       TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+       TX_STATUS_FAIL(SHORT_LIMIT);
+       TX_STATUS_FAIL(LONG_LIMIT);
+       TX_STATUS_FAIL(FIFO_UNDERRUN);
+       TX_STATUS_FAIL(DRAIN_FLOW);
+       TX_STATUS_FAIL(RFKILL_FLUSH);
+       TX_STATUS_FAIL(LIFE_EXPIRE);
+       TX_STATUS_FAIL(DEST_PS);
+       TX_STATUS_FAIL(HOST_ABORTED);
+       TX_STATUS_FAIL(BT_RETRY);
+       TX_STATUS_FAIL(STA_INVALID);
+       TX_STATUS_FAIL(FRAG_DROPPED);
+       TX_STATUS_FAIL(TID_DISABLE);
+       TX_STATUS_FAIL(FIFO_FLUSHED);
+       TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
+       TX_STATUS_FAIL(PASSIVE_NO_RX);
+       TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
+       }
+
+       return "UNKNOWN";
+
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
+}
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
new file mode 100644 (file)
index 0000000..001d148
--- /dev/null
@@ -0,0 +1,166 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-4965-hw.h"
+#include "iwl-4965.h"
+#include "iwl-4965-calib.h"
+
+#define IWL_AC_UNSET -1
+
+/**
+ * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ *   using sample data 100 bytes apart.  If these sample points are good,
+ *   it's a pretty good bet that everything between them is good, too.
+ */
+static int
+iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
+{
+       u32 val;
+       int ret = 0;
+       u32 errcnt = 0;
+       u32 i;
+
+       IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
+
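+       /* Illustrative: for a 40960-byte image this loop compares roughly
+        * 410 sample words (one u32 every 100 bytes) rather than all
+        * 10240 words, trading completeness for a much faster check. */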
+       for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
+               /* read data comes through single port, auto-incr addr */
+               /* NOTE: Use the debugless read so we don't flood kernel log
+                * if IWL_DL_IO is set */
+               iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+                       i + IWL4965_RTC_INST_LOWER_BOUND);
+               val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+               if (val != le32_to_cpu(*image)) {
+                       ret = -EIO;
+                       errcnt++;
+                       if (errcnt >= 3)
+                               break;
+               }
+       }
+
+       return ret;
+}
+
+/**
+ * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
+ *     looking at all data.
+ */
+static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
+                                u32 len)
+{
+       u32 val;
+       u32 save_len = len;
+       int ret = 0;
+       u32 errcnt;
+
+       IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
+
+       iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+                          IWL4965_RTC_INST_LOWER_BOUND);
+
+       errcnt = 0;
+       for (; len > 0; len -= sizeof(u32), image++) {
+               /* read data comes through single port, auto-incr addr */
+               /* NOTE: Use the debugless read so we don't flood kernel log
+                * if IWL_DL_IO is set */
+               val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+               if (val != le32_to_cpu(*image)) {
+                       IWL_ERR(priv, "uCode INST section is invalid at "
+                                 "offset 0x%x, is 0x%x, s/b 0x%x\n",
+                                 save_len - len, val, le32_to_cpu(*image));
+                       ret = -EIO;
+                       errcnt++;
+                       if (errcnt >= 20)
+                               break;
+               }
+       }
+
+       if (!errcnt)
+               IWL_DEBUG_INFO(priv,
+                   "ucode image in INSTRUCTION memory is good\n");
+
+       return ret;
+}
+
+/**
+ * iwl4965_verify_ucode - determine which instruction image is in SRAM,
+ *    and verify its contents
+ */
+int iwl4965_verify_ucode(struct iwl_priv *priv)
+{
+       __le32 *image;
+       u32 len;
+       int ret;
+
+       /* Try bootstrap */
+       image = (__le32 *)priv->ucode_boot.v_addr;
+       len = priv->ucode_boot.len;
+       ret = iwl4965_verify_inst_sparse(priv, image, len);
+       if (!ret) {
+               IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
+               return 0;
+       }
+
+       /* Try initialize */
+       image = (__le32 *)priv->ucode_init.v_addr;
+       len = priv->ucode_init.len;
+       ret = iwl4965_verify_inst_sparse(priv, image, len);
+       if (!ret) {
+               IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
+               return 0;
+       }
+
+       /* Try runtime/protocol */
+       image = (__le32 *)priv->ucode_code.v_addr;
+       len = priv->ucode_code.len;
+       ret = iwl4965_verify_inst_sparse(priv, image, len);
+       if (!ret) {
+               IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
+               return 0;
+       }
+
+       IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
+
+       /* Since nothing seems to match, show first several data entries in
+        * instruction SRAM, so maybe visual inspection will give a clue.
+        * Selection of bootstrap image (vs. other images) is arbitrary. */
+       image = (__le32 *)priv->ucode_boot.v_addr;
+       len = priv->ucode_boot.len;
+       ret = iwl4965_verify_inst_full(priv, image, len);
+
+       return ret;
+}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
new file mode 100644 (file)
index 0000000..f5433c7
--- /dev/null
@@ -0,0 +1,2188 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-4965-calib.h"
+#include "iwl-sta.h"
+#include "iwl-4965-led.h"
+#include "iwl-4965.h"
+#include "iwl-4965-debugfs.h"
+
+static int iwl4965_send_tx_power(struct iwl_priv *priv);
+static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
+
+/* Highest firmware API version supported */
+#define IWL4965_UCODE_API_MAX 2
+
+/* Lowest firmware API version supported */
+#define IWL4965_UCODE_API_MIN 2
+
+#define IWL4965_FW_PRE "iwlwifi-4965-"
+#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
+#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)
+
+/* check contents of special bootstrap uCode SRAM */
+static int iwl4965_verify_bsm(struct iwl_priv *priv)
+{
+       __le32 *image = priv->ucode_boot.v_addr;
+       u32 len = priv->ucode_boot.len;
+       u32 reg;
+       u32 val;
+
+       IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
+
+       /* verify BSM SRAM contents */
+       val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
+       for (reg = BSM_SRAM_LOWER_BOUND;
+            reg < BSM_SRAM_LOWER_BOUND + len;
+            reg += sizeof(u32), image++) {
+               val = iwl_legacy_read_prph(priv, reg);
+               if (val != le32_to_cpu(*image)) {
+                       IWL_ERR(priv, "BSM uCode verification failed at "
+                                 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
+                                 BSM_SRAM_LOWER_BOUND,
+                                 reg - BSM_SRAM_LOWER_BOUND, len,
+                                 val, le32_to_cpu(*image));
+                       return -EIO;
+               }
+       }
+
+       IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
+
+       return 0;
+}
+
+/**
+ * iwl4965_load_bsm - Load bootstrap instructions
+ *
+ * BSM operation:
+ *
+ * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
+ * in special SRAM that does not power down during RFKILL.  When powering back
+ * up after power-saving sleeps (or during initial uCode load), the BSM loads
+ * the bootstrap program into the on-board processor, and starts it.
+ *
+ * The bootstrap program loads (via DMA) instructions and data for a new
+ * program from host DRAM locations indicated by the host driver in the
+ * BSM_DRAM_* registers.  Once the new program is loaded, it starts
+ * automatically.
+ *
+ * When initializing the NIC, the host driver points the BSM to the
+ * "initialize" uCode image.  This uCode sets up some internal data, then
+ * notifies host via "initialize alive" that it is complete.
+ *
+ * The host then replaces the BSM_DRAM_* pointer values to point to the
+ * normal runtime uCode instructions and a backup uCode data cache buffer
+ * (filled initially with starting data values for the on-board processor),
+ * then triggers the "initialize" uCode to load and launch the runtime uCode,
+ * which begins normal operation.
+ *
+ * When doing a power-save shutdown, runtime uCode saves data SRAM into
+ * the backup data cache in DRAM before SRAM is powered down.
+ *
+ * When powering back up, the BSM loads the bootstrap program.  This reloads
+ * the runtime uCode instructions and the backup data cache into SRAM,
+ * and re-launches the runtime uCode from where it left off.
+ */
+static int iwl4965_load_bsm(struct iwl_priv *priv)
+{
+       __le32 *image = priv->ucode_boot.v_addr;
+       u32 len = priv->ucode_boot.len;
+       dma_addr_t pinst;
+       dma_addr_t pdata;
+       u32 inst_len;
+       u32 data_len;
+       int i;
+       u32 done;
+       u32 reg_offset;
+       int ret;
+
+       IWL_DEBUG_INFO(priv, "Begin load bsm\n");
+
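+       /* Driver-side sequence, as implemented below (summary):
+        *   1. point BSM_DRAM_{INST,DATA}_{PTR,BYTECOUNT}_REG at the
+        *      "initialize" image in host DRAM (physical addresses >> 4),
+        *   2. copy the bootstrap image into BSM SRAM word by word,
+        *   3. set BSM_WR_MEM_SRC/DST_REG and start the copy into
+        *      instruction SRAM via BSM_WR_CTRL_REG_BIT_START,
+        *   4. poll until the START bit clears, then enable future
+        *      auto-loads with BSM_WR_CTRL_REG_BIT_START_EN. */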
+       priv->ucode_type = UCODE_RT;
+
+       /* make sure bootstrap program is no larger than BSM's SRAM size */
+       if (len > IWL49_MAX_BSM_SIZE)
+               return -EINVAL;
+
+       /* Tell bootstrap uCode where to find the "Initialize" uCode
+        *   in host DRAM ... host DRAM physical address bits 35:4 for 4965.
+        * NOTE:  iwl_init_alive_start() will replace these values,
+        *        after the "initialize" uCode has run, to point to
+        *        runtime/protocol instructions and backup data cache.
+        */
+       pinst = priv->ucode_init.p_addr >> 4;
+       pdata = priv->ucode_init_data.p_addr >> 4;
+       inst_len = priv->ucode_init.len;
+       data_len = priv->ucode_init_data.len;
+
+       iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
+       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
+       iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+
+       /* Fill BSM memory with bootstrap instructions */
+       for (reg_offset = BSM_SRAM_LOWER_BOUND;
+            reg_offset < BSM_SRAM_LOWER_BOUND + len;
+            reg_offset += sizeof(u32), image++)
+               _iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image));
+
+       ret = iwl4965_verify_bsm(priv);
+       if (ret)
+               return ret;
+
+       /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
+       iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
+       iwl_legacy_write_prph(priv,
+                       BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
+       iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+
+       /* Load bootstrap code into instruction SRAM now,
+        *   to prepare to load "initialize" uCode */
+       iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+
+       /* Wait for load of bootstrap uCode to finish */
+       for (i = 0; i < 100; i++) {
+               done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
+               if (!(done & BSM_WR_CTRL_REG_BIT_START))
+                       break;
+               udelay(10);
+       }
+       if (i < 100)
+               IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
+       else {
+               IWL_ERR(priv, "BSM write did not complete!\n");
+               return -EIO;
+       }
+
+       /* Enable future boot loads whenever power management unit triggers it
+        *   (e.g. when powering back up after power-save shutdown) */
+       iwl_legacy_write_prph(priv,
+                       BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
+
+
+       return 0;
+}
+
+/**
+ * iwl4965_set_ucode_ptrs - Set uCode address location
+ *
+ * Tell initialization uCode where to find runtime uCode.
+ *
+ * BSM registers initially contain pointers to initialization uCode.
+ * We need to replace them to load runtime uCode inst and data,
+ * and to save runtime data when powering down.
+ */
+static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
+{
+       dma_addr_t pinst;
+       dma_addr_t pdata;
+       int ret = 0;
+
+       /* bits 35:4 for 4965 */
+       pinst = priv->ucode_code.p_addr >> 4;
+       pdata = priv->ucode_data_backup.p_addr >> 4;
+
+       /* Tell bootstrap uCode where to find image to load */
+       iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
+       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
+       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
+                                priv->ucode_data.len);
+
+       /* Inst byte count must be last to set up, bit 31 signals uCode
+        *   that all new ptr/size info is in place */
+       iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
+                                priv->ucode_code.len | BSM_DRAM_INST_LOAD);
+       IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
+
+       return ret;
+}
+
+/**
+ * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
+ *
+ * Called after REPLY_ALIVE notification received from "initialize" uCode.
+ *
+ * The 4965 "initialize" ALIVE reply contains calibration data for:
+ *   Voltage, temperature, and MIMO tx gain correction, now stored in priv
+ *   (3945 does not contain this data).
+ *
+ * Tell "initialize" uCode to go ahead and load the runtime uCode.
+ */
+static void iwl4965_init_alive_start(struct iwl_priv *priv)
+{
+       /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
+        * This is a paranoid check, because we would not have gotten the
+        * "initialize" alive if code weren't properly loaded.  */
+       if (iwl4965_verify_ucode(priv)) {
+               /* Runtime instruction load was bad;
+                * take it all the way back down so we can try again */
+               IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
+               goto restart;
+       }
+
+       /* Calculate temperature */
+       priv->temperature = iwl4965_hw_get_temperature(priv);
+
+       /* Send pointers to protocol/runtime uCode image ... init code will
+        * load and launch runtime uCode, which will send us another "Alive"
+        * notification. */
+       IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
+       if (iwl4965_set_ucode_ptrs(priv)) {
+               /* Runtime instruction load won't happen;
+                * take it all the way back down so we can try again */
+               IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
+               goto restart;
+       }
+       return;
+
+restart:
+       queue_work(priv->workqueue, &priv->restart);
+}
+
+static bool iw4965_is_ht40_channel(__le32 rxon_flags)
+{
+       int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
+                                   >> RXON_FLG_CHANNEL_MODE_POS;
+       return ((chan_mod == CHANNEL_MODE_PURE_40) ||
+                 (chan_mod == CHANNEL_MODE_MIXED));
+}
+
+static void iwl4965_nic_config(struct iwl_priv *priv)
+{
+       unsigned long flags;
+       u16 radio_cfg;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+
+       /* write radio config values to register */
+       if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
+               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+                           EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
+                           EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
+                           EEPROM_RF_CFG_DASH_MSK(radio_cfg));
+
+       /* set CSR_HW_CONFIG_REG for uCode use */
+       iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+                   CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+                   CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+
+       priv->calib_info = (struct iwl_eeprom_calib_info *)
+               iwl_legacy_eeprom_query_addr(priv,
+                               EEPROM_4965_CALIB_TXPOWER_OFFSET);
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
+ * Called after every association, but this runs only once!
+ *  ... once chain noise is calibrated the first time, it's good forever.  */
+static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
+{
+       struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
+
+       if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
+           iwl_legacy_is_any_associated(priv)) {
+               struct iwl_calib_diff_gain_cmd cmd;
+
+               /* clear data for chain noise calibration algorithm */
+               data->chain_noise_a = 0;
+               data->chain_noise_b = 0;
+               data->chain_noise_c = 0;
+               data->chain_signal_a = 0;
+               data->chain_signal_b = 0;
+               data->chain_signal_c = 0;
+               data->beacon_count = 0;
+
+               memset(&cmd, 0, sizeof(cmd));
+               cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
+               cmd.diff_gain_a = 0;
+               cmd.diff_gain_b = 0;
+               cmd.diff_gain_c = 0;
+               if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
+                                sizeof(cmd), &cmd))
+                       IWL_ERR(priv,
+                               "Could not send REPLY_PHY_CALIBRATION_CMD\n");
+               data->state = IWL_CHAIN_NOISE_ACCUMULATE;
+               IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
+       }
+}
+
+static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
+       .min_nrg_cck = 97,
+       .max_nrg_cck = 0, /* not used, set to 0 */
+
+       .auto_corr_min_ofdm = 85,
+       .auto_corr_min_ofdm_mrc = 170,
+       .auto_corr_min_ofdm_x1 = 105,
+       .auto_corr_min_ofdm_mrc_x1 = 220,
+
+       .auto_corr_max_ofdm = 120,
+       .auto_corr_max_ofdm_mrc = 210,
+       .auto_corr_max_ofdm_x1 = 140,
+       .auto_corr_max_ofdm_mrc_x1 = 270,
+
+       .auto_corr_min_cck = 125,
+       .auto_corr_max_cck = 200,
+       .auto_corr_min_cck_mrc = 200,
+       .auto_corr_max_cck_mrc = 400,
+
+       .nrg_th_cck = 100,
+       .nrg_th_ofdm = 100,
+
+       .barker_corr_th_min = 190,
+       .barker_corr_th_min_mrc = 390,
+       .nrg_th_cca = 62,
+};
+
+static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
+{
+       /* want Kelvin */
+       priv->hw_params.ct_kill_threshold =
+               CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
+}
+
+/**
+ * iwl4965_hw_set_hw_params
+ *
+ * Called when initializing driver
+ */
+static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
+{
+       if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+           priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
+               priv->cfg->base_params->num_of_queues =
+                       priv->cfg->mod_params->num_of_queues;
+
+       priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
+       priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
+       priv->hw_params.scd_bc_tbls_size =
+                       priv->cfg->base_params->num_of_queues *
+                       sizeof(struct iwl4965_scd_bc_tbl);
+       priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
+       priv->hw_params.max_stations = IWL4965_STATION_COUNT;
+       priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL4965_BROADCAST_ID;
+       priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
+       priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
+       priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
+       priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
+
+       priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
+
+       priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant);
+       priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant);
+       priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
+       priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+
+       iwl4965_set_ct_threshold(priv);
+
+       priv->hw_params.sens = &iwl4965_sensitivity;
+       priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS;
+
+       return 0;
+}
+
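+/*
+ * Rounded signed division: sets *res to num / denom rounded to the
+ * nearest integer (halves rounded away from zero), e.g. 7 / 2 -> 4 and
+ * -7 / 2 -> -4.  The return value is always 1.
+ */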
+static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
+{
+       s32 sign = 1;
+
+       if (num < 0) {
+               sign = -sign;
+               num = -num;
+       }
+       if (denom < 0) {
+               sign = -sign;
+               denom = -denom;
+       }
+       *res = 1;
+       *res = ((num * 2 + denom) / (denom * 2)) * sign;
+
+       return 1;
+}
+
+/**
+ * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
+ *
+ * Determines power supply voltage compensation for txpower calculations.
+ * Returns number of 1/2-dB steps to subtract from gain table index,
+ * to compensate for difference between power supply voltage during
+ * factory measurements, vs. current power supply voltage.
+ *
+ * Voltage indication is higher for lower voltage.
+ * Lower voltage requires more gain (lower gain table index).
+ */
+static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
+                                           s32 current_voltage)
+{
+       s32 comp = 0;
+
+       if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
+           (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
+               return 0;
+
+       iwl4965_math_div_round(current_voltage - eeprom_voltage,
+                              TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
+
+       if (current_voltage > eeprom_voltage)
+               comp *= 2;
+       if ((comp < -2) || (comp > 2))
+               comp = 0;
+
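+       /* Illustrative: if the current indication is one
+        * TX_POWER_IWL_VOLTAGE_CODES_PER_03V step above the factory value
+        * (i.e. the supply voltage is lower), comp is 1, doubled to 2:
+        * subtract two half-dB steps from the gain index for more gain.
+        * Anything outside [-2, 2] after doubling is treated as invalid
+        * and ignored (comp = 0). */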
+       return comp;
+}
+
+static s32 iwl4965_get_tx_atten_grp(u16 channel)
+{
+       if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
+           channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
+               return CALIB_CH_GROUP_5;
+
+       if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
+           channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
+               return CALIB_CH_GROUP_1;
+
+       if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
+           channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
+               return CALIB_CH_GROUP_2;
+
+       if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
+           channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
+               return CALIB_CH_GROUP_3;
+
+       if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
+           channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
+               return CALIB_CH_GROUP_4;
+
+       return -1;
+}
+
+static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
+{
+       s32 b = -1;
+
+       for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
+               if (priv->calib_info->band_info[b].ch_from == 0)
+                       continue;
+
+               if ((channel >= priv->calib_info->band_info[b].ch_from)
+                   && (channel <= priv->calib_info->band_info[b].ch_to))
+                       break;
+       }
+
+       return b;
+}
+
+static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
+{
+       s32 val;
+
+       if (x2 == x1)
+               return y1;
+       else {
+               iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
+               return val + y2;
+       }
+}
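+/*
+ * E.g. (illustrative numbers): x1 = 36, y1 = 100, x2 = 44, y2 = 110,
+ * x = 40 gives round(4 * -10 / 8) + 110 = -5 + 110 = 105, i.e. plain
+ * linear interpolation between the two factory sample points.
+ */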
+
+/**
+ * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
+ *
+ * Interpolates factory measurements from the two sample channels within a
+ * sub-band, to apply to channel of interest.  Interpolation is proportional to
+ * differences in channel frequencies, which is proportional to differences
+ * in channel number.
+ */
+static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
+                                   struct iwl_eeprom_calib_ch_info *chan_info)
+{
+       s32 s = -1;
+       u32 c;
+       u32 m;
+       const struct iwl_eeprom_calib_measure *m1;
+       const struct iwl_eeprom_calib_measure *m2;
+       struct iwl_eeprom_calib_measure *omeas;
+       u32 ch_i1;
+       u32 ch_i2;
+
+       s = iwl4965_get_sub_band(priv, channel);
+       if (s >= EEPROM_TX_POWER_BANDS) {
+               IWL_ERR(priv, "Tx Power can not find channel %d\n", channel);
+               return -1;
+       }
+
+       ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
+       ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
+       chan_info->ch_num = (u8) channel;
+
+       IWL_DEBUG_TXPOWER(priv, "channel %d subband %d factory cal ch %d & %d\n",
+                         channel, s, ch_i1, ch_i2);
+
+       for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
+               for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
+                       m1 = &(priv->calib_info->band_info[s].ch1.
+                              measurements[c][m]);
+                       m2 = &(priv->calib_info->band_info[s].ch2.
+                              measurements[c][m]);
+                       omeas = &(chan_info->measurements[c][m]);
+
+                       omeas->actual_pow =
+                           (u8) iwl4965_interpolate_value(channel, ch_i1,
+                                                          m1->actual_pow,
+                                                          ch_i2,
+                                                          m2->actual_pow);
+                       omeas->gain_idx =
+                           (u8) iwl4965_interpolate_value(channel, ch_i1,
+                                                          m1->gain_idx, ch_i2,
+                                                          m2->gain_idx);
+                       omeas->temperature =
+                           (u8) iwl4965_interpolate_value(channel, ch_i1,
+                                                          m1->temperature,
+                                                          ch_i2,
+                                                          m2->temperature);
+                       omeas->pa_det =
+                           (s8) iwl4965_interpolate_value(channel, ch_i1,
+                                                          m1->pa_det, ch_i2,
+                                                          m2->pa_det);
+
+                       IWL_DEBUG_TXPOWER(priv,
+                               "chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
+                               m1->actual_pow, m2->actual_pow, omeas->actual_pow);
+                       IWL_DEBUG_TXPOWER(priv,
+                               "chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
+                               m1->gain_idx, m2->gain_idx, omeas->gain_idx);
+                       IWL_DEBUG_TXPOWER(priv,
+                               "chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
+                               m1->pa_det, m2->pa_det, omeas->pa_det);
+                       IWL_DEBUG_TXPOWER(priv,
+                               "chain %d meas %d  T1=%d  T2=%d  T=%d\n", c, m,
+                               m1->temperature, m2->temperature,
+                               omeas->temperature);
+               }
+       }
+
+       return 0;
+}
+
+/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
+ * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
+static s32 back_off_table[] = {
+       10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
+       10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
+       10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
+       10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
+       10                      /* CCK */
+};
+
+/* Thermal compensation values for txpower for various frequency ranges ...
+ *   ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
+static struct iwl4965_txpower_comp_entry {
+       s32 degrees_per_05db_a;
+       s32 degrees_per_05db_a_denom;
+} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
+       {9, 2},                 /* group 0 5.2, ch  34-43 */
+       {4, 1},                 /* group 1 5.2, ch  44-70 */
+       {4, 1},                 /* group 2 5.2, ch  71-124 */
+       {4, 1},                 /* group 3 5.2, ch 125-200 */
+       {3, 1}                  /* group 4 2.4, ch   all */
+};
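+/*
+ * E.g. group 0 ({9, 2}) means 4.5 degrees C per half-dB step: a chain
+ * measured 9 degrees C above its factory calibration temperature gets a
+ * rounded compensation of 2 half-dB steps in iwl4965_fill_txpower_tbl().
+ */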
+
+static s32 get_min_power_index(s32 rate_power_index, u32 band)
+{
+       if (!band) {
+               if ((rate_power_index & 7) <= 4)
+                       return MIN_TX_GAIN_INDEX_52GHZ_EXT;
+       }
+       return MIN_TX_GAIN_INDEX;
+}
+
+struct gain_entry {
+       u8 dsp;
+       u8 radio;
+};
+
+static const struct gain_entry gain_table[2][108] = {
+       /* 5.2GHz power gain index table */
+       {
+        {123, 0x3F},           /* highest txpower */
+        {117, 0x3F},
+        {110, 0x3F},
+        {104, 0x3F},
+        {98, 0x3F},
+        {110, 0x3E},
+        {104, 0x3E},
+        {98, 0x3E},
+        {110, 0x3D},
+        {104, 0x3D},
+        {98, 0x3D},
+        {110, 0x3C},
+        {104, 0x3C},
+        {98, 0x3C},
+        {110, 0x3B},
+        {104, 0x3B},
+        {98, 0x3B},
+        {110, 0x3A},
+        {104, 0x3A},
+        {98, 0x3A},
+        {110, 0x39},
+        {104, 0x39},
+        {98, 0x39},
+        {110, 0x38},
+        {104, 0x38},
+        {98, 0x38},
+        {110, 0x37},
+        {104, 0x37},
+        {98, 0x37},
+        {110, 0x36},
+        {104, 0x36},
+        {98, 0x36},
+        {110, 0x35},
+        {104, 0x35},
+        {98, 0x35},
+        {110, 0x34},
+        {104, 0x34},
+        {98, 0x34},
+        {110, 0x33},
+        {104, 0x33},
+        {98, 0x33},
+        {110, 0x32},
+        {104, 0x32},
+        {98, 0x32},
+        {110, 0x31},
+        {104, 0x31},
+        {98, 0x31},
+        {110, 0x30},
+        {104, 0x30},
+        {98, 0x30},
+        {110, 0x25},
+        {104, 0x25},
+        {98, 0x25},
+        {110, 0x24},
+        {104, 0x24},
+        {98, 0x24},
+        {110, 0x23},
+        {104, 0x23},
+        {98, 0x23},
+        {110, 0x22},
+        {104, 0x18},
+        {98, 0x18},
+        {110, 0x17},
+        {104, 0x17},
+        {98, 0x17},
+        {110, 0x16},
+        {104, 0x16},
+        {98, 0x16},
+        {110, 0x15},
+        {104, 0x15},
+        {98, 0x15},
+        {110, 0x14},
+        {104, 0x14},
+        {98, 0x14},
+        {110, 0x13},
+        {104, 0x13},
+        {98, 0x13},
+        {110, 0x12},
+        {104, 0x08},
+        {98, 0x08},
+        {110, 0x07},
+        {104, 0x07},
+        {98, 0x07},
+        {110, 0x06},
+        {104, 0x06},
+        {98, 0x06},
+        {110, 0x05},
+        {104, 0x05},
+        {98, 0x05},
+        {110, 0x04},
+        {104, 0x04},
+        {98, 0x04},
+        {110, 0x03},
+        {104, 0x03},
+        {98, 0x03},
+        {110, 0x02},
+        {104, 0x02},
+        {98, 0x02},
+        {110, 0x01},
+        {104, 0x01},
+        {98, 0x01},
+        {110, 0x00},
+        {104, 0x00},
+        {98, 0x00},
+        {93, 0x00},
+        {88, 0x00},
+        {83, 0x00},
+        {78, 0x00},
+        },
+       /* 2.4GHz power gain index table */
+       {
+        {110, 0x3f},           /* highest txpower */
+        {104, 0x3f},
+        {98, 0x3f},
+        {110, 0x3e},
+        {104, 0x3e},
+        {98, 0x3e},
+        {110, 0x3d},
+        {104, 0x3d},
+        {98, 0x3d},
+        {110, 0x3c},
+        {104, 0x3c},
+        {98, 0x3c},
+        {110, 0x3b},
+        {104, 0x3b},
+        {98, 0x3b},
+        {110, 0x3a},
+        {104, 0x3a},
+        {98, 0x3a},
+        {110, 0x39},
+        {104, 0x39},
+        {98, 0x39},
+        {110, 0x38},
+        {104, 0x38},
+        {98, 0x38},
+        {110, 0x37},
+        {104, 0x37},
+        {98, 0x37},
+        {110, 0x36},
+        {104, 0x36},
+        {98, 0x36},
+        {110, 0x35},
+        {104, 0x35},
+        {98, 0x35},
+        {110, 0x34},
+        {104, 0x34},
+        {98, 0x34},
+        {110, 0x33},
+        {104, 0x33},
+        {98, 0x33},
+        {110, 0x32},
+        {104, 0x32},
+        {98, 0x32},
+        {110, 0x31},
+        {104, 0x31},
+        {98, 0x31},
+        {110, 0x30},
+        {104, 0x30},
+        {98, 0x30},
+        {110, 0x6},
+        {104, 0x6},
+        {98, 0x6},
+        {110, 0x5},
+        {104, 0x5},
+        {98, 0x5},
+        {110, 0x4},
+        {104, 0x4},
+        {98, 0x4},
+        {110, 0x3},
+        {104, 0x3},
+        {98, 0x3},
+        {110, 0x2},
+        {104, 0x2},
+        {98, 0x2},
+        {110, 0x1},
+        {104, 0x1},
+        {98, 0x1},
+        {110, 0x0},
+        {104, 0x0},
+        {98, 0x0},
+        {97, 0},
+        {96, 0},
+        {95, 0},
+        {94, 0},
+        {93, 0},
+        {92, 0},
+        {91, 0},
+        {90, 0},
+        {89, 0},
+        {88, 0},
+        {87, 0},
+        {86, 0},
+        {85, 0},
+        {84, 0},
+        {83, 0},
+        {82, 0},
+        {81, 0},
+        {80, 0},
+        {79, 0},
+        {78, 0},
+        {77, 0},
+        {76, 0},
+        {75, 0},
+        {74, 0},
+        {73, 0},
+        {72, 0},
+        {71, 0},
+        {70, 0},
+        {69, 0},
+        {68, 0},
+        {67, 0},
+        {66, 0},
+        {65, 0},
+        {64, 0},
+        {63, 0},
+        {62, 0},
+        {61, 0},
+        {60, 0},
+        {59, 0},
+        }
+};
+
+static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
+                                   u8 is_ht40, u8 ctrl_chan_high,
+                                   struct iwl4965_tx_power_db *tx_power_tbl)
+{
+       u8 saturation_power;
+       s32 target_power;
+       s32 user_target_power;
+       s32 power_limit;
+       s32 current_temp;
+       s32 reg_limit;
+       s32 current_regulatory;
+       s32 txatten_grp = CALIB_CH_GROUP_MAX;
+       int i;
+       int c;
+       const struct iwl_channel_info *ch_info = NULL;
+       struct iwl_eeprom_calib_ch_info ch_eeprom_info;
+       const struct iwl_eeprom_calib_measure *measurement;
+       s16 voltage;
+       s32 init_voltage;
+       s32 voltage_compensation;
+       s32 degrees_per_05db_num;
+       s32 degrees_per_05db_denom;
+       s32 factory_temp;
+       s32 temperature_comp[2];
+       s32 factory_gain_index[2];
+       s32 factory_actual_pwr[2];
+       s32 power_index;
+
+       /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
+        *   are used for indexing into txpower table) */
+       user_target_power = 2 * priv->tx_power_user_lmt;
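+       /* e.g. a 14 dBm user limit becomes a target of 28 half-dBm units */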
+
+       /* Get current (RXON) channel, band, width */
+       IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
+                         is_ht40);
+
+       ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel);
+
+       if (!iwl_legacy_is_channel_valid(ch_info))
+               return -EINVAL;
+
+       /* get txatten group, used to select 1) thermal txpower adjustment
+        *   and 2) mimo txpower balance between Tx chains. */
+       txatten_grp = iwl4965_get_tx_atten_grp(channel);
+       if (txatten_grp < 0) {
+               IWL_ERR(priv, "Can't find txatten group for channel %d.\n",
+                         channel);
+               return -EINVAL;
+       }
+
+       IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n",
+                         channel, txatten_grp);
+
+       if (is_ht40) {
+               if (ctrl_chan_high)
+                       channel -= 2;
+               else
+                       channel += 2;
+       }
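+       /* Channels are spaced 5 MHz apart, so the center of a 40 MHz
+        * channel sits 2 channel numbers (10 MHz) from the 20 MHz control
+        * channel; the adjustment above selects that center channel. */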
+
+       /* hardware txpower limits ...
+        * saturation (clipping distortion) txpowers are in half-dBm */
+       if (band)
+               saturation_power = priv->calib_info->saturation_power24;
+       else
+               saturation_power = priv->calib_info->saturation_power52;
+
+       if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
+           saturation_power > IWL_TX_POWER_SATURATION_MAX) {
+               if (band)
+                       saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
+               else
+                       saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
+       }
+
+       /* regulatory txpower limits ... reg_limit values are in half-dBm,
+        *   max_power_avg values are in dBm, convert * 2 */
+       if (is_ht40)
+               reg_limit = ch_info->ht40_max_power_avg * 2;
+       else
+               reg_limit = ch_info->max_power_avg * 2;
+
+       if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
+           (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
+               if (band)
+                       reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
+               else
+                       reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
+       }
+
+       /* Interpolate txpower calibration values for this channel,
+        *   based on factory calibration tests on spaced channels. */
+       iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
+
+       /* calculate tx gain adjustment based on power supply voltage */
+       voltage = le16_to_cpu(priv->calib_info->voltage);
+       init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
+       voltage_compensation =
+           iwl4965_get_voltage_compensation(voltage, init_voltage);
+
+       IWL_DEBUG_TXPOWER(priv, "curr volt %d eeprom volt %d volt comp %d\n",
+                         init_voltage,
+                         voltage, voltage_compensation);
+
+       /* get current temperature (Celsius) */
+       current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
+       current_temp = min(current_temp, IWL_TX_POWER_TEMPERATURE_MAX);
+       current_temp = KELVIN_TO_CELSIUS(current_temp);
+
+       /* select thermal txpower adjustment params, based on channel group
+        *   (same frequency group used for mimo txatten adjustment) */
+       degrees_per_05db_num =
+           tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
+       degrees_per_05db_denom =
+           tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
+
+       /* get per-chain txpower values from factory measurements */
+       for (c = 0; c < 2; c++) {
+               measurement = &ch_eeprom_info.measurements[c][1];
+
+               /* txgain adjustment (in half-dB steps) based on difference
+                *   between factory and current temperature */
+               factory_temp = measurement->temperature;
+               iwl4965_math_div_round((current_temp - factory_temp) *
+                                      degrees_per_05db_denom,
+                                      degrees_per_05db_num,
+                                      &temperature_comp[c]);
+
+               factory_gain_index[c] = measurement->gain_idx;
+               factory_actual_pwr[c] = measurement->actual_pow;
+
+               IWL_DEBUG_TXPOWER(priv, "chain = %d\n", c);
+               IWL_DEBUG_TXPOWER(priv, "fctry tmp %d, "
+                                 "curr tmp %d, comp %d steps\n",
+                                 factory_temp, current_temp,
+                                 temperature_comp[c]);
+
+               IWL_DEBUG_TXPOWER(priv, "fctry idx %d, fctry pwr %d\n",
+                                 factory_gain_index[c],
+                                 factory_actual_pwr[c]);
+       }
+
+       /* for each of 33 bit-rates (including 1 for CCK) */
+       for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
+               u8 is_mimo_rate;
+               union iwl4965_tx_power_dual_stream tx_power;
+
+               /* for mimo, reduce each chain's txpower by half
+                * (3dB, 6 steps), so total output power is regulatory
+                * compliant. */
+               if (i & 0x8) {
+                       current_regulatory = reg_limit -
+                           IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
+                       is_mimo_rate = 1;
+               } else {
+                       current_regulatory = reg_limit;
+                       is_mimo_rate = 0;
+               }
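+               /* Illustrative: with reg_limit = 30 (15 dBm), MIMO entries
+                * are computed against 30 - 6 = 24 half-dBm (12 dBm per
+                * chain), so both chains combined stay near the 15 dBm
+                * regulatory limit. */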
+
+               /* find txpower limit, either hardware or regulatory */
+               power_limit = saturation_power - back_off_table[i];
+               if (power_limit > current_regulatory)
+                       power_limit = current_regulatory;
+
+               /* reduce user's txpower request if necessary
+                * for this rate on this channel */
+               target_power = user_target_power;
+               if (target_power > power_limit)
+                       target_power = power_limit;
+
+               IWL_DEBUG_TXPOWER(priv, "rate %d sat %d reg %d usr %d tgt %d\n",
+                                 i, saturation_power - back_off_table[i],
+                                 current_regulatory, user_target_power,
+                                 target_power);
+
+               /* for each of 2 Tx chains (radio transmitters) */
+               for (c = 0; c < 2; c++) {
+                       s32 atten_value;
+
+                       if (is_mimo_rate)
+                               atten_value =
+                                   (s32)le32_to_cpu(priv->card_alive_init.
+                                   tx_atten[txatten_grp][c]);
+                       else
+                               atten_value = 0;
+
+                       /* calculate index; higher index means lower txpower */
+                       power_index = (u8) (factory_gain_index[c] -
+                                           (target_power -
+                                            factory_actual_pwr[c]) -
+                                           temperature_comp[c] -
+                                           voltage_compensation +
+                                           atten_value);
+
+/*                     IWL_DEBUG_TXPOWER(priv, "calculated txpower index %d\n",
+                                               power_index); */
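+                       /* Illustrative: factory gain index 70 calibrated at
+                        * 30 half-dBm; a 24 half-dBm target with zero temp,
+                        * voltage and mimo terms gives 70 - (24 - 30) = 76,
+                        * i.e. 6 index steps higher = 3 dB less output. */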
+
+                       if (power_index < get_min_power_index(i, band))
+                               power_index = get_min_power_index(i, band);
+
+                       /* adjust 5 GHz index to support negative indexes */
+                       if (!band)
+                               power_index += 9;
+
+                       /* CCK, rate 32, reduce txpower for CCK */
+                       if (i == POWER_TABLE_CCK_ENTRY)
+                               power_index +=
+                                   IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
+
+                       /* stay within the table! */
+                       if (power_index > 107) {
+                               IWL_WARN(priv, "txpower index %d > 107\n",
+                                           power_index);
+                               power_index = 107;
+                       }
+                       if (power_index < 0) {
+                               IWL_WARN(priv, "txpower index %d < 0\n",
+                                           power_index);
+                               power_index = 0;
+                       }
+
+                       /* fill txpower command for this rate/chain */
+                       tx_power.s.radio_tx_gain[c] =
+                               gain_table[band][power_index].radio;
+                       tx_power.s.dsp_predis_atten[c] =
+                               gain_table[band][power_index].dsp;
+
+                       IWL_DEBUG_TXPOWER(priv, "chain %d mimo %d index %d "
+                                         "gain 0x%02x dsp %d\n",
+                                         c, atten_value, power_index,
+                                       tx_power.s.radio_tx_gain[c],
+                                       tx_power.s.dsp_predis_atten[c]);
+               } /* for each chain */
+
+               tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
+
+       } /* for each rate */
+
+       return 0;
+}
+
+/**
+ * iwl4965_send_tx_power - Configure the TXPOWER level user limit
+ *
+ * Uses the active RXON for channel, band, and characteristics (ht40, high)
+ * The power limit is taken from priv->tx_power_user_lmt.
+ */
+static int iwl4965_send_tx_power(struct iwl_priv *priv)
+{
+       struct iwl4965_txpowertable_cmd cmd = { 0 };
+       int ret;
+       u8 band = 0;
+       bool is_ht40 = false;
+       u8 ctrl_chan_high = 0;
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+       if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
+                     "TX Power requested while scanning!\n"))
+               return -EAGAIN;
+
+       band = priv->band == IEEE80211_BAND_2GHZ;
+
+       is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
+
+       if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
+               ctrl_chan_high = 1;
+
+       cmd.band = band;
+       cmd.channel = ctx->active.channel;
+
+       ret = iwl4965_fill_txpower_tbl(priv, band,
+                               le16_to_cpu(ctx->active.channel),
+                               is_ht40, ctrl_chan_high, &cmd.tx_power);
+       if (ret)
+               goto out;
+
+       ret = iwl_legacy_send_cmd_pdu(priv,
+                        REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
+
+out:
+       return ret;
+}
+
+static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
+                                  struct iwl_rxon_context *ctx)
+{
+       int ret = 0;
+       struct iwl4965_rxon_assoc_cmd rxon_assoc;
+       const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
+       const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
+
+       if ((rxon1->flags == rxon2->flags) &&
+           (rxon1->filter_flags == rxon2->filter_flags) &&
+           (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
+           (rxon1->ofdm_ht_single_stream_basic_rates ==
+            rxon2->ofdm_ht_single_stream_basic_rates) &&
+           (rxon1->ofdm_ht_dual_stream_basic_rates ==
+            rxon2->ofdm_ht_dual_stream_basic_rates) &&
+           (rxon1->rx_chain == rxon2->rx_chain) &&
+           (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
+               IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC.  Not resending.\n");
+               return 0;
+       }
+
+       rxon_assoc.flags = ctx->staging.flags;
+       rxon_assoc.filter_flags = ctx->staging.filter_flags;
+       rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
+       rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
+       rxon_assoc.reserved = 0;
+       rxon_assoc.ofdm_ht_single_stream_basic_rates =
+           ctx->staging.ofdm_ht_single_stream_basic_rates;
+       rxon_assoc.ofdm_ht_dual_stream_basic_rates =
+           ctx->staging.ofdm_ht_dual_stream_basic_rates;
+       rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
+
+       ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
+                                    sizeof(rxon_assoc), &rxon_assoc, NULL);
+
+       return ret;
+}
+
+static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+       /* cast away the const for active_rxon in this function */
+       struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active;
+       int ret;
+       bool new_assoc =
+               !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
+
+       if (!iwl_legacy_is_alive(priv))
+               return -EBUSY;
+
+       if (!ctx->is_active)
+               return 0;
+
+       /* always get timestamp with Rx frame */
+       ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
+
+       ret = iwl_legacy_check_rxon_cmd(priv, ctx);
+       if (ret) {
+               IWL_ERR(priv, "Invalid RXON configuration.  Not committing.\n");
+               return -EINVAL;
+       }
+
+       /*
+        * A commit_rxon request has been received; abort any previous
+        * channel switch that is still in progress.
+        */
+       if (priv->switch_rxon.switch_in_progress &&
+           (priv->switch_rxon.channel != ctx->staging.channel)) {
+               IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
+                     le16_to_cpu(priv->switch_rxon.channel));
+               iwl_legacy_chswitch_done(priv, false);
+       }
+
+       /* If we don't need to send a full RXON, we can use
+        * iwl_rxon_assoc_cmd which is used to reconfigure filter
+        * and other flags for the current radio configuration. */
+       if (!iwl_legacy_full_rxon_required(priv, ctx)) {
+               ret = iwl_legacy_send_rxon_assoc(priv, ctx);
+               if (ret) {
+                       IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
+                       return ret;
+               }
+
+               memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+               iwl_legacy_print_rx_config_cmd(priv, ctx);
+               return 0;
+       }
+
+       /* If we are currently associated and the new config also wants the
+        * association bit set, we must first clear RXON_FILTER_ASSOC_MSK
+        * from the active configuration before applying the new config */
+       if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) {
+               IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
+               active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+
+               ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
+                                      sizeof(struct iwl_legacy_rxon_cmd),
+                                      active_rxon);
+
+               /* If the mask clearing failed then we set
+                * active_rxon back to what it was previously */
+               if (ret) {
+                       active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
+                       IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
+                       return ret;
+               }
+               iwl_legacy_clear_ucode_stations(priv, ctx);
+               iwl_legacy_restore_stations(priv, ctx);
+               ret = iwl4965_restore_default_wep_keys(priv, ctx);
+               if (ret) {
+                       IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
+                       return ret;
+               }
+       }
+
+       IWL_DEBUG_INFO(priv, "Sending RXON\n"
+                      "* with%s RXON_FILTER_ASSOC_MSK\n"
+                      "* channel = %d\n"
+                      "* bssid = %pM\n",
+                      (new_assoc ? "" : "out"),
+                      le16_to_cpu(ctx->staging.channel),
+                      ctx->staging.bssid_addr);
+
+       iwl_legacy_set_rxon_hwcrypto(priv, ctx,
+                               !priv->cfg->mod_params->sw_crypto);
+
+       /* Apply the new configuration
+        * RXON unassoc clears the station table in uCode so restoration of
+        * stations is needed after it (the RXON command) completes
+        */
+       if (!new_assoc) {
+               ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
+                             sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
+               if (ret) {
+                       IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
+                       return ret;
+               }
+               IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
+               memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+               iwl_legacy_clear_ucode_stations(priv, ctx);
+               iwl_legacy_restore_stations(priv, ctx);
+               ret = iwl4965_restore_default_wep_keys(priv, ctx);
+               if (ret) {
+                       IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
+                       return ret;
+               }
+       }
+       if (new_assoc) {
+               priv->start_calib = 0;
+               /* Apply the new configuration.
+                * RXON with the assoc bit set doesn't clear the station
+                * table in uCode, so no station restore is needed here.
+                */
+               ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
+                             sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
+               if (ret) {
+                       IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
+                       return ret;
+               }
+               memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+       }
+       iwl_legacy_print_rx_config_cmd(priv, ctx);
+
+       iwl4965_init_sensitivity(priv);
+
+       /* If we issue a new RXON command which required a tune then we must
+        * send a new TXPOWER command or we won't be able to Tx any frames */
+       ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
+       if (ret) {
+               IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
+                                    struct ieee80211_channel_switch *ch_switch)
+{
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       int rc;
+       u8 band = 0;
+       bool is_ht40 = false;
+       u8 ctrl_chan_high = 0;
+       struct iwl4965_channel_switch_cmd cmd;
+       const struct iwl_channel_info *ch_info;
+       u32 switch_time_in_usec, ucode_switch_time;
+       u16 ch;
+       u32 tsf_low;
+       u8 switch_count;
+       u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
+       struct ieee80211_vif *vif = ctx->vif;
+       band = priv->band == IEEE80211_BAND_2GHZ;
+
+       is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
+
+       if (is_ht40 &&
+           (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
+               ctrl_chan_high = 1;
+
+       cmd.band = band;
+       cmd.expect_beacon = 0;
+       ch = ch_switch->channel->hw_value;
+       cmd.channel = cpu_to_le16(ch);
+       cmd.rxon_flags = ctx->staging.flags;
+       cmd.rxon_filter_flags = ctx->staging.filter_flags;
+       switch_count = ch_switch->count;
+       tsf_low = ch_switch->timestamp & 0x0ffffffff;
+       /*
+        * calculate the ucode channel switch time
+        * adding TSF as one of the factor for when to switch
+        */
+       if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
+               if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
+                   beacon_interval)) {
+                       switch_count -= (priv->ucode_beacon_time -
+                               tsf_low) / beacon_interval;
+               } else
+                       switch_count = 0;
+       }
+       if (switch_count <= 1)
+               cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+       else {
+               switch_time_in_usec =
+                       vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+               ucode_switch_time = iwl_legacy_usecs_to_beacons(priv,
+                                                        switch_time_in_usec,
+                                                        beacon_interval);
+               cmd.switch_time = iwl_legacy_add_beacon_time(priv,
+                                                     priv->ucode_beacon_time,
+                                                     ucode_switch_time,
+                                                     beacon_interval);
+       }
+       IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
+                     cmd.switch_time);
+       ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch);
+       if (ch_info)
+               cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info);
+       else {
+               IWL_ERR(priv, "invalid channel switch from %u to %u\n",
+                       ctx->active.channel, ch);
+               return -EFAULT;
+       }
+
+       rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40,
+                                     ctrl_chan_high, &cmd.tx_power);
+       if (rc) {
+               IWL_DEBUG_11H(priv, "error:%d  fill txpower_tbl\n", rc);
+               return rc;
+       }
+
+       priv->switch_rxon.channel = cmd.channel;
+       priv->switch_rxon.switch_in_progress = true;
+
+       return iwl_legacy_send_cmd_pdu(priv,
+                        REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
+}
+
+/**
+ * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
+                                           struct iwl_tx_queue *txq,
+                                           u16 byte_cnt)
+{
+       struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
+       int txq_id = txq->q.id;
+       int write_ptr = txq->q.write_ptr;
+       int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
+       __le16 bc_ent;
+
+       WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
+
+       bc_ent = cpu_to_le16(len & 0xFFF);
+       /* Set up byte count within first 256 entries */
+       scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
+       /* If within first 64 entries, duplicate at end */
+       if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+               scd_bc_tbl[txq_id].
+                       tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
+}
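+
+/*
+ * Illustrative sketch only -- not part of the driver, and the function name
+ * below is made up.  It shows why the byte-count table mirrors its first
+ * TFD_QUEUE_SIZE_BC_DUP entries after entry TFD_QUEUE_SIZE_MAX (64 and 256
+ * per the comments above): a reader can fetch a window of up to 64 entries
+ * starting anywhere in the ring without a wrap check.
+ */
+static u32 __maybe_unused iwl4965_bc_window_sum_sketch(const __le16 *tfd_offset,
+                                                       int start, int n)
+{
+       u32 sum = 0;
+       int i;
+
+       /* assumes n <= TFD_QUEUE_SIZE_BC_DUP, so no (start + i) % 256 needed */
+       for (i = 0; i < n; i++)
+               sum += le16_to_cpu(tfd_offset[start + i]) & 0xFFF;
+       return sum;
+}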
+
+/**
+ * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
+ * @priv: driver private data; the reading comes from the uCode statistics
+ *
+ * A return of <0 indicates bogus data in the statistics
+ */
+static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
+{
+       s32 temperature;
+       s32 vt;
+       s32 R1, R2, R3;
+       u32 R4;
+
+       if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
+           (priv->_4965.statistics.flag &
+                       STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
+               IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
+               R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
+               R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
+               R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
+               R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
+       } else {
+               IWL_DEBUG_TEMP(priv, "Running temperature calibration\n");
+               R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
+               R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
+               R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
+               R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
+       }
+
+       /*
+        * Temperature is only 23 bits, so sign extend out to 32.
+        *
+        * NOTE If we haven't received a statistics notification yet
+        * with an updated temperature, use R4 provided to us in the
+        * "initialize" ALIVE response.
+        */
+       if (!test_bit(STATUS_TEMPERATURE, &priv->status))
+               vt = sign_extend32(R4, 23);
+       else
+               vt = sign_extend32(le32_to_cpu(priv->_4965.statistics.
+                                general.common.temperature), 23);
+
+       IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
+
+       if (R3 == R1) {
+               IWL_ERR(priv, "Calibration conflict R1 == R3\n");
+               return -1;
+       }
+
+       /* Calculate temperature in Kelvin, adjusted by 97%.
+        * Add an offset to center the adjustment around 0 degrees Celsius. */
+       temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
+       temperature /= (R3 - R1);
+       temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
+
+       IWL_DEBUG_TEMP(priv, "Calibrated temperature: %dK, %dC\n",
+                       temperature, KELVIN_TO_CELSIUS(temperature));
+
+       return temperature;
+}
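+
+/*
+ * Worked example of the calibration above, with purely illustrative numbers
+ * (not real EEPROM or uCode data): with R1 = 100, R2 = 200, R3 = 300 and a
+ * sign-extended reading vt = 260,
+ *
+ *     temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2) / (R3 - R1)
+ *                 = TEMPERATURE_CALIB_A_VAL * 60 / 200
+ *
+ * which is then scaled to 97% and shifted by TEMPERATURE_CALIB_KELVIN_OFFSET
+ * to give the Kelvin value returned to the caller.  The R3 == R1 check above
+ * exists precisely to avoid a division by zero in this step.
+ */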
+
+/* Adjust Tx power only if the temperature change exceeds this threshold. */
+#define IWL_TEMPERATURE_THRESHOLD   3
+
+/**
+ * iwl4965_is_temp_calib_needed - determines if new calibration is needed
+ *
+ * If the temperature has changed sufficiently, then a recalibration
+ * is needed.
+ *
+ * Assumes the caller will replace priv->last_temperature once the
+ * calibration has executed.
+ */
+static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
+{
+       int temp_diff;
+
+       if (!test_bit(STATUS_STATISTICS, &priv->status)) {
+               IWL_DEBUG_TEMP(priv, "Temperature not updated -- no statistics.\n");
+               return 0;
+       }
+
+       temp_diff = priv->temperature - priv->last_temperature;
+
+       /* get absolute value */
+       if (temp_diff < 0) {
+               IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
+               temp_diff = -temp_diff;
+       } else if (temp_diff == 0)
+               IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
+       else
+               IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);
+
+       if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
+               IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
+               return 0;
+       }
+
+       IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");
+
+       return 1;
+}
+
+static void iwl4965_temperature_calib(struct iwl_priv *priv)
+{
+       s32 temp;
+
+       temp = iwl4965_hw_get_temperature(priv);
+       if (temp < 0)
+               return;
+
+       if (priv->temperature != temp) {
+               if (priv->temperature)
+                       IWL_DEBUG_TEMP(priv, "Temperature changed "
+                                      "from %dC to %dC\n",
+                                      KELVIN_TO_CELSIUS(priv->temperature),
+                                      KELVIN_TO_CELSIUS(temp));
+               else
+                       IWL_DEBUG_TEMP(priv, "Temperature "
+                                      "initialized to %dC\n",
+                                      KELVIN_TO_CELSIUS(temp));
+       }
+
+       priv->temperature = temp;
+       set_bit(STATUS_TEMPERATURE, &priv->status);
+
+       if (!priv->disable_tx_power_cal &&
+            unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
+            iwl4965_is_temp_calib_needed(priv))
+               queue_work(priv->workqueue, &priv->txpower_work);
+}
+
+static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
+{
+       switch (cmd_id) {
+       case REPLY_RXON:
+               return (u16) sizeof(struct iwl4965_rxon_cmd);
+       default:
+               return len;
+       }
+}
+
+static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
+                                                               u8 *data)
+{
+       struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
+       addsta->mode = cmd->mode;
+       memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
+       memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
+       addsta->station_flags = cmd->station_flags;
+       addsta->station_flags_msk = cmd->station_flags_msk;
+       addsta->tid_disable_tx = cmd->tid_disable_tx;
+       addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
+       addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
+       addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
+       addsta->sleep_tx_count = cmd->sleep_tx_count;
+       addsta->reserved1 = cpu_to_le16(0);
+       addsta->reserved2 = cpu_to_le16(0);
+
+       return (u16)sizeof(struct iwl4965_addsta_cmd);
+}
+
+static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
+{
+       return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
+}
+
+/**
+ * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
+ */
+static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
+                                     struct iwl_ht_agg *agg,
+                                     struct iwl4965_tx_resp *tx_resp,
+                                     int txq_id, u16 start_idx)
+{
+       u16 status;
+       struct agg_tx_status *frame_status = tx_resp->u.agg_status;
+       struct ieee80211_tx_info *info = NULL;
+       struct ieee80211_hdr *hdr = NULL;
+       u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
+       int i, sh, idx;
+       u16 seq;
+       if (agg->wait_for_ba)
+               IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
+
+       agg->frame_count = tx_resp->frame_count;
+       agg->start_idx = start_idx;
+       agg->rate_n_flags = rate_n_flags;
+       agg->bitmap = 0;
+
+       /* num frames attempted by Tx command */
+       if (agg->frame_count == 1) {
+               /* Only one frame was attempted; no block-ack will arrive */
+               status = le16_to_cpu(frame_status[0].status);
+               idx = start_idx;
+
+               IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
+                                  agg->frame_count, agg->start_idx, idx);
+
+               info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
+               info->status.rates[0].count = tx_resp->failure_frame + 1;
+               info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+               info->flags |= iwl4965_tx_status_to_mac80211(status);
+               iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info);
+
+               IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
+                                   status & 0xff, tx_resp->failure_frame);
+               IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
+
+               agg->wait_for_ba = 0;
+       } else {
+               /* Two or more frames were attempted; expect block-ack */
+               u64 bitmap = 0;
+               int start = agg->start_idx;
+
+               /* Construct bit-map of pending frames within Tx window */
+               for (i = 0; i < agg->frame_count; i++) {
+                       u16 sc;
+                       status = le16_to_cpu(frame_status[i].status);
+                       seq  = le16_to_cpu(frame_status[i].sequence);
+                       idx = SEQ_TO_INDEX(seq);
+                       txq_id = SEQ_TO_QUEUE(seq);
+
+                       if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
+                                     AGG_TX_STATE_ABORT_MSK))
+                               continue;
+
+                       IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
+                                          agg->frame_count, txq_id, idx);
+
+                       hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx);
+                       if (!hdr) {
+                               IWL_ERR(priv,
+                                       "BUG_ON idx doesn't point to valid skb"
+                                       " idx=%d, txq_id=%d\n", idx, txq_id);
+                               return -1;
+                       }
+
+                       sc = le16_to_cpu(hdr->seq_ctrl);
+                       if (idx != (SEQ_TO_SN(sc) & 0xff)) {
+                               IWL_ERR(priv,
+                                       "BUG_ON idx doesn't match seq control"
+                                       " idx=%d, seq_idx=%d, seq=%d\n",
+                                       idx, SEQ_TO_SN(sc), hdr->seq_ctrl);
+                               return -1;
+                       }
+
+                       IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
+                                          i, idx, SEQ_TO_SN(sc));
+
+                       sh = idx - start;
+                       if (sh > 64) {
+                               sh = (start - idx) + 0xff;
+                               bitmap = bitmap << sh;
+                               sh = 0;
+                               start = idx;
+                       } else if (sh < -64)
+                               sh  = 0xff - (start - idx);
+                       else if (sh < 0) {
+                               sh = start - idx;
+                               start = idx;
+                               bitmap = bitmap << sh;
+                               sh = 0;
+                       }
+                       bitmap |= 1ULL << sh;
+                       IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
+                                          start, (unsigned long long)bitmap);
+               }
+
+               agg->bitmap = bitmap;
+               agg->start_idx = start;
+               IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
+                                  agg->frame_count, agg->start_idx,
+                                  (unsigned long long)agg->bitmap);
+
+               if (bitmap)
+                       agg->wait_for_ba = 1;
+       }
+       return 0;
+}
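+
+/*
+ * Illustrative sketch only -- not driver code, and the function name is made
+ * up.  The loop above builds a 64-bit "pending frames" bitmap in which bit N
+ * stands for the frame whose queue index is N slots after agg->start_idx
+ * (modulo the 256-entry queue).  A simplified model, ignoring the re-basing
+ * the real loop performs when indices arrive out of order:
+ */
+static u64 __maybe_unused iwl4965_agg_bitmap_sketch(const u16 *idx, int count,
+                                                    u16 start)
+{
+       u64 bitmap = 0;
+       int i;
+
+       for (i = 0; i < count; i++) {
+               int sh = (idx[i] - start) & 0xff;       /* offset in the ring */
+
+               if (sh < 64)                            /* 64-frame BA window */
+                       bitmap |= 1ULL << sh;
+       }
+       return bitmap;
+}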
+
+static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr)
+{
+       int i;
+       int start = 0;
+       int ret = IWL_INVALID_STATION;
+       unsigned long flags;
+
+       if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
+               start = IWL_STA_ID;
+
+       if (is_broadcast_ether_addr(addr))
+               return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       for (i = start; i < priv->hw_params.max_stations; i++)
+               if (priv->stations[i].used &&
+                   (!compare_ether_addr(priv->stations[i].sta.sta.addr,
+                                        addr))) {
+                       ret = i;
+                       goto out;
+               }
+
+       IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
+                             addr, priv->num_stations);
+
+ out:
+       /*
+        * It is possible that more commands interacting with stations
+        * arrive before we have completed processing the addition of the
+        * station.
+        */
+       if (ret != IWL_INVALID_STATION &&
+           (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
+            ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
+             (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
+               IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
+                       ret);
+               ret = IWL_INVALID_STATION;
+       }
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+       return ret;
+}
+
+static int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
+{
+       if (priv->iw_mode == NL80211_IFTYPE_STATION) {
+               return IWL_AP_ID;
+       } else {
+               u8 *da = ieee80211_get_DA(hdr);
+               return iwl4965_find_station(priv, da);
+       }
+}
+
+/**
+ * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
+ */
+static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
+                               struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+       int txq_id = SEQ_TO_QUEUE(sequence);
+       int index = SEQ_TO_INDEX(sequence);
+       struct iwl_tx_queue *txq = &priv->txq[txq_id];
+       struct ieee80211_hdr *hdr;
+       struct ieee80211_tx_info *info;
+       struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+       u32  status = le32_to_cpu(tx_resp->u.status);
+       int uninitialized_var(tid);
+       int sta_id;
+       int freed;
+       u8 *qc = NULL;
+       unsigned long flags;
+
+       if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
+               IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
+                         "is out of range [0-%d] %d %d\n", txq_id,
+                         index, txq->q.n_bd, txq->q.write_ptr,
+                         txq->q.read_ptr);
+               return;
+       }
+
+       txq->time_stamp = jiffies;
+       info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
+       memset(&info->status, 0, sizeof(info->status));
+
+       hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index);
+       if (ieee80211_is_data_qos(hdr->frame_control)) {
+               qc = ieee80211_get_qos_ctl(hdr);
+               tid = qc[0] & 0xf;
+       }
+
+       sta_id = iwl4965_get_ra_sta_id(priv, hdr);
+       if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
+               IWL_ERR(priv, "Station not known\n");
+               return;
+       }
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       if (txq->sched_retry) {
+               const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
+               struct iwl_ht_agg *agg = NULL;
+               WARN_ON(!qc);
+
+               agg = &priv->stations[sta_id].tid[tid].agg;
+
+               iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
+
+               /* check if BAR is needed */
+               if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status))
+                       info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+
+               if (txq->q.read_ptr != (scd_ssn & 0xff)) {
+                       index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff,
+                                                               txq->q.n_bd);
+                       IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
+                                          "%d index %d\n", scd_ssn, index);
+                       freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
+                       if (qc)
+                               iwl4965_free_tfds_in_queue(priv, sta_id,
+                                                      tid, freed);
+
+                       if (priv->mac80211_registered &&
+                           (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)
+                                && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
+                               iwl_legacy_wake_queue(priv, txq);
+               }
+       } else {
+               info->status.rates[0].count = tx_resp->failure_frame + 1;
+               info->flags |= iwl4965_tx_status_to_mac80211(status);
+               iwl4965_hwrate_to_tx_control(priv,
+                                       le32_to_cpu(tx_resp->rate_n_flags),
+                                       info);
+
+               IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
+                                  "rate_n_flags 0x%x retries %d\n",
+                                  txq_id,
+                                  iwl4965_get_tx_fail_reason(status), status,
+                                  le32_to_cpu(tx_resp->rate_n_flags),
+                                  tx_resp->failure_frame);
+
+               freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
+               if (qc && likely(sta_id != IWL_INVALID_STATION))
+                       iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
+               else if (sta_id == IWL_INVALID_STATION)
+                       IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
+
+               if (priv->mac80211_registered &&
+                   (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark))
+                       iwl_legacy_wake_queue(priv, txq);
+       }
+       if (qc && likely(sta_id != IWL_INVALID_STATION))
+               iwl4965_txq_check_empty(priv, sta_id, tid, txq_id);
+
+       iwl4965_check_abort_status(priv, tx_resp->frame_count, status);
+
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+}
+
+static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
+                                   struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw;
+       u8 rate __maybe_unused =
+               iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+       IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
+               "tsf:0x%.8x%.8x rate:%d\n",
+               le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
+               beacon->beacon_notify_hdr.failure_frame,
+               le32_to_cpu(beacon->ibss_mgr_status),
+               le32_to_cpu(beacon->high_tsf),
+               le32_to_cpu(beacon->low_tsf), rate);
+
+       priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+}
+
+/* Set up 4965-specific Rx frame reply handlers */
+static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
+{
+       /* Legacy Rx frames */
+       priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
+       /* Tx response */
+       priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
+       priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
+}
+
+static struct iwl_hcmd_ops iwl4965_hcmd = {
+       .rxon_assoc = iwl4965_send_rxon_assoc,
+       .commit_rxon = iwl4965_commit_rxon,
+       .set_rxon_chain = iwl4965_set_rxon_chain,
+};
+
+static void iwl4965_post_scan(struct iwl_priv *priv)
+{
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+       /*
+        * Since setting the RXON may have been deferred while
+        * performing the scan, fire one off if needed
+        */
+       if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+               iwl_legacy_commit_rxon(priv, ctx);
+}
+
+static void iwl4965_post_associate(struct iwl_priv *priv)
+{
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       struct ieee80211_vif *vif = ctx->vif;
+       struct ieee80211_conf *conf = NULL;
+       int ret = 0;
+
+       if (!vif || !priv->is_open)
+               return;
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       iwl_legacy_scan_cancel_timeout(priv, 200);
+
+       conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
+
+       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       iwl_legacy_commit_rxon(priv, ctx);
+
+       ret = iwl_legacy_send_rxon_timing(priv, ctx);
+       if (ret)
+               IWL_WARN(priv, "RXON timing failed - "
+                           "Attempting to continue.\n");
+
+       ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+
+       iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
+
+       if (priv->cfg->ops->hcmd->set_rxon_chain)
+               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+
+       ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
+
+       IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
+                       vif->bss_conf.aid, vif->bss_conf.beacon_int);
+
+       if (vif->bss_conf.use_short_preamble)
+               ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+       else
+               ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+       if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+               if (vif->bss_conf.use_short_slot)
+                       ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+       }
+
+       iwl_legacy_commit_rxon(priv, ctx);
+
+       IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
+                       vif->bss_conf.aid, ctx->active.bssid_addr);
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               iwl4965_send_beacon_cmd(priv);
+               break;
+       default:
+               IWL_ERR(priv, "%s Should not be called in %d mode\n",
+                         __func__, vif->type);
+               break;
+       }
+
+       /* The chain noise calibration will enable PM upon completion.
+        * If chain noise has already been run, then we need to enable
+        * power management here. */
+       if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
+               iwl_legacy_power_update_mode(priv, false);
+
+       /* Enable Rx differential gain and sensitivity calibrations */
+       iwl4965_chain_noise_reset(priv);
+       priv->start_calib = 1;
+}
+
+static void iwl4965_config_ap(struct iwl_priv *priv)
+{
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       struct ieee80211_vif *vif = ctx->vif;
+       int ret = 0;
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       /* The following should be done only at AP bring up */
+       if (!iwl_legacy_is_associated_ctx(ctx)) {
+
+               /* RXON - unassoc (to set timing command) */
+               ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+               iwl_legacy_commit_rxon(priv, ctx);
+
+               /* RXON Timing */
+               ret = iwl_legacy_send_rxon_timing(priv, ctx);
+               if (ret)
+                       IWL_WARN(priv, "RXON timing failed - "
+                                       "Attempting to continue.\n");
+
+               /* AP has all antennas */
+               priv->chain_noise_data.active_chains =
+                       priv->hw_params.valid_rx_ant;
+               iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
+               if (priv->cfg->ops->hcmd->set_rxon_chain)
+                       priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+
+               ctx->staging.assoc_id = 0;
+
+               if (vif->bss_conf.use_short_preamble)
+                       ctx->staging.flags |=
+                               RXON_FLG_SHORT_PREAMBLE_MSK;
+               else
+                       ctx->staging.flags &=
+                               ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+               if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+                       if (vif->bss_conf.use_short_slot)
+                               ctx->staging.flags |=
+                                       RXON_FLG_SHORT_SLOT_MSK;
+                       else
+                               ctx->staging.flags &=
+                                       ~RXON_FLG_SHORT_SLOT_MSK;
+               }
+               /* need to send beacon cmd before committing assoc RXON! */
+               iwl4965_send_beacon_cmd(priv);
+               /* restore RXON assoc */
+               ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+               iwl_legacy_commit_rxon(priv, ctx);
+       }
+       iwl4965_send_beacon_cmd(priv);
+}
+
+static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
+       .get_hcmd_size = iwl4965_get_hcmd_size,
+       .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
+       .request_scan = iwl4965_request_scan,
+       .post_scan = iwl4965_post_scan,
+};
+
+static struct iwl_lib_ops iwl4965_lib = {
+       .set_hw_params = iwl4965_hw_set_hw_params,
+       .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
+       .txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd,
+       .txq_free_tfd = iwl4965_hw_txq_free_tfd,
+       .txq_init = iwl4965_hw_tx_queue_init,
+       .rx_handler_setup = iwl4965_rx_handler_setup,
+       .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
+       .init_alive_start = iwl4965_init_alive_start,
+       .load_ucode = iwl4965_load_bsm,
+       .dump_nic_event_log = iwl4965_dump_nic_event_log,
+       .dump_nic_error_log = iwl4965_dump_nic_error_log,
+       .dump_fh = iwl4965_dump_fh,
+       .set_channel_switch = iwl4965_hw_channel_switch,
+       .apm_ops = {
+               .init = iwl_legacy_apm_init,
+               .config = iwl4965_nic_config,
+       },
+       .eeprom_ops = {
+               .regulatory_bands = {
+                       EEPROM_REGULATORY_BAND_1_CHANNELS,
+                       EEPROM_REGULATORY_BAND_2_CHANNELS,
+                       EEPROM_REGULATORY_BAND_3_CHANNELS,
+                       EEPROM_REGULATORY_BAND_4_CHANNELS,
+                       EEPROM_REGULATORY_BAND_5_CHANNELS,
+                       EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
+                       EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
+               },
+               .acquire_semaphore = iwl4965_eeprom_acquire_semaphore,
+               .release_semaphore = iwl4965_eeprom_release_semaphore,
+       },
+       .send_tx_power  = iwl4965_send_tx_power,
+       .update_chain_flags = iwl4965_update_chain_flags,
+       .temp_ops = {
+               .temperature = iwl4965_temperature_calib,
+       },
+       .debugfs_ops = {
+               .rx_stats_read = iwl4965_ucode_rx_stats_read,
+               .tx_stats_read = iwl4965_ucode_tx_stats_read,
+               .general_stats_read = iwl4965_ucode_general_stats_read,
+       },
+       .check_plcp_health = iwl4965_good_plcp_health,
+};
+
+static const struct iwl_legacy_ops iwl4965_legacy_ops = {
+       .post_associate = iwl4965_post_associate,
+       .config_ap = iwl4965_config_ap,
+       .manage_ibss_station = iwl4965_manage_ibss_station,
+       .update_bcast_stations = iwl4965_update_bcast_stations,
+};
+
+struct ieee80211_ops iwl4965_hw_ops = {
+       .tx = iwl4965_mac_tx,
+       .start = iwl4965_mac_start,
+       .stop = iwl4965_mac_stop,
+       .add_interface = iwl_legacy_mac_add_interface,
+       .remove_interface = iwl_legacy_mac_remove_interface,
+       .change_interface = iwl_legacy_mac_change_interface,
+       .config = iwl_legacy_mac_config,
+       .configure_filter = iwl4965_configure_filter,
+       .set_key = iwl4965_mac_set_key,
+       .update_tkip_key = iwl4965_mac_update_tkip_key,
+       .conf_tx = iwl_legacy_mac_conf_tx,
+       .reset_tsf = iwl_legacy_mac_reset_tsf,
+       .bss_info_changed = iwl_legacy_mac_bss_info_changed,
+       .ampdu_action = iwl4965_mac_ampdu_action,
+       .hw_scan = iwl_legacy_mac_hw_scan,
+       .sta_add = iwl4965_mac_sta_add,
+       .sta_remove = iwl_legacy_mac_sta_remove,
+       .channel_switch = iwl4965_mac_channel_switch,
+       .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
+};
+
+static const struct iwl_ops iwl4965_ops = {
+       .lib = &iwl4965_lib,
+       .hcmd = &iwl4965_hcmd,
+       .utils = &iwl4965_hcmd_utils,
+       .led = &iwl4965_led_ops,
+       .legacy = &iwl4965_legacy_ops,
+       .ieee80211_ops = &iwl4965_hw_ops,
+};
+
+static struct iwl_base_params iwl4965_base_params = {
+       .eeprom_size = IWL4965_EEPROM_IMG_SIZE,
+       .num_of_queues = IWL49_NUM_QUEUES,
+       .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
+       .pll_cfg_val = 0,
+       .set_l0s = true,
+       .use_bsm = true,
+       .led_compensation = 61,
+       .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
+       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+       .wd_timeout = IWL_DEF_WD_TIMEOUT,
+       .temperature_kelvin = true,
+       .max_event_log_size = 512,
+       .ucode_tracing = true,
+       .sensitivity_calib_by_driver = true,
+       .chain_noise_calib_by_driver = true,
+};
+
+struct iwl_cfg iwl4965_cfg = {
+       .name = "Intel(R) Wireless WiFi Link 4965AGN",
+       .fw_name_pre = IWL4965_FW_PRE,
+       .ucode_api_max = IWL4965_UCODE_API_MAX,
+       .ucode_api_min = IWL4965_UCODE_API_MIN,
+       .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
+       .valid_tx_ant = ANT_AB,
+       .valid_rx_ant = ANT_ABC,
+       .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
+       .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
+       .ops = &iwl4965_ops,
+       .mod_params = &iwl4965_mod_params,
+       .base_params = &iwl4965_base_params,
+       .led_mode = IWL_LED_BLINK,
+       /*
+        * Force use of chains B and C for scan RX on 5 GHz band
+        * because the device has off-channel reception on chain A.
+        */
+       .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
+};
+
+/* Module firmware */
+MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.h b/drivers/net/wireless/iwlegacy/iwl-4965.h
new file mode 100644 (file)
index 0000000..01f8163
--- /dev/null
@@ -0,0 +1,282 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_4965_h__
+#define __iwl_4965_h__
+
+#include "iwl-dev.h"
+
+/* configuration for the _4965 devices */
+extern struct iwl_cfg iwl4965_cfg;
+
+extern struct iwl_mod_params iwl4965_mod_params;
+
+extern struct ieee80211_ops iwl4965_hw_ops;
+
+/* tx queue */
+void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
+                           int sta_id, int tid, int freed);
+
+/* RXON */
+void iwl4965_set_rxon_chain(struct iwl_priv *priv,
+                               struct iwl_rxon_context *ctx);
+
+/* uCode */
+int iwl4965_verify_ucode(struct iwl_priv *priv);
+
+/* lib */
+void iwl4965_check_abort_status(struct iwl_priv *priv,
+                           u8 frame_count, u32 status);
+
+void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+int iwl4965_hw_nic_init(struct iwl_priv *priv);
+int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display);
+
+/* rx */
+void iwl4965_rx_queue_restock(struct iwl_priv *priv);
+void iwl4965_rx_replenish(struct iwl_priv *priv);
+void iwl4965_rx_replenish_now(struct iwl_priv *priv);
+void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+int iwl4965_rxq_stop(struct iwl_priv *priv);
+int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+void iwl4965_rx_reply_rx(struct iwl_priv *priv,
+                    struct iwl_rx_mem_buffer *rxb);
+void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
+                        struct iwl_rx_mem_buffer *rxb);
+void iwl4965_rx_handle(struct iwl_priv *priv);
+
+/* tx */
+void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
+int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+                                struct iwl_tx_queue *txq,
+                                dma_addr_t addr, u16 len, u8 reset, u8 pad);
+int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
+                        struct iwl_tx_queue *txq);
+void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
+                             struct ieee80211_tx_info *info);
+int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
+int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
+                       struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta, u16 tid);
+int iwl4965_txq_check_empty(struct iwl_priv *priv,
+                          int sta_id, u8 tid, int txq_id);
+void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
+                               struct iwl_rx_mem_buffer *rxb);
+int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
+void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
+int iwl4965_txq_ctx_alloc(struct iwl_priv *priv);
+void iwl4965_txq_ctx_reset(struct iwl_priv *priv);
+void iwl4965_txq_ctx_stop(struct iwl_priv *priv);
+void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask);
+
+/*
+ * Acquire priv->lock before calling this function !
+ */
+void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index);
+/**
+ * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
+ * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
+ * @scd_retry: (1) Indicates queue will be used in aggregation mode
+ *
+ * NOTE:  Acquire priv->lock before calling this function !
+ */
+void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
+                                       struct iwl_tx_queue *txq,
+                                       int tx_fifo_id, int scd_retry);
+
+static inline u32 iwl4965_tx_status_to_mac80211(u32 status)
+{
+       status &= TX_STATUS_MSK;
+
+       switch (status) {
+       case TX_STATUS_SUCCESS:
+       case TX_STATUS_DIRECT_DONE:
+               return IEEE80211_TX_STAT_ACK;
+       case TX_STATUS_FAIL_DEST_PS:
+               return IEEE80211_TX_STAT_TX_FILTERED;
+       default:
+               return 0;
+       }
+}
+
+static inline bool iwl4965_is_tx_success(u32 status)
+{
+       status &= TX_STATUS_MSK;
+       return (status == TX_STATUS_SUCCESS) ||
+              (status == TX_STATUS_DIRECT_DONE);
+}
+
+u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
+
+/* rx */
+void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
+                               struct iwl_rx_mem_buffer *rxb);
+bool iwl4965_good_plcp_health(struct iwl_priv *priv,
+                         struct iwl_rx_packet *pkt);
+void iwl4965_rx_statistics(struct iwl_priv *priv,
+                      struct iwl_rx_mem_buffer *rxb);
+void iwl4965_reply_statistics(struct iwl_priv *priv,
+                         struct iwl_rx_mem_buffer *rxb);
+
+/* scan */
+int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
+
+/* station mgmt */
+int iwl4965_manage_ibss_station(struct iwl_priv *priv,
+                              struct ieee80211_vif *vif, bool add);
+
+/* hcmd */
+int iwl4965_send_beacon_cmd(struct iwl_priv *priv);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+const char *iwl4965_get_tx_fail_reason(u32 status);
+#else
+static inline const char *
+iwl4965_get_tx_fail_reason(u32 status) { return ""; }
+#endif
+
+/* station management */
+int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
+                              struct iwl_rxon_context *ctx);
+int iwl4965_add_bssid_station(struct iwl_priv *priv,
+                               struct iwl_rxon_context *ctx,
+                            const u8 *addr, u8 *sta_id_r);
+int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
+                              struct iwl_rxon_context *ctx,
+                              struct ieee80211_key_conf *key);
+int iwl4965_set_default_wep_key(struct iwl_priv *priv,
+                           struct iwl_rxon_context *ctx,
+                           struct ieee80211_key_conf *key);
+int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
+                                struct iwl_rxon_context *ctx);
+int iwl4965_set_dynamic_key(struct iwl_priv *priv,
+                       struct iwl_rxon_context *ctx,
+                       struct ieee80211_key_conf *key, u8 sta_id);
+int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
+                       struct iwl_rxon_context *ctx,
+                       struct ieee80211_key_conf *key, u8 sta_id);
+void iwl4965_update_tkip_key(struct iwl_priv *priv,
+                        struct iwl_rxon_context *ctx,
+                        struct ieee80211_key_conf *keyconf,
+                        struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
+int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv,
+                       int sta_id, int tid);
+int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
+                        int tid, u16 ssn);
+int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
+                       int tid);
+void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv,
+                       int sta_id, int cnt);
+int iwl4965_update_bcast_stations(struct iwl_priv *priv);
+
+/* rate */
+static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx)
+{
+       return BIT(ant_idx) << RATE_MCS_ANT_POS;
+}
+
+static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
+{
+       return le32_to_cpu(rate_n_flags) & 0xFF;
+}
+
+static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags)
+{
+       return cpu_to_le32(flags|(u32)rate);
+}
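+
+/*
+ * Illustrative sketch only -- not driver code, and the helper name is made
+ * up.  It shows the two inlines above composing a rate_n_flags word: the
+ * plcp rate occupies bits 7:0 and the antenna selection is OR-ed in from
+ * iwl4965_ant_idx_to_flags(), so iwl4965_hw_get_rate() recovers the rate.
+ */
+static inline __le32 iwl4965_rate_n_flags_sketch(u8 plcp_rate, u8 ant_idx)
+{
+       __le32 rnf = iwl4965_hw_set_rate_n_flags(plcp_rate,
+                                       iwl4965_ant_idx_to_flags(ant_idx));
+
+       /* iwl4965_hw_get_rate(rnf) == plcp_rate for any ant_idx */
+       return rnf;
+}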
+
+/* eeprom */
+void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
+int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv);
+void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv);
+int  iwl4965_eeprom_check_version(struct iwl_priv *priv);
+
+/* mac80211 handlers (for 4965) */
+void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+int iwl4965_mac_start(struct ieee80211_hw *hw);
+void iwl4965_mac_stop(struct ieee80211_hw *hw);
+void iwl4965_configure_filter(struct ieee80211_hw *hw,
+                            unsigned int changed_flags,
+                            unsigned int *total_flags,
+                            u64 multicast);
+int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+                      struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+                      struct ieee80211_key_conf *key);
+void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif,
+                               struct ieee80211_key_conf *keyconf,
+                               struct ieee80211_sta *sta,
+                               u32 iv32, u16 *phase1key);
+int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
+                           struct ieee80211_vif *vif,
+                           enum ieee80211_ampdu_mlme_action action,
+                           struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                           u8 buf_size);
+int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
+                      struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta);
+void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
+                              struct ieee80211_channel_switch *ch_switch);
+
+#endif /* __iwl_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-commands.h b/drivers/net/wireless/iwlegacy/iwl-commands.h
new file mode 100644 (file)
index 0000000..17a1d50
--- /dev/null
@@ -0,0 +1,3405 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+/*
+ * Please use this file (iwl-commands.h) only for uCode API definitions.
+ * Please use iwl-xxxx-hw.h for hardware-related definitions.
+ * Please use iwl-dev.h for driver implementation definitions.
+ */
+
+#ifndef __iwl_legacy_commands_h__
+#define __iwl_legacy_commands_h__
+
+struct iwl_priv;
+
+/* uCode version contains 4 values: Major/Minor/API/Serial */
+#define IWL_UCODE_MAJOR(ver)   (((ver) & 0xFF000000) >> 24)
+#define IWL_UCODE_MINOR(ver)   (((ver) & 0x00FF0000) >> 16)
+#define IWL_UCODE_API(ver)     (((ver) & 0x0000FF00) >> 8)
+#define IWL_UCODE_SERIAL(ver)  ((ver) & 0x000000FF)
+
+
+/* Tx rates */
+#define IWL_CCK_RATES  4
+#define IWL_OFDM_RATES 8
+#define IWL_MAX_RATES  (IWL_CCK_RATES + IWL_OFDM_RATES)
+
+enum {
+       REPLY_ALIVE = 0x1,
+       REPLY_ERROR = 0x2,
+
+       /* RXON and QOS commands */
+       REPLY_RXON = 0x10,
+       REPLY_RXON_ASSOC = 0x11,
+       REPLY_QOS_PARAM = 0x13,
+       REPLY_RXON_TIMING = 0x14,
+
+       /* Multi-Station support */
+       REPLY_ADD_STA = 0x18,
+       REPLY_REMOVE_STA = 0x19,
+
+       /* Security */
+       REPLY_WEPKEY = 0x20,
+
+       /* RX, TX, LEDs */
+       REPLY_3945_RX = 0x1b,           /* 3945 only */
+       REPLY_TX = 0x1c,
+       REPLY_RATE_SCALE = 0x47,        /* 3945 only */
+       REPLY_LEDS_CMD = 0x48,
+       REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
+
+       /* 802.11h related */
+       REPLY_CHANNEL_SWITCH = 0x72,
+       CHANNEL_SWITCH_NOTIFICATION = 0x73,
+       REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
+       SPECTRUM_MEASURE_NOTIFICATION = 0x75,
+
+       /* Power Management */
+       POWER_TABLE_CMD = 0x77,
+       PM_SLEEP_NOTIFICATION = 0x7A,
+       PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,
+
+       /* Scan commands and notifications */
+       REPLY_SCAN_CMD = 0x80,
+       REPLY_SCAN_ABORT_CMD = 0x81,
+       SCAN_START_NOTIFICATION = 0x82,
+       SCAN_RESULTS_NOTIFICATION = 0x83,
+       SCAN_COMPLETE_NOTIFICATION = 0x84,
+
+       /* IBSS/AP commands */
+       BEACON_NOTIFICATION = 0x90,
+       REPLY_TX_BEACON = 0x91,
+
+       /* Miscellaneous commands */
+       REPLY_TX_PWR_TABLE_CMD = 0x97,
+
+       /* Bluetooth device coexistence config command */
+       REPLY_BT_CONFIG = 0x9b,
+
+       /* Statistics */
+       REPLY_STATISTICS_CMD = 0x9c,
+       STATISTICS_NOTIFICATION = 0x9d,
+
+       /* RF-KILL commands and notifications */
+       CARD_STATE_NOTIFICATION = 0xa1,
+
+       /* Missed beacons notification */
+       MISSED_BEACONS_NOTIFICATION = 0xa2,
+
+       REPLY_CT_KILL_CONFIG_CMD = 0xa4,
+       SENSITIVITY_CMD = 0xa8,
+       REPLY_PHY_CALIBRATION_CMD = 0xb0,
+       REPLY_RX_PHY_CMD = 0xc0,
+       REPLY_RX_MPDU_CMD = 0xc1,
+       REPLY_RX = 0xc3,
+       REPLY_COMPRESSED_BA = 0xc5,
+
+       REPLY_MAX = 0xff
+};
+
+/******************************************************************************
+ * (0)
+ * Commonly used structures and definitions:
+ * Command header, rate_n_flags, txpower
+ *
+ *****************************************************************************/
+
+/* iwl_cmd_header flags value */
+#define IWL_CMD_FAILED_MSK 0x40
+
+#define SEQ_TO_QUEUE(s)        (((s) >> 8) & 0x1f)
+#define QUEUE_TO_SEQ(q)        (((q) & 0x1f) << 8)
+#define SEQ_TO_INDEX(s)        ((s) & 0xff)
+#define INDEX_TO_SEQ(i)        ((i) & 0xff)
+#define SEQ_HUGE_FRAME cpu_to_le16(0x4000)
+#define SEQ_RX_FRAME   cpu_to_le16(0x8000)
+
+/**
+ * struct iwl_cmd_header
+ *
+ * This header format appears in the beginning of each command sent from the
+ * driver, and each response/notification received from uCode.
+ */
+struct iwl_cmd_header {
+       u8 cmd;         /* Command ID:  REPLY_RXON, etc. */
+       u8 flags;       /* 0:5 reserved, 6 abort, 7 internal */
+       /*
+        * The driver sets up the sequence number to values of its choosing.
+        * uCode does not use this value, but passes it back to the driver
+        * when sending the response to each driver-originated command, so
+        * the driver can match the response to the command.  Since the values
+        * don't get used by uCode, the driver may set up an arbitrary format.
+        *
+        * There is one exception:  uCode sets bit 15 when it originates
+        * the response/notification, i.e. when the response/notification
+        * is not a direct response to a command sent by the driver.  For
+        * example, uCode issues REPLY_3945_RX when it sends a received frame
+        * to the driver; it is not a direct response to any driver command.
+        *
+        * The Linux driver uses the following format:
+        *
+        *  0:7         tfd index - position within TX queue
+        *  8:12        TX queue id
+        *  13          reserved
+        *  14          huge - driver sets this to indicate command is in the
+        *              'huge' storage at the end of the command buffers
+        *  15          unsolicited RX or uCode-originated notification
+       */
+       __le16 sequence;
+
+       /* command or response/notification data follows immediately */
+       u8 data[0];
+} __packed;
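+
+/*
+ * Illustrative sketch only -- not driver code, and the helper name is made
+ * up.  It shows the driver-chosen sequence layout described above round-
+ * tripping through the SEQ_* / *_TO_SEQ macros defined earlier in this
+ * file; the queue and index values are arbitrary.
+ */
+static inline __le16 iwl_legacy_seq_sketch(void)
+{
+       u16 seq = QUEUE_TO_SEQ(4) | INDEX_TO_SEQ(37);   /* txq 4, tfd 37 */
+
+       /* SEQ_TO_QUEUE(seq) == 4 and SEQ_TO_INDEX(seq) == 37 */
+       return cpu_to_le16(seq);
+}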
+
+
+/**
+ * struct iwl3945_tx_power
+ *
+ * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH
+ *
+ * Each entry contains two values:
+ * 1)  DSP gain (or sometimes called DSP attenuation).  This is a fine-grained
+ *     linear value that multiplies the output of the digital signal processor,
+ *     before being sent to the analog radio.
+ * 2)  Radio gain.  This sets the analog gain of the radio Tx path.
+ *     It is a coarser setting, and behaves in a logarithmic (dB) fashion.
+ *
+ * Driver obtains values from struct iwl3945_tx_power power_gain_table[][].
+ */
+struct iwl3945_tx_power {
+       u8 tx_gain;             /* gain for analog radio */
+       u8 dsp_atten;           /* gain for DSP */
+} __packed;
+
+/**
+ * struct iwl3945_power_per_rate
+ *
+ * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ */
+struct iwl3945_power_per_rate {
+       u8 rate;                /* plcp */
+       struct iwl3945_tx_power tpc;
+       u8 reserved;
+} __packed;
+
+/**
+ * iwl4965 rate_n_flags bit fields
+ *
+ * rate_n_flags format is used in following iwl4965 commands:
+ *  REPLY_RX (response only)
+ *  REPLY_RX_MPDU (response only)
+ *  REPLY_TX (both command and response)
+ *  REPLY_TX_LINK_QUALITY_CMD
+ *
+ * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
+ *  2-0:  0)   6 Mbps
+ *        1)  12 Mbps
+ *        2)  18 Mbps
+ *        3)  24 Mbps
+ *        4)  36 Mbps
+ *        5)  48 Mbps
+ *        6)  54 Mbps
+ *        7)  60 Mbps
+ *
+ *  4-3:  0)  Single stream (SISO)
+ *        1)  Dual stream (MIMO)
+ *        2)  Triple stream (MIMO)
+ *
+ *    5:  Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
+ *
+ * Legacy OFDM rate format for bits 7:0 (bit 8 must be "0", bit 9 "0"):
+ *  3-0:  0xD)   6 Mbps
+ *        0xF)   9 Mbps
+ *        0x5)  12 Mbps
+ *        0x7)  18 Mbps
+ *        0x9)  24 Mbps
+ *        0xB)  36 Mbps
+ *        0x1)  48 Mbps
+ *        0x3)  54 Mbps
+ *
+ * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"):
+ *  6-0:   10)  1 Mbps
+ *         20)  2 Mbps
+ *         55)  5.5 Mbps
+ *        110)  11 Mbps
+ */
+#define RATE_MCS_CODE_MSK 0x7
+#define RATE_MCS_SPATIAL_POS 3
+#define RATE_MCS_SPATIAL_MSK 0x18
+#define RATE_MCS_HT_DUP_POS 5
+#define RATE_MCS_HT_DUP_MSK 0x20
+
+/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */
+#define RATE_MCS_FLAGS_POS 8
+#define RATE_MCS_HT_POS 8
+#define RATE_MCS_HT_MSK 0x100
+
+/* Bit 9: (1) CCK, (0) OFDM.  HT (bit 8) must be "0" for this bit to be valid */
+#define RATE_MCS_CCK_POS 9
+#define RATE_MCS_CCK_MSK 0x200
+
+/* Bit 10: (1) Use Green Field preamble */
+#define RATE_MCS_GF_POS 10
+#define RATE_MCS_GF_MSK 0x400
+
+/* Bit 11: (1) Use 40Mhz HT40 chnl width, (0) use 20 MHz legacy chnl width */
+#define RATE_MCS_HT40_POS 11
+#define RATE_MCS_HT40_MSK 0x800
+
+/* Bit 12: (1) Duplicate data on both 20MHz chnls. HT40 (bit 11) must be set. */
+#define RATE_MCS_DUP_POS 12
+#define RATE_MCS_DUP_MSK 0x1000
+
+/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
+#define RATE_MCS_SGI_POS 13
+#define RATE_MCS_SGI_MSK 0x2000
+
+/**
+ * rate_n_flags Tx antenna masks
+ * 4965 has 2 transmitters
+ * bits 14:16
+ */
+#define RATE_MCS_ANT_POS       14
+#define RATE_MCS_ANT_A_MSK     0x04000
+#define RATE_MCS_ANT_B_MSK     0x08000
+#define RATE_MCS_ANT_C_MSK     0x10000
+#define RATE_MCS_ANT_AB_MSK    (RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK)
+#define RATE_MCS_ANT_ABC_MSK   (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
+#define RATE_ANT_NUM 3
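+
+/*
+ * Illustrative sketch only:  decoding a host-order rate_n_flags value with
+ * the masks above.  Variable names are hypothetical.
+ *
+ *     u32 r   = le32_to_cpu(rate_n_flags);
+ *     u32 ant = (r & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
+ *     if (r & RATE_MCS_HT_MSK) {
+ *             u32 mcs     = r & RATE_MCS_CODE_MSK;
+ *             u32 streams = (r & RATE_MCS_SPATIAL_MSK) >> RATE_MCS_SPATIAL_POS;
+ *             bool sgi    = !!(r & RATE_MCS_SGI_MSK);
+ *             bool ht40   = !!(r & RATE_MCS_HT40_MSK);
+ *     } else if (r & RATE_MCS_CCK_MSK) {
+ *             u32 plcp = r & 0x7f;                   (10/20/55/110, see above)
+ *     } else {
+ *             u32 plcp = r & 0xf;                    (legacy OFDM, see above)
+ *     }
+ */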
+
+#define POWER_TABLE_NUM_ENTRIES                        33
+#define POWER_TABLE_NUM_HT_OFDM_ENTRIES                32
+#define POWER_TABLE_CCK_ENTRY                  32
+
+#define IWL_PWR_NUM_HT_OFDM_ENTRIES            24
+#define IWL_PWR_CCK_ENTRIES                    2
+
+/**
+ * union iwl4965_tx_power_dual_stream
+ *
+ * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Use __le32 version (struct tx_power_dual_stream) when building command.
+ *
+ * Driver provides radio gain and DSP attenuation settings to device in pairs,
+ * one value for each transmitter chain.  The first value is for transmitter A,
+ * second for transmitter B.
+ *
+ * For SISO bit rates, both values in a pair should be identical.
+ * For MIMO rates, one value may be different from the other,
+ * in order to balance the Tx output between the two transmitters.
+ *
+ * See more details in doc for TXPOWER in iwl-4965-hw.h.
+ */
+union iwl4965_tx_power_dual_stream {
+       struct {
+               u8 radio_tx_gain[2];
+               u8 dsp_predis_atten[2];
+       } s;
+       u32 dw;
+};
+
+/**
+ * struct tx_power_dual_stream
+ *
+ * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ *
+ * Same format as union iwl4965_tx_power_dual_stream above, but as a single __le32
+ */
+struct tx_power_dual_stream {
+       __le32 dw;
+} __packed;
+
+/**
+ * struct iwl4965_tx_power_db
+ *
+ * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ */
+struct iwl4965_tx_power_db {
+       struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
+} __packed;
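+
+/*
+ * Illustrative sketch only:  filling one power table entry via the
+ * host-format union above, then storing it in __le32 form.  The gain and
+ * attenuation inputs and the index i are hypothetical.
+ *
+ *     union iwl4965_tx_power_dual_stream p;
+ *     p.s.radio_tx_gain[0]    = gain_a;      (Tx chain A)
+ *     p.s.radio_tx_gain[1]    = gain_b;      (Tx chain B; equal to A for SISO rates)
+ *     p.s.dsp_predis_atten[0] = dsp_a;
+ *     p.s.dsp_predis_atten[1] = dsp_b;
+ *     tx_power_db.power_tbl[i].dw = cpu_to_le32(p.dw);
+ */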
+
+/******************************************************************************
+ * (0a)
+ * Alive and Error Commands & Responses:
+ *
+ *****************************************************************************/
+
+#define UCODE_VALID_OK cpu_to_le32(0x1)
+#define INITIALIZE_SUBTYPE    (9)
+
+/*
+ * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command)
+ *
+ * uCode issues this "initialize alive" notification once the initialization
+ * uCode image has completed its work, and is ready to load the runtime image.
+ * This is the *first* "alive" notification that the driver will receive after
+ * rebooting uCode; the "initialize" alive is indicated by subtype field == 9.
+ *
+ * See comments documenting "BSM" (bootstrap state machine).
+ *
+ * For 4965, this notification contains important calibration data for
+ * calculating txpower settings:
+ *
+ * 1)  Power supply voltage indication.  The voltage sensor outputs higher
+ *     values for lower voltage, and vice versa.
+ *
+ * 2)  Temperature measurement parameters, for each of two channel widths
+ *     (20 MHz and 40 MHz) supported by the radios.  Temperature sensing
+ *     is done via one of the receiver chains, and channel width influences
+ *     the results.
+ *
+ * 3)  Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
+ *     for each of 5 frequency ranges.
+ */
+struct iwl_init_alive_resp {
+       u8 ucode_minor;
+       u8 ucode_major;
+       __le16 reserved1;
+       u8 sw_rev[8];
+       u8 ver_type;
+       u8 ver_subtype;         /* "9" for initialize alive */
+       __le16 reserved2;
+       __le32 log_event_table_ptr;
+       __le32 error_event_table_ptr;
+       __le32 timestamp;
+       __le32 is_valid;
+
+       /* calibration values from "initialize" uCode */
+       __le32 voltage;         /* signed, higher value is lower voltage */
+       __le32 therm_r1[2];     /* signed, 1st for normal, 2nd for HT40 */
+       __le32 therm_r2[2];     /* signed */
+       __le32 therm_r3[2];     /* signed */
+       __le32 therm_r4[2];     /* signed */
+       __le32 tx_atten[5][2];  /* signed MIMO gain comp, 5 freq groups,
+                                * 2 Tx chains */
+} __packed;
+
+
+/**
+ * REPLY_ALIVE = 0x1 (response only, not a command)
+ *
+ * uCode issues this "alive" notification once the runtime image is ready
+ * to receive commands from the driver.  This is the *second* "alive"
+ * notification that the driver will receive after rebooting uCode;
+ * this "alive" is indicated by subtype field != 9.
+ *
+ * See comments documenting "BSM" (bootstrap state machine).
+ *
+ * This response includes two pointers to structures within the device's
+ * data SRAM (access via HBUS_TARG_MEM_* regs) that are useful for debugging:
+ *
+ * 1)  log_event_table_ptr indicates base of the event log.  This traces
+ *     a 256-entry history of uCode execution within a circular buffer.
+ *     Its header format is:
+ *
+ *     __le32 log_size;     log capacity (in number of entries)
+ *     __le32 type;         (1) timestamp with each entry, (0) no timestamp
+ *     __le32 wraps;        # times uCode has wrapped to top of circular buffer
+ *     __le32 write_index;  next circular buffer entry that uCode would fill
+ *
+ *     The header is followed by the circular buffer of log entries.  Entries
+ *     with timestamps have the following format:
+ *
+ *     __le32 event_id;     range 0 - 1500
+ *     __le32 timestamp;    low 32 bits of TSF (of network, if associated)
+ *     __le32 data;         event_id-specific data value
+ *
+ *     Entries without timestamps contain only event_id and data.
+ *
+ *
+ * 2)  error_event_table_ptr indicates base of the error log.  This contains
+ *     information about any uCode error that occurs.  For 4965, the format
+ *     of the error log is:
+ *
+ *     __le32 valid;        (nonzero) valid, (0) log is empty
+ *     __le32 error_id;     type of error
+ *     __le32 pc;           program counter
+ *     __le32 blink1;       branch link
+ *     __le32 blink2;       branch link
+ *     __le32 ilink1;       interrupt link
+ *     __le32 ilink2;       interrupt link
+ *     __le32 data1;        error-specific data
+ *     __le32 data2;        error-specific data
+ *     __le32 line;         source code line of error
+ *     __le32 bcon_time;    beacon timer
+ *     __le32 tsf_low;      network timestamp function timer
+ *     __le32 tsf_hi;       network timestamp function timer
+ *     __le32 gp1;          GP1 timer register
+ *     __le32 gp2;          GP2 timer register
+ *     __le32 gp3;          GP3 timer register
+ *     __le32 ucode_ver;    uCode version
+ *     __le32 hw_ver;       HW Silicon version
+ *     __le32 brd_ver;      HW board version
+ *     __le32 log_pc;       log program counter
+ *     __le32 frame_ptr;    frame pointer
+ *     __le32 stack_ptr;    stack pointer
+ *     __le32 hcmd;         last host command
+ *     __le32 isr0;         isr status register LMPM_NIC_ISR0: rxtx_flag
+ *     __le32 isr1;         isr status register LMPM_NIC_ISR1: host_flag
+ *     __le32 isr2;         isr status register LMPM_NIC_ISR2: enc_flag
+ *     __le32 isr3;         isr status register LMPM_NIC_ISR3: time_flag
+ *     __le32 isr4;         isr status register LMPM_NIC_ISR4: wico interrupt
+ *     __le32 isr_pref;     isr status register LMPM_NIC_PREF_STAT
+ *     __le32 wait_event;   wait event() caller address
+ *     __le32 l2p_control;  L2pControlField
+ *     __le32 l2p_duration; L2pDurationField
+ *     __le32 l2p_mhvalid;  L2pMhValidBits
+ *     __le32 l2p_addr_match; L2pAddrMatchStat
+ *     __le32 lmpm_pmg_sel; indicate which clocks are turned on (LMPM_PMG_SEL)
+ *     __le32 u_timestamp;  indicates the date and time of the compilation
+ *     __le32 reserved;
+ *
+ * The Linux driver can print both logs to the system log when a uCode error
+ * occurs.
+ */
+struct iwl_alive_resp {
+       u8 ucode_minor;
+       u8 ucode_major;
+       __le16 reserved1;
+       u8 sw_rev[8];
+       u8 ver_type;
+       u8 ver_subtype;                 /* not "9" for runtime alive */
+       __le16 reserved2;
+       __le32 log_event_table_ptr;     /* SRAM address for event log */
+       __le32 error_event_table_ptr;   /* SRAM address for error log */
+       __le32 timestamp;
+       __le32 is_valid;
+} __packed;
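+
+/*
+ * Illustrative sketch only:  reading the event log header documented above,
+ * assuming a hypothetical read_sram(addr) accessor for the HBUS_TARG_MEM_*
+ * window.
+ *
+ *     u32 base        = le32_to_cpu(alive->log_event_table_ptr);
+ *     u32 log_size    = read_sram(base);
+ *     u32 with_tstamp = read_sram(base + 1 * sizeof(u32));
+ *     u32 wraps       = read_sram(base + 2 * sizeof(u32));
+ *     u32 write_index = read_sram(base + 3 * sizeof(u32));
+ *     (entries follow the header: 3 words each when timestamps are enabled,
+ *      2 words each otherwise)
+ */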
+
+/*
+ * REPLY_ERROR = 0x2 (response only, not a command)
+ */
+struct iwl_error_resp {
+       __le32 error_type;
+       u8 cmd_id;
+       u8 reserved1;
+       __le16 bad_cmd_seq_num;
+       __le32 error_info;
+       __le64 timestamp;
+} __packed;
+
+/******************************************************************************
+ * (1)
+ * RXON Commands & Responses:
+ *
+ *****************************************************************************/
+
+/*
+ * Rx config defines & structure
+ */
+/* rx_config device types  */
+enum {
+       RXON_DEV_TYPE_AP = 1,
+       RXON_DEV_TYPE_ESS = 3,
+       RXON_DEV_TYPE_IBSS = 4,
+       RXON_DEV_TYPE_SNIFFER = 6,
+};
+
+
+#define RXON_RX_CHAIN_DRIVER_FORCE_MSK         cpu_to_le16(0x1 << 0)
+#define RXON_RX_CHAIN_DRIVER_FORCE_POS         (0)
+#define RXON_RX_CHAIN_VALID_MSK                        cpu_to_le16(0x7 << 1)
+#define RXON_RX_CHAIN_VALID_POS                        (1)
+#define RXON_RX_CHAIN_FORCE_SEL_MSK            cpu_to_le16(0x7 << 4)
+#define RXON_RX_CHAIN_FORCE_SEL_POS            (4)
+#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK       cpu_to_le16(0x7 << 7)
+#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS       (7)
+#define RXON_RX_CHAIN_CNT_MSK                  cpu_to_le16(0x3 << 10)
+#define RXON_RX_CHAIN_CNT_POS                  (10)
+#define RXON_RX_CHAIN_MIMO_CNT_MSK             cpu_to_le16(0x3 << 12)
+#define RXON_RX_CHAIN_MIMO_CNT_POS             (12)
+#define RXON_RX_CHAIN_MIMO_FORCE_MSK           cpu_to_le16(0x1 << 14)
+#define RXON_RX_CHAIN_MIMO_FORCE_POS           (14)
+
+/* rx_config flags */
+/* band & modulation selection */
+#define RXON_FLG_BAND_24G_MSK           cpu_to_le32(1 << 0)
+#define RXON_FLG_CCK_MSK                cpu_to_le32(1 << 1)
+/* auto detection enable */
+#define RXON_FLG_AUTO_DETECT_MSK        cpu_to_le32(1 << 2)
+/* TGg protection when tx */
+#define RXON_FLG_TGG_PROTECT_MSK        cpu_to_le32(1 << 3)
+/* cck short slot & preamble */
+#define RXON_FLG_SHORT_SLOT_MSK          cpu_to_le32(1 << 4)
+#define RXON_FLG_SHORT_PREAMBLE_MSK     cpu_to_le32(1 << 5)
+/* antenna selection */
+#define RXON_FLG_DIS_DIV_MSK            cpu_to_le32(1 << 7)
+#define RXON_FLG_ANT_SEL_MSK            cpu_to_le32(0x0f00)
+#define RXON_FLG_ANT_A_MSK              cpu_to_le32(1 << 8)
+#define RXON_FLG_ANT_B_MSK              cpu_to_le32(1 << 9)
+/* radar detection enable */
+#define RXON_FLG_RADAR_DETECT_MSK       cpu_to_le32(1 << 12)
+#define RXON_FLG_TGJ_NARROW_BAND_MSK    cpu_to_le32(1 << 13)
+/* rx response to host with 8-byte TSF
+ * (according to ON_AIR deassertion) */
+#define RXON_FLG_TSF2HOST_MSK           cpu_to_le32(1 << 15)
+
+
+/* HT flags */
+#define RXON_FLG_CTRL_CHANNEL_LOC_POS          (22)
+#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK       cpu_to_le32(0x1 << 22)
+
+#define RXON_FLG_HT_OPERATING_MODE_POS         (23)
+
+#define RXON_FLG_HT_PROT_MSK                   cpu_to_le32(0x1 << 23)
+#define RXON_FLG_HT40_PROT_MSK                 cpu_to_le32(0x2 << 23)
+
+#define RXON_FLG_CHANNEL_MODE_POS              (25)
+#define RXON_FLG_CHANNEL_MODE_MSK              cpu_to_le32(0x3 << 25)
+
+/* channel mode */
+enum {
+       CHANNEL_MODE_LEGACY = 0,
+       CHANNEL_MODE_PURE_40 = 1,
+       CHANNEL_MODE_MIXED = 2,
+       CHANNEL_MODE_RESERVED = 3,
+};
+#define RXON_FLG_CHANNEL_MODE_LEGACY                   \
+       cpu_to_le32(CHANNEL_MODE_LEGACY << RXON_FLG_CHANNEL_MODE_POS)
+#define RXON_FLG_CHANNEL_MODE_PURE_40                  \
+       cpu_to_le32(CHANNEL_MODE_PURE_40 << RXON_FLG_CHANNEL_MODE_POS)
+#define RXON_FLG_CHANNEL_MODE_MIXED                    \
+       cpu_to_le32(CHANNEL_MODE_MIXED << RXON_FLG_CHANNEL_MODE_POS)
+
+/* CTS to self (if spec allows) flag */
+#define RXON_FLG_SELF_CTS_EN                   cpu_to_le32(0x1<<30)
+
+/* rx_config filter flags */
+/* accept all data frames */
+#define RXON_FILTER_PROMISC_MSK         cpu_to_le32(1 << 0)
+/* pass control & management to host */
+#define RXON_FILTER_CTL2HOST_MSK        cpu_to_le32(1 << 1)
+/* accept multi-cast */
+#define RXON_FILTER_ACCEPT_GRP_MSK      cpu_to_le32(1 << 2)
+/* don't decrypt uni-cast frames */
+#define RXON_FILTER_DIS_DECRYPT_MSK     cpu_to_le32(1 << 3)
+/* don't decrypt multi-cast frames */
+#define RXON_FILTER_DIS_GRP_DECRYPT_MSK cpu_to_le32(1 << 4)
+/* STA is associated */
+#define RXON_FILTER_ASSOC_MSK           cpu_to_le32(1 << 5)
+/* transfer to host non bssid beacons in associated state */
+#define RXON_FILTER_BCON_AWARE_MSK      cpu_to_le32(1 << 6)
+
+/**
+ * REPLY_RXON = 0x10 (command, has simple generic response)
+ *
+ * RXON tunes the radio tuner to a service channel, and sets up a number
+ * of parameters that are used primarily for Rx, but also for Tx operations.
+ *
+ * NOTE:  When tuning to a new channel, driver must set the
+ *        RXON_FILTER_ASSOC_MSK to 0.  This will clear station-dependent
+ *        info within the device, including the station tables, tx retry
+ *        rate tables, and txpower tables.  Driver must build a new station
+ *        table and txpower table before transmitting anything on the RXON
+ *        channel.
+ *
+ * NOTE:  All RXONs wipe clean the internal txpower table.  Driver must
+ *        issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10),
+ *        regardless of whether RXON_FILTER_ASSOC_MSK is set.
+ */
+
+struct iwl3945_rxon_cmd {
+       u8 node_addr[6];
+       __le16 reserved1;
+       u8 bssid_addr[6];
+       __le16 reserved2;
+       u8 wlap_bssid_addr[6];
+       __le16 reserved3;
+       u8 dev_type;
+       u8 air_propagation;
+       __le16 reserved4;
+       u8 ofdm_basic_rates;
+       u8 cck_basic_rates;
+       __le16 assoc_id;
+       __le32 flags;
+       __le32 filter_flags;
+       __le16 channel;
+       __le16 reserved5;
+} __packed;
+
+struct iwl4965_rxon_cmd {
+       u8 node_addr[6];
+       __le16 reserved1;
+       u8 bssid_addr[6];
+       __le16 reserved2;
+       u8 wlap_bssid_addr[6];
+       __le16 reserved3;
+       u8 dev_type;
+       u8 air_propagation;
+       __le16 rx_chain;
+       u8 ofdm_basic_rates;
+       u8 cck_basic_rates;
+       __le16 assoc_id;
+       __le32 flags;
+       __le32 filter_flags;
+       __le16 channel;
+       u8 ofdm_ht_single_stream_basic_rates;
+       u8 ofdm_ht_dual_stream_basic_rates;
+} __packed;
+
+/* Create a common rxon cmd which will be typecast into the 3945 or 4965
+ * specific rxon cmd, depending on where it is called from.
+ */
+struct iwl_legacy_rxon_cmd {
+       u8 node_addr[6];
+       __le16 reserved1;
+       u8 bssid_addr[6];
+       __le16 reserved2;
+       u8 wlap_bssid_addr[6];
+       __le16 reserved3;
+       u8 dev_type;
+       u8 air_propagation;
+       __le16 rx_chain;
+       u8 ofdm_basic_rates;
+       u8 cck_basic_rates;
+       __le16 assoc_id;
+       __le32 flags;
+       __le32 filter_flags;
+       __le16 channel;
+       u8 ofdm_ht_single_stream_basic_rates;
+       u8 ofdm_ht_dual_stream_basic_rates;
+       u8 reserved4;
+       u8 reserved5;
+} __packed;
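+
+/*
+ * Illustrative sketch only:  per the REPLY_RXON NOTE above, tuning to a new
+ * channel starts by clearing the "associated" filter bit.  The rxon variable
+ * and new_channel value are hypothetical.
+ *
+ *     rxon->channel       = cpu_to_le16(new_channel);
+ *     rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ *     (send REPLY_RXON, rebuild the station table, then send a new
+ *      REPLY_TX_PWR_TABLE_CMD before transmitting on the channel)
+ */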
+
+
+/*
+ * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
+ */
+struct iwl3945_rxon_assoc_cmd {
+       __le32 flags;
+       __le32 filter_flags;
+       u8 ofdm_basic_rates;
+       u8 cck_basic_rates;
+       __le16 reserved;
+} __packed;
+
+struct iwl4965_rxon_assoc_cmd {
+       __le32 flags;
+       __le32 filter_flags;
+       u8 ofdm_basic_rates;
+       u8 cck_basic_rates;
+       u8 ofdm_ht_single_stream_basic_rates;
+       u8 ofdm_ht_dual_stream_basic_rates;
+       __le16 rx_chain_select_flags;
+       __le16 reserved;
+} __packed;
+
+#define IWL_CONN_MAX_LISTEN_INTERVAL   10
+#define IWL_MAX_UCODE_BEACON_INTERVAL  4 /* 4096 */
+#define IWL39_MAX_UCODE_BEACON_INTERVAL        1 /* 1024 */
+
+/*
+ * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
+ */
+struct iwl_rxon_time_cmd {
+       __le64 timestamp;
+       __le16 beacon_interval;
+       __le16 atim_window;
+       __le32 beacon_init_val;
+       __le16 listen_interval;
+       u8 dtim_period;
+       u8 delta_cp_bss_tbtts;
+} __packed;
+
+/*
+ * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
+ */
+struct iwl3945_channel_switch_cmd {
+       u8 band;
+       u8 expect_beacon;
+       __le16 channel;
+       __le32 rxon_flags;
+       __le32 rxon_filter_flags;
+       __le32 switch_time;
+       struct iwl3945_power_per_rate power[IWL_MAX_RATES];
+} __packed;
+
+struct iwl4965_channel_switch_cmd {
+       u8 band;
+       u8 expect_beacon;
+       __le16 channel;
+       __le32 rxon_flags;
+       __le32 rxon_filter_flags;
+       __le32 switch_time;
+       struct iwl4965_tx_power_db tx_power;
+} __packed;
+
+/*
+ * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
+ */
+struct iwl_csa_notification {
+       __le16 band;
+       __le16 channel;
+       __le32 status;          /* 0 - OK, 1 - fail */
+} __packed;
+
+/******************************************************************************
+ * (2)
+ * Quality-of-Service (QOS) Commands & Responses:
+ *
+ *****************************************************************************/
+
+/**
+ * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM
+ * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd
+ *
+ * @cw_min: Contention window, start value in numbers of slots.
+ *          Should be a power-of-2, minus 1.  Device's default is 0x0f.
+ * @cw_max: Contention window, max value in numbers of slots.
+ *          Should be a power-of-2, minus 1.  Device's default is 0x3f.
+ * @aifsn:  Number of slots in Arbitration Interframe Space (before
+ *          performing random backoff timing prior to Tx).  Device default 1.
+ * @edca_txop:  Length of Tx opportunity, in uSecs.  Device default is 0.
+ *
+ * Device will automatically increase contention window to (2*CW) + 1 for each
+ * transmission retry.  Device uses cw_max as a bit mask, ANDed with new CW
+ * value, to cap the CW value.
+ */
+struct iwl_ac_qos {
+       __le16 cw_min;
+       __le16 cw_max;
+       u8 aifsn;
+       u8 reserved1;
+       __le16 edca_txop;
+} __packed;
+
+/* QoS flags defines */
+#define QOS_PARAM_FLG_UPDATE_EDCA_MSK  cpu_to_le32(0x01)
+#define QOS_PARAM_FLG_TGN_MSK          cpu_to_le32(0x02)
+#define QOS_PARAM_FLG_TXOP_TYPE_MSK    cpu_to_le32(0x10)
+
+/* Number of Access Categories (AC) (EDCA), queues 0..3 */
+#define AC_NUM                4
+
+/*
+ * REPLY_QOS_PARAM = 0x13 (command, has simple generic response)
+ *
+ * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
+ * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
+ */
+struct iwl_qosparam_cmd {
+       __le32 qos_flags;
+       struct iwl_ac_qos ac[AC_NUM];
+} __packed;
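+
+/*
+ * Illustrative sketch only:  filling one access category with the device
+ * defaults documented for struct iwl_ac_qos above.  The cmd variable is
+ * hypothetical.
+ *
+ *     cmd.qos_flags       = QOS_PARAM_FLG_UPDATE_EDCA_MSK;
+ *     cmd.ac[0].cw_min    = cpu_to_le16(0x0f);       (power of 2, minus 1)
+ *     cmd.ac[0].cw_max    = cpu_to_le16(0x3f);
+ *     cmd.ac[0].aifsn     = 1;
+ *     cmd.ac[0].edca_txop = cpu_to_le16(0);          (uSec)
+ */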
+
+/******************************************************************************
+ * (3)
+ * Add/Modify Stations Commands & Responses:
+ *
+ *****************************************************************************/
+/*
+ * Multi station support
+ */
+
+/* Special, dedicated locations within device's station table */
+#define        IWL_AP_ID               0
+#define        IWL_STA_ID              2
+#define        IWL3945_BROADCAST_ID    24
+#define IWL3945_STATION_COUNT  25
+#define IWL4965_BROADCAST_ID   31
+#define        IWL4965_STATION_COUNT   32
+
+#define        IWL_STATION_COUNT       32      /* MAX(3945,4965)*/
+#define        IWL_INVALID_STATION     255
+
+#define STA_FLG_TX_RATE_MSK            cpu_to_le32(1 << 2)
+#define STA_FLG_PWR_SAVE_MSK           cpu_to_le32(1 << 8)
+#define STA_FLG_RTS_MIMO_PROT_MSK      cpu_to_le32(1 << 17)
+#define STA_FLG_AGG_MPDU_8US_MSK       cpu_to_le32(1 << 18)
+#define STA_FLG_MAX_AGG_SIZE_POS       (19)
+#define STA_FLG_MAX_AGG_SIZE_MSK       cpu_to_le32(3 << 19)
+#define STA_FLG_HT40_EN_MSK            cpu_to_le32(1 << 21)
+#define STA_FLG_MIMO_DIS_MSK           cpu_to_le32(1 << 22)
+#define STA_FLG_AGG_MPDU_DENSITY_POS   (23)
+#define STA_FLG_AGG_MPDU_DENSITY_MSK   cpu_to_le32(7 << 23)
+
+/* Use in mode field.  1: modify existing entry, 0: add new station entry */
+#define STA_CONTROL_MODIFY_MSK         0x01
+
+/* key flags __le16*/
+#define STA_KEY_FLG_ENCRYPT_MSK        cpu_to_le16(0x0007)
+#define STA_KEY_FLG_NO_ENC     cpu_to_le16(0x0000)
+#define STA_KEY_FLG_WEP                cpu_to_le16(0x0001)
+#define STA_KEY_FLG_CCMP       cpu_to_le16(0x0002)
+#define STA_KEY_FLG_TKIP       cpu_to_le16(0x0003)
+
+#define STA_KEY_FLG_KEYID_POS  8
+#define STA_KEY_FLG_INVALID    cpu_to_le16(0x0800)
+/* wep key is either from global key (0) or from station info array (1) */
+#define STA_KEY_FLG_MAP_KEY_MSK        cpu_to_le16(0x0008)
+
+/* wep key in STA: 5-bytes (0) or 13-bytes (1) */
+#define STA_KEY_FLG_KEY_SIZE_MSK       cpu_to_le16(0x1000)
+#define STA_KEY_MULTICAST_MSK          cpu_to_le16(0x4000)
+#define STA_KEY_MAX_NUM                8
+
+/* Flags indicate whether to modify vs. don't change various station params */
+#define        STA_MODIFY_KEY_MASK             0x01
+#define        STA_MODIFY_TID_DISABLE_TX       0x02
+#define        STA_MODIFY_TX_RATE_MSK          0x04
+#define STA_MODIFY_ADDBA_TID_MSK       0x08
+#define STA_MODIFY_DELBA_TID_MSK       0x10
+#define STA_MODIFY_SLEEP_TX_COUNT_MSK  0x20
+
+/* Receiver address (actually, Rx station's index into station table),
+ * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
+#define BUILD_RAxTID(sta_id, tid)      (((sta_id) << 4) + (tid))
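+
+/*
+ * Illustrative example only:  BUILD_RAxTID(IWL_AP_ID, 5) evaluates to
+ * (0 << 4) + 5 = 5, i.e. TID 5 traffic addressed to station table entry 0.
+ */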
+
+struct iwl4965_keyinfo {
+       __le16 key_flags;
+       u8 tkip_rx_tsc_byte2;   /* TSC[2] for key mix ph1 detection */
+       u8 reserved1;
+       __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */
+       u8 key_offset;
+       u8 reserved2;
+       u8 key[16];             /* 16-byte unicast decryption key */
+} __packed;
+
+/**
+ * struct sta_id_modify
+ * @addr[ETH_ALEN]: station's MAC address
+ * @sta_id: index of station in uCode's station table
+ * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
+ *
+ * Driver selects unused table index when adding new station,
+ * or the index to a pre-existing station entry when modifying that station.
+ * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP).
+ *
+ * modify_mask flags select which parameters to modify vs. leave alone.
+ */
+struct sta_id_modify {
+       u8 addr[ETH_ALEN];
+       __le16 reserved1;
+       u8 sta_id;
+       u8 modify_mask;
+       __le16 reserved2;
+} __packed;
+
+/*
+ * REPLY_ADD_STA = 0x18 (command)
+ *
+ * The device contains an internal table of per-station information,
+ * with info on security keys, aggregation parameters, and Tx rates for
+ * initial Tx attempt and any retries (4965 devices use
+ * REPLY_TX_LINK_QUALITY_CMD, 3945 uses REPLY_RATE_SCALE to set up
+ * rate tables).
+ *
+ * REPLY_ADD_STA sets up the table entry for one station, either creating
+ * a new entry, or modifying a pre-existing one.
+ *
+ * NOTE:  RXON command (without "associated" bit set) wipes the station table
+ *        clean.  Moving into RF_KILL state does this also.  Driver must set up
+ *        new station table before transmitting anything on the RXON channel
+ *        (except active scans or active measurements; those commands carry
+ *        their own txpower/rate setup data).
+ *
+ *        When getting started on a new channel, driver must set up the
+ *        IWL_BROADCAST_ID entry (last entry in the table).  For a client
+ *        station in a BSS, once an AP is selected, driver sets up the AP STA
+ *        in the IWL_AP_ID entry (1st entry in the table).  BROADCAST and AP
+ *        are all that are needed for a BSS client station.  If the device is
+ *        used as AP, or in an IBSS network, driver must set up station table
+ *        entries for all STAs in network, starting with index IWL_STA_ID.
+ */
+
+struct iwl3945_addsta_cmd {
+       u8 mode;                /* 1: modify existing, 0: add new station */
+       u8 reserved[3];
+       struct sta_id_modify sta;
+       struct iwl4965_keyinfo key;
+       __le32 station_flags;           /* STA_FLG_* */
+       __le32 station_flags_msk;       /* STA_FLG_* */
+
+       /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
+        * corresponding to bit (e.g. bit 5 controls TID 5).
+        * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
+       __le16 tid_disable_tx;
+
+       __le16 rate_n_flags;
+
+       /* TID for which to add block-ack support.
+        * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+       u8 add_immediate_ba_tid;
+
+       /* TID for which to remove block-ack support.
+        * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
+       u8 remove_immediate_ba_tid;
+
+       /* Starting Sequence Number for added block-ack support.
+        * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+       __le16 add_immediate_ba_ssn;
+} __packed;
+
+struct iwl4965_addsta_cmd {
+       u8 mode;                /* 1: modify existing, 0: add new station */
+       u8 reserved[3];
+       struct sta_id_modify sta;
+       struct iwl4965_keyinfo key;
+       __le32 station_flags;           /* STA_FLG_* */
+       __le32 station_flags_msk;       /* STA_FLG_* */
+
+       /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
+        * corresponding to bit (e.g. bit 5 controls TID 5).
+        * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
+       __le16 tid_disable_tx;
+
+       __le16  reserved1;
+
+       /* TID for which to add block-ack support.
+        * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+       u8 add_immediate_ba_tid;
+
+       /* TID for which to remove block-ack support.
+        * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
+       u8 remove_immediate_ba_tid;
+
+       /* Starting Sequence Number for added block-ack support.
+        * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+       __le16 add_immediate_ba_ssn;
+
+       /*
+        * Number of packets OK to transmit to station even though
+        * it is asleep -- used to synchronise PS-poll and u-APSD
+        * responses while ucode keeps track of STA sleep state.
+        */
+       __le16 sleep_tx_count;
+
+       __le16 reserved2;
+} __packed;
+
+/* Wrapper struct for 3945 and 4965 addsta_cmd structures */
+struct iwl_legacy_addsta_cmd {
+       u8 mode;                /* 1: modify existing, 0: add new station */
+       u8 reserved[3];
+       struct sta_id_modify sta;
+       struct iwl4965_keyinfo key;
+       __le32 station_flags;           /* STA_FLG_* */
+       __le32 station_flags_msk;       /* STA_FLG_* */
+
+       /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
+        * corresponding to bit (e.g. bit 5 controls TID 5).
+        * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
+       __le16 tid_disable_tx;
+
+       __le16  rate_n_flags;           /* 3945 only */
+
+       /* TID for which to add block-ack support.
+        * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+       u8 add_immediate_ba_tid;
+
+       /* TID for which to remove block-ack support.
+        * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
+       u8 remove_immediate_ba_tid;
+
+       /* Starting Sequence Number for added block-ack support.
+        * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+       __le16 add_immediate_ba_ssn;
+
+       /*
+        * Number of packets OK to transmit to station even though
+        * it is asleep -- used to synchronise PS-poll and u-APSD
+        * responses while ucode keeps track of STA sleep state.
+        */
+       __le16 sleep_tx_count;
+
+       __le16 reserved2;
+} __packed;
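+
+/*
+ * Illustrative sketch only:  minimal fields for adding the AP as a new
+ * station via REPLY_ADD_STA, per the NOTE above.  The sta_cmd variable and
+ * bssid address are hypothetical.
+ *
+ *     memset(&sta_cmd, 0, sizeof(sta_cmd));
+ *     sta_cmd.mode = 0;                              (0: add new station)
+ *     memcpy(sta_cmd.sta.addr, bssid, ETH_ALEN);
+ *     sta_cmd.sta.sta_id      = IWL_AP_ID;           (AP uses entry 0)
+ *     sta_cmd.sta.modify_mask = 0;
+ *     sta_cmd.station_flags   = 0;
+ */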
+
+
+#define ADD_STA_SUCCESS_MSK            0x1
+#define ADD_STA_NO_ROOM_IN_TABLE       0x2
+#define ADD_STA_NO_BLOCK_ACK_RESOURCE  0x4
+#define ADD_STA_MODIFY_NON_EXIST_STA   0x8
+/*
+ * REPLY_ADD_STA = 0x18 (response)
+ */
+struct iwl_add_sta_resp {
+       u8 status;      /* ADD_STA_* */
+} __packed;
+
+#define REM_STA_SUCCESS_MSK              0x1
+/*
+ *  REPLY_REM_STA = 0x19 (response)
+ */
+struct iwl_rem_sta_resp {
+       u8 status;
+} __packed;
+
+/*
+ *  REPLY_REM_STA = 0x19 (command)
+ */
+struct iwl_rem_sta_cmd {
+       u8 num_sta;     /* number of removed stations */
+       u8 reserved[3];
+       u8 addr[ETH_ALEN]; /* MAC addr of the first station */
+       u8 reserved2[2];
+} __packed;
+
+#define IWL_TX_FIFO_BK_MSK             cpu_to_le32(BIT(0))
+#define IWL_TX_FIFO_BE_MSK             cpu_to_le32(BIT(1))
+#define IWL_TX_FIFO_VI_MSK             cpu_to_le32(BIT(2))
+#define IWL_TX_FIFO_VO_MSK             cpu_to_le32(BIT(3))
+#define IWL_AGG_TX_QUEUE_MSK           cpu_to_le32(0xffc00)
+
+#define IWL_DROP_SINGLE                0
+#define IWL_DROP_SELECTED      1
+#define IWL_DROP_ALL           2
+
+/*
+ * REPLY_WEP_KEY = 0x20
+ */
+struct iwl_wep_key {
+       u8 key_index;
+       u8 key_offset;
+       u8 reserved1[2];
+       u8 key_size;
+       u8 reserved2[3];
+       u8 key[16];
+} __packed;
+
+struct iwl_wep_cmd {
+       u8 num_keys;
+       u8 global_key_type;
+       u8 flags;
+       u8 reserved;
+       struct iwl_wep_key key[0];
+} __packed;
+
+#define WEP_KEY_WEP_TYPE 1
+#define WEP_KEYS_MAX 4
+#define WEP_INVALID_OFFSET 0xff
+#define WEP_KEY_LEN_64 5
+#define WEP_KEY_LEN_128 13
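+
+/*
+ * Illustrative sketch only:  programming one 104-bit global WEP key with
+ * REPLY_WEP_KEY.  The wep_cmd buffer, key index idx and key_material bytes
+ * are hypothetical.
+ *
+ *     wep_cmd->num_keys          = 1;
+ *     wep_cmd->global_key_type   = WEP_KEY_WEP_TYPE;
+ *     wep_cmd->key[0].key_index  = idx;              (0 .. WEP_KEYS_MAX - 1)
+ *     wep_cmd->key[0].key_offset = idx;
+ *     wep_cmd->key[0].key_size   = WEP_KEY_LEN_128;  (13 bytes)
+ *     memcpy(wep_cmd->key[0].key, key_material, WEP_KEY_LEN_128);
+ */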
+
+/******************************************************************************
+ * (4)
+ * Rx Responses:
+ *
+ *****************************************************************************/
+
+#define RX_RES_STATUS_NO_CRC32_ERROR   cpu_to_le32(1 << 0)
+#define RX_RES_STATUS_NO_RXE_OVERFLOW  cpu_to_le32(1 << 1)
+
+#define RX_RES_PHY_FLAGS_BAND_24_MSK   cpu_to_le16(1 << 0)
+#define RX_RES_PHY_FLAGS_MOD_CCK_MSK           cpu_to_le16(1 << 1)
+#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK    cpu_to_le16(1 << 2)
+#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK       cpu_to_le16(1 << 3)
+#define RX_RES_PHY_FLAGS_ANTENNA_MSK           0xf0
+#define RX_RES_PHY_FLAGS_ANTENNA_POS           4
+
+#define RX_RES_STATUS_SEC_TYPE_MSK     (0x7 << 8)
+#define RX_RES_STATUS_SEC_TYPE_NONE    (0x0 << 8)
+#define RX_RES_STATUS_SEC_TYPE_WEP     (0x1 << 8)
+#define RX_RES_STATUS_SEC_TYPE_CCMP    (0x2 << 8)
+#define RX_RES_STATUS_SEC_TYPE_TKIP    (0x3 << 8)
+#define        RX_RES_STATUS_SEC_TYPE_ERR      (0x7 << 8)
+
+#define RX_RES_STATUS_STATION_FOUND    (1<<6)
+#define RX_RES_STATUS_NO_STATION_INFO_MISMATCH (1<<7)
+
+#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11)
+#define RX_RES_STATUS_NOT_DECRYPT      (0x0 << 11)
+#define RX_RES_STATUS_DECRYPT_OK       (0x3 << 11)
+#define RX_RES_STATUS_BAD_ICV_MIC      (0x1 << 11)
+#define RX_RES_STATUS_BAD_KEY_TTAK     (0x2 << 11)
+
+#define RX_MPDU_RES_STATUS_ICV_OK      (0x20)
+#define RX_MPDU_RES_STATUS_MIC_OK      (0x40)
+#define RX_MPDU_RES_STATUS_TTAK_OK     (1 << 7)
+#define RX_MPDU_RES_STATUS_DEC_DONE_MSK        (0x800)
+
+
+struct iwl3945_rx_frame_stats {
+       u8 phy_count;
+       u8 id;
+       u8 rssi;
+       u8 agc;
+       __le16 sig_avg;
+       __le16 noise_diff;
+       u8 payload[0];
+} __packed;
+
+struct iwl3945_rx_frame_hdr {
+       __le16 channel;
+       __le16 phy_flags;
+       u8 reserved1;
+       u8 rate;
+       __le16 len;
+       u8 payload[0];
+} __packed;
+
+struct iwl3945_rx_frame_end {
+       __le32 status;
+       __le64 timestamp;
+       __le32 beacon_timestamp;
+} __packed;
+
+/*
+ * REPLY_3945_RX = 0x1b (response only, not a command)
+ *
+ * NOTE:  DO NOT dereference from casts to this structure.
+ * It is provided only for calculating minimum data set size.
+ * The actual offsets of the hdr and end are dynamic based on
+ * stats.phy_count
+ */
+struct iwl3945_rx_frame {
+       struct iwl3945_rx_frame_stats stats;
+       struct iwl3945_rx_frame_hdr hdr;
+       struct iwl3945_rx_frame_end end;
+} __packed;
+
+#define IWL39_RX_FRAME_SIZE    (4 + sizeof(struct iwl3945_rx_frame))
+
+/* Fixed (non-configurable) rx data from phy */
+
+#define IWL49_RX_RES_PHY_CNT 14
+#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET     (4)
+#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK       (0x70)
+#define IWL49_AGC_DB_MASK                      (0x3f80)        /* MASK(7,13) */
+#define IWL49_AGC_DB_POS                       (7)
+struct iwl4965_rx_non_cfg_phy {
+       __le16 ant_selection;   /* ant A bit 4, ant B bit 5, ant C bit 6 */
+       __le16 agc_info;        /* agc code 0:6, agc dB 7:13, reserved 14:15 */
+       u8 rssi_info[6];        /* we use even entries, 0/2/4 for A/B/C rssi */
+       u8 pad[0];
+} __packed;
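+
+/*
+ * Illustrative sketch only:  extracting AGC and per-chain RSSI from the
+ * non-configurable phy data above.  The ncphy pointer is hypothetical.
+ *
+ *     u32 agc    = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK) >>
+ *                  IWL49_AGC_DB_POS;
+ *     u8  rssi_a = ncphy->rssi_info[0];              (even entries: A/B/C)
+ *     u8  rssi_b = ncphy->rssi_info[2];
+ *     u8  rssi_c = ncphy->rssi_info[4];
+ */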
+
+
+/*
+ * REPLY_RX = 0xc3 (response only, not a command)
+ * Used only for legacy (non 11n) frames.
+ */
+struct iwl_rx_phy_res {
+       u8 non_cfg_phy_cnt;     /* non configurable DSP phy data byte count */
+       u8 cfg_phy_cnt;         /* configurable DSP phy data byte count */
+       u8 stat_id;             /* configurable DSP phy data set ID */
+       u8 reserved1;
+       __le64 timestamp;       /* TSF at on air rise */
+       __le32 beacon_time_stamp; /* beacon at on-air rise */
+       __le16 phy_flags;       /* general phy flags: band, modulation, ... */
+       __le16 channel;         /* channel number */
+       u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
+       __le32 rate_n_flags;    /* RATE_MCS_* */
+       __le16 byte_count;      /* frame's byte-count */
+       __le16 frame_time;      /* frame's time on the air */
+} __packed;
+
+struct iwl_rx_mpdu_res_start {
+       __le16 byte_count;
+       __le16 reserved;
+} __packed;
+
+
+/******************************************************************************
+ * (5)
+ * Tx Commands & Responses:
+ *
+ * Driver must place each REPLY_TX command into one of the prioritized Tx
+ * queues in host DRAM, shared between driver and device (see comments for
+ * SCD registers and Tx/Rx Queues).  When the device's Tx scheduler and uCode
+ * are preparing to transmit, the device pulls the Tx command over the PCI
+ * bus via one of the device's Tx DMA channels, to fill an internal FIFO
+ * from which data will be transmitted.
+ *
+ * uCode handles all timing and protocol related to control frames
+ * (RTS/CTS/ACK), based on flags in the Tx command.  uCode and Tx scheduler
+ * handle reception of block-acks; uCode updates the host driver via
+ * REPLY_COMPRESSED_BA.
+ *
+ * uCode handles retrying Tx when an ACK is expected but not received.
+ * This includes trying lower data rates than the one requested in the Tx
+ * command, as set up by the REPLY_RATE_SCALE (for 3945) or
+ * REPLY_TX_LINK_QUALITY_CMD (4965).
+ *
+ * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
+ * This command must be executed after every RXON command, before Tx can occur.
+ *****************************************************************************/
+
+/* REPLY_TX Tx flags field */
+
+/*
+ * 1: Use Request-To-Send protocol before this frame.
+ * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
+ */
+#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
+
+/*
+ * 1: Transmit Clear-To-Send to self before this frame.
+ * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
+ * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
+ */
+#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
+
+/* 1: Expect ACK from receiving station
+ * 0: Don't expect ACK (MAC header's duration field s/b 0)
+ * Set this for unicast frames, but not broadcast/multicast. */
+#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
+
+/* For 4965 devices:
+ * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
+ *    Tx command's initial_rate_index indicates first rate to try;
+ *    uCode walks through table for additional Tx attempts.
+ * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
+ *    This rate will be used for all Tx attempts; it will not be scaled. */
+#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4)
+
+/* 1: Expect immediate block-ack.
+ * Set when Txing a block-ack request frame.  Also set TX_CMD_FLG_ACK_MSK. */
+#define TX_CMD_FLG_IMM_BA_RSP_MASK  cpu_to_le32(1 << 6)
+
+/*
+ * 1: Frame requires full Tx-Op protection.
+ * Set this if either RTS or CTS Tx Flag gets set.
+ */
+#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
+
+/* Tx antenna selection field; used only for 3945, reserved (0) for 4965 devices.
+ * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
+#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
+#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
+#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
+
+/* 1: uCode overrides sequence control field in MAC header.
+ * 0: Driver provides sequence control field in MAC header.
+ * Set this for management frames, non-QOS data frames, non-unicast frames,
+ * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
+#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
+
+/* 1: This frame is non-last MPDU; more fragments are coming.
+ * 0: Last fragment, or not using fragmentation. */
+#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14)
+
+/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame.
+ * 0: No TSF required in outgoing frame.
+ * Set this for transmitting beacons and probe responses. */
+#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16)
+
+/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword
+ *    alignment of frame's payload data field.
+ * 0: No pad
+ * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4
+ * field (but not both).  Driver must align frame data (i.e. data following
+ * MAC header) to DWORD boundary. */
+#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20)
+
+/* accelerate aggregation support
+ * 0 - no CCMP encryption; 1 - CCMP encryption */
+#define TX_CMD_FLG_AGG_CCMP_MSK cpu_to_le32(1 << 22)
+
+/* HCCA-AP - disable duration overwriting. */
+#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
+
+
+/*
+ * TX command security control
+ */
+#define TX_CMD_SEC_WEP         0x01
+#define TX_CMD_SEC_CCM         0x02
+#define TX_CMD_SEC_TKIP                0x03
+#define TX_CMD_SEC_MSK         0x03
+#define TX_CMD_SEC_SHIFT       6
+#define TX_CMD_SEC_KEY128      0x08
+
+/*
+ * security overhead sizes
+ */
+#define WEP_IV_LEN 4
+#define WEP_ICV_LEN 4
+#define CCMP_MIC_LEN 8
+#define TKIP_ICV_LEN 4
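+
+/*
+ * Illustrative sketch only:  composing the Tx command sec_ctl byte for WEP,
+ * with the key index shifted by TX_CMD_SEC_SHIFT.  keyidx and keylen are
+ * hypothetical.
+ *
+ *     sec_ctl = TX_CMD_SEC_WEP |
+ *               ((keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
+ *     if (keylen == WEP_KEY_LEN_128)
+ *             sec_ctl |= TX_CMD_SEC_KEY128;
+ *     (for CCMP, use TX_CMD_SEC_CCM and copy the 16-byte key into key[])
+ */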
+
+/*
+ * REPLY_TX = 0x1c (command)
+ */
+
+struct iwl3945_tx_cmd {
+       /*
+        * MPDU byte count:
+        * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
+        * + 8 byte IV for CCM or TKIP (not used for WEP)
+        * + Data payload
+        * + 8-byte MIC (not used for CCM/WEP)
+        * NOTE:  Does not include Tx command bytes, post-MAC pad bytes,
+        *        MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+        * Range: 14-2342 bytes.
+        */
+       __le16 len;
+
+       /*
+        * MPDU or MSDU byte count for next frame.
+        * Used for fragmentation and bursting, but not 11n aggregation.
+        * Same as "len", but for next frame.  Set to 0 if not applicable.
+        */
+       __le16 next_frame_len;
+
+       __le32 tx_flags;        /* TX_CMD_FLG_* */
+
+       u8 rate;
+
+       /* Index of recipient station in uCode's station table */
+       u8 sta_id;
+       u8 tid_tspec;
+       u8 sec_ctl;
+       u8 key[16];
+       union {
+               u8 byte[8];
+               __le16 word[4];
+               __le32 dw[2];
+       } tkip_mic;
+       __le32 next_frame_info;
+       union {
+               __le32 life_time;
+               __le32 attempt;
+       } stop_time;
+       u8 supp_rates[2];
+       u8 rts_retry_limit;     /*byte 50 */
+       u8 data_retry_limit;    /*byte 51 */
+       union {
+               __le16 pm_frame_timeout;
+               __le16 attempt_duration;
+       } timeout;
+
+       /*
+        * Duration of EDCA burst Tx Opportunity, in 32-usec units.
+        * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
+        */
+       __le16 driver_txop;
+
+       /*
+        * MAC header goes here, followed by 2 bytes padding if MAC header
+        * length is 26 or 30 bytes, followed by payload data
+        */
+       u8 payload[0];
+       struct ieee80211_hdr hdr[0];
+} __packed;
+
+/*
+ * REPLY_TX = 0x1c (response)
+ */
+struct iwl3945_tx_resp {
+       u8 failure_rts;
+       u8 failure_frame;
+       u8 bt_kill_count;
+       u8 rate;
+       __le32 wireless_media_time;
+       __le32 status;          /* TX status */
+} __packed;
+
+
+/*
+ * 4965 uCode updates these Tx attempt count values in host DRAM.
+ * Used for managing Tx retries when expecting block-acks.
+ * Driver should set these fields to 0.
+ */
+struct iwl_dram_scratch {
+       u8 try_cnt;             /* Tx attempts */
+       u8 bt_kill_cnt;         /* Tx attempts blocked by Bluetooth device */
+       __le16 reserved;
+} __packed;
+
+struct iwl_tx_cmd {
+       /*
+        * MPDU byte count:
+        * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
+        * + 8 byte IV for CCM or TKIP (not used for WEP)
+        * + Data payload
+        * + 8-byte MIC (not used for CCM/WEP)
+        * NOTE:  Does not include Tx command bytes, post-MAC pad bytes,
+        *        MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+        * Range: 14-2342 bytes.
+        */
+       __le16 len;
+
+       /*
+        * MPDU or MSDU byte count for next frame.
+        * Used for fragmentation and bursting, but not 11n aggregation.
+        * Same as "len", but for next frame.  Set to 0 if not applicable.
+        */
+       __le16 next_frame_len;
+
+       __le32 tx_flags;        /* TX_CMD_FLG_* */
+
+       /* uCode may modify this field of the Tx command (in host DRAM!).
+        * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
+       struct iwl_dram_scratch scratch;
+
+       /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
+       __le32 rate_n_flags;    /* RATE_MCS_* */
+
+       /* Index of destination station in uCode's station table */
+       u8 sta_id;
+
+       /* Type of security encryption:  CCM or TKIP */
+       u8 sec_ctl;             /* TX_CMD_SEC_* */
+
+       /*
+        * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
+        * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set.  Normally "0" for
+        * data frames, this field may be used to selectively reduce initial
+        * rate (via non-0 value) for special frames (e.g. management), while
+        * still supporting rate scaling for all frames.
+        */
+       u8 initial_rate_index;
+       u8 reserved;
+       u8 key[16];
+       __le16 next_frame_flags;
+       __le16 reserved2;
+       union {
+               __le32 life_time;
+               __le32 attempt;
+       } stop_time;
+
+       /* Host DRAM physical address pointer to "scratch" in this command.
+        * Must be dword aligned.  "0" in dram_lsb_ptr disables usage. */
+       __le32 dram_lsb_ptr;
+       u8 dram_msb_ptr;
+
+       u8 rts_retry_limit;     /*byte 50 */
+       u8 data_retry_limit;    /*byte 51 */
+       u8 tid_tspec;
+       union {
+               __le16 pm_frame_timeout;
+               __le16 attempt_duration;
+       } timeout;
+
+       /*
+        * Duration of EDCA burst Tx Opportunity, in 32-usec units.
+        * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
+        */
+       __le16 driver_txop;
+
+       /*
+        * MAC header goes here, followed by 2 bytes padding if MAC header
+        * length is 26 or 30 bytes, followed by payload data
+        */
+       u8 payload[0];
+       struct ieee80211_hdr hdr[0];
+} __packed;
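+
+/*
+ * Illustrative sketch only:  the minimum a driver fills in struct iwl_tx_cmd
+ * for a unicast data frame that uses the rate-scale table.  Variable names
+ * are hypothetical; the MAC header and payload follow the command.
+ *
+ *     tx_cmd->len      = cpu_to_le16(mpdu_len);      (MAC header + payload)
+ *     tx_cmd->tx_flags = TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_STA_RATE_MSK;
+ *     tx_cmd->sta_id   = sta_id;
+ *     tx_cmd->initial_rate_index = 0;                (normal data frames)
+ *     (also point dram_lsb_ptr/dram_msb_ptr at the scratch field, per the
+ *      comment on struct iwl_dram_scratch above)
+ */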
+
+/* TX command response is sent after *3945* transmission attempts.
+ *
+ * NOTES:
+ *
+ * TX_STATUS_FAIL_NEXT_FRAG
+ *
+ * If the fragment flag in the MAC header for the frame being transmitted
+ * is set and there is insufficient time to transmit the next frame, the
+ * TX status will be returned with 'TX_STATUS_FAIL_NEXT_FRAG'.
+ *
+ * TX_STATUS_FIFO_UNDERRUN
+ *
+ * Indicates the host did not provide bytes to the FIFO fast enough while
+ * a TX was in progress.
+ *
+ * TX_STATUS_FAIL_MGMNT_ABORT
+ *
+ * This status is only possible if the ABORT ON MGMT RX parameter was
+ * set to true with the TX command.
+ *
+ * If the MSB of the status parameter is set then an abort sequence is
+ * required.  This sequence consists of the host activating the TX Abort
+ * control line, and then waiting for the TX Abort command response.  This
+ * indicates that the device is no longer in a transmit state, and that the
+ * command FIFO has been cleared.  The host must then deactivate the TX Abort
+ * control line.  Receiving is still allowed in this case.
+ */
+enum {
+       TX_3945_STATUS_SUCCESS = 0x01,
+       TX_3945_STATUS_DIRECT_DONE = 0x02,
+       TX_3945_STATUS_FAIL_SHORT_LIMIT = 0x82,
+       TX_3945_STATUS_FAIL_LONG_LIMIT = 0x83,
+       TX_3945_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
+       TX_3945_STATUS_FAIL_MGMNT_ABORT = 0x85,
+       TX_3945_STATUS_FAIL_NEXT_FRAG = 0x86,
+       TX_3945_STATUS_FAIL_LIFE_EXPIRE = 0x87,
+       TX_3945_STATUS_FAIL_DEST_PS = 0x88,
+       TX_3945_STATUS_FAIL_ABORTED = 0x89,
+       TX_3945_STATUS_FAIL_BT_RETRY = 0x8a,
+       TX_3945_STATUS_FAIL_STA_INVALID = 0x8b,
+       TX_3945_STATUS_FAIL_FRAG_DROPPED = 0x8c,
+       TX_3945_STATUS_FAIL_TID_DISABLE = 0x8d,
+       TX_3945_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
+       TX_3945_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
+       TX_3945_STATUS_FAIL_TX_LOCKED = 0x90,
+       TX_3945_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
+};
+
+/*
+ * TX command response is sent after *4965* transmission attempts.
+ *
+ * Both postpone and abort statuses are expected behavior from uCode; no
+ * special operation is required from the driver, except for RFKILL_FLUSH,
+ * which requires a tx flush host command to flush all the tx frames in
+ * the queues.
+ */
+enum {
+       TX_STATUS_SUCCESS = 0x01,
+       TX_STATUS_DIRECT_DONE = 0x02,
+       /* postpone TX */
+       TX_STATUS_POSTPONE_DELAY = 0x40,
+       TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
+       TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
+       TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
+       /* abort TX */
+       TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
+       TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
+       TX_STATUS_FAIL_LONG_LIMIT = 0x83,
+       TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
+       TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
+       TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
+       TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
+       TX_STATUS_FAIL_DEST_PS = 0x88,
+       TX_STATUS_FAIL_HOST_ABORTED = 0x89,
+       TX_STATUS_FAIL_BT_RETRY = 0x8a,
+       TX_STATUS_FAIL_STA_INVALID = 0x8b,
+       TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
+       TX_STATUS_FAIL_TID_DISABLE = 0x8d,
+       TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
+       TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
+       TX_STATUS_FAIL_PASSIVE_NO_RX = 0x90,
+       TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
+};
+
+#define        TX_PACKET_MODE_REGULAR          0x0000
+#define        TX_PACKET_MODE_BURST_SEQ        0x0100
+#define        TX_PACKET_MODE_BURST_FIRST      0x0200
+
+enum {
+       TX_POWER_PA_NOT_ACTIVE = 0x0,
+};
+
+enum {
+       TX_STATUS_MSK = 0x000000ff,             /* bits 0:7 */
+       TX_STATUS_DELAY_MSK = 0x00000040,
+       TX_STATUS_ABORT_MSK = 0x00000080,
+       TX_PACKET_MODE_MSK = 0x0000ff00,        /* bits 8:15 */
+       TX_FIFO_NUMBER_MSK = 0x00070000,        /* bits 16:18 */
+       TX_RESERVED = 0x00780000,               /* bits 19:22 */
+       TX_POWER_PA_DETECT_MSK = 0x7f800000,    /* bits 23:30 */
+       TX_ABORT_REQUIRED_MSK = 0x80000000,     /* bits 31:31 */
+};
+
+/* *******************************
+ * TX aggregation status
+ ******************************* */
+
+enum {
+       AGG_TX_STATE_TRANSMITTED = 0x00,
+       AGG_TX_STATE_UNDERRUN_MSK = 0x01,
+       AGG_TX_STATE_FEW_BYTES_MSK = 0x04,
+       AGG_TX_STATE_ABORT_MSK = 0x08,
+       AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10,
+       AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20,
+       AGG_TX_STATE_SCD_QUERY_MSK = 0x80,
+       AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100,
+       AGG_TX_STATE_RESPONSE_MSK = 0x1ff,
+       AGG_TX_STATE_DUMP_TX_MSK = 0x200,
+       AGG_TX_STATE_DELAY_TX_MSK = 0x400
+};
+
+#define AGG_TX_STATUS_MSK      0x00000fff      /* bits 0:11 */
+#define AGG_TX_TRY_MSK         0x0000f000      /* bits 12:15 */
+
+#define AGG_TX_STATE_LAST_SENT_MSK  (AGG_TX_STATE_LAST_SENT_TTL_MSK | \
+                                    AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK)
+
+/* # tx attempts for first frame in aggregation */
+#define AGG_TX_STATE_TRY_CNT_POS 12
+#define AGG_TX_STATE_TRY_CNT_MSK 0xf000
+
+/* Command ID and sequence number of Tx command for this frame */
+#define AGG_TX_STATE_SEQ_NUM_POS 16
+#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
+
+/*
+ * REPLY_TX = 0x1c (response)
+ *
+ * This response may be in one of two slightly different formats, indicated
+ * by the frame_count field:
+ *
+ * 1)  No aggregation (frame_count == 1).  This reports Tx results for
+ *     a single frame.  Multiple attempts, at various bit rates, may have
+ *     been made for this frame.
+ *
+ * 2)  Aggregation (frame_count > 1).  This reports Tx results for
+ *     2 or more frames that used block-acknowledge.  All frames were
+ *     transmitted at same rate.  Rate scaling may have been used if first
+ *     frame in this new agg block failed in previous agg block(s).
+ *
+ *     Note that, for aggregation, ACK (block-ack) status is not delivered here;
+ *     block-ack has not been received by the time the 4965 device records
+ *     this status.
+ *     This status relates to reasons the tx might have been blocked or aborted
+ *     within the sending station (this 4965 device), rather than whether it was
+ *     received successfully by the destination station.
+ */
+struct agg_tx_status {
+       __le16 status;
+       __le16 sequence;
+} __packed;
+
+struct iwl4965_tx_resp {
+       u8 frame_count;         /* 1 no aggregation, >1 aggregation */
+       u8 bt_kill_count;       /* # blocked by bluetooth (unused for agg) */
+       u8 failure_rts;         /* # failures due to unsuccessful RTS */
+       u8 failure_frame;       /* # failures due to no ACK (unused for agg) */
+
+       /* For non-agg:  Rate at which frame was successful.
+        * For agg:  Rate at which all frames were transmitted. */
+       __le32 rate_n_flags;    /* RATE_MCS_*  */
+
+       /* For non-agg:  RTS + CTS + frame tx attempts time + ACK.
+        * For agg:  RTS + CTS + aggregation tx time + block-ack time. */
+       __le16 wireless_media_time;     /* uSecs */
+
+       __le16 reserved;
+       __le32 pa_power1;       /* RF power amplifier measurement (not used) */
+       __le32 pa_power2;
+
+       /*
+        * For non-agg:  frame status TX_STATUS_*
+        * For agg:  status of 1st frame, AGG_TX_STATE_*; other frame status
+        *           fields follow this one, up to frame_count.
+        *           Bit fields:
+        *           11- 0:  AGG_TX_STATE_* status code
+        *           15-12:  Retry count for 1st frame in aggregation (retries
+        *                   occur if tx failed for this frame when it was a
+        *                   member of a previous aggregation block).  If rate
+        *                   scaling is used, retry count indicates the rate
+        *                   table entry used for all frames in the new agg.
+        *           31-16:  Sequence # for this frame's Tx cmd (not SSN!)
+        */
+       union {
+               __le32 status;
+               struct agg_tx_status agg_status[0]; /* for each agg frame */
+       } u;
+} __packed;
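+
+/*
+ * Illustrative sketch only:  decoding one aggregation status word with the
+ * bit layout documented above.  tx_resp and i are hypothetical.
+ *
+ *     u16 status  = le16_to_cpu(tx_resp->u.agg_status[i].status);
+ *     u16 state   = status & AGG_TX_STATUS_MSK;      (AGG_TX_STATE_*)
+ *     u16 try_cnt = (status & AGG_TX_TRY_MSK) >> AGG_TX_STATE_TRY_CNT_POS;
+ *     u16 seq     = le16_to_cpu(tx_resp->u.agg_status[i].sequence);
+ */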
+
+/*
+ * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
+ *
+ * Reports Block-Acknowledge from recipient station
+ */
+struct iwl_compressed_ba_resp {
+       __le32 sta_addr_lo32;
+       __le16 sta_addr_hi16;
+       __le16 reserved;
+
+       /* Index of recipient (BA-sending) station in uCode's station table */
+       u8 sta_id;
+       u8 tid;
+       __le16 seq_ctl;
+       __le64 bitmap;
+       __le16 scd_flow;
+       __le16 scd_ssn;
+} __packed;
+
+/*
+ * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
+ *
+ * See details under "TXPOWER" in iwl-4965-hw.h.
+ */
+
+struct iwl3945_txpowertable_cmd {
+       u8 band;                /* 0: 5 GHz, 1: 2.4 GHz */
+       u8 reserved;
+       __le16 channel;
+       struct iwl3945_power_per_rate power[IWL_MAX_RATES];
+} __packed;
+
+struct iwl4965_txpowertable_cmd {
+       u8 band;                /* 0: 5 GHz, 1: 2.4 GHz */
+       u8 reserved;
+       __le16 channel;
+       struct iwl4965_tx_power_db tx_power;
+} __packed;
+
+
+/**
+ * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response
+ *
+ * REPLY_RATE_SCALE = 0x47 (command, has simple generic response)
+ *
+ * NOTE: The table of rates passed to the uCode via the
+ * RATE_SCALE command sets up the corresponding order of
+ * rates used for all related commands, including rate
+ * masks, etc.
+ *
+ * For example, if you set 9 Mbps (PLCP 0x0f) as the first
+ * rate in the rate table, the bit mask for that rate
+ * when passed through ofdm_basic_rates on the REPLY_RXON
+ * command would be bit 0 (1 << 0)
+ */
+struct iwl3945_rate_scaling_info {
+       __le16 rate_n_flags;
+       u8 try_cnt;
+       u8 next_rate_index;
+} __packed;
+
+struct iwl3945_rate_scaling_cmd {
+       u8 table_id;
+       u8 reserved[3];
+       struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
+} __packed;
+
+
+/* RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
+#define  LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK   (1 << 0)
+
+/* # of EDCA prioritized tx fifos */
+#define  LINK_QUAL_AC_NUM AC_NUM
+
+/* # entries in rate scale table to support Tx retries */
+#define  LINK_QUAL_MAX_RETRY_NUM 16
+
+/* Tx antenna selection values */
+#define  LINK_QUAL_ANT_A_MSK (1 << 0)
+#define  LINK_QUAL_ANT_B_MSK (1 << 1)
+#define  LINK_QUAL_ANT_MSK   (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
+
+
+/**
+ * struct iwl_link_qual_general_params
+ *
+ * Used in REPLY_TX_LINK_QUALITY_CMD
+ */
+struct iwl_link_qual_general_params {
+       u8 flags;
+
+       /* No entries at or above this (driver chosen) index contain MIMO */
+       u8 mimo_delimiter;
+
+       /* Best single antenna to use for single stream (legacy, SISO). */
+       u8 single_stream_ant_msk;       /* LINK_QUAL_ANT_* */
+
+       /* Best antennas to use for MIMO (unused for 4965, assumes both). */
+       u8 dual_stream_ant_msk;         /* LINK_QUAL_ANT_* */
+
+       /*
+        * If driver needs to use different initial rates for different
+        * EDCA QOS access categories (as implemented by tx fifos 0-3),
+        * this table will set that up, by indicating the indexes in the
+        * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
+        * Otherwise, driver should set all entries to 0.
+        *
+        * Entry usage:
+        * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
+        * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
+        */
+       u8 start_rate_index[LINK_QUAL_AC_NUM];
+} __packed;
+
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF   (4000) /* 4 milliseconds */
+#define LINK_QUAL_AGG_TIME_LIMIT_MAX   (8000)
+#define LINK_QUAL_AGG_TIME_LIMIT_MIN   (100)
+
+#define LINK_QUAL_AGG_DISABLE_START_DEF        (3)
+#define LINK_QUAL_AGG_DISABLE_START_MAX        (255)
+#define LINK_QUAL_AGG_DISABLE_START_MIN        (0)
+
+#define LINK_QUAL_AGG_FRAME_LIMIT_DEF  (31)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MAX  (63)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MIN  (0)
+
+/**
+ * struct iwl_link_qual_agg_params
+ *
+ * Used in REPLY_TX_LINK_QUALITY_CMD
+ */
+struct iwl_link_qual_agg_params {
+
+       /*
+        * Maximum number of uSec in aggregation.
+        * default set to 4000 (4 milliseconds) if not configured in .cfg
+        */
+       __le16 agg_time_limit;
+
+       /*
+        * Number of Tx retries allowed for a frame, before that frame will
+        * no longer be considered for the start of an aggregation sequence
+        * (scheduler will then try to tx it as single frame).
+        * Driver should set this to 3.
+        */
+       u8 agg_dis_start_th;
+
+       /*
+        * Maximum number of frames in aggregation.
+        * 0 = no limit (default).  1 = no aggregation.
+        * Other values = max # frames in aggregation.
+        */
+       u8 agg_frame_cnt_limit;
+
+       __le32 reserved;
+} __packed;
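+
+/*
+ * Illustrative sketch only, not part of the driver or uCode API: filling
+ * the aggregation parameters with the DEF values defined above.
+ */
+static inline void il_sketch_fill_agg_params(struct iwl_link_qual_agg_params *agg)
+{
+        agg->agg_time_limit = cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+        agg->agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+        agg->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+        agg->reserved = cpu_to_le32(0);
+}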
+
+/*
+ * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
+ *
+ * For 4965 devices only; 3945 uses REPLY_RATE_SCALE.
+ *
+ * Each station in the 4965 device's internal station table has its own table
+ * of 16 Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for retrying
+ * Tx when an ACK is not received.  This command replaces the entire table
+ * for one station.
+ *
+ * NOTE:  Station must already be in 4965 device's station table.
+ *       Use REPLY_ADD_STA.
+ *
+ * The rate scaling procedures described below work well.  Of course, other
+ * procedures are possible, and may work better for particular environments.
+ *
+ *
+ * FILLING THE RATE TABLE
+ *
+ * Given a particular initial rate and mode, as determined by the rate
+ * scaling algorithm described below, the Linux driver uses the following
+ * formula to fill the rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table in the
+ * Link Quality command:
+ *
+ *
+ * 1)  If using High-throughput (HT) (SISO or MIMO) initial rate:
+ *     a) Use this same initial rate for first 3 entries.
+ *     b) Find next lower available rate using same mode (SISO or MIMO),
+ *        use for next 3 entries.  If no lower rate available, switch to
+ *        legacy mode (no HT40 channel, no MIMO, no short guard interval).
+ *     c) If using MIMO, set command's mimo_delimiter to number of entries
+ *        using MIMO (3 or 6).
+ *     d) After trying 2 HT rates, switch to legacy mode (no HT40 channel,
+ *        no MIMO, no short guard interval), at the next lower bit rate
+ *        (e.g. if second HT bit rate was 54, try 48 legacy), and follow
+ *        legacy procedure for remaining table entries.
+ *
+ * 2)  If using legacy initial rate:
+ *     a) Use the initial rate for only one entry.
+ *     b) For each following entry, reduce the rate to next lower available
+ *        rate, until reaching the lowest available rate.
+ *     c) When reducing rate, also switch antenna selection.
+ *     d) Once lowest available rate is reached, repeat this rate until
+ *        rate table is filled (16 entries), switching antenna each entry.
+ *
+ *
+ * ACCUMULATING HISTORY
+ *
+ * The rate scaling algorithm for 4965 devices, as implemented in Linux driver,
+ * uses two sets of frame Tx success history:  One for the current/active
+ * modulation mode, and one for a speculative/search mode that is being
+ * attempted. If the speculative mode turns out to be more effective (i.e.
+ * actual transfer rate is better), then the driver continues to use the
+ * speculative mode as the new current active mode.
+ *
+ * Each history set contains, separately for each possible rate, data for a
+ * sliding window of the 62 most recent tx attempts at that rate.  The data
+ * includes a shifting bitmap of success(1)/failure(0), and sums of successful
+ * and attempted frames, from which the driver can additionally calculate a
+ * success ratio (success / attempted) and number of failures
+ * (attempted - success), and control the size of the window (attempted).
+ * The driver uses the bit map to remove successes from the success sum, as
+ * the oldest tx attempts fall out of the window.
+ *
+ * When the 4965 device makes multiple tx attempts for a given frame, each
+ * attempt might be at a different rate, and have different modulation
+ * characteristics (e.g. antenna, fat channel, short guard interval), as set
+ * up in the rate scaling table in the Link Quality command.  The driver must
+ * determine which rate table entry was used for each tx attempt, to determine
+ * which rate-specific history to update, and record only those attempts that
+ * match the modulation characteristics of the history set.
+ *
+ * When using block-ack (aggregation), all frames are transmitted at the same
+ * rate, since there is no per-attempt acknowledgment from the destination
+ * station.  The Tx response struct iwl_tx_resp indicates the Tx rate in
+ * rate_n_flags field.  After receiving a block-ack, the driver can update
+ * history for the entire block all at once.
+ *
+ *
+ * FINDING BEST STARTING RATE:
+ *
+ * When working with a selected initial modulation mode (see below), the
+ * driver attempts to find a best initial rate.  The initial rate is the
+ * first entry in the Link Quality command's rate table.
+ *
+ * 1)  Calculate actual throughput (success ratio * expected throughput, see
+ *     table below) for current initial rate.  Do this only if enough frames
+ *     have been attempted to make the value meaningful:  at least 6 failed
+ *     tx attempts, or at least 8 successes.  If not enough, don't try rate
+ *     scaling yet.
+ *
+ * 2)  Find available rates adjacent to current initial rate.  Available means:
+ *     a)  supported by hardware &&
+ *     b)  supported by association &&
+ *     c)  within any constraints selected by user
+ *
+ * 3)  Gather measured throughputs for adjacent rates.  These might not have
+ *     enough history to calculate a throughput.  That's okay, we might try
+ *     using one of them anyway!
+ *
+ * 4)  Try decreasing rate if, for current rate:
+ *     a)  success ratio is < 15% ||
+ *     b)  lower adjacent rate has better measured throughput ||
+ *     c)  higher adjacent rate has worse throughput, and lower is unmeasured
+ *
+ *     As a sanity check, if decrease was determined above, leave rate
+ *     unchanged if:
+ *     a)  lower rate unavailable
+ *     b)  success ratio at current rate > 85% (very good)
+ *     c)  current measured throughput is better than expected throughput
+ *         of lower rate (under perfect 100% tx conditions, see table below)
+ *
+ * 5)  Try increasing rate if, for current rate:
+ *     a)  success ratio is < 15% ||
+ *     b)  both adjacent rates' throughputs are unmeasured (try it!) ||
+ *     c)  higher adjacent rate has better measured throughput ||
+ *     d)  lower adjacent rate has worse throughput, and higher is unmeasured
+ *
+ *     As a sanity check, if increase was determined above, leave rate
+ *     unchanged if:
+ *     a)  success ratio at current rate < 70%.  This is not particularly
+ *         good performance; higher rate is sure to have poorer success.
+ *
+ * 6)  Re-evaluate the rate after each tx frame.  If working with block-
+ *     acknowledge, history and statistics may be calculated for the entire
+ *     block (including prior history that fits within the history windows),
+ *     before re-evaluation.
+ *
+ * FINDING BEST STARTING MODULATION MODE:
+ *
+ * After working with a modulation mode for a "while" (and doing rate scaling),
+ * the driver searches for a new initial mode in an attempt to improve
+ * throughput.  The "while" is measured by numbers of attempted frames:
+ *
+ * For legacy mode, search for new mode after:
+ *   480 successful frames, or 160 failed frames
+ * For high-throughput modes (SISO or MIMO), search for new mode after:
+ *   4500 successful frames, or 400 failed frames
+ *
+ * Mode switch possibilities are (3 for each mode):
+ *
+ * For legacy:
+ *   Change antenna, try SISO (if HT association), try MIMO (if HT association)
+ * For SISO:
+ *   Change antenna, try MIMO, try shortened guard interval (SGI)
+ * For MIMO:
+ *   Try SISO antenna A, SISO antenna B, try shortened guard interval (SGI)
+ *
+ * When trying a new mode, use the same bit rate as the old/current mode when
+ * trying antenna switches and shortened guard interval.  When switching to
+ * SISO from MIMO or legacy, or to MIMO from SISO or legacy, use a rate
+ * for which the expected throughput (under perfect conditions) is about the
+ * same or slightly better than the actual measured throughput delivered by
+ * the old/current mode.
+ *
+ * Actual throughput can be estimated by multiplying the expected throughput
+ * by the success ratio (successful / attempted tx frames).  Frame size is
+ * not considered in this calculation; it assumes that frame size will average
+ * out to be fairly consistent over several samples.  The following are
+ * metric values for expected throughput assuming 100% success ratio.
+ * Only G band has support for CCK rates:
+ *
+ *           RATE:  1    2    5   11    6   9   12   18   24   36   48   54   60
+ *
+ *              G:  7   13   35   58   40  57   72   98  121  154  177  186  186
+ *              A:  0    0    0    0   40  57   72   98  121  154  177  186  186
+ *     SISO 20MHz:  0    0    0    0   42  42   76  102  124  159  183  193  202
+ * SGI SISO 20MHz:  0    0    0    0   46  46   82  110  132  168  192  202  211
+ *     MIMO 20MHz:  0    0    0    0   74  74  123  155  179  214  236  244  251
+ * SGI MIMO 20MHz:  0    0    0    0   81  81  131  164  188  222  243  251  257
+ *     SISO 40MHz:  0    0    0    0   77  77  127  160  184  220  242  250  257
+ * SGI SISO 40MHz:  0    0    0    0   83  83  135  169  193  229  250  257  264
+ *     MIMO 40MHz:  0    0    0    0  123 123  182  214  235  264  279  285  289
+ * SGI MIMO 40MHz:  0    0    0    0  131 131  191  222  242  270  284  289  293
+ *
+ * After the new mode has been tried for a short while (minimum of 6 failed
+ * frames or 8 successful frames), compare success ratio and actual throughput
+ * estimate of the new mode with the old.  If either is better with the new
+ * mode, continue to use the new mode.
+ *
+ * Continue comparing modes until all 3 possibilities have been tried.
+ * If moving from legacy to HT, try all 3 possibilities from the new HT
+ * mode.  After trying all 3, a best mode is found.  Continue to use this mode
+ * for the longer "while" described above (e.g. 480 successful frames for
+ * legacy), and then repeat the search process.
+ *
+ */
+struct iwl_link_quality_cmd {
+
+       /* Index of destination/recipient station in uCode's station table */
+       u8 sta_id;
+       u8 reserved1;
+       __le16 control;         /* not used */
+       struct iwl_link_qual_general_params general_params;
+       struct iwl_link_qual_agg_params agg_params;
+
+       /*
+        * Rate info; when using rate-scaling, Tx command's initial_rate_index
+        * specifies 1st Tx rate attempted, via index into this table.
+        * 4965 devices work their way through the table when retrying Tx.
+        */
+       struct {
+               __le32 rate_n_flags;    /* RATE_MCS_*, IWL_RATE_* */
+       } rs_table[LINK_QUAL_MAX_RETRY_NUM];
+       __le32 reserved2;
+} __packed;
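+
+/*
+ * Illustrative sketch only, not part of the driver or uCode API: one way to
+ * express the legacy table-fill rule above (case 2 under "FILLING THE RATE
+ * TABLE").  The caller supplies rate_n_flags values already ordered from the
+ * initial rate down to the lowest available rate, with antenna toggling
+ * folded into that list; the sketch uses the initial rate once, steps down
+ * one entry per retry, and repeats the lowest rate until all 16 entries of
+ * rs_table are filled.
+ */
+static inline void il_sketch_fill_legacy_rs_table(struct iwl_link_quality_cmd *lq,
+                                                  const u32 *rates, int n_rates)
+{
+        int i, idx = 0;
+
+        for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+                lq->rs_table[i].rate_n_flags = cpu_to_le32(rates[idx]);
+                if (idx < n_rates - 1)
+                        idx++;          /* 2b) next lower available rate */
+                /* else 2d) keep repeating the lowest available rate */
+        }
+}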
+
+/*
+ * BT configuration enable flags:
+ *   bit 0 - 1: BT channel announcement enabled
+ *           0: disable
+ *   bit 1 - 1: priority of BT device enabled
+ *           0: disable
+ */
+#define BT_COEX_DISABLE (0x0)
+#define BT_ENABLE_CHANNEL_ANNOUNCE BIT(0)
+#define BT_ENABLE_PRIORITY        BIT(1)
+
+#define BT_COEX_ENABLE  (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)
+
+#define BT_LEAD_TIME_DEF (0x1E)
+
+#define BT_MAX_KILL_DEF (0x5)
+
+/*
+ * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
+ *
+ * 3945 and 4965 devices support a hardware handshake with a Bluetooth device
+ * on the same platform.  The Bluetooth device alerts the wireless device when
+ * it is about to Tx; the wireless device can delay or kill its own Tx to
+ * accommodate.
+ */
+struct iwl_bt_cmd {
+       u8 flags;
+       u8 lead_time;
+       u8 max_kill;
+       u8 reserved;
+       __le32 kill_ack_mask;
+       __le32 kill_cts_mask;
+} __packed;
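+
+/*
+ * Minimal sketch, not driver code: populating iwl_bt_cmd with the enable
+ * flags and defaults defined above.  The zero kill masks are only an
+ * assumption for the example.
+ */
+static inline void il_sketch_fill_bt_cmd(struct iwl_bt_cmd *bt)
+{
+        bt->flags = BT_COEX_ENABLE;             /* announce + priority */
+        bt->lead_time = BT_LEAD_TIME_DEF;
+        bt->max_kill = BT_MAX_KILL_DEF;
+        bt->reserved = 0;
+        bt->kill_ack_mask = cpu_to_le32(0);     /* assumption for example */
+        bt->kill_cts_mask = cpu_to_le32(0);
+}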
+
+
+/******************************************************************************
+ * (6)
+ * Spectrum Management (802.11h) Commands, Responses, Notifications:
+ *
+ *****************************************************************************/
+
+/*
+ * Spectrum Management
+ */
+#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK         | \
+                                RXON_FILTER_CTL2HOST_MSK        | \
+                                RXON_FILTER_ACCEPT_GRP_MSK      | \
+                                RXON_FILTER_DIS_DECRYPT_MSK     | \
+                                RXON_FILTER_DIS_GRP_DECRYPT_MSK | \
+                                RXON_FILTER_ASSOC_MSK           | \
+                                RXON_FILTER_BCON_AWARE_MSK)
+
+struct iwl_measure_channel {
+       __le32 duration;        /* measurement duration in extended beacon
+                                * format */
+       u8 channel;             /* channel to measure */
+       u8 type;                /* see enum iwl_measure_type */
+       __le16 reserved;
+} __packed;
+
+/*
+ * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
+ */
+struct iwl_spectrum_cmd {
+       __le16 len;             /* number of bytes starting from token */
+       u8 token;               /* token id */
+       u8 id;                  /* measurement id -- 0 or 1 */
+       u8 origin;              /* 0 = TGh, 1 = other, 2 = TGk */
+       u8 periodic;            /* 1 = periodic */
+       __le16 path_loss_timeout;
+       __le32 start_time;      /* start time in extended beacon format */
+       __le32 reserved2;
+       __le32 flags;           /* rxon flags */
+       __le32 filter_flags;    /* rxon filter flags */
+       __le16 channel_count;   /* minimum 1, maximum 10 */
+       __le16 reserved3;
+       struct iwl_measure_channel channels[10];
+} __packed;
+
+/*
+ * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
+ */
+struct iwl_spectrum_resp {
+       u8 token;
+       u8 id;                  /* id of the prior command replaced, or 0xff */
+       __le16 status;          /* 0 - command will be handled
+                                * 1 - cannot handle (conflicts with another
+                                *     measurement) */
+} __packed;
+
+enum iwl_measurement_state {
+       IWL_MEASUREMENT_START = 0,
+       IWL_MEASUREMENT_STOP = 1,
+};
+
+enum iwl_measurement_status {
+       IWL_MEASUREMENT_OK = 0,
+       IWL_MEASUREMENT_CONCURRENT = 1,
+       IWL_MEASUREMENT_CSA_CONFLICT = 2,
+       IWL_MEASUREMENT_TGH_CONFLICT = 3,
+       /* 4-5 reserved */
+       IWL_MEASUREMENT_STOPPED = 6,
+       IWL_MEASUREMENT_TIMEOUT = 7,
+       IWL_MEASUREMENT_PERIODIC_FAILED = 8,
+};
+
+#define NUM_ELEMENTS_IN_HISTOGRAM 8
+
+struct iwl_measurement_histogram {
+       __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
+       __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM];  /* in 1usec counts */
+} __packed;
+
+/* clear channel availability counters */
+struct iwl_measurement_cca_counters {
+       __le32 ofdm;
+       __le32 cck;
+} __packed;
+
+enum iwl_measure_type {
+       IWL_MEASURE_BASIC = (1 << 0),
+       IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
+       IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
+       IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
+       IWL_MEASURE_FRAME = (1 << 4),
+       /* bits 5:6 are reserved */
+       IWL_MEASURE_IDLE = (1 << 7),
+};
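+
+/*
+ * Illustrative sketch, not driver code: a minimal single-channel
+ * channel-load measurement request.  Using MEASUREMENT_FILTER_FLAG for
+ * filter_flags and counting "len" from the token field onward are
+ * assumptions made for this example; the calibration of "duration" (in
+ * extended beacon format) is left to the caller.
+ */
+static inline void il_sketch_fill_measure_cmd(struct iwl_spectrum_cmd *cmd,
+                                              u8 channel, u32 duration)
+{
+        memset(cmd, 0, sizeof(*cmd));
+        cmd->id = 0;                            /* measurement id 0 or 1 */
+        cmd->origin = 2;                        /* 2 = TGk, per field comment */
+        cmd->filter_flags = MEASUREMENT_FILTER_FLAG;
+        cmd->channel_count = cpu_to_le16(1);
+        cmd->channels[0].channel = channel;
+        cmd->channels[0].type = IWL_MEASURE_CHANNEL_LOAD;
+        cmd->channels[0].duration = cpu_to_le32(duration);
+        cmd->len = cpu_to_le16(sizeof(*cmd) -
+                               offsetof(struct iwl_spectrum_cmd, token));
+}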
+
+/*
+ * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
+ */
+struct iwl_spectrum_notification {
+       u8 id;                  /* measurement id -- 0 or 1 */
+       u8 token;
+       u8 channel_index;       /* index in measurement channel list */
+       u8 state;               /* 0 - start, 1 - stop */
+       __le32 start_time;      /* lower 32-bits of TSF */
+       u8 band;                /* 0 - 5.2GHz, 1 - 2.4GHz */
+       u8 channel;
+       u8 type;                /* see enum iwl_measurement_type */
+       u8 reserved1;
+       /* NOTE:  cca_ofdm, cca_cck, basic_type, and histogram are only
+        * valid if applicable for the measurement type requested. */
+       __le32 cca_ofdm;        /* cca fraction time in 40MHz clock periods */
+       __le32 cca_cck;         /* cca fraction time in 44MHz clock periods */
+       __le32 cca_time;        /* channel load time in usecs */
+       u8 basic_type;          /* 0 - bss, 1 - ofdm preamble, 2 -
+                                * unidentified */
+       u8 reserved2[3];
+       struct iwl_measurement_histogram histogram;
+       __le32 stop_time;       /* lower 32-bits of TSF */
+       __le32 status;          /* see iwl_measurement_status */
+} __packed;
+
+/******************************************************************************
+ * (7)
+ * Power Management Commands, Responses, Notifications:
+ *
+ *****************************************************************************/
+
+/**
+ * struct iwl_powertable_cmd - Power Table Command
+ * @flags: See below:
+ *
+ * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ *
+ * PM allow:
+ *   bit 0 - '0' Driver does not allow power management
+ *           '1' Driver allows PM (use rest of parameters)
+ *
+ * uCode sends sleep notifications:
+ *   bit 1 - '0' Don't send sleep notification
+ *           '1' Send sleep notification (SEND_PM_NOTIFICATION)
+ *
+ * Sleep over DTIM:
+ *   bit 2 - '0' PM has to wake up every DTIM
+ *           '1' PM may sleep over DTIM until the listen interval.
+ *
+ * PCI power managed
+ *   bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1)
+ *           '1' !(PCI_CFG_LINK_CTRL & 0x1)
+ *
+ * Fast PD
+ *   bit 4 - '1' Put radio to sleep when receiving frame for others
+ *
+ * Force sleep Modes
+ *   bit 31/30- '00' use both mac/xtal sleeps
+ *              '01' force Mac sleep
+ *              '10' force xtal sleep
+ *              '11' Illegal set
+ *
+ * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period, then
+ * uCode assumes sleep over DTIM is allowed and the driver does not need to
+ * wake up for every DTIM.
+ */
+#define IWL_POWER_VEC_SIZE 5
+
+#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK       cpu_to_le16(BIT(0))
+#define IWL_POWER_POWER_SAVE_ENA_MSK           cpu_to_le16(BIT(0))
+#define IWL_POWER_POWER_MANAGEMENT_ENA_MSK     cpu_to_le16(BIT(1))
+#define IWL_POWER_SLEEP_OVER_DTIM_MSK          cpu_to_le16(BIT(2))
+#define IWL_POWER_PCI_PM_MSK                   cpu_to_le16(BIT(3))
+#define IWL_POWER_FAST_PD                      cpu_to_le16(BIT(4))
+#define IWL_POWER_BEACON_FILTERING             cpu_to_le16(BIT(5))
+#define IWL_POWER_SHADOW_REG_ENA               cpu_to_le16(BIT(6))
+#define IWL_POWER_CT_KILL_SET                  cpu_to_le16(BIT(7))
+
+struct iwl3945_powertable_cmd {
+       __le16 flags;
+       u8 reserved[2];
+       __le32 rx_data_timeout;
+       __le32 tx_data_timeout;
+       __le32 sleep_interval[IWL_POWER_VEC_SIZE];
+} __packed;
+
+struct iwl_powertable_cmd {
+       __le16 flags;
+       u8 keep_alive_seconds;          /* 3945 reserved */
+       u8 debug_flags;                 /* 3945 reserved */
+       __le32 rx_data_timeout;
+       __le32 tx_data_timeout;
+       __le32 sleep_interval[IWL_POWER_VEC_SIZE];
+       __le32 keep_alive_beacons;
+} __packed;
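+
+/*
+ * Illustrative sketch, not driver code: enabling power management with
+ * sleep-over-DTIM using the flag masks above.  The timeout and sleep
+ * interval values are placeholders for the example, not recommendations.
+ */
+static inline void il_sketch_fill_powertable(struct iwl_powertable_cmd *cmd)
+{
+        int i;
+
+        cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
+                     IWL_POWER_SLEEP_OVER_DTIM_MSK;
+        cmd->keep_alive_seconds = 0;
+        cmd->debug_flags = 0;
+        cmd->rx_data_timeout = cpu_to_le32(100 * 1024); /* placeholder usec */
+        cmd->tx_data_timeout = cpu_to_le32(100 * 1024); /* placeholder usec */
+        for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
+                cmd->sleep_interval[i] = cpu_to_le32(1); /* placeholder */
+        cmd->keep_alive_beacons = cpu_to_le32(0);
+}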
+
+/*
+ * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
+ * all devices identical.
+ */
+struct iwl_sleep_notification {
+       u8 pm_sleep_mode;
+       u8 pm_wakeup_src;
+       __le16 reserved;
+       __le32 sleep_time;
+       __le32 tsf_low;
+       __le32 bcon_timer;
+} __packed;
+
+/* Sleep states.  all devices identical. */
+enum {
+       IWL_PM_NO_SLEEP = 0,
+       IWL_PM_SLP_MAC = 1,
+       IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
+       IWL_PM_SLP_FULL_MAC_CARD_STATE = 3,
+       IWL_PM_SLP_PHY = 4,
+       IWL_PM_SLP_REPENT = 5,
+       IWL_PM_WAKEUP_BY_TIMER = 6,
+       IWL_PM_WAKEUP_BY_DRIVER = 7,
+       IWL_PM_WAKEUP_BY_RFKILL = 8,
+       /* 3 reserved */
+       IWL_PM_NUM_OF_MODES = 12,
+};
+
+/*
+ * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
+ */
+struct iwl_card_state_notif {
+       __le32 flags;
+} __packed;
+
+#define HW_CARD_DISABLED   0x01
+#define SW_CARD_DISABLED   0x02
+#define CT_CARD_DISABLED   0x04
+#define RXON_CARD_DISABLED 0x10
+
+struct iwl_ct_kill_config {
+       __le32   reserved;
+       __le32   critical_temperature_M;
+       __le32   critical_temperature_R;
+}  __packed;
+
+/******************************************************************************
+ * (8)
+ * Scan Commands, Responses, Notifications:
+ *
+ *****************************************************************************/
+
+#define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0)
+#define SCAN_CHANNEL_TYPE_ACTIVE  cpu_to_le32(1)
+
+/**
+ * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
+ *
+ * One for each channel in the scan list.
+ * Each channel can independently select:
+ * 1)  SSID for directed active scans
+ * 2)  Txpower setting (for rate specified within Tx command)
+ * 3)  How long to stay on-channel (behavior may be modified by quiet_time,
+ *     quiet_plcp_th, good_CRC_th)
+ *
+ * To avoid uCode errors, make sure the following are true (see comments
+ * under struct iwl_scan_cmd about max_out_time and quiet_time):
+ * 1)  If using passive_dwell (i.e. passive_dwell != 0):
+ *     active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
+ * 2)  quiet_time <= active_dwell
+ * 3)  If restricting off-channel time (i.e. max_out_time !=0):
+ *     passive_dwell < max_out_time
+ *     active_dwell < max_out_time
+ */
+struct iwl3945_scan_channel {
+       /*
+        * type is defined as:
+        * 0:0 1 = active, 0 = passive
+        * 1:4 SSID direct bit map; if a bit is set, then corresponding
+        *     SSID IE is transmitted in probe request.
+        * 5:7 reserved
+        */
+       u8 type;
+       u8 channel;     /* band is selected by iwl3945_scan_cmd "flags" field */
+       struct iwl3945_tx_power tpc;
+       __le16 active_dwell;    /* in 1024-uSec TU (time units), typ 5-50 */
+       __le16 passive_dwell;   /* in 1024-uSec TU (time units), typ 20-500 */
+} __packed;
+
+/* set number of direct probes u8 type */
+#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
+
+struct iwl_scan_channel {
+       /*
+        * type is defined as:
+        * 0:0 1 = active, 0 = passive
+        * 1:20 SSID direct bit map; if a bit is set, then corresponding
+        *     SSID IE is transmitted in probe request.
+        * 21:31 reserved
+        */
+       __le32 type;
+       __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */
+       u8 tx_gain;             /* gain for analog radio */
+       u8 dsp_atten;           /* gain for DSP */
+       __le16 active_dwell;    /* in 1024-uSec TU (time units), typ 5-50 */
+       __le16 passive_dwell;   /* in 1024-uSec TU (time units), typ 20-500 */
+} __packed;
+
+/* set number of direct probes __le32 type */
+#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
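+
+/*
+ * Illustrative helper, not part of the API: IWL_SCAN_PROBE_MASK(n) sets
+ * bits 1..n of the channel "type" word, which corresponds to the first n
+ * direct-probe SSID entries for this channel.  Dwell times and tx gain are
+ * left to the caller in this sketch.
+ */
+static inline void il_sketch_set_active_channel(struct iwl_scan_channel *chan,
+                                                u8 channel, int n_ssids)
+{
+        chan->channel = cpu_to_le16(channel);
+        chan->type = SCAN_CHANNEL_TYPE_ACTIVE;
+        if (n_ssids)
+                chan->type |= IWL_SCAN_PROBE_MASK(n_ssids);
+}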
+
+/**
+ * struct iwl_ssid_ie - directed scan network information element
+ *
+ * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
+ * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
+ * each channel may select different ssids from among the 20 (4) entries.
+ * SSID IEs get transmitted in reverse order of entry.
+ */
+struct iwl_ssid_ie {
+       u8 id;
+       u8 len;
+       u8 ssid[32];
+} __packed;
+
+#define PROBE_OPTION_MAX_3945          4
+#define PROBE_OPTION_MAX               20
+#define TX_CMD_LIFE_TIME_INFINITE      cpu_to_le32(0xFFFFFFFF)
+#define IWL_GOOD_CRC_TH_DISABLED       0
+#define IWL_GOOD_CRC_TH_DEFAULT                cpu_to_le16(1)
+#define IWL_GOOD_CRC_TH_NEVER          cpu_to_le16(0xffff)
+#define IWL_MAX_SCAN_SIZE 1024
+#define IWL_MAX_CMD_SIZE 4096
+
+/*
+ * REPLY_SCAN_CMD = 0x80 (command)
+ *
+ * The hardware scan command is very powerful; the driver can set it up to
+ * maintain (relatively) normal network traffic while doing a scan in the
+ * background.  The max_out_time and suspend_time control the ratio of how
+ * long the device stays on an associated network channel ("service channel")
+ * vs. how long it's away from the service channel, i.e. tuned to other channels
+ * for scanning.
+ *
+ * max_out_time is the max time off-channel (in usec), and suspend_time
+ * is how long (in "extended beacon" format) that the scan is "suspended"
+ * after returning to the service channel.  That is, suspend_time is the
+ * time that we stay on the service channel, doing normal work, between
+ * scan segments.  The driver may set these parameters differently to support
+ * scanning when associated vs. not associated, and light vs. heavy traffic
+ * loads when associated.
+ *
+ * After receiving this command, the device's scan engine does the following:
+ *
+ * 1)  Sends SCAN_START notification to driver
+ * 2)  Checks to see if it has time to do scan for one channel
+ * 3)  Sends NULL packet, with power-save (PS) bit set to 1,
+ *     to tell AP that we're going off-channel
+ * 4)  Tunes to first channel in scan list, does active or passive scan
+ * 5)  Sends SCAN_RESULT notification to driver
+ * 6)  Checks to see if it has time to do scan on *next* channel in list
+ * 7)  Repeats 4-6 until it no longer has time to scan the next channel
+ *     before max_out_time expires
+ * 8)  Returns to service channel
+ * 9)  Sends NULL packet with PS=0 to tell AP that we're back
+ * 10) Stays on service channel until suspend_time expires
+ * 11) Repeats entire process 2-10 until list is complete
+ * 12) Sends SCAN_COMPLETE notification
+ *
+ * For fast, efficient scans, the scan command also has support for staying on
+ * a channel for just a short time, if doing active scanning and getting no
+ * responses to the transmitted probe request.  This time is controlled by
+ * quiet_time, and the number of received packets below which a channel is
+ * considered "quiet" is controlled by quiet_plcp_threshold.
+ *
+ * For active scanning on channels that have regulatory restrictions against
+ * blindly transmitting, the scan can listen before transmitting, to make sure
+ * that there is already legitimate activity on the channel.  If enough
+ * packets are cleanly received on the channel (controlled by good_CRC_th,
+ * typical value 1), the scan engine starts transmitting probe requests.
+ *
+ * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
+ *
+ * To avoid uCode errors, see timing restrictions described under
+ * struct iwl_scan_channel.
+ */
+
+struct iwl3945_scan_cmd {
+       __le16 len;
+       u8 reserved0;
+       u8 channel_count;       /* # channels in channel list */
+       __le16 quiet_time;      /* dwell only this # millisecs on quiet channel
+                                * (only for active scan) */
+       __le16 quiet_plcp_th;   /* quiet chnl is < this # pkts (typ. 1) */
+       __le16 good_CRC_th;     /* passive -> active promotion threshold */
+       __le16 reserved1;
+       __le32 max_out_time;    /* max usec to be away from associated (service)
+                                * channel */
+       __le32 suspend_time;    /* pause scan this long (in "extended beacon
+                                * format") when returning to service channel:
+                                * 3945; 31:24 # beacons, 19:0 additional usec,
+                                * 4965; 31:22 # beacons, 21:0 additional usec.
+                                */
+       __le32 flags;           /* RXON_FLG_* */
+       __le32 filter_flags;    /* RXON_FILTER_* */
+
+       /* For active scans (set to all-0s for passive scans).
+        * Does not include payload.  Must specify Tx rate; no rate scaling. */
+       struct iwl3945_tx_cmd tx_cmd;
+
+       /* For directed active scans (set to all-0s otherwise) */
+       struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
+
+       /*
+        * Probe request frame, followed by channel list.
+        *
+        * Size of probe request frame is specified by byte count in tx_cmd.
+        * Channel list follows immediately after probe request frame.
+        * Number of channels in list is specified by channel_count.
+        * Each channel in list is of type:
+        *
+        * struct iwl3945_scan_channel channels[0];
+        *
+        * NOTE:  Only one band of channels can be scanned per pass.  You
+        * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
+        * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+        * before requesting another scan.
+        */
+       u8 data[0];
+} __packed;
+
+struct iwl_scan_cmd {
+       __le16 len;
+       u8 reserved0;
+       u8 channel_count;       /* # channels in channel list */
+       __le16 quiet_time;      /* dwell only this # millisecs on quiet channel
+                                * (only for active scan) */
+       __le16 quiet_plcp_th;   /* quiet chnl is < this # pkts (typ. 1) */
+       __le16 good_CRC_th;     /* passive -> active promotion threshold */
+       __le16 rx_chain;        /* RXON_RX_CHAIN_* */
+       __le32 max_out_time;    /* max usec to be away from associated (service)
+                                * channel */
+       __le32 suspend_time;    /* pause scan this long (in "extended beacon
+                                * format") when returning to service chnl:
+                                * 3945; 31:24 # beacons, 19:0 additional usec,
+                                * 4965; 31:22 # beacons, 21:0 additional usec.
+                                */
+       __le32 flags;           /* RXON_FLG_* */
+       __le32 filter_flags;    /* RXON_FILTER_* */
+
+       /* For active scans (set to all-0s for passive scans).
+        * Does not include payload.  Must specify Tx rate; no rate scaling. */
+       struct iwl_tx_cmd tx_cmd;
+
+       /* For directed active scans (set to all-0s otherwise) */
+       struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+
+       /*
+        * Probe request frame, followed by channel list.
+        *
+        * Size of probe request frame is specified by byte count in tx_cmd.
+        * Channel list follows immediately after probe request frame.
+        * Number of channels in list is specified by channel_count.
+        * Each channel in list is of type:
+        *
+        * struct iwl_scan_channel channels[0];
+        *
+        * NOTE:  Only one band of channels can be scanned per pass.  You
+        * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
+        * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+        * before requesting another scan.
+        */
+       u8 data[0];
+} __packed;
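+
+/*
+ * Sizing sketch (illustrative, not driver code): the scan command is
+ * variable length -- the header above, then the probe request frame (length
+ * given by the tx_cmd byte count), then channel_count channel entries.  A
+ * driver might bound the total against IWL_MAX_SCAN_SIZE roughly like this,
+ * where probe_len and n_channels are whatever the caller has prepared.
+ */
+static inline bool il_sketch_scan_cmd_fits(size_t probe_len, int n_channels)
+{
+        size_t total = sizeof(struct iwl_scan_cmd) + probe_len +
+                       n_channels * sizeof(struct iwl_scan_channel);
+
+        return total <= IWL_MAX_SCAN_SIZE;
+}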
+
+/* A scan abort is reported via the scan-complete notification, with abort status set. */
+#define CAN_ABORT_STATUS       cpu_to_le32(0x1)
+/* complete notification statuses */
+#define ABORT_STATUS            0x2
+
+/*
+ * REPLY_SCAN_CMD = 0x80 (response)
+ */
+struct iwl_scanreq_notification {
+       __le32 status;          /* 1: okay, 2: cannot fulfill request */
+} __packed;
+
+/*
+ * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
+ */
+struct iwl_scanstart_notification {
+       __le32 tsf_low;
+       __le32 tsf_high;
+       __le32 beacon_timer;
+       u8 channel;
+       u8 band;
+       u8 reserved[2];
+       __le32 status;
+} __packed;
+
+#define  SCAN_OWNER_STATUS 0x1
+#define  MEASURE_OWNER_STATUS 0x2
+
+#define IWL_PROBE_STATUS_OK            0
+#define IWL_PROBE_STATUS_TX_FAILED     BIT(0)
+/* error statuses combined with TX_FAILED */
+#define IWL_PROBE_STATUS_FAIL_TTL      BIT(1)
+#define IWL_PROBE_STATUS_FAIL_BT       BIT(2)
+
+#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */
+/*
+ * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
+ */
+struct iwl_scanresults_notification {
+       u8 channel;
+       u8 band;
+       u8 probe_status;
+       u8 num_probe_not_sent; /* not enough time to send */
+       __le32 tsf_low;
+       __le32 tsf_high;
+       __le32 statistics[NUMBER_OF_STATISTICS];
+} __packed;
+
+/*
+ * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
+ */
+struct iwl_scancomplete_notification {
+       u8 scanned_channels;
+       u8 status;
+       u8 last_channel;
+       __le32 tsf_low;
+       __le32 tsf_high;
+} __packed;
+
+
+/******************************************************************************
+ * (9)
+ * IBSS/AP Commands and Notifications:
+ *
+ *****************************************************************************/
+
+enum iwl_ibss_manager {
+       IWL_NOT_IBSS_MANAGER = 0,
+       IWL_IBSS_MANAGER = 1,
+};
+
+/*
+ * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
+ */
+
+struct iwl3945_beacon_notif {
+       struct iwl3945_tx_resp beacon_notify_hdr;
+       __le32 low_tsf;
+       __le32 high_tsf;
+       __le32 ibss_mgr_status;
+} __packed;
+
+struct iwl4965_beacon_notif {
+       struct iwl4965_tx_resp beacon_notify_hdr;
+       __le32 low_tsf;
+       __le32 high_tsf;
+       __le32 ibss_mgr_status;
+} __packed;
+
+/*
+ * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
+ */
+
+struct iwl3945_tx_beacon_cmd {
+       struct iwl3945_tx_cmd tx;
+       __le16 tim_idx;
+       u8 tim_size;
+       u8 reserved1;
+       struct ieee80211_hdr frame[0];  /* beacon frame */
+} __packed;
+
+struct iwl_tx_beacon_cmd {
+       struct iwl_tx_cmd tx;
+       __le16 tim_idx;
+       u8 tim_size;
+       u8 reserved1;
+       struct ieee80211_hdr frame[0];  /* beacon frame */
+} __packed;
+
+/******************************************************************************
+ * (10)
+ * Statistics Commands and Notifications:
+ *
+ *****************************************************************************/
+
+#define IWL_TEMP_CONVERT 260
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS  12
+
+/* Used for passing to driver number of successes and failures per rate */
+struct rate_histogram {
+       union {
+               __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
+               __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
+               __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
+       } success;
+       union {
+               __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
+               __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
+               __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
+       } failed;
+} __packed;
+
+/* statistics command response */
+
+struct iwl39_statistics_rx_phy {
+       __le32 ina_cnt;
+       __le32 fina_cnt;
+       __le32 plcp_err;
+       __le32 crc32_err;
+       __le32 overrun_err;
+       __le32 early_overrun_err;
+       __le32 crc32_good;
+       __le32 false_alarm_cnt;
+       __le32 fina_sync_err_cnt;
+       __le32 sfd_timeout;
+       __le32 fina_timeout;
+       __le32 unresponded_rts;
+       __le32 rxe_frame_limit_overrun;
+       __le32 sent_ack_cnt;
+       __le32 sent_cts_cnt;
+} __packed;
+
+struct iwl39_statistics_rx_non_phy {
+       __le32 bogus_cts;       /* CTS received when not expecting CTS */
+       __le32 bogus_ack;       /* ACK received when not expecting ACK */
+       __le32 non_bssid_frames;        /* number of frames with BSSID that
+                                        * doesn't belong to the STA BSSID */
+       __le32 filtered_frames; /* count frames that were dumped in the
+                                * filtering process */
+       __le32 non_channel_beacons;     /* beacons with our bss id but not on
+                                        * our serving channel */
+} __packed;
+
+struct iwl39_statistics_rx {
+       struct iwl39_statistics_rx_phy ofdm;
+       struct iwl39_statistics_rx_phy cck;
+       struct iwl39_statistics_rx_non_phy general;
+} __packed;
+
+struct iwl39_statistics_tx {
+       __le32 preamble_cnt;
+       __le32 rx_detected_cnt;
+       __le32 bt_prio_defer_cnt;
+       __le32 bt_prio_kill_cnt;
+       __le32 few_bytes_cnt;
+       __le32 cts_timeout;
+       __le32 ack_timeout;
+       __le32 expected_ack_cnt;
+       __le32 actual_ack_cnt;
+} __packed;
+
+struct statistics_dbg {
+       __le32 burst_check;
+       __le32 burst_count;
+       __le32 wait_for_silence_timeout_cnt;
+       __le32 reserved[3];
+} __packed;
+
+struct iwl39_statistics_div {
+       __le32 tx_on_a;
+       __le32 tx_on_b;
+       __le32 exec_time;
+       __le32 probe_time;
+} __packed;
+
+struct iwl39_statistics_general {
+       __le32 temperature;
+       struct statistics_dbg dbg;
+       __le32 sleep_time;
+       __le32 slots_out;
+       __le32 slots_idle;
+       __le32 ttl_timestamp;
+       struct iwl39_statistics_div div;
+} __packed;
+
+struct statistics_rx_phy {
+       __le32 ina_cnt;
+       __le32 fina_cnt;
+       __le32 plcp_err;
+       __le32 crc32_err;
+       __le32 overrun_err;
+       __le32 early_overrun_err;
+       __le32 crc32_good;
+       __le32 false_alarm_cnt;
+       __le32 fina_sync_err_cnt;
+       __le32 sfd_timeout;
+       __le32 fina_timeout;
+       __le32 unresponded_rts;
+       __le32 rxe_frame_limit_overrun;
+       __le32 sent_ack_cnt;
+       __le32 sent_cts_cnt;
+       __le32 sent_ba_rsp_cnt;
+       __le32 dsp_self_kill;
+       __le32 mh_format_err;
+       __le32 re_acq_main_rssi_sum;
+       __le32 reserved3;
+} __packed;
+
+struct statistics_rx_ht_phy {
+       __le32 plcp_err;
+       __le32 overrun_err;
+       __le32 early_overrun_err;
+       __le32 crc32_good;
+       __le32 crc32_err;
+       __le32 mh_format_err;
+       __le32 agg_crc32_good;
+       __le32 agg_mpdu_cnt;
+       __le32 agg_cnt;
+       __le32 unsupport_mcs;
+} __packed;
+
+#define INTERFERENCE_DATA_AVAILABLE      cpu_to_le32(1)
+
+struct statistics_rx_non_phy {
+       __le32 bogus_cts;       /* CTS received when not expecting CTS */
+       __le32 bogus_ack;       /* ACK received when not expecting ACK */
+       __le32 non_bssid_frames;        /* number of frames with BSSID that
+                                        * doesn't belong to the STA BSSID */
+       __le32 filtered_frames; /* count frames that were dumped in the
+                                * filtering process */
+       __le32 non_channel_beacons;     /* beacons with our bss id but not on
+                                        * our serving channel */
+       __le32 channel_beacons; /* beacons with our bss id and in our
+                                * serving channel */
+       __le32 num_missed_bcon; /* number of missed beacons */
+       __le32 adc_rx_saturation_time;  /* count in 0.8us units the time the
+                                        * ADC was in saturation */
+       __le32 ina_detection_search_time;/* total time (in 0.8us) searched
+                                         * for INA */
+       __le32 beacon_silence_rssi_a;   /* RSSI silence after beacon frame */
+       __le32 beacon_silence_rssi_b;   /* RSSI silence after beacon frame */
+       __le32 beacon_silence_rssi_c;   /* RSSI silence after beacon frame */
+       __le32 interference_data_flag;  /* flag for interference data
+                                        * availability. 1 when data is
+                                        * available. */
+       __le32 channel_load;            /* counts RX Enable time in uSec */
+       __le32 dsp_false_alarms;        /* DSP false alarm (both OFDM
+                                        * and CCK) counter */
+       __le32 beacon_rssi_a;
+       __le32 beacon_rssi_b;
+       __le32 beacon_rssi_c;
+       __le32 beacon_energy_a;
+       __le32 beacon_energy_b;
+       __le32 beacon_energy_c;
+} __packed;
+
+struct statistics_rx {
+       struct statistics_rx_phy ofdm;
+       struct statistics_rx_phy cck;
+       struct statistics_rx_non_phy general;
+       struct statistics_rx_ht_phy ofdm_ht;
+} __packed;
+
+/**
+ * struct statistics_tx_power - current tx power
+ *
+ * @ant_a: current tx power on chain a in 1/2 dB step
+ * @ant_b: current tx power on chain b in 1/2 dB step
+ * @ant_c: current tx power on chain c in 1/2 dB step
+ */
+struct statistics_tx_power {
+       u8 ant_a;
+       u8 ant_b;
+       u8 ant_c;
+       u8 reserved;
+} __packed;
+
+struct statistics_tx_non_phy_agg {
+       __le32 ba_timeout;
+       __le32 ba_reschedule_frames;
+       __le32 scd_query_agg_frame_cnt;
+       __le32 scd_query_no_agg;
+       __le32 scd_query_agg;
+       __le32 scd_query_mismatch;
+       __le32 frame_not_ready;
+       __le32 underrun;
+       __le32 bt_prio_kill;
+       __le32 rx_ba_rsp_cnt;
+} __packed;
+
+struct statistics_tx {
+       __le32 preamble_cnt;
+       __le32 rx_detected_cnt;
+       __le32 bt_prio_defer_cnt;
+       __le32 bt_prio_kill_cnt;
+       __le32 few_bytes_cnt;
+       __le32 cts_timeout;
+       __le32 ack_timeout;
+       __le32 expected_ack_cnt;
+       __le32 actual_ack_cnt;
+       __le32 dump_msdu_cnt;
+       __le32 burst_abort_next_frame_mismatch_cnt;
+       __le32 burst_abort_missing_next_frame_cnt;
+       __le32 cts_timeout_collision;
+       __le32 ack_or_ba_timeout_collision;
+       struct statistics_tx_non_phy_agg agg;
+
+       __le32 reserved1;
+} __packed;
+
+
+struct statistics_div {
+       __le32 tx_on_a;
+       __le32 tx_on_b;
+       __le32 exec_time;
+       __le32 probe_time;
+       __le32 reserved1;
+       __le32 reserved2;
+} __packed;
+
+struct statistics_general_common {
+       __le32 temperature;   /* radio temperature */
+       struct statistics_dbg dbg;
+       __le32 sleep_time;
+       __le32 slots_out;
+       __le32 slots_idle;
+       __le32 ttl_timestamp;
+       struct statistics_div div;
+       __le32 rx_enable_counter;
+       /*
+        * num_of_sos_states:
+        *  count the number of times we have to re-tune
+        *  in order to get out of bad PHY status
+        */
+       __le32 num_of_sos_states;
+} __packed;
+
+struct statistics_general {
+       struct statistics_general_common common;
+       __le32 reserved2;
+       __le32 reserved3;
+} __packed;
+
+#define UCODE_STATISTICS_CLEAR_MSK             (0x1 << 0)
+#define UCODE_STATISTICS_FREQUENCY_MSK         (0x1 << 1)
+#define UCODE_STATISTICS_NARROW_BAND_MSK       (0x1 << 2)
+
+/*
+ * REPLY_STATISTICS_CMD = 0x9c,
+ * all devices identical.
+ *
+ * This command triggers an immediate response containing uCode statistics.
+ * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
+ *
+ * If the CLEAR_STATS configuration flag is set, uCode will clear its
+ * internal copy of the statistics (counters) after issuing the response.
+ * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below).
+ *
+ * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
+ * STATISTICS_NOTIFICATIONs after received beacons (see below).  This flag
+ * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
+ */
+#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1)    /* see above */
+#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2) /* see above */
+struct iwl_statistics_cmd {
+       __le32 configuration_flags;     /* IWL_STATS_CONF_* */
+} __packed;
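+
+/*
+ * Example sketch, not driver code: requesting statistics and optionally
+ * asking uCode to clear its internal counters afterwards, per the
+ * CLEAR_STATS flag above.
+ */
+static inline void il_sketch_build_stats_request(struct iwl_statistics_cmd *cmd,
+                                                 bool clear)
+{
+        cmd->configuration_flags = clear ? IWL_STATS_CONF_CLEAR_STATS :
+                                           cpu_to_le32(0);
+}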
+
+/*
+ * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
+ *
+ * By default, uCode issues this notification after receiving a beacon
+ * while associated.  To disable this behavior, set DISABLE_NOTIF flag in the
+ * REPLY_STATISTICS_CMD 0x9c, above.
+ *
+ * Statistics counters continue to increment beacon after beacon, but are
+ * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
+ * 0x9c with CLEAR_STATS bit set (see above).
+ *
+ * uCode also issues this notification during scans.  uCode clears statistics
+ * appropriately so that each notification contains statistics for only the
+ * one channel that has just been scanned.
+ */
+#define STATISTICS_REPLY_FLG_BAND_24G_MSK         cpu_to_le32(0x2)
+#define STATISTICS_REPLY_FLG_HT40_MODE_MSK        cpu_to_le32(0x8)
+
+struct iwl3945_notif_statistics {
+       __le32 flag;
+       struct iwl39_statistics_rx rx;
+       struct iwl39_statistics_tx tx;
+       struct iwl39_statistics_general general;
+} __packed;
+
+struct iwl_notif_statistics {
+       __le32 flag;
+       struct statistics_rx rx;
+       struct statistics_tx tx;
+       struct statistics_general general;
+} __packed;
+
+/*
+ * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
+ *
+ * uCode sends MISSED_BEACONS_NOTIFICATION to the driver whenever it detects
+ * missed beacons, regardless of how many were missed.  The notification
+ * carries all the beacon information: total missed beacons, consecutive
+ * missed beacons, beacons received, and beacons expected to be received.
+ *
+ * If uCode detects consecutive_missed_beacons > 5, it resets the radio to
+ * bring the radio/PHY back to a working state; this is unrelated to when the
+ * driver performs sensitivity calibration.
+ *
+ * The driver should set its own missed_beacon_threshold to decide when to
+ * perform sensitivity calibration based on the number of consecutive missed
+ * beacons, in order to improve overall performance, especially in noisy
+ * environments.
+ *
+ */
+
+#define IWL_MISSED_BEACON_THRESHOLD_MIN        (1)
+#define IWL_MISSED_BEACON_THRESHOLD_DEF        (5)
+#define IWL_MISSED_BEACON_THRESHOLD_MAX        IWL_MISSED_BEACON_THRESHOLD_DEF
+
+struct iwl_missed_beacon_notif {
+       __le32 consecutive_missed_beacons;
+       __le32 total_missed_becons;
+       __le32 num_expected_beacons;
+       __le32 num_recvd_beacons;
+} __packed;
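+
+/*
+ * Illustrative sketch of the policy described above (not the driver's
+ * code): compare consecutive missed beacons against the driver's own
+ * threshold to decide when to run sensitivity calibration.
+ */
+static inline bool
+il_sketch_should_recalibrate(const struct iwl_missed_beacon_notif *nb,
+                             u32 missed_beacon_threshold)
+{
+        return le32_to_cpu(nb->consecutive_missed_beacons) >
+               missed_beacon_threshold;
+}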
+
+
+/******************************************************************************
+ * (11)
+ * Rx Calibration Commands:
+ *
+ * With the uCode used for open source drivers, most Tx calibration (except
+ * for Tx Power) and most Rx calibration is done by uCode during the
+ * "initialize" phase of uCode boot.  Driver must calibrate only:
+ *
+ * 1)  Tx power (depends on temperature), described elsewhere
+ * 2)  Receiver gain balance (optimize MIMO, and detect disconnected antennas)
+ * 3)  Receiver sensitivity (to optimize signal detection)
+ *
+ *****************************************************************************/
+
+/**
+ * SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
+ *
+ * This command sets up the Rx signal detector for a sensitivity level that
+ * is high enough to lock onto all signals within the associated network,
+ * but low enough to ignore signals that are below a certain threshold, so as
+ * not to have too many "false alarms".  False alarms are signals that the
+ * Rx DSP tries to lock onto, but then discards after determining that they
+ * are noise.
+ *
+ * The optimum number of false alarms is between 5 and 50 per 200 TUs
+ * (200 * 1024 uSecs, i.e. 204.8 milliseconds) of actual Rx time (i.e.
+ * time listening, not transmitting).  Driver must adjust sensitivity so that
+ * the ratio of actual false alarms to actual Rx time falls within this range.
+ *
+ * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each
+ * received beacon.  These provide information to the driver to analyze the
+ * sensitivity.  Don't analyze statistics that come in from scanning, or any
+ * other non-associated-network source.  Pertinent statistics include:
+ *
+ * From "general" statistics (struct statistics_rx_non_phy):
+ *
+ * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
+ *   Measure of energy of desired signal.  Used for establishing a level
+ *   below which the device does not detect signals.
+ *
+ * (beacon_silence_rssi_[abc] & 0x0FF00) >> 8 (unsigned, units in dB)
+ *   Measure of background noise in silent period after beacon.
+ *
+ * channel_load
+ *   uSecs of actual Rx time during beacon period (varies according to
+ *   how much time was spent transmitting).
+ *
+ * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately:
+ *
+ * false_alarm_cnt
+ *   Signal locks abandoned early (before phy-level header).
+ *
+ * plcp_err
+ *   Signal locks abandoned late (during phy-level header).
+ *
+ * NOTE:  Both false_alarm_cnt and plcp_err increment monotonically from
+ *        beacon to beacon, i.e. each value is an accumulation of all errors
+ *        before and including the latest beacon.  Values will wrap around to 0
+ *        after counting up to 2^32 - 1.  Driver must differentiate vs.
+ *        previous beacon's values to determine # false alarms in the current
+ *        beacon period.
+ *
+ * Total number of false alarms = false_alarms + plcp_errs
+ *
+ * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd
+ * (notice that the start points for OFDM are at or close to settings for
+ * maximum sensitivity):
+ *
+ *                                             START  /  MIN  /  MAX
+ *   HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX          90   /   85  /  120
+ *   HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX     170   /  170  /  210
+ *   HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX         105   /  105  /  140
+ *   HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX     220   /  220  /  270
+ *
+ *   If actual rate of OFDM false alarms (+ plcp_errors) is too high
+ *   (greater than 50 for each 204.8 msecs listening), reduce sensitivity
+ *   by *adding* 1 to all 4 of the table entries above, up to the max for
+ *   each entry.  Conversely, if false alarm rate is too low (less than 5
+ *   for each 204.8 msecs listening), *subtract* 1 from each entry to
+ *   increase sensitivity.
+ *
+ * For CCK sensitivity, keep track of the following:
+ *
+ *   1).  20-beacon history of maximum background noise, indicated by
+ *        (beacon_silence_rssi_[abc] & 0x0FF00), units in dB, across the
+ *        3 receivers.  For any given beacon, the "silence reference" is
+ *        the maximum of last 60 samples (20 beacons * 3 receivers).
+ *
+ *   2).  10-beacon history of strongest signal level, as indicated
+ *        by (beacon_energy_[abc] & 0x0FF00) >> 8, across the 3 receivers,
+ *        i.e. the strength of the signal through the best receiver at the
+ *        moment.  These measurements are "upside down", with lower values
+ *        for stronger signals, so max energy will be *minimum* value.
+ *
+ *        Then for any given beacon, the driver must determine the *weakest*
+ *        of the strongest signals; this is the minimum level that needs to be
+ *        successfully detected, when using the best receiver at the moment.
+ *        "Max cck energy" is the maximum (higher value means lower energy!)
+ *        of the last 10 minima.  Once this is determined, driver must add
+ *        a little margin by adding "6" to it.
+ *
+ *   3).  Number of consecutive beacon periods with too few false alarms.
+ *        Reset this to 0 at the first beacon period that falls within the
+ *        "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
+ *
+ * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd
+ * (notice that the start points for CCK are at maximum sensitivity):
+ *
+ *                                             START  /  MIN  /  MAX
+ *   HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX         125   /  125  /  200
+ *   HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX     200   /  200  /  400
+ *   HD_MIN_ENERGY_CCK_DET_INDEX                100   /    0  /  100
+ *
+ *   If actual rate of CCK false alarms (+ plcp_errors) is too high
+ *   (greater than 50 for each 204.8 msecs listening), method for reducing
+ *   sensitivity is:
+ *
+ *   1)  *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ *       up to max 400.
+ *
+ *   2)  If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160,
+ *       sensitivity has been reduced a significant amount; bring it up to
+ *       a moderate 161.  Otherwise, *add* 3, up to max 200.
+ *
+ *   3)  a)  If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160,
+ *       sensitivity has been reduced only a moderate or small amount;
+ *       *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX,
+ *       down to min 0.  Otherwise (if gain has been significantly reduced),
+ *       don't change the HD_MIN_ENERGY_CCK_DET_INDEX value.
+ *
+ *       b)  Save a snapshot of the "silence reference".
+ *
+ *   If actual rate of CCK false alarms (+ plcp_errors) is too low
+ *   (less than 5 for each 204.8 msecs listening), method for increasing
+ *   sensitivity is used only if:
+ *
+ *   1a)  Previous beacon did not have too many false alarms
+ *   1b)  AND difference between previous "silence reference" and current
+ *        "silence reference" (prev - current) is 2 or more,
+ *   OR 2)  100 or more consecutive beacon periods have had rate of
+ *          less than 5 false alarms per 204.8 milliseconds rx time.
+ *
+ *   Method for increasing sensitivity:
+ *
+ *   1)  *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX,
+ *       down to min 125.
+ *
+ *   2)  *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ *       down to min 200.
+ *
+ *   3)  *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100.
+ *
+ *   If actual rate of CCK false alarms (+ plcp_errors) is within good range
+ *   (between 5 and 50 for each 204.8 msecs listening):
+ *
+ *   1)  Save a snapshot of the silence reference.
+ *
+ *   2)  If previous beacon had too many CCK false alarms (+ plcp_errors),
+ *       give some extra margin to energy threshold by *subtracting* 8
+ *       from value in HD_MIN_ENERGY_CCK_DET_INDEX.
+ *
+ *   For all cases (too few, too many, good range), make sure that the CCK
+ *   detection threshold (energy) is below the energy level for robust
+ *   detection over the past 10 beacon periods, the "Max cck energy".
+ *   Lower values mean higher energy; this means making sure that the value
+ *   in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
+ *
+ */
+
+/*
+ * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
+ */
+#define HD_TABLE_SIZE  (11)    /* number of entries */
+#define HD_MIN_ENERGY_CCK_DET_INDEX                 (0)        /* table indexes */
+#define HD_MIN_ENERGY_OFDM_DET_INDEX                (1)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX          (2)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX      (3)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX      (4)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX          (5)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX      (6)
+#define HD_BARKER_CORR_TH_ADD_MIN_INDEX             (7)
+#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX         (8)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX          (9)
+#define HD_OFDM_ENERGY_TH_IN_INDEX                  (10)
+
+/* Control field in struct iwl_sensitivity_cmd */
+#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE  cpu_to_le16(0)
+#define SENSITIVITY_CMD_CONTROL_WORK_TABLE     cpu_to_le16(1)
+
+/**
+ * struct iwl_sensitivity_cmd
+ * @control:  (1) updates working table, (0) updates default table
+ * @table:  energy threshold values, use HD_* as index into table
+ *
+ * Always use "1" in "control" to update uCode's working table and DSP.
+ */
+struct iwl_sensitivity_cmd {
+       __le16 control;                 /* always use "1" */
+       __le16 table[HD_TABLE_SIZE];    /* use HD_* as index */
+} __packed;
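+
+/*
+ * Illustrative sketch (not the driver's implementation): nudging the four
+ * OFDM auto-correlation entries by +/-1 per the rule above, clamped to the
+ * min/max bounds from the OFDM table in the comment.  "false_alarms" is the
+ * per-204.8ms count (false_alarm_cnt + plcp_err deltas) that the caller has
+ * already derived from the statistics.
+ */
+static inline void il_sketch_adjust_ofdm_sensitivity(struct iwl_sensitivity_cmd *cmd,
+                                                     u32 false_alarms)
+{
+        static const int idx[4] = {
+                HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX,
+                HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX,
+                HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX,
+                HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX,
+        };
+        static const u16 lo[4] = {  85, 170, 105, 220 };
+        static const u16 hi[4] = { 120, 210, 140, 270 };
+        int i;
+
+        for (i = 0; i < 4; i++) {
+                u16 v = le16_to_cpu(cmd->table[idx[i]]);
+
+                if (false_alarms > 50 && v < hi[i])
+                        v++;    /* too many false alarms: reduce sensitivity */
+                else if (false_alarms < 5 && v > lo[i])
+                        v--;    /* too few false alarms: increase sensitivity */
+                cmd->table[idx[i]] = cpu_to_le16(v);
+        }
+}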
+
+
+/**
+ * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
+ *
+ * This command sets the relative gains of the 4965 device's 3 radio receiver chains.
+ *
+ * After the first association, driver should accumulate signal and noise
+ * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
+ * beacons from the associated network (don't collect statistics that come
+ * in from scanning, or any other non-network source).
+ *
+ * DISCONNECTED ANTENNA:
+ *
+ * Driver should determine which antennas are actually connected, by comparing
+ * average beacon signal levels for the 3 Rx chains.  Accumulate (add) the
+ * following values over 20 beacons, one accumulator for each of the chains
+ * a/b/c, from struct statistics_rx_non_phy:
+ *
+ * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
+ *
+ * Find the strongest signal from among a/b/c.  Compare the other two to the
+ * strongest.  If any signal is more than 15 dB (times 20, unless you
+ * divide the accumulated values by 20) below the strongest, the driver
+ * considers that antenna to be disconnected, and should not try to use that
+ * antenna/chain for Rx or Tx.  If both A and B seem to be disconnected,
+ * driver should declare the stronger one as connected, and attempt to use it
+ * (A and B are the only 2 Tx chains!).
+ *
+ *
+ * RX BALANCE:
+ *
+ * Driver should balance the 3 receivers (but just the ones that are connected
+ * to antennas, see above) for gain, by comparing the average signal levels
+ * detected during the silence after each beacon (background noise).
+ * Accumulate (add) the following values over 20 beacons, one accumulator for
+ * each of the chains a/b/c, from struct statistics_rx_non_phy:
+ *
+ * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
+ *
+ * Find the weakest background noise level from among a/b/c.  This Rx chain
+ * will be the reference, with 0 gain adjustment.  Attenuate the other chains
+ * by finding the noise difference:
+ *
+ * (accum_noise[i] - accum_noise[reference]) / 30
+ *
+ * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
+ * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the
+ * driver should limit the difference results to a range of 0-3 (0-4.5 dB),
+ * and set bit 2 to indicate "reduce gain".  The value for the reference
+ * (weakest) chain should be "0".
+ *
+ * diff_gain_[abc] bit fields:
+ *   2: (1) reduce gain, (0) increase gain
+ * 1-0: amount of gain, units of 1.5 dB
+ */
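The RX-balance arithmetic above (scale noise deltas to 1.5 dB units, limit to 0..3, set bit 2 to reduce gain) can be checked with a small standalone program. This is only an illustration of the formula in the comment, not the driver's calibration code; the accumulator values are made up, and bits 1-0 plus bit 2 are packed exactly as the diff_gain_[abc] description states.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example sums of beacon_silence_rssi over 20 beacons, per chain. */
	uint32_t accum_noise[3] = { 1260, 1350, 1420 };
	int8_t diff_gain[3];
	int ref = 0, i;

	/* The chain with the weakest accumulated noise is the reference. */
	for (i = 1; i < 3; i++)
		if (accum_noise[i] < accum_noise[ref])
			ref = i;

	for (i = 0; i < 3; i++) {
		/* "/ 30" turns 20 accumulated dB samples into 1.5 dB units. */
		uint32_t delta = (accum_noise[i] - accum_noise[ref]) / 30;

		if (delta > 3)
			delta = 3;	/* limit to 0..3 (0..4.5 dB) */
		/* bit 2 set means "reduce gain"; the reference chain stays 0 */
		diff_gain[i] = (i == ref) ? 0 : (int8_t)(delta | (1 << 2));
	}

	printf("diff_gain a/b/c = %d %d %d\n",
	       diff_gain[0], diff_gain[1], diff_gain[2]);	/* 0 7 7 */
	return 0;
}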
+
+/* Phy calibration command for series */
+/* The default calibrate table size if not specified by firmware */
+#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE    18
+enum {
+       IWL_PHY_CALIBRATE_DIFF_GAIN_CMD         = 7,
+       IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
+};
+
+#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE         (253)
+
+struct iwl_calib_hdr {
+       u8 op_code;
+       u8 first_group;
+       u8 groups_num;
+       u8 data_valid;
+} __packed;
+
+/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
+struct iwl_calib_diff_gain_cmd {
+       struct iwl_calib_hdr hdr;
+       s8 diff_gain_a;         /* see above */
+       s8 diff_gain_b;
+       s8 diff_gain_c;
+       u8 reserved1;
+} __packed;
+
+/******************************************************************************
+ * (12)
+ * Miscellaneous Commands:
+ *
+ *****************************************************************************/
+
+/*
+ * LEDs Command & Response
+ * REPLY_LEDS_CMD = 0x48 (command, has simple generic response)
+ *
+ * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
+ * this command turns it on or off, or sets up a periodic blinking cycle.
+ */
+struct iwl_led_cmd {
+       __le32 interval;        /* "interval" in uSec */
+       u8 id;                  /* 1: Activity, 2: Link, 3: Tech */
+       u8 off;                 /* # intervals off while blinking;
+                                * "0", with >0 "on" value, turns LED on */
+       u8 on;                  /* # intervals on while blinking;
+                                * "0", regardless of "off", turns LED off */
+       u8 reserved;
+} __packed;
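To make the on/off semantics concrete, here is a standalone sketch that fills a userspace mirror of the LED command for a 50% duty-cycle blink of the Link LED. It is not driver code; the struct is a local copy, htole32() stands in for cpu_to_le32(), and the 100 ms interval is an arbitrary example.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Local mirror of struct iwl_led_cmd (illustration only). */
struct led_cmd {
	uint32_t interval;	/* little-endian, uSec */
	uint8_t  id;		/* 1: Activity, 2: Link, 3: Tech */
	uint8_t  off;		/* # intervals off while blinking */
	uint8_t  on;		/* # intervals on while blinking */
	uint8_t  reserved;
} __attribute__((packed));

int main(void)
{
	struct led_cmd cmd = {
		.interval = htole32(100 * 1000),	/* one interval = 100 ms */
		.id  = 2,				/* Link LED */
		.off = 1,				/* one interval off ... */
		.on  = 1,				/* ... one interval on  */
	};

	/* on = 0 forces the LED off; off = 0 with on > 0 leaves it on solid. */
	printf("LEDS_CMD payload is %zu bytes\n", sizeof(cmd));	/* 8 bytes */
	return 0;
}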
+
+
+/******************************************************************************
+ * (13)
+ * Union of all expected notifications/responses:
+ *
+ *****************************************************************************/
+
+struct iwl_rx_packet {
+       /*
+        * The first 4 bytes of the RX frame header contain both the RX frame
+        * size and some flags.
+        * Bit fields:
+        * 31:    flag flush RB request
+        * 30:    flag ignore TC (terminal counter) request
+        * 29:    flag fast IRQ request
+        * 28-14: Reserved
+        * 13-00: RX frame size
+        */
+       __le32 len_n_flags;
+       struct iwl_cmd_header hdr;
+       union {
+               struct iwl3945_rx_frame rx_frame;
+               struct iwl3945_tx_resp tx_resp;
+               struct iwl3945_beacon_notif beacon_status;
+
+               struct iwl_alive_resp alive_frame;
+               struct iwl_spectrum_notification spectrum_notif;
+               struct iwl_csa_notification csa_notif;
+               struct iwl_error_resp err_resp;
+               struct iwl_card_state_notif card_state_notif;
+               struct iwl_add_sta_resp add_sta;
+               struct iwl_rem_sta_resp rem_sta;
+               struct iwl_sleep_notification sleep_notif;
+               struct iwl_spectrum_resp spectrum;
+               struct iwl_notif_statistics stats;
+               struct iwl_compressed_ba_resp compressed_ba;
+               struct iwl_missed_beacon_notif missed_beacon;
+               __le32 status;
+               u8 raw[0];
+       } u;
+} __packed;
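The len_n_flags bit layout documented above can be decoded with plain masks; the driver itself does the same thing with FH_RSCSR_FRAME_SIZE_MSK (see iwl_legacy_rx_pm_debug_statistics_notif() later in this series). The standalone sketch below uses invented macro names and an arbitrary example word, purely to illustrate the field positions.

#include <stdint.h>
#include <stdio.h>

#define RX_FRAME_SIZE_MSK	0x00003fff	/* bits 13..0 */
#define RX_FLAG_FLUSH_RB	(1u << 31)
#define RX_FLAG_IGNORE_TC	(1u << 30)
#define RX_FLAG_FAST_IRQ	(1u << 29)

int main(void)
{
	/* Example word: flush-RB and fast-IRQ flags set, 320-byte frame. */
	uint32_t len_n_flags = 0xa0000140;

	printf("frame size: %u bytes\n",
	       (unsigned)(len_n_flags & RX_FRAME_SIZE_MSK));
	printf("flush RB:   %s\n", (len_n_flags & RX_FLAG_FLUSH_RB) ? "yes" : "no");
	printf("ignore TC:  %s\n", (len_n_flags & RX_FLAG_IGNORE_TC) ? "yes" : "no");
	printf("fast IRQ:   %s\n", (len_n_flags & RX_FLAG_FAST_IRQ) ? "yes" : "no");
	return 0;
}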
+
+#endif                         /* __iwl_legacy_commands_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
new file mode 100644 (file)
index 0000000..d418b64
--- /dev/null
@@ -0,0 +1,2674 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-debug.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-power.h"
+#include "iwl-sta.h"
+#include "iwl-helpers.h"
+
+
+MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
+MODULE_VERSION(IWLWIFI_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+/*
+ * If bt_coex_active is true, uCode will do kill/defer every time the
+ * priority line is asserted (BT is sending signals on the priority line
+ * in the PCIx).
+ * If bt_coex_active is false, uCode will ignore BT activity and perform
+ * normal operation.
+ *
+ * Users might experience transmit issues on some platforms due to this
+ * WiFi/BT coexistence problem. The typical symptoms are: the device can
+ * scan and find all available APs, but cannot associate with any AP.
+ * On those platforms, WiFi communication can be restored by setting the
+ * "bt_coex_active" module parameter to "false".
+ *
+ * default: bt_coex_active = true (BT_COEX_ENABLE)
+ */
+static bool bt_coex_active = true;
+module_param(bt_coex_active, bool, S_IRUGO);
+MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
+
+u32 iwlegacy_debug_level;
+EXPORT_SYMBOL(iwlegacy_debug_level);
+
+const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+EXPORT_SYMBOL(iwlegacy_bcast_addr);
+
+
+/* This function both allocates and initializes hw and priv. */
+struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg)
+{
+       struct iwl_priv *priv;
+       /* mac80211 allocates memory for this device instance, including
+        *   space for this driver's private structure */
+       struct ieee80211_hw *hw;
+
+       hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
+                               cfg->ops->ieee80211_ops);
+       if (hw == NULL) {
+               pr_err("%s: Can not allocate network device\n",
+                      cfg->name);
+               goto out;
+       }
+
+       priv = hw->priv;
+       priv->hw = hw;
+
+out:
+       return hw;
+}
+EXPORT_SYMBOL(iwl_legacy_alloc_all);
+
+#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
+#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
+static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv,
+                             struct ieee80211_sta_ht_cap *ht_info,
+                             enum ieee80211_band band)
+{
+       u16 max_bit_rate = 0;
+       u8 rx_chains_num = priv->hw_params.rx_chains_num;
+       u8 tx_chains_num = priv->hw_params.tx_chains_num;
+
+       ht_info->cap = 0;
+       memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+
+       ht_info->ht_supported = true;
+
+       ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+       max_bit_rate = MAX_BIT_RATE_20_MHZ;
+       if (priv->hw_params.ht40_channel & BIT(band)) {
+               ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+               ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
+               ht_info->mcs.rx_mask[4] = 0x01;
+               max_bit_rate = MAX_BIT_RATE_40_MHZ;
+       }
+
+       if (priv->cfg->mod_params->amsdu_size_8K)
+               ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+       ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
+       ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
+
+       ht_info->mcs.rx_mask[0] = 0xFF;
+       if (rx_chains_num >= 2)
+               ht_info->mcs.rx_mask[1] = 0xFF;
+       if (rx_chains_num >= 3)
+               ht_info->mcs.rx_mask[2] = 0xFF;
+
+       /* Highest supported Rx data rate */
+       max_bit_rate *= rx_chains_num;
+       WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
+       ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
+
+       /* Tx MCS capabilities */
+       ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+       if (tx_chains_num != rx_chains_num) {
+               ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+               ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
+                               IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+       }
+}
+
+/**
+ * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based on EEPROM data
+ */
+int iwl_legacy_init_geos(struct iwl_priv *priv)
+{
+       struct iwl_channel_info *ch;
+       struct ieee80211_supported_band *sband;
+       struct ieee80211_channel *channels;
+       struct ieee80211_channel *geo_ch;
+       struct ieee80211_rate *rates;
+       int i = 0;
+
+       if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
+           priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
+               IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
+               set_bit(STATUS_GEO_CONFIGURED, &priv->status);
+               return 0;
+       }
+
+       channels = kzalloc(sizeof(struct ieee80211_channel) *
+                          priv->channel_count, GFP_KERNEL);
+       if (!channels)
+               return -ENOMEM;
+
+       rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
+                       GFP_KERNEL);
+       if (!rates) {
+               kfree(channels);
+               return -ENOMEM;
+       }
+
+       /* 5.2GHz channels start after the 2.4GHz channels */
+       sband = &priv->bands[IEEE80211_BAND_5GHZ];
+       sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)];
+       /* just OFDM */
+       sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
+       sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
+
+       if (priv->cfg->sku & IWL_SKU_N)
+               iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
+                                        IEEE80211_BAND_5GHZ);
+
+       sband = &priv->bands[IEEE80211_BAND_2GHZ];
+       sband->channels = channels;
+       /* OFDM & CCK */
+       sband->bitrates = rates;
+       sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
+
+       if (priv->cfg->sku & IWL_SKU_N)
+               iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
+                                        IEEE80211_BAND_2GHZ);
+
+       priv->ieee_channels = channels;
+       priv->ieee_rates = rates;
+
+       for (i = 0;  i < priv->channel_count; i++) {
+               ch = &priv->channel_info[i];
+
+               if (!iwl_legacy_is_channel_valid(ch))
+                       continue;
+
+               if (iwl_legacy_is_channel_a_band(ch))
+                       sband =  &priv->bands[IEEE80211_BAND_5GHZ];
+               else
+                       sband =  &priv->bands[IEEE80211_BAND_2GHZ];
+
+               geo_ch = &sband->channels[sband->n_channels++];
+
+               geo_ch->center_freq =
+                       ieee80211_channel_to_frequency(ch->channel, ch->band);
+               geo_ch->max_power = ch->max_power_avg;
+               geo_ch->max_antenna_gain = 0xff;
+               geo_ch->hw_value = ch->channel;
+
+               if (iwl_legacy_is_channel_valid(ch)) {
+                       if (!(ch->flags & EEPROM_CHANNEL_IBSS))
+                               geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
+
+                       if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
+                               geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+
+                       if (ch->flags & EEPROM_CHANNEL_RADAR)
+                               geo_ch->flags |= IEEE80211_CHAN_RADAR;
+
+                       geo_ch->flags |= ch->ht40_extension_channel;
+
+                       if (ch->max_power_avg > priv->tx_power_device_lmt)
+                               priv->tx_power_device_lmt = ch->max_power_avg;
+               } else {
+                       geo_ch->flags |= IEEE80211_CHAN_DISABLED;
+               }
+
+               IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
+                               ch->channel, geo_ch->center_freq,
+                               iwl_legacy_is_channel_a_band(ch) ?  "5.2" : "2.4",
+                               geo_ch->flags & IEEE80211_CHAN_DISABLED ?
+                               "restricted" : "valid",
+                                geo_ch->flags);
+       }
+
+       if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
+            priv->cfg->sku & IWL_SKU_A) {
+               IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
+                       "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
+                          priv->pci_dev->device,
+                          priv->pci_dev->subsystem_device);
+               priv->cfg->sku &= ~IWL_SKU_A;
+       }
+
+       IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
+                  priv->bands[IEEE80211_BAND_2GHZ].n_channels,
+                  priv->bands[IEEE80211_BAND_5GHZ].n_channels);
+
+       set_bit(STATUS_GEO_CONFIGURED, &priv->status);
+
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_init_geos);
+
+/*
+ * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos
+ */
+void iwl_legacy_free_geos(struct iwl_priv *priv)
+{
+       kfree(priv->ieee_channels);
+       kfree(priv->ieee_rates);
+       clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
+}
+EXPORT_SYMBOL(iwl_legacy_free_geos);
+
+static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv,
+                                    enum ieee80211_band band,
+                                    u16 channel, u8 extension_chan_offset)
+{
+       const struct iwl_channel_info *ch_info;
+
+       ch_info = iwl_legacy_get_channel_info(priv, band, channel);
+       if (!iwl_legacy_is_channel_valid(ch_info))
+               return false;
+
+       if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
+               return !(ch_info->ht40_extension_channel &
+                                       IEEE80211_CHAN_NO_HT40PLUS);
+       else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
+               return !(ch_info->ht40_extension_channel &
+                                       IEEE80211_CHAN_NO_HT40MINUS);
+
+       return false;
+}
+
+bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
+                           struct iwl_rxon_context *ctx,
+                           struct ieee80211_sta_ht_cap *ht_cap)
+{
+       if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
+               return false;
+
+       /*
+        * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
+        * the bit will not set if it is pure 40MHz case
+        */
+       if (ht_cap && !ht_cap->ht_supported)
+               return false;
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+       if (priv->disable_ht40)
+               return false;
+#endif
+
+       return iwl_legacy_is_channel_extension(priv, priv->band,
+                       le16_to_cpu(ctx->staging.channel),
+                       ctx->ht.extension_chan_offset);
+}
+EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed);
+
+static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
+{
+       u16 new_val;
+       u16 beacon_factor;
+
+       /*
+        * If mac80211 hasn't given us a beacon interval, program
+        * the default into the device.
+        */
+       if (!beacon_val)
+               return DEFAULT_BEACON_INTERVAL;
+
+       /*
+        * If the beacon interval we obtained from the peer
+        * is too large, we'll have to wake up more often
+        * (and in IBSS case, we'll beacon too much)
+        *
+        * For example, if max_beacon_val is 4096, and the
+        * requested beacon interval is 7000, we'll have to
+        * use 3500 to be able to wake up on the beacons.
+        *
+        * This could badly influence beacon detection stats.
+        */
+
+       beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
+       new_val = beacon_val / beacon_factor;
+
+       if (!new_val)
+               new_val = max_beacon_val;
+
+       return new_val;
+}
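As a quick check of the worked example in the comment (max 4096, requested 7000), the arithmetic can be reproduced standalone. The mirror below is not the driver function; the fallback value 100 merely stands in for DEFAULT_BEACON_INTERVAL, whose actual value is defined elsewhere.

#include <stdio.h>

/* Userspace mirror of the beacon-interval adjustment above. */
static unsigned adjust_beacon_interval(unsigned beacon_val, unsigned max_beacon_val)
{
	unsigned factor, new_val;

	if (!beacon_val)
		return 100;	/* placeholder for DEFAULT_BEACON_INTERVAL */

	factor = (beacon_val + max_beacon_val) / max_beacon_val;
	new_val = beacon_val / factor;

	return new_val ? new_val : max_beacon_val;
}

int main(void)
{
	/* (7000 + 4096) / 4096 = 2, so the interval is halved to 3500. */
	printf("%u\n", adjust_beacon_interval(7000, 4096));	/* prints 3500 */
	return 0;
}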
+
+int
+iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+       u64 tsf;
+       s32 interval_tm, rem;
+       struct ieee80211_conf *conf = NULL;
+       u16 beacon_int;
+       struct ieee80211_vif *vif = ctx->vif;
+
+       conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
+
+       lockdep_assert_held(&priv->mutex);
+
+       memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
+
+       ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
+       ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
+
+       beacon_int = vif ? vif->bss_conf.beacon_int : 0;
+
+       /*
+        * TODO: For IBSS we need to get atim_window from mac80211,
+        *       for now just always use 0
+        */
+       ctx->timing.atim_window = 0;
+
+       beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int,
+                       priv->hw_params.max_beacon_itrvl * TIME_UNIT);
+       ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
+
+       tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
+       interval_tm = beacon_int * TIME_UNIT;
+       rem = do_div(tsf, interval_tm);
+       ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
+
+       ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
+
+       IWL_DEBUG_ASSOC(priv,
+                       "beacon interval %d beacon timer %d beacon tim %d\n",
+                       le16_to_cpu(ctx->timing.beacon_interval),
+                       le32_to_cpu(ctx->timing.beacon_init_val),
+                       le16_to_cpu(ctx->timing.atim_window));
+
+       return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
+                               sizeof(ctx->timing), &ctx->timing);
+}
+EXPORT_SYMBOL(iwl_legacy_send_rxon_timing);
+
+void
+iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
+                               struct iwl_rxon_context *ctx,
+                               int hw_decrypt)
+{
+       struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
+
+       if (hw_decrypt)
+               rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
+       else
+               rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
+
+}
+EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto);
+
+/* check that the RXON structure is valid */
+int
+iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+       struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
+       bool error = false;
+
+       if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
+               if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
+                       IWL_WARN(priv, "check 2.4G: wrong narrow\n");
+                       error = true;
+               }
+               if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
+                       IWL_WARN(priv, "check 2.4G: wrong radar\n");
+                       error = true;
+               }
+       } else {
+               if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
+                       IWL_WARN(priv, "check 5.2G: not short slot!\n");
+                       error = true;
+               }
+               if (rxon->flags & RXON_FLG_CCK_MSK) {
+                       IWL_WARN(priv, "check 5.2G: CCK!\n");
+                       error = true;
+               }
+       }
+       if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
+               IWL_WARN(priv, "mac/bssid mcast!\n");
+               error = true;
+       }
+
+       /* make sure basic rates 6Mbps and 1Mbps are supported */
+       if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
+           (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
+               IWL_WARN(priv, "neither 1 nor 6 are basic\n");
+               error = true;
+       }
+
+       if (le16_to_cpu(rxon->assoc_id) > 2007) {
+               IWL_WARN(priv, "aid > 2007\n");
+               error = true;
+       }
+
+       if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
+                       == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
+               IWL_WARN(priv, "CCK and short slot\n");
+               error = true;
+       }
+
+       if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
+                       == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
+               IWL_WARN(priv, "CCK and auto detect\n");
+               error = true;
+       }
+
+       if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
+                           RXON_FLG_TGG_PROTECT_MSK)) ==
+                           RXON_FLG_TGG_PROTECT_MSK) {
+               IWL_WARN(priv, "TGg but no auto-detect\n");
+               error = true;
+       }
+
+       if (error) {
+               IWL_WARN(priv, "Tuning to channel %d\n",
+                           le16_to_cpu(rxon->channel));
+               IWL_ERR(priv, "Invalid RXON\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd);
+
+/**
+ * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
+ * @priv: staging_rxon is compared to active_rxon
+ *
+ * If the RXON structure is changing enough to require a new tune,
+ * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
+ * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
+ */
+int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
+                          struct iwl_rxon_context *ctx)
+{
+       const struct iwl_legacy_rxon_cmd *staging = &ctx->staging;
+       const struct iwl_legacy_rxon_cmd *active = &ctx->active;
+
+#define CHK(cond)                                                      \
+       if ((cond)) {                                                   \
+               IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n");   \
+               return 1;                                               \
+       }
+
+#define CHK_NEQ(c1, c2)                                                \
+       if ((c1) != (c2)) {                                     \
+               IWL_DEBUG_INFO(priv, "need full RXON - "        \
+                              #c1 " != " #c2 " - %d != %d\n",  \
+                              (c1), (c2));                     \
+               return 1;                                       \
+       }
+
+       /* These items are only settable from the full RXON command */
+       CHK(!iwl_legacy_is_associated_ctx(ctx));
+       CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
+       CHK(compare_ether_addr(staging->node_addr, active->node_addr));
+       CHK(compare_ether_addr(staging->wlap_bssid_addr,
+                               active->wlap_bssid_addr));
+       CHK_NEQ(staging->dev_type, active->dev_type);
+       CHK_NEQ(staging->channel, active->channel);
+       CHK_NEQ(staging->air_propagation, active->air_propagation);
+       CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
+               active->ofdm_ht_single_stream_basic_rates);
+       CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
+               active->ofdm_ht_dual_stream_basic_rates);
+       CHK_NEQ(staging->assoc_id, active->assoc_id);
+
+       /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
+        * be updated with the RXON_ASSOC command -- however only some
+        * flag transitions are allowed using RXON_ASSOC */
+
+       /* Check if we are not switching bands */
+       CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
+               active->flags & RXON_FLG_BAND_24G_MSK);
+
+       /* Check if we are switching association toggle */
+       CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
+               active->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+#undef CHK
+#undef CHK_NEQ
+
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_full_rxon_required);
+
+u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
+                           struct iwl_rxon_context *ctx)
+{
+       /*
+        * Assign the lowest rate -- should really get this from
+        * the beacon skb from mac80211.
+        */
+       if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
+               return IWL_RATE_1M_PLCP;
+       else
+               return IWL_RATE_6M_PLCP;
+}
+EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp);
+
+static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
+                            struct iwl_ht_config *ht_conf,
+                            struct iwl_rxon_context *ctx)
+{
+       struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
+
+       if (!ctx->ht.enabled) {
+               rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
+                       RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
+                       RXON_FLG_HT40_PROT_MSK |
+                       RXON_FLG_HT_PROT_MSK);
+               return;
+       }
+
+       rxon->flags |= cpu_to_le32(ctx->ht.protection <<
+                                       RXON_FLG_HT_OPERATING_MODE_POS);
+
+       /* Set up channel bandwidth:
+        * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
+       /* clear the HT channel mode before setting the mode */
+       rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
+                        RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+       if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) {
+               /* pure ht40 */
+               if (ctx->ht.protection ==
+                               IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
+                       rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
+                       /* Note: control channel is opposite of extension channel */
+                       switch (ctx->ht.extension_chan_offset) {
+                       case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+                               rxon->flags &=
+                                       ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+                               break;
+                       case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+                               rxon->flags |=
+                                       RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+                               break;
+                       }
+               } else {
+                       /* Note: control channel is opposite of extension channel */
+                       switch (ctx->ht.extension_chan_offset) {
+                       case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+                               rxon->flags &=
+                                       ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+                               rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+                               break;
+                       case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+                               rxon->flags |=
+                                       RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+                               rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+                               break;
+                       case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+                       default:
+                               /* channel location only valid if in Mixed mode */
+                               IWL_ERR(priv,
+                                       "invalid extension channel offset\n");
+                               break;
+                       }
+               }
+       } else {
+               rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
+       }
+
+       if (priv->cfg->ops->hcmd->set_rxon_chain)
+               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+
+       IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
+                       "extension channel offset 0x%x\n",
+                       le32_to_cpu(rxon->flags), ctx->ht.protection,
+                       ctx->ht.extension_chan_offset);
+}
+
+void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
+{
+       struct iwl_rxon_context *ctx;
+
+       for_each_context(priv, ctx)
+               _iwl_legacy_set_rxon_ht(priv, ht_conf, ctx);
+}
+EXPORT_SYMBOL(iwl_legacy_set_rxon_ht);
+
+/* Return valid, unused, channel for a passive scan to reset the RF */
+u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
+                                enum ieee80211_band band)
+{
+       const struct iwl_channel_info *ch_info;
+       int i;
+       u8 channel = 0;
+       u8 min, max;
+       struct iwl_rxon_context *ctx;
+
+       if (band == IEEE80211_BAND_5GHZ) {
+               min = 14;
+               max = priv->channel_count;
+       } else {
+               min = 0;
+               max = 14;
+       }
+
+       for (i = min; i < max; i++) {
+               bool busy = false;
+
+               for_each_context(priv, ctx) {
+                       busy = priv->channel_info[i].channel ==
+                               le16_to_cpu(ctx->staging.channel);
+                       if (busy)
+                               break;
+               }
+
+               if (busy)
+                       continue;
+
+               channel = priv->channel_info[i].channel;
+               ch_info = iwl_legacy_get_channel_info(priv, band, channel);
+               if (iwl_legacy_is_channel_valid(ch_info))
+                       break;
+       }
+
+       return channel;
+}
+EXPORT_SYMBOL(iwl_legacy_get_single_channel_number);
+
+/**
+ * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON
+ * @ch: requested channel as a pointer to struct ieee80211_channel
+ *
+ * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
+ * in the staging RXON flag structure based on the ch->band
+ */
+int
+iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
+                        struct iwl_rxon_context *ctx)
+{
+       enum ieee80211_band band = ch->band;
+       u16 channel = ch->hw_value;
+
+       if ((le16_to_cpu(ctx->staging.channel) == channel) &&
+           (priv->band == band))
+               return 0;
+
+       ctx->staging.channel = cpu_to_le16(channel);
+       if (band == IEEE80211_BAND_5GHZ)
+               ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
+       else
+               ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+
+       priv->band = band;
+
+       IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
+
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_set_rxon_channel);
+
+void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
+                           struct iwl_rxon_context *ctx,
+                           enum ieee80211_band band,
+                           struct ieee80211_vif *vif)
+{
+       if (band == IEEE80211_BAND_5GHZ) {
+               ctx->staging.flags &=
+                   ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
+                     | RXON_FLG_CCK_MSK);
+               ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+       } else {
+               /* Copied from iwl_post_associate() */
+               if (vif && vif->bss_conf.use_short_slot)
+                       ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+
+               ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+               ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
+               ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_set_flags_for_band);
+
+/*
+ * initialize rxon structure with default values from eeprom
+ */
+void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
+                                  struct iwl_rxon_context *ctx)
+{
+       const struct iwl_channel_info *ch_info;
+
+       memset(&ctx->staging, 0, sizeof(ctx->staging));
+
+       if (!ctx->vif) {
+               ctx->staging.dev_type = ctx->unused_devtype;
+       } else
+       switch (ctx->vif->type) {
+
+       case NL80211_IFTYPE_STATION:
+               ctx->staging.dev_type = ctx->station_devtype;
+               ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
+               break;
+
+       case NL80211_IFTYPE_ADHOC:
+               ctx->staging.dev_type = ctx->ibss_devtype;
+               ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
+               ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
+                                                 RXON_FILTER_ACCEPT_GRP_MSK;
+               break;
+
+       default:
+               IWL_ERR(priv, "Unsupported interface type %d\n",
+                       ctx->vif->type);
+               break;
+       }
+
+#if 0
+       /* TODO:  Figure out when short_preamble would be set and cache from
+        * that */
+       if (!hw_to_local(priv->hw)->short_preamble)
+               ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+       else
+               ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+#endif
+
+       ch_info = iwl_legacy_get_channel_info(priv, priv->band,
+                                      le16_to_cpu(ctx->active.channel));
+
+       if (!ch_info)
+               ch_info = &priv->channel_info[0];
+
+       ctx->staging.channel = cpu_to_le16(ch_info->channel);
+       priv->band = ch_info->band;
+
+       iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
+
+       ctx->staging.ofdm_basic_rates =
+           (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
+       ctx->staging.cck_basic_rates =
+           (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
+
+       /* clear both MIX and PURE40 mode flag */
+       ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
+                                       RXON_FLG_CHANNEL_MODE_PURE_40);
+       if (ctx->vif)
+               memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
+
+       ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
+       ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
+}
+EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config);
+
+void iwl_legacy_set_rate(struct iwl_priv *priv)
+{
+       const struct ieee80211_supported_band *hw = NULL;
+       struct ieee80211_rate *rate;
+       struct iwl_rxon_context *ctx;
+       int i;
+
+       hw = iwl_get_hw_mode(priv, priv->band);
+       if (!hw) {
+               IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
+               return;
+       }
+
+       priv->active_rate = 0;
+
+       for (i = 0; i < hw->n_bitrates; i++) {
+               rate = &(hw->bitrates[i]);
+               if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
+                       priv->active_rate |= (1 << rate->hw_value);
+       }
+
+       IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
+
+       for_each_context(priv, ctx) {
+               ctx->staging.cck_basic_rates =
+                   (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
+
+               ctx->staging.ofdm_basic_rates =
+                  (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_set_rate);
+
+void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
+{
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       if (priv->switch_rxon.switch_in_progress) {
+               ieee80211_chswitch_done(ctx->vif, is_success);
+               mutex_lock(&priv->mutex);
+               priv->switch_rxon.switch_in_progress = false;
+               mutex_unlock(&priv->mutex);
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_chswitch_done);
+
+void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
+
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
+
+       if (priv->switch_rxon.switch_in_progress) {
+               if (!le32_to_cpu(csa->status) &&
+                   (csa->channel == priv->switch_rxon.channel)) {
+                       rxon->channel = csa->channel;
+                       ctx->staging.channel = csa->channel;
+                       IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
+                             le16_to_cpu(csa->channel));
+                       iwl_legacy_chswitch_done(priv, true);
+               } else {
+                       IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+                             le16_to_cpu(csa->channel));
+                       iwl_legacy_chswitch_done(priv, false);
+               }
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_rx_csa);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
+                            struct iwl_rxon_context *ctx)
+{
+       struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
+
+       IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
+       iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
+       IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
+                               le16_to_cpu(rxon->channel));
+       IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
+       IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
+                               le32_to_cpu(rxon->filter_flags));
+       IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
+       IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
+                       rxon->ofdm_basic_rates);
+       IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
+                               rxon->cck_basic_rates);
+       IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
+       IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
+       IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
+                               le16_to_cpu(rxon->assoc_id));
+}
+EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd);
+#endif
+/**
+ * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card
+ */
+void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
+{
+       /* Set the FW error flag -- cleared on iwl_down */
+       set_bit(STATUS_FW_ERROR, &priv->status);
+
+       /* Cancel currently queued command. */
+       clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+
+       IWL_ERR(priv, "Loaded firmware version: %s\n",
+               priv->hw->wiphy->fw_version);
+
+       priv->cfg->ops->lib->dump_nic_error_log(priv);
+       if (priv->cfg->ops->lib->dump_fh)
+               priv->cfg->ops->lib->dump_fh(priv, NULL, false);
+       priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS)
+               iwl_legacy_print_rx_config_cmd(priv,
+                                       &priv->contexts[IWL_RXON_CTX_BSS]);
+#endif
+
+       wake_up_interruptible(&priv->wait_command_queue);
+
+       /* Keep the restart process from trying to send host
+        * commands by clearing the READY status bit */
+       clear_bit(STATUS_READY, &priv->status);
+
+       if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+               IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
+                         "Restarting adapter due to uCode error.\n");
+
+               if (priv->cfg->mod_params->restart_fw)
+                       queue_work(priv->workqueue, &priv->restart);
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_irq_handle_error);
+
+static int iwl_legacy_apm_stop_master(struct iwl_priv *priv)
+{
+       int ret = 0;
+
+       /* stop device's busmaster DMA activity */
+       iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+
+       ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
+                       CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+       if (ret)
+               IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
+
+       IWL_DEBUG_INFO(priv, "stop master\n");
+
+       return ret;
+}
+
+void iwl_legacy_apm_stop(struct iwl_priv *priv)
+{
+       IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
+
+       /* Stop device's DMA activity */
+       iwl_legacy_apm_stop_master(priv);
+
+       /* Reset the entire device */
+       iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+       udelay(10);
+
+       /*
+        * Clear "initialization complete" bit to move adapter from
+        * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+        */
+       iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
+                               CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+EXPORT_SYMBOL(iwl_legacy_apm_stop);
+
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
+ * NOTE:  This does not load uCode nor start the embedded processor
+ */
+int iwl_legacy_apm_init(struct iwl_priv *priv)
+{
+       int ret = 0;
+       u16 lctl;
+
+       IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
+
+       /*
+        * Use "set_bit" below rather than "write", to preserve any hardware
+        * bits already set by default after reset.
+        */
+
+       /* Disable L0S exit timer (platform NMI Work/Around) */
+       iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
+                         CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+       /*
+        * Disable L0s without affecting L1;
+        *  don't wait for ICH L0s (ICH bug W/A)
+        */
+       iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
+                         CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+       /* Set FH wait threshold to maximum (HW error during stress W/A) */
+       iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG,
+                                       CSR_DBG_HPET_MEM_REG_VAL);
+
+       /*
+        * Enable HAP INTA (interrupt from management bus) to
+        * wake device's PCI Express link L1a -> L0s
+        * NOTE:  This is a no-op for 3945 (non-existent bit)
+        */
+       iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+                                   CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+       /*
+        * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
+        * Check if BIOS (or OS) enabled L1-ASPM on this device.
+        * If so (likely), disable L0S, so device moves directly L0->L1;
+        *    costs negligible amount of power savings.
+        * If not (unlikely), enable L0S, so there is at least some
+        *    power savings, even without L1.
+        */
+       if (priv->cfg->base_params->set_l0s) {
+               lctl = iwl_legacy_pcie_link_ctl(priv);
+               if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
+                                       PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+                       /* L1-ASPM enabled; disable(!) L0S  */
+                       iwl_legacy_set_bit(priv, CSR_GIO_REG,
+                                       CSR_GIO_REG_VAL_L0S_ENABLED);
+                       IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
+               } else {
+                       /* L1-ASPM disabled; enable(!) L0S */
+                       iwl_legacy_clear_bit(priv, CSR_GIO_REG,
+                                       CSR_GIO_REG_VAL_L0S_ENABLED);
+                       IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
+               }
+       }
+
+       /* Configure analog phase-lock-loop before activating to D0A */
+       if (priv->cfg->base_params->pll_cfg_val)
+               iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG,
+                           priv->cfg->base_params->pll_cfg_val);
+
+       /*
+        * Set "initialization complete" bit to move adapter from
+        * D0U* --> D0A* (powered-up active) state.
+        */
+       iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+       /*
+        * Wait for clock stabilization; once stabilized, access to
+        * device-internal resources is supported, e.g. iwl_legacy_write_prph()
+        * and accesses to uCode SRAM.
+        */
+       ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
+                       CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+                       CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+       if (ret < 0) {
+               IWL_DEBUG_INFO(priv, "Failed to init the card\n");
+               goto out;
+       }
+
+       /*
+        * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
+        * BSM (Bootstrap State Machine) is only in 3945 and 4965.
+        *
+        * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
+        * do not disable clocks.  This preserves any hardware bits already
+        * set by default in "CLK_CTRL_REG" after reset.
+        */
+       if (priv->cfg->base_params->use_bsm)
+               iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
+                       APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
+       else
+               iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
+                       APMG_CLK_VAL_DMA_CLK_RQT);
+       udelay(20);
+
+       /* Disable L1-Active */
+       iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
+                         APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+out:
+       return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_apm_init);
+
+
+int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
+{
+       int ret;
+       s8 prev_tx_power;
+       bool defer;
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (priv->tx_power_user_lmt == tx_power && !force)
+               return 0;
+
+       if (!priv->cfg->ops->lib->send_tx_power)
+               return -EOPNOTSUPP;
+
+       if (tx_power < IWL4965_TX_POWER_TARGET_POWER_MIN) {
+               IWL_WARN(priv,
+                        "Requested user TXPOWER %d below lower limit %d.\n",
+                        tx_power,
+                        IWL4965_TX_POWER_TARGET_POWER_MIN);
+               return -EINVAL;
+       }
+
+       if (tx_power > priv->tx_power_device_lmt) {
+               IWL_WARN(priv,
+                       "Requested user TXPOWER %d above upper limit %d.\n",
+                        tx_power, priv->tx_power_device_lmt);
+               return -EINVAL;
+       }
+
+       if (!iwl_legacy_is_ready_rf(priv))
+               return -EIO;
+
+       /* scan complete and commit_rxon use the tx_power_next value;
+        * it always needs to be updated with the newest request */
+       priv->tx_power_next = tx_power;
+
+       /* do not set tx power when scanning or channel changing */
+       defer = test_bit(STATUS_SCANNING, &priv->status) ||
+               memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
+       if (defer && !force) {
+               IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
+               return 0;
+       }
+
+       prev_tx_power = priv->tx_power_user_lmt;
+       priv->tx_power_user_lmt = tx_power;
+
+       ret = priv->cfg->ops->lib->send_tx_power(priv);
+
+       /* if setting tx_power fails, restore the original tx power */
+       if (ret) {
+               priv->tx_power_user_lmt = prev_tx_power;
+               priv->tx_power_next = prev_tx_power;
+       }
+       return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_set_tx_power);
+
+void iwl_legacy_send_bt_config(struct iwl_priv *priv)
+{
+       struct iwl_bt_cmd bt_cmd = {
+               .lead_time = BT_LEAD_TIME_DEF,
+               .max_kill = BT_MAX_KILL_DEF,
+               .kill_ack_mask = 0,
+               .kill_cts_mask = 0,
+       };
+
+       if (!bt_coex_active)
+               bt_cmd.flags = BT_COEX_DISABLE;
+       else
+               bt_cmd.flags = BT_COEX_ENABLE;
+
+       IWL_DEBUG_INFO(priv, "BT coex %s\n",
+               (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
+
+       if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+                            sizeof(struct iwl_bt_cmd), &bt_cmd))
+               IWL_ERR(priv, "failed to send BT Coex Config\n");
+}
+EXPORT_SYMBOL(iwl_legacy_send_bt_config);
+
+int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
+{
+       struct iwl_statistics_cmd statistics_cmd = {
+               .configuration_flags =
+                       clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
+       };
+
+       if (flags & CMD_ASYNC)
+               return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
+                                       sizeof(struct iwl_statistics_cmd),
+                                       &statistics_cmd, NULL);
+       else
+               return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
+                                       sizeof(struct iwl_statistics_cmd),
+                                       &statistics_cmd);
+}
+EXPORT_SYMBOL(iwl_legacy_send_statistics_request);
+
+void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
+                          struct iwl_rx_mem_buffer *rxb)
+{
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
+       IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
+                    sleep->pm_sleep_mode, sleep->pm_wakeup_src);
+#endif
+}
+EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif);
+
+void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
+                                     struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+       IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
+                       "notification for %s:\n", len,
+                       iwl_legacy_get_cmd_string(pkt->hdr.cmd));
+       iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
+}
+EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif);
+
+void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
+                       struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+       IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
+               "seq 0x%04X ser 0x%08X\n",
+               le32_to_cpu(pkt->u.err_resp.error_type),
+               iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id),
+               pkt->u.err_resp.cmd_id,
+               le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
+               le32_to_cpu(pkt->u.err_resp.error_info));
+}
+EXPORT_SYMBOL(iwl_legacy_rx_reply_error);
+
+void iwl_legacy_clear_isr_stats(struct iwl_priv *priv)
+{
+       memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
+}
+
+int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
+                          const struct ieee80211_tx_queue_params *params)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_rxon_context *ctx;
+       unsigned long flags;
+       int q;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       if (!iwl_legacy_is_ready_rf(priv)) {
+               IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
+               return -EIO;
+       }
+
+       if (queue >= AC_NUM) {
+               IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
+               return 0;
+       }
+
+       q = AC_NUM - 1 - queue;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       for_each_context(priv, ctx) {
+               ctx->qos_data.def_qos_parm.ac[q].cw_min =
+                       cpu_to_le16(params->cw_min);
+               ctx->qos_data.def_qos_parm.ac[q].cw_max =
+                       cpu_to_le16(params->cw_max);
+               ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
+               ctx->qos_data.def_qos_parm.ac[q].edca_txop =
+                               cpu_to_le16((params->txop * 32));
+
+               ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
+       }
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_conf_tx);
+
+int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw)
+{
+       struct iwl_priv *priv = hw->priv;
+
+       return priv->ibss_manager == IWL_IBSS_MANAGER;
+}
+EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon);
+
+static int
+iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+       iwl_legacy_connection_init_rx_config(priv, ctx);
+
+       if (priv->cfg->ops->hcmd->set_rxon_chain)
+               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+
+       return iwl_legacy_commit_rxon(priv, ctx);
+}
+
+static int iwl_legacy_setup_interface(struct iwl_priv *priv,
+                              struct iwl_rxon_context *ctx)
+{
+       struct ieee80211_vif *vif = ctx->vif;
+       int err;
+
+       lockdep_assert_held(&priv->mutex);
+
+       /*
+        * This variable will be correct only when there's just
+        * a single context, but all code using it is for hardware
+        * that supports only one context.
+        */
+       priv->iw_mode = vif->type;
+
+       ctx->is_active = true;
+
+       err = iwl_legacy_set_mode(priv, ctx);
+       if (err) {
+               if (!ctx->always_active)
+                       ctx->is_active = false;
+               return err;
+       }
+
+       return 0;
+}
+
+int
+iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+       struct iwl_rxon_context *tmp, *ctx = NULL;
+       int err;
+
+       IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
+                          vif->type, vif->addr);
+
+       mutex_lock(&priv->mutex);
+
+       if (!iwl_legacy_is_ready_rf(priv)) {
+               IWL_WARN(priv, "Try to add interface when device not ready\n");
+               err = -EINVAL;
+               goto out;
+       }
+
+       for_each_context(priv, tmp) {
+               u32 possible_modes =
+                       tmp->interface_modes | tmp->exclusive_interface_modes;
+
+               if (tmp->vif) {
+                       /* check if this busy context is exclusive */
+                       if (tmp->exclusive_interface_modes &
+                                               BIT(tmp->vif->type)) {
+                               err = -EINVAL;
+                               goto out;
+                       }
+                       continue;
+               }
+
+               if (!(possible_modes & BIT(vif->type)))
+                       continue;
+
+               /* found a possibly usable context without an interface */
+               ctx = tmp;
+               break;
+       }
+
+       if (!ctx) {
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+
+       vif_priv->ctx = ctx;
+       ctx->vif = vif;
+
+       err = iwl_legacy_setup_interface(priv, ctx);
+       if (!err)
+               goto out;
+
+       ctx->vif = NULL;
+       priv->iw_mode = NL80211_IFTYPE_STATION;
+ out:
+       mutex_unlock(&priv->mutex);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+       return err;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_add_interface);
+
+static void iwl_legacy_teardown_interface(struct iwl_priv *priv,
+                                  struct ieee80211_vif *vif,
+                                  bool mode_change)
+{
+       struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (priv->scan_vif == vif) {
+               iwl_legacy_scan_cancel_timeout(priv, 200);
+               iwl_legacy_force_scan_end(priv);
+       }
+
+       if (!mode_change) {
+               iwl_legacy_set_mode(priv, ctx);
+               if (!ctx->always_active)
+                       ctx->is_active = false;
+       }
+}
+
+void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       mutex_lock(&priv->mutex);
+
+       WARN_ON(ctx->vif != vif);
+       ctx->vif = NULL;
+
+       iwl_legacy_teardown_interface(priv, vif, false);
+
+       memset(priv->bssid, 0, ETH_ALEN);
+       mutex_unlock(&priv->mutex);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+
+}
+EXPORT_SYMBOL(iwl_legacy_mac_remove_interface);
+
+int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv)
+{
+       if (!priv->txq)
+               priv->txq = kzalloc(
+                       sizeof(struct iwl_tx_queue) *
+                               priv->cfg->base_params->num_of_queues,
+                       GFP_KERNEL);
+       if (!priv->txq) {
+               IWL_ERR(priv, "Not enough memory for txq\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem);
+
+void iwl_legacy_txq_mem(struct iwl_priv *priv)
+{
+       kfree(priv->txq);
+       priv->txq = NULL;
+}
+EXPORT_SYMBOL(iwl_legacy_txq_mem);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+
+#define IWL_TRAFFIC_DUMP_SIZE  (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
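+
+/*
+ * The tx/rx traffic logs below are fixed-size ring buffers of
+ * IWL_TRAFFIC_ENTRIES entries, IWL_TRAFFIC_ENTRY_SIZE bytes each;
+ * the *_traffic_idx counters wrap modulo IWL_TRAFFIC_ENTRIES.
+ */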
+
+void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
+{
+       priv->tx_traffic_idx = 0;
+       priv->rx_traffic_idx = 0;
+       if (priv->tx_traffic)
+               memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
+       if (priv->rx_traffic)
+               memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
+}
+
+int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
+{
+       u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
+
+       if (iwlegacy_debug_level & IWL_DL_TX) {
+               if (!priv->tx_traffic) {
+                       priv->tx_traffic =
+                               kzalloc(traffic_size, GFP_KERNEL);
+                       if (!priv->tx_traffic)
+                               return -ENOMEM;
+               }
+       }
+       if (iwlegacy_debug_level & IWL_DL_RX) {
+               if (!priv->rx_traffic) {
+                       priv->rx_traffic =
+                               kzalloc(traffic_size, GFP_KERNEL);
+                       if (!priv->rx_traffic)
+                               return -ENOMEM;
+               }
+       }
+       iwl_legacy_reset_traffic_log(priv);
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem);
+
+void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
+{
+       kfree(priv->tx_traffic);
+       priv->tx_traffic = NULL;
+
+       kfree(priv->rx_traffic);
+       priv->rx_traffic = NULL;
+}
+EXPORT_SYMBOL(iwl_legacy_free_traffic_mem);
+
+void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
+                     u16 length, struct ieee80211_hdr *header)
+{
+       __le16 fc;
+       u16 len;
+
+       if (likely(!(iwlegacy_debug_level & IWL_DL_TX)))
+               return;
+
+       if (!priv->tx_traffic)
+               return;
+
+       fc = header->frame_control;
+       if (ieee80211_is_data(fc)) {
+               len = (length > IWL_TRAFFIC_ENTRY_SIZE)
+                      ? IWL_TRAFFIC_ENTRY_SIZE : length;
+               memcpy((priv->tx_traffic +
+                      (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
+                      header, len);
+               priv->tx_traffic_idx =
+                       (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame);
+
+void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
+                     u16 length, struct ieee80211_hdr *header)
+{
+       __le16 fc;
+       u16 len;
+
+       if (likely(!(iwlegacy_debug_level & IWL_DL_RX)))
+               return;
+
+       if (!priv->rx_traffic)
+               return;
+
+       fc = header->frame_control;
+       if (ieee80211_is_data(fc)) {
+               len = (length > IWL_TRAFFIC_ENTRY_SIZE)
+                      ? IWL_TRAFFIC_ENTRY_SIZE : length;
+               memcpy((priv->rx_traffic +
+                      (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
+                      header, len);
+               priv->rx_traffic_idx =
+                       (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame);
+
+const char *iwl_legacy_get_mgmt_string(int cmd)
+{
+       switch (cmd) {
+               IWL_CMD(MANAGEMENT_ASSOC_REQ);
+               IWL_CMD(MANAGEMENT_ASSOC_RESP);
+               IWL_CMD(MANAGEMENT_REASSOC_REQ);
+               IWL_CMD(MANAGEMENT_REASSOC_RESP);
+               IWL_CMD(MANAGEMENT_PROBE_REQ);
+               IWL_CMD(MANAGEMENT_PROBE_RESP);
+               IWL_CMD(MANAGEMENT_BEACON);
+               IWL_CMD(MANAGEMENT_ATIM);
+               IWL_CMD(MANAGEMENT_DISASSOC);
+               IWL_CMD(MANAGEMENT_AUTH);
+               IWL_CMD(MANAGEMENT_DEAUTH);
+               IWL_CMD(MANAGEMENT_ACTION);
+       default:
+               return "UNKNOWN";
+
+       }
+}
+
+const char *iwl_legacy_get_ctrl_string(int cmd)
+{
+       switch (cmd) {
+               IWL_CMD(CONTROL_BACK_REQ);
+               IWL_CMD(CONTROL_BACK);
+               IWL_CMD(CONTROL_PSPOLL);
+               IWL_CMD(CONTROL_RTS);
+               IWL_CMD(CONTROL_CTS);
+               IWL_CMD(CONTROL_ACK);
+               IWL_CMD(CONTROL_CFEND);
+               IWL_CMD(CONTROL_CFENDACK);
+       default:
+               return "UNKNOWN";
+
+       }
+}
+
+void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv)
+{
+       memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
+       memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
+}
+
+/*
+ * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is defined, iwl_legacy_update_stats()
+ * records all MGMT, CTRL and DATA packets on both the TX and RX paths;
+ * use debugfs to display the tx/rx statistics.
+ * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is not defined, no MGMT and CTRL
+ * information is recorded, but DATA packets are still counted because
+ * iwl_led.c needs to control the LED blinking based on the number of
+ * TX and RX data frames.
+ */
+void
+iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
+{
+       struct traffic_stats    *stats;
+
+       if (is_tx)
+               stats = &priv->tx_stats;
+       else
+               stats = &priv->rx_stats;
+
+       if (ieee80211_is_mgmt(fc)) {
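+               /*
+                * fc is __le16; comparing against cpu_to_le16() constants
+                * lets the switch match without byte-swapping fc at runtime.
+                */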
+               switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+               case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+                       stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
+                       stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+                       stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
+                       stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
+                       stats->mgmt[MANAGEMENT_PROBE_REQ]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
+                       stats->mgmt[MANAGEMENT_PROBE_RESP]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_BEACON):
+                       stats->mgmt[MANAGEMENT_BEACON]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_ATIM):
+                       stats->mgmt[MANAGEMENT_ATIM]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
+                       stats->mgmt[MANAGEMENT_DISASSOC]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_AUTH):
+                       stats->mgmt[MANAGEMENT_AUTH]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+                       stats->mgmt[MANAGEMENT_DEAUTH]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_ACTION):
+                       stats->mgmt[MANAGEMENT_ACTION]++;
+                       break;
+               }
+       } else if (ieee80211_is_ctl(fc)) {
+               switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+               case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
+                       stats->ctrl[CONTROL_BACK_REQ]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_BACK):
+                       stats->ctrl[CONTROL_BACK]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
+                       stats->ctrl[CONTROL_PSPOLL]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_RTS):
+                       stats->ctrl[CONTROL_RTS]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_CTS):
+                       stats->ctrl[CONTROL_CTS]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_ACK):
+                       stats->ctrl[CONTROL_ACK]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_CFEND):
+                       stats->ctrl[CONTROL_CFEND]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
+                       stats->ctrl[CONTROL_CFENDACK]++;
+                       break;
+               }
+       } else {
+               /* data */
+               stats->data_cnt++;
+               stats->data_bytes += len;
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_update_stats);
+#endif
+
+static void _iwl_legacy_force_rf_reset(struct iwl_priv *priv)
+{
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       if (!iwl_legacy_is_any_associated(priv)) {
+               IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
+               return;
+       }
+       /*
+        * There is no easy and better way to force-reset the radio: the only
+        * known method is switching the channel, which forces the radio to
+        * reset and retune.
+        * Use an internal short scan (single channel) operation to achieve
+        * this.
+        * The driver should reset the radio when it detects a number of
+        * consecutive missed beacons or any other uCode error condition.
+        */
+       IWL_DEBUG_INFO(priv, "perform radio reset.\n");
+       iwl_legacy_internal_short_hw_scan(priv);
+}
+
+
+int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external)
+{
+       struct iwl_force_reset *force_reset;
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return -EINVAL;
+
+       if (mode >= IWL_MAX_FORCE_RESET) {
+               IWL_DEBUG_INFO(priv, "invalid reset request.\n");
+               return -EINVAL;
+       }
+       force_reset = &priv->force_reset[mode];
+       force_reset->reset_request_count++;
+       if (!external) {
+               if (force_reset->last_force_reset_jiffies &&
+                   time_after(force_reset->last_force_reset_jiffies +
+                   force_reset->reset_duration, jiffies)) {
+                       IWL_DEBUG_INFO(priv, "force reset rejected\n");
+                       force_reset->reset_reject_count++;
+                       return -EAGAIN;
+               }
+       }
+       force_reset->reset_success_count++;
+       force_reset->last_force_reset_jiffies = jiffies;
+       IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
+       switch (mode) {
+       case IWL_RF_RESET:
+               _iwl_legacy_force_rf_reset(priv);
+               break;
+       case IWL_FW_RESET:
+               /*
+                * If the request comes from an external source (e.g. debugfs),
+                * always perform it, regardless of the module parameter
+                * setting.
+                * If the request is internal (uCode error or a failure
+                * detected by the driver), the fw_restart module parameter
+                * must be checked before reloading the firmware.
+                */
+               if (!external && !priv->cfg->mod_params->restart_fw) {
+                       IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
+                                      "module parameter setting\n");
+                       break;
+               }
+               IWL_ERR(priv, "On demand firmware reload\n");
+               /* Set the FW error flag -- cleared on iwl_down */
+               set_bit(STATUS_FW_ERROR, &priv->status);
+               wake_up_interruptible(&priv->wait_command_queue);
+               /*
+                * Keep the restart process from trying to send host
+                * commands by clearing the READY status bit.
+                */
+               clear_bit(STATUS_READY, &priv->status);
+               queue_work(priv->workqueue, &priv->restart);
+               break;
+       }
+       return 0;
+}
+
+int
+iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
+                       struct ieee80211_vif *vif,
+                       enum nl80211_iftype newtype, bool newp2p)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+       struct iwl_rxon_context *tmp;
+       u32 interface_modes;
+       int err;
+
+       newtype = ieee80211_iftype_p2p(newtype, newp2p);
+
+       mutex_lock(&priv->mutex);
+
+       interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
+
+       if (!(interface_modes & BIT(newtype))) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       if (ctx->exclusive_interface_modes & BIT(newtype)) {
+               for_each_context(priv, tmp) {
+                       if (ctx == tmp)
+                               continue;
+
+                       if (!tmp->vif)
+                               continue;
+
+                       /*
+                        * The current mode switch would be exclusive, but
+                        * another context is active ... refuse the switch.
+                        */
+                       err = -EBUSY;
+                       goto out;
+               }
+       }
+
+       /* success */
+       iwl_legacy_teardown_interface(priv, vif, true);
+       vif->type = newtype;
+       err = iwl_legacy_setup_interface(priv, ctx);
+       WARN_ON(err);
+       /*
+        * We've switched internally, but submitting to the
+        * device may have failed for some reason. Mask this
+        * error, because otherwise mac80211 will not switch
+        * (and set the interface type back) and we'll be
+        * out of sync with it.
+        */
+       err = 0;
+
+ out:
+       mutex_unlock(&priv->mutex);
+       return err;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_change_interface);
+
+/*
+ * On every watchdog tick we check the queue's (latest) time stamp. If it
+ * has not changed during the timeout period and the queue is not empty,
+ * we reset the firmware.
+ */
+static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
+{
+       struct iwl_tx_queue *txq = &priv->txq[cnt];
+       struct iwl_queue *q = &txq->q;
+       unsigned long timeout;
+       int ret;
+
+       if (q->read_ptr == q->write_ptr) {
+               txq->time_stamp = jiffies;
+               return 0;
+       }
+
+       timeout = txq->time_stamp +
+                 msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
+
+       if (time_after(jiffies, timeout)) {
+               IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
+                               q->id, priv->cfg->base_params->wd_timeout);
+               ret = iwl_legacy_force_reset(priv, IWL_FW_RESET, false);
+               return (ret == -EAGAIN) ? 0 : 1;
+       }
+
+       return 0;
+}
+
+/*
+ * Making the watchdog tick a quarter of the timeout ensures we discover
+ * a hung queue between timeout and 1.25 * timeout.
+ */
+#define IWL_WD_TICK(timeout) ((timeout) / 4)
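+
+/*
+ * Example (timeout value purely illustrative): with wd_timeout = 2000 ms
+ * the watchdog fires every 500 ms, so a stuck queue is detected at most
+ * 2500 ms after it last made progress.
+ */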
+
+/*
+ * Watchdog timer callback: check each TX queue for a hang and, if one is
+ * hung, reset the firmware. If everything is fine, just rearm the timer.
+ */
+void iwl_legacy_bg_watchdog(unsigned long data)
+{
+       struct iwl_priv *priv = (struct iwl_priv *)data;
+       int cnt;
+       unsigned long timeout;
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       timeout = priv->cfg->base_params->wd_timeout;
+       if (timeout == 0)
+               return;
+
+       /* monitor and check for stuck cmd queue */
+       if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue))
+               return;
+
+       /* monitor and check for other stuck queues */
+       if (iwl_legacy_is_any_associated(priv)) {
+               for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
+                       /* skip as we already checked the command queue */
+                       if (cnt == priv->cmd_queue)
+                               continue;
+                       if (iwl_legacy_check_stuck_queue(priv, cnt))
+                               return;
+               }
+       }
+
+       mod_timer(&priv->watchdog, jiffies +
+                 msecs_to_jiffies(IWL_WD_TICK(timeout)));
+}
+EXPORT_SYMBOL(iwl_legacy_bg_watchdog);
+
+void iwl_legacy_setup_watchdog(struct iwl_priv *priv)
+{
+       unsigned int timeout = priv->cfg->base_params->wd_timeout;
+
+       if (timeout)
+               mod_timer(&priv->watchdog,
+                         jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
+       else
+               del_timer(&priv->watchdog);
+}
+EXPORT_SYMBOL(iwl_legacy_setup_watchdog);
+
+/*
+ * Extended beacon time format:
+ * a time in usec is converted into a 32-bit value in extended:internal
+ * format, where the extended part is the beacon count and the internal
+ * part is the time in usec within one beacon interval.
+ */
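+/*
+ * Worked example (values assumed for illustration): with
+ * beacon_time_tsf_bits = 22 and a 100 TU beacon interval
+ * (100 * 1024 = 102400 usec), usec = 250000 gives quot = 2 beacons and
+ * rem = 45200 usec, returned as (2 << 22) + 45200.
+ */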
+u32
+iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
+                                       u32 usec, u32 beacon_interval)
+{
+       u32 quot;
+       u32 rem;
+       u32 interval = beacon_interval * TIME_UNIT;
+
+       if (!interval || !usec)
+               return 0;
+
+       quot = (usec / interval) &
+               (iwl_legacy_beacon_time_mask_high(priv,
+               priv->hw_params.beacon_time_tsf_bits) >>
+               priv->hw_params.beacon_time_tsf_bits);
+       rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv,
+                                  priv->hw_params.beacon_time_tsf_bits);
+
+       return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
+}
+EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons);
+
+/* base is usually what we get from the uCode with each received frame,
+ * i.e. the HW timer counter counting down
+ */
+__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
+                          u32 addon, u32 beacon_interval)
+{
+       u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv,
+                                       priv->hw_params.beacon_time_tsf_bits);
+       u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv,
+                                       priv->hw_params.beacon_time_tsf_bits);
+       u32 interval = beacon_interval * TIME_UNIT;
+       u32 res = (base & iwl_legacy_beacon_time_mask_high(priv,
+                               priv->hw_params.beacon_time_tsf_bits)) +
+                               (addon & iwl_legacy_beacon_time_mask_high(priv,
+                               priv->hw_params.beacon_time_tsf_bits));
+
+       if (base_low > addon_low)
+               res += base_low - addon_low;
+       else if (base_low < addon_low) {
+               res += interval + base_low - addon_low;
+               res += (1 << priv->hw_params.beacon_time_tsf_bits);
+       } else
+               res += (1 << priv->hw_params.beacon_time_tsf_bits);
+
+       return cpu_to_le32(res);
+}
+EXPORT_SYMBOL(iwl_legacy_add_beacon_time);
+
+#ifdef CONFIG_PM
+
+int iwl_legacy_pci_suspend(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct iwl_priv *priv = pci_get_drvdata(pdev);
+
+       /*
+        * This function is called when the system goes into the suspend
+        * state. mac80211 calls iwl_mac_stop() from its suspend handler
+        * first, but since iwl_mac_stop() has no knowledge of who the
+        * caller is, it does not call apm_ops.stop() to stop the DMA
+        * operation. Call apm_ops.stop() here to make sure the DMA is
+        * stopped.
+        */
+       iwl_legacy_apm_stop(priv);
+
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_pci_suspend);
+
+int iwl_legacy_pci_resume(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct iwl_priv *priv = pci_get_drvdata(pdev);
+       bool hw_rfkill = false;
+
+       /*
+        * We disable the RETRY_TIMEOUT register (0x41) to keep
+        * PCI Tx retries from interfering with C3 CPU state.
+        */
+       pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+       iwl_legacy_enable_interrupts(priv);
+
+       if (!(iwl_read32(priv, CSR_GP_CNTRL) &
+                               CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+               hw_rfkill = true;
+
+       if (hw_rfkill)
+               set_bit(STATUS_RF_KILL_HW, &priv->status);
+       else
+               clear_bit(STATUS_RF_KILL_HW, &priv->status);
+
+       wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
+
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_pci_resume);
+
+const struct dev_pm_ops iwl_legacy_pm_ops = {
+       .suspend = iwl_legacy_pci_suspend,
+       .resume = iwl_legacy_pci_resume,
+       .freeze = iwl_legacy_pci_suspend,
+       .thaw = iwl_legacy_pci_resume,
+       .poweroff = iwl_legacy_pci_suspend,
+       .restore = iwl_legacy_pci_resume,
+};
+EXPORT_SYMBOL(iwl_legacy_pm_ops);
+
+#endif /* CONFIG_PM */
+
+static void
+iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       if (!ctx->is_active)
+               return;
+
+       ctx->qos_data.def_qos_parm.qos_flags = 0;
+
+       if (ctx->qos_data.qos_active)
+               ctx->qos_data.def_qos_parm.qos_flags |=
+                       QOS_PARAM_FLG_UPDATE_EDCA_MSK;
+
+       if (ctx->ht.enabled)
+               ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
+
+       IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+                     ctx->qos_data.qos_active,
+                     ctx->qos_data.def_qos_parm.qos_flags);
+
+       iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd,
+                              sizeof(struct iwl_qosparam_cmd),
+                              &ctx->qos_data.def_qos_parm, NULL);
+}
+
+/**
+ * iwl_legacy_mac_config - mac80211 config callback
+ */
+int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
+{
+       struct iwl_priv *priv = hw->priv;
+       const struct iwl_channel_info *ch_info;
+       struct ieee80211_conf *conf = &hw->conf;
+       struct ieee80211_channel *channel = conf->channel;
+       struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+       struct iwl_rxon_context *ctx;
+       unsigned long flags = 0;
+       int ret = 0;
+       u16 ch;
+       int scan_active = 0;
+       bool ht_changed[NUM_IWL_RXON_CTX] = {};
+
+       if (WARN_ON(!priv->cfg->ops->legacy))
+               return -EOPNOTSUPP;
+
+       mutex_lock(&priv->mutex);
+
+       IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
+                                       channel->hw_value, changed);
+
+       if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
+                       test_bit(STATUS_SCANNING, &priv->status))) {
+               scan_active = 1;
+               IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
+       }
+
+       if (changed & (IEEE80211_CONF_CHANGE_SMPS |
+                      IEEE80211_CONF_CHANGE_CHANNEL)) {
+               /* mac80211 uses static SMPS for non-HT, which is what we want */
+               priv->current_ht_config.smps = conf->smps_mode;
+
+               /*
+                * Recalculate chain counts.
+                *
+                * If monitor mode is enabled then mac80211 will
+                * set up the SM PS mode to OFF if an HT channel is
+                * configured.
+                */
+               if (priv->cfg->ops->hcmd->set_rxon_chain)
+                       for_each_context(priv, ctx)
+                               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+       }
+
+       /* During scanning, mac80211 delays channel setting until the
+        * scan finishes, with changed = 0.
+        */
+       if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
+               if (scan_active)
+                       goto set_ch_out;
+
+               ch = channel->hw_value;
+               ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
+               if (!iwl_legacy_is_channel_valid(ch_info)) {
+                       IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
+                       ret = -EINVAL;
+                       goto set_ch_out;
+               }
+
+               spin_lock_irqsave(&priv->lock, flags);
+
+               for_each_context(priv, ctx) {
+                       /* Configure HT40 channels */
+                       if (ctx->ht.enabled != conf_is_ht(conf)) {
+                               ctx->ht.enabled = conf_is_ht(conf);
+                               ht_changed[ctx->ctxid] = true;
+                       }
+                       if (ctx->ht.enabled) {
+                               if (conf_is_ht40_minus(conf)) {
+                                       ctx->ht.extension_chan_offset =
+                                       IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+                                       ctx->ht.is_40mhz = true;
+                               } else if (conf_is_ht40_plus(conf)) {
+                                       ctx->ht.extension_chan_offset =
+                                       IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+                                       ctx->ht.is_40mhz = true;
+                               } else {
+                                       ctx->ht.extension_chan_offset =
+                                       IEEE80211_HT_PARAM_CHA_SEC_NONE;
+                                       ctx->ht.is_40mhz = false;
+                               }
+                       } else
+                               ctx->ht.is_40mhz = false;
+
+                       /*
+                        * Default to no protection. Protection mode will
+                        * later be set from BSS config in iwl_ht_conf
+                        */
+                       ctx->ht.protection =
+                                       IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+
+                       /* If we are switching from HT to 2.4 GHz, clear the
+                        * flags of any HT-related info since 2.4 GHz does
+                        * not support HT. */
+                       if ((le16_to_cpu(ctx->staging.channel) != ch))
+                               ctx->staging.flags = 0;
+
+                       iwl_legacy_set_rxon_channel(priv, channel, ctx);
+                       iwl_legacy_set_rxon_ht(priv, ht_conf);
+
+                       iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
+                                              ctx->vif);
+               }
+
+               spin_unlock_irqrestore(&priv->lock, flags);
+
+               if (priv->cfg->ops->legacy->update_bcast_stations)
+                       ret =
+                       priv->cfg->ops->legacy->update_bcast_stations(priv);
+
+ set_ch_out:
+               /* The list of supported rates and rate mask can be different
+                * for each band; since the band may have changed, reset
+                * the rate mask to what mac80211 lists */
+               iwl_legacy_set_rate(priv);
+       }
+
+       if (changed & (IEEE80211_CONF_CHANGE_PS |
+                       IEEE80211_CONF_CHANGE_IDLE)) {
+               ret = iwl_legacy_power_update_mode(priv, false);
+               if (ret)
+                       IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
+       }
+
+       if (changed & IEEE80211_CONF_CHANGE_POWER) {
+               IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
+                       priv->tx_power_user_lmt, conf->power_level);
+
+               iwl_legacy_set_tx_power(priv, conf->power_level, false);
+       }
+
+       if (!iwl_legacy_is_ready(priv)) {
+               IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
+               goto out;
+       }
+
+       if (scan_active)
+               goto out;
+
+       for_each_context(priv, ctx) {
+               if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
+                       iwl_legacy_commit_rxon(priv, ctx);
+               else
+                       IWL_DEBUG_INFO(priv,
+                               "Not re-sending same RXON configuration.\n");
+               if (ht_changed[ctx->ctxid])
+                       iwl_legacy_update_qos(priv, ctx);
+       }
+
+out:
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+       mutex_unlock(&priv->mutex);
+       return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_config);
+
+void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
+{
+       struct iwl_priv *priv = hw->priv;
+       unsigned long flags;
+       /* IBSS can only be the IWL_RXON_CTX_BSS context */
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+       if (WARN_ON(!priv->cfg->ops->legacy))
+               return;
+
+       mutex_lock(&priv->mutex);
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       spin_lock_irqsave(&priv->lock, flags);
+       memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* a new association gets rid of the IBSS beacon skb */
+       if (priv->beacon_skb)
+               dev_kfree_skb(priv->beacon_skb);
+
+       priv->beacon_skb = NULL;
+
+       priv->timestamp = 0;
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       iwl_legacy_scan_cancel_timeout(priv, 100);
+       if (!iwl_legacy_is_ready_rf(priv)) {
+               IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
+               mutex_unlock(&priv->mutex);
+               return;
+       }
+
+       /* We are restarting the association process;
+        * clear the RXON_FILTER_ASSOC_MSK bit.
+        */
+       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       iwl_legacy_commit_rxon(priv, ctx);
+
+       iwl_legacy_set_rate(priv);
+
+       mutex_unlock(&priv->mutex);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
+
+static void iwl_legacy_ht_conf(struct iwl_priv *priv,
+                       struct ieee80211_vif *vif)
+{
+       struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+       struct ieee80211_sta *sta;
+       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+       struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+
+       IWL_DEBUG_ASSOC(priv, "enter:\n");
+
+       if (!ctx->ht.enabled)
+               return;
+
+       ctx->ht.protection =
+               bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+       ctx->ht.non_gf_sta_present =
+               !!(bss_conf->ht_operation_mode &
+                               IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+
+       ht_conf->single_chain_sufficient = false;
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               rcu_read_lock();
+               sta = ieee80211_find_sta(vif, bss_conf->bssid);
+               if (sta) {
+                       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+                       int maxstreams;
+
+                       maxstreams = (ht_cap->mcs.tx_params &
+                             IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
+                               >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+                       maxstreams += 1;
+
+                       if ((ht_cap->mcs.rx_mask[1] == 0) &&
+                           (ht_cap->mcs.rx_mask[2] == 0))
+                               ht_conf->single_chain_sufficient = true;
+                       if (maxstreams <= 1)
+                               ht_conf->single_chain_sufficient = true;
+               } else {
+                       /*
+                        * If this happens at all, it can only be through a
+                        * race when the AP disconnects us while we're still
+                        * setting up the connection; in that case mac80211
+                        * will soon tell us about it.
+                        */
+                       ht_conf->single_chain_sufficient = true;
+               }
+               rcu_read_unlock();
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               ht_conf->single_chain_sufficient = true;
+               break;
+       default:
+               break;
+       }
+
+       IWL_DEBUG_ASSOC(priv, "leave\n");
+}
+
+static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv,
+                                   struct ieee80211_vif *vif)
+{
+       struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+
+       /*
+        * inform the ucode that there is no longer an
+        * association and that no more packets should be
+        * sent
+        */
+       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       ctx->staging.assoc_id = 0;
+       iwl_legacy_commit_rxon(priv, ctx);
+}
+
+static void iwl_legacy_beacon_update(struct ieee80211_hw *hw,
+                                 struct ieee80211_vif *vif)
+{
+       struct iwl_priv *priv = hw->priv;
+       unsigned long flags;
+       __le64 timestamp;
+       struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+
+       if (!skb)
+               return;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (!priv->beacon_ctx) {
+               IWL_ERR(priv, "update beacon but no beacon context!\n");
+               dev_kfree_skb(skb);
+               return;
+       }
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       if (priv->beacon_skb)
+               dev_kfree_skb(priv->beacon_skb);
+
+       priv->beacon_skb = skb;
+
+       timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
+       priv->timestamp = le64_to_cpu(timestamp);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       if (!iwl_legacy_is_ready_rf(priv)) {
+               IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
+               return;
+       }
+
+       priv->cfg->ops->legacy->post_associate(priv);
+}
+
+void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
+                                    struct ieee80211_vif *vif,
+                                    struct ieee80211_bss_conf *bss_conf,
+                                    u32 changes)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+       int ret;
+
+       if (WARN_ON(!priv->cfg->ops->legacy))
+               return;
+
+       IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
+
+       if (!iwl_legacy_is_alive(priv))
+               return;
+
+       mutex_lock(&priv->mutex);
+
+       if (changes & BSS_CHANGED_QOS) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&priv->lock, flags);
+               ctx->qos_data.qos_active = bss_conf->qos;
+               iwl_legacy_update_qos(priv, ctx);
+               spin_unlock_irqrestore(&priv->lock, flags);
+       }
+
+       if (changes & BSS_CHANGED_BEACON_ENABLED) {
+               /*
+                * the add_interface code must make sure we only ever
+                * have a single interface that could be beaconing at
+                * any time.
+                */
+               if (vif->bss_conf.enable_beacon)
+                       priv->beacon_ctx = ctx;
+               else
+                       priv->beacon_ctx = NULL;
+       }
+
+       if (changes & BSS_CHANGED_BSSID) {
+               IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
+
+               /*
+                * If there is currently a HW scan going on in the
+                * background then we need to cancel it else the RXON
+                * below/in post_associate will fail.
+                */
+               if (iwl_legacy_scan_cancel_timeout(priv, 100)) {
+                       IWL_WARN(priv,
+                               "Aborted scan still in progress after 100ms\n");
+                       IWL_DEBUG_MAC80211(priv,
+                               "leaving - scan abort failed.\n");
+                       mutex_unlock(&priv->mutex);
+                       return;
+               }
+
+               /* mac80211 only sets assoc when in STATION mode */
+               if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
+                       memcpy(ctx->staging.bssid_addr,
+                              bss_conf->bssid, ETH_ALEN);
+
+                       /* currently needed in a few places */
+                       memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
+               } else {
+                       ctx->staging.filter_flags &=
+                               ~RXON_FILTER_ASSOC_MSK;
+               }
+
+       }
+
+       /*
+        * This needs to be after setting the BSSID in case
+        * mac80211 decides to do both changes at once because
+        * it will invoke post_associate.
+        */
+       if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
+               iwl_legacy_beacon_update(hw, vif);
+
+       if (changes & BSS_CHANGED_ERP_PREAMBLE) {
+               IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
+                                  bss_conf->use_short_preamble);
+               if (bss_conf->use_short_preamble)
+                       ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+       }
+
+       if (changes & BSS_CHANGED_ERP_CTS_PROT) {
+               IWL_DEBUG_MAC80211(priv,
+                       "ERP_CTS %d\n", bss_conf->use_cts_prot);
+               if (bss_conf->use_cts_prot &&
+                       (priv->band != IEEE80211_BAND_5GHZ))
+                       ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+               if (bss_conf->use_cts_prot)
+                       ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+       }
+
+       if (changes & BSS_CHANGED_BASIC_RATES) {
+               /* XXX use this information
+                *
+                * To do that, remove code from iwl_legacy_set_rate() and put something
+                * like this here:
+                *
+               if (A-band)
+                       ctx->staging.ofdm_basic_rates =
+                               bss_conf->basic_rates;
+               else
+                       ctx->staging.ofdm_basic_rates =
+                               bss_conf->basic_rates >> 4;
+                       ctx->staging.cck_basic_rates =
+                               bss_conf->basic_rates & 0xF;
+                */
+       }
+
+       if (changes & BSS_CHANGED_HT) {
+               iwl_legacy_ht_conf(priv, vif);
+
+               if (priv->cfg->ops->hcmd->set_rxon_chain)
+                       priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+       }
+
+       if (changes & BSS_CHANGED_ASSOC) {
+               IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
+               if (bss_conf->assoc) {
+                       priv->timestamp = bss_conf->timestamp;
+
+                       if (!iwl_legacy_is_rfkill(priv))
+                               priv->cfg->ops->legacy->post_associate(priv);
+               } else
+                       iwl_legacy_set_no_assoc(priv, vif);
+       }
+
+       if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) {
+               IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
+                                  changes);
+               ret = iwl_legacy_send_rxon_assoc(priv, ctx);
+               if (!ret) {
+                       /* Sync active_rxon with latest change. */
+                       memcpy((void *)&ctx->active,
+                               &ctx->staging,
+                               sizeof(struct iwl_legacy_rxon_cmd));
+               }
+       }
+
+       if (changes & BSS_CHANGED_BEACON_ENABLED) {
+               if (vif->bss_conf.enable_beacon) {
+                       memcpy(ctx->staging.bssid_addr,
+                              bss_conf->bssid, ETH_ALEN);
+                       memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
+                       priv->cfg->ops->legacy->config_ap(priv);
+               } else
+                       iwl_legacy_set_no_assoc(priv, vif);
+       }
+
+       if (changes & BSS_CHANGED_IBSS) {
+               ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
+                                                       bss_conf->ibss_joined);
+               if (ret)
+                       IWL_ERR(priv, "failed to %s IBSS station %pM\n",
+                               bss_conf->ibss_joined ? "add" : "remove",
+                               bss_conf->bssid);
+       }
+
+       mutex_unlock(&priv->mutex);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
+
+irqreturn_t iwl_legacy_isr(int irq, void *data)
+{
+       struct iwl_priv *priv = data;
+       u32 inta, inta_mask;
+       u32 inta_fh;
+       unsigned long flags;
+       if (!priv)
+               return IRQ_NONE;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* Disable (but don't clear!) interrupts here to avoid
+        *    back-to-back ISRs and sporadic interrupts from our NIC.
+        * If we have something to service, the tasklet will re-enable ints.
+        * If we *don't* have something, we'll re-enable before leaving here. */
+       inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
+       iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+       /* Discover which interrupts are active/pending */
+       inta = iwl_read32(priv, CSR_INT);
+       inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+
+       /* Ignore interrupt if there's nothing in NIC to service.
+        * This may be due to IRQ shared with another device,
+        * or due to sporadic interrupts thrown from our NIC. */
+       if (!inta && !inta_fh) {
+               IWL_DEBUG_ISR(priv,
+                       "Ignore interrupt, inta == 0, inta_fh == 0\n");
+               goto none;
+       }
+
+       if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+               /* Hardware disappeared. It might have already raised
+                * an interrupt */
+               IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+               goto unplugged;
+       }
+
+       IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
+                     inta, inta_mask, inta_fh);
+
+       inta &= ~CSR_INT_BIT_SCD;
+
+       /* iwl_irq_tasklet() will service interrupts and re-enable them */
+       if (likely(inta || inta_fh))
+               tasklet_schedule(&priv->irq_tasklet);
+
+unplugged:
+       spin_unlock_irqrestore(&priv->lock, flags);
+       return IRQ_HANDLED;
+
+none:
+       /* Re-enable interrupts here since we don't have anything to service. */
+       /* Only re-enable if they were disabled by this IRQ handler. */
+       if (test_bit(STATUS_INT_ENABLED, &priv->status))
+               iwl_legacy_enable_interrupts(priv);
+       spin_unlock_irqrestore(&priv->lock, flags);
+       return IRQ_NONE;
+}
+EXPORT_SYMBOL(iwl_legacy_isr);
+
+/*
+ * iwl_legacy_tx_cmd_protection: Set RTS/CTS protection. Only 3945 and 4965
+ * share this function.
+ */
+void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
+                              struct ieee80211_tx_info *info,
+                              __le16 fc, __le32 *tx_flags)
+{
+       if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+               *tx_flags |= TX_CMD_FLG_RTS_MSK;
+               *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
+               *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+
+               if (!ieee80211_is_mgmt(fc))
+                       return;
+
+               switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+               case cpu_to_le16(IEEE80211_STYPE_AUTH):
+               case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+               case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+               case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+                       *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+                       *tx_flags |= TX_CMD_FLG_CTS_MSK;
+                       break;
+               }
+       } else if (info->control.rates[0].flags &
+                  IEEE80211_TX_RC_USE_CTS_PROTECT) {
+               *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+               *tx_flags |= TX_CMD_FLG_CTS_MSK;
+               *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
new file mode 100644 (file)
index 0000000..f03b463
--- /dev/null
@@ -0,0 +1,646 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_core_h__
+#define __iwl_legacy_core_h__
+
+/************************
+ * forward declarations *
+ ************************/
+struct iwl_host_cmd;
+struct iwl_cmd;
+
+
+#define IWLWIFI_VERSION "in-tree:"
+#define DRV_COPYRIGHT  "Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_AUTHOR     "<ilw@linux.intel.com>"
+
+#define IWL_PCI_DEVICE(dev, subdev, cfg) \
+       .vendor = PCI_VENDOR_ID_INTEL,  .device = (dev), \
+       .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
+       .driver_data = (kernel_ulong_t)&(cfg)
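+
+/*
+ * Typically used to fill a pci_device_id table entry; the IDs below are
+ * purely illustrative:
+ *     {IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_bg_cfg)},
+ */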
+
+#define TIME_UNIT              1024
+
+#define IWL_SKU_G       0x1
+#define IWL_SKU_A       0x2
+#define IWL_SKU_N       0x8
+
+#define IWL_CMD(x) case x: return #x
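+/*
+ * IWL_CMD(MANAGEMENT_BEACON), for example, expands to
+ *     case MANAGEMENT_BEACON: return "MANAGEMENT_BEACON";
+ * and is used to build the switch statements in the *_get_*_string()
+ * helpers.
+ */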
+
+struct iwl_hcmd_ops {
+       int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+       int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+       void (*set_rxon_chain)(struct iwl_priv *priv,
+                              struct iwl_rxon_context *ctx);
+};
+
+struct iwl_hcmd_utils_ops {
+       u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
+       u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd,
+                                                               u8 *data);
+       int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
+       void (*post_scan)(struct iwl_priv *priv);
+};
+
+struct iwl_apm_ops {
+       int (*init)(struct iwl_priv *priv);
+       void (*config)(struct iwl_priv *priv);
+};
+
+struct iwl_debugfs_ops {
+       ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
+                                size_t count, loff_t *ppos);
+       ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
+                                size_t count, loff_t *ppos);
+       ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
+                                     size_t count, loff_t *ppos);
+};
+
+struct iwl_temp_ops {
+       void (*temperature)(struct iwl_priv *priv);
+};
+
+struct iwl_lib_ops {
+       /* set hw dependent parameters */
+       int (*set_hw_params)(struct iwl_priv *priv);
+       /* Handling TX */
+       void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
+                                       struct iwl_tx_queue *txq,
+                                       u16 byte_cnt);
+       int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
+                                    struct iwl_tx_queue *txq,
+                                    dma_addr_t addr,
+                                    u16 len, u8 reset, u8 pad);
+       void (*txq_free_tfd)(struct iwl_priv *priv,
+                            struct iwl_tx_queue *txq);
+       int (*txq_init)(struct iwl_priv *priv,
+                       struct iwl_tx_queue *txq);
+       /* setup Rx handler */
+       void (*rx_handler_setup)(struct iwl_priv *priv);
+       /* alive notification after init uCode load */
+       void (*init_alive_start)(struct iwl_priv *priv);
+       /* check validity of rtc data address */
+       int (*is_valid_rtc_data_addr)(u32 addr);
+       /* 1st ucode load */
+       int (*load_ucode)(struct iwl_priv *priv);
+       int (*dump_nic_event_log)(struct iwl_priv *priv,
+                                 bool full_log, char **buf, bool display);
+       void (*dump_nic_error_log)(struct iwl_priv *priv);
+       int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
+       int (*set_channel_switch)(struct iwl_priv *priv,
+                                 struct ieee80211_channel_switch *ch_switch);
+       /* power management */
+       struct iwl_apm_ops apm_ops;
+
+       /* power */
+       int (*send_tx_power) (struct iwl_priv *priv);
+       void (*update_chain_flags)(struct iwl_priv *priv);
+
+       /* eeprom operations (as defined in iwl-eeprom.h) */
+       struct iwl_eeprom_ops eeprom_ops;
+
+       /* temperature */
+       struct iwl_temp_ops temp_ops;
+       /* check for plcp health */
+       bool (*check_plcp_health)(struct iwl_priv *priv,
+                                       struct iwl_rx_packet *pkt);
+
+       struct iwl_debugfs_ops debugfs_ops;
+
+};
+
+struct iwl_led_ops {
+       int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
+};
+
+struct iwl_legacy_ops {
+       void (*post_associate)(struct iwl_priv *priv);
+       void (*config_ap)(struct iwl_priv *priv);
+       /* station management */
+       int (*update_bcast_stations)(struct iwl_priv *priv);
+       int (*manage_ibss_station)(struct iwl_priv *priv,
+                                  struct ieee80211_vif *vif, bool add);
+};
+
+struct iwl_ops {
+       const struct iwl_lib_ops *lib;
+       const struct iwl_hcmd_ops *hcmd;
+       const struct iwl_hcmd_utils_ops *utils;
+       const struct iwl_led_ops *led;
+       const struct iwl_nic_ops *nic;
+       const struct iwl_legacy_ops *legacy;
+       const struct ieee80211_ops *ieee80211_ops;
+};
+
+struct iwl_mod_params {
+       int sw_crypto;          /* def: 0 = using hardware encryption */
+       int disable_hw_scan;    /* def: 0 = use h/w scan */
+       int num_of_queues;      /* def: HW dependent */
+       int disable_11n;        /* def: 0 = 11n capabilities enabled */
+       int amsdu_size_8K;      /* def: 1 = enable 8K amsdu size */
+       int antenna;            /* def: 0 = both antennas (use diversity) */
+       int restart_fw;         /* def: 1 = restart firmware */
+};
+
+/*
+ * @led_compensation: compensate the LED on/off time per HW according
+ *     to the deviation, to achieve the desired LED frequency.
+ *     The detailed algorithm is described in iwl-led.c
+ * @chain_noise_num_beacons: number of beacons used to compute chain noise
+ * @plcp_delta_threshold: PLCP error rate threshold used to trigger
+ *     radio tuning when the PLCP receive error rate is high
+ * @wd_timeout: TX queues watchdog timeout
+ * @temperature_kelvin: temperature reported by uCode in kelvin
+ * @max_event_log_size: size of the event log buffer for uCode event logging
+ * @ucode_tracing: support ucode continuous tracing
+ * @sensitivity_calib_by_driver: driver has the capability to perform
+ *     sensitivity calibration operation
+ * @chain_noise_calib_by_driver: driver has the capability to perform
+ *     chain noise calibration operation
+ */
+struct iwl_base_params {
+       int eeprom_size;
+       int num_of_queues;      /* def: HW dependent */
+       int num_of_ampdu_queues;/* def: HW dependent */
+       /* for iwl_legacy_apm_init() */
+       u32 pll_cfg_val;
+       bool set_l0s;
+       bool use_bsm;
+
+       u16 led_compensation;
+       int chain_noise_num_beacons;
+       u8 plcp_delta_threshold;
+       unsigned int wd_timeout;
+       bool temperature_kelvin;
+       u32 max_event_log_size;
+       const bool ucode_tracing;
+       const bool sensitivity_calib_by_driver;
+       const bool chain_noise_calib_by_driver;
+};
+
+/**
+ * struct iwl_cfg
+ * @fw_name_pre: Firmware filename prefix. The api version and extension
+ *     (.ucode) will be added to the filename before loading from disk. The
+ *     filename is constructed as fw_name_pre<api>.ucode.
+ * @ucode_api_max: Highest version of uCode API supported by the driver.
+ * @ucode_api_min: Lowest version of uCode API supported by the driver.
+ * @scan_antennas: available antennas for scan operations
+ * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
+ *
+ * We enable the driver to be backward compatible wrt API version. The
+ * driver specifies which APIs it supports (with @ucode_api_max being the
+ * highest and @ucode_api_min the lowest). Firmware will only be loaded if
+ * it has a supported API version. The firmware's API version will be
+ * stored in @iwl_priv, enabling the driver to make runtime changes based
+ * on firmware version used.
+ *
+ * For example,
+ * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
+ *     Driver interacts with Firmware API version >= 2.
+ * } else {
+ *     Driver interacts with Firmware API version 1.
+ * }
+ *
+ * The ideal usage of this infrastructure is to treat a new ucode API
+ * release as a new hardware revision. That is, by utilizing
+ * iwl_hcmd_utils_ops etc. we accommodate different command structures
+ * and flows between hardware versions as well as between their API
+ * versions.
+ *
+ */
+struct iwl_cfg {
+       /* params specific to an individual device within a device family */
+       const char *name;
+       const char *fw_name_pre;
+       const unsigned int ucode_api_max;
+       const unsigned int ucode_api_min;
+       u8   valid_tx_ant;
+       u8   valid_rx_ant;
+       unsigned int sku;
+       u16  eeprom_ver;
+       u16  eeprom_calib_ver;
+       const struct iwl_ops *ops;
+       /* module based parameters which can be set from modprobe cmd */
+       const struct iwl_mod_params *mod_params;
+       /* params not likely to change within a device family */
+       struct iwl_base_params *base_params;
+       /* params likely to change within a device family */
+       u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
+       u8 scan_tx_antennas[IEEE80211_NUM_BANDS];
+       enum iwl_led_mode led_mode;
+};
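Editor's note: the API-version gating described in the comment above boils down to a
runtime check against the firmware version stored in iwl_priv. A minimal sketch, not
part of the patch; IWL_UCODE_API() is the accessor referenced in the comment, and the
cut-off value 2 is purely illustrative:

	static void iwl_example_pick_cmd_flow(struct iwl_priv *priv)
	{
		/* Firmware is only accepted if its API version lies within
		 * [ucode_api_min, ucode_api_max] of the matching iwl_cfg. */
		if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
			/* use the newer command layout */
		} else {
			/* fall back to the v1 flow */
		}
	}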
+
+/***************************
+ *   L i b                 *
+ ***************************/
+
+struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg);
+int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
+                   const struct ieee80211_tx_queue_params *params);
+int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw);
+void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
+                       struct iwl_rxon_context *ctx,
+                       int hw_decrypt);
+int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv,
+                       struct iwl_rxon_context *ctx);
+int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
+                       struct iwl_rxon_context *ctx);
+int iwl_legacy_set_rxon_channel(struct iwl_priv *priv,
+                       struct ieee80211_channel *ch,
+                       struct iwl_rxon_context *ctx);
+void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
+                           struct iwl_rxon_context *ctx,
+                           enum ieee80211_band band,
+                           struct ieee80211_vif *vif);
+u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
+                                 enum ieee80211_band band);
+void iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
+                       struct iwl_ht_config *ht_conf);
+bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
+                           struct iwl_rxon_context *ctx,
+                           struct ieee80211_sta_ht_cap *ht_cap);
+void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
+                                  struct iwl_rxon_context *ctx);
+void iwl_legacy_set_rate(struct iwl_priv *priv);
+int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
+                          struct ieee80211_hdr *hdr,
+                          u32 decrypt_res,
+                          struct ieee80211_rx_status *stats);
+void iwl_legacy_irq_handle_error(struct iwl_priv *priv);
+int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw,
+                         struct ieee80211_vif *vif);
+void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif);
+int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif,
+                            enum nl80211_iftype newtype, bool newp2p);
+int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv);
+void iwl_legacy_txq_mem(struct iwl_priv *priv);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv);
+void iwl_legacy_free_traffic_mem(struct iwl_priv *priv);
+void iwl_legacy_reset_traffic_log(struct iwl_priv *priv);
+void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
+                               u16 length, struct ieee80211_hdr *header);
+void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
+                               u16 length, struct ieee80211_hdr *header);
+const char *iwl_legacy_get_mgmt_string(int cmd);
+const char *iwl_legacy_get_ctrl_string(int cmd);
+void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv);
+void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
+                     u16 len);
+#else
+static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
+{
+       return 0;
+}
+static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
+{
+}
+static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
+{
+}
+static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
+                     u16 length, struct ieee80211_hdr *header)
+{
+}
+static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
+                     u16 length, struct ieee80211_hdr *header)
+{
+}
+static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx,
+                                   __le16 fc, u16 len)
+{
+}
+#endif
+/*****************************************************
+ * RX handlers.
+ * **************************************************/
+void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
+                          struct iwl_rx_mem_buffer *rxb);
+void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
+                                     struct iwl_rx_mem_buffer *rxb);
+void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
+                       struct iwl_rx_mem_buffer *rxb);
+
+/*****************************************************
+* RX
+******************************************************/
+void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv);
+void iwl_legacy_cmd_queue_free(struct iwl_priv *priv);
+int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv);
+void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
+                                 struct iwl_rx_queue *q);
+int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q);
+void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv,
+                               struct iwl_rx_mem_buffer *rxb);
+/* Handlers */
+void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
+                                         struct iwl_rx_mem_buffer *rxb);
+void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
+                               struct iwl_rx_packet *pkt);
+void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success);
+void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
+
+/* TX helpers */
+
+/*****************************************************
+* TX
+******************************************************/
+void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv,
+                                       struct iwl_tx_queue *txq);
+int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+                     int slots_num, u32 txq_id);
+void iwl_legacy_tx_queue_reset(struct iwl_priv *priv,
+                       struct iwl_tx_queue *txq,
+                       int slots_num, u32 txq_id);
+void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
+void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id);
+void iwl_legacy_setup_watchdog(struct iwl_priv *priv);
+/*****************************************************
+ * TX power
+ ****************************************************/
+int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
+
+/*******************************************************************************
+ * Rate
+ ******************************************************************************/
+
+u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
+                           struct iwl_rxon_context *ctx);
+
+/*******************************************************************************
+ * Scanning
+ ******************************************************************************/
+void iwl_legacy_init_scan_params(struct iwl_priv *priv);
+int iwl_legacy_scan_cancel(struct iwl_priv *priv);
+int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
+void iwl_legacy_force_scan_end(struct iwl_priv *priv);
+int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
+                   struct ieee80211_vif *vif,
+                   struct cfg80211_scan_request *req);
+void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv);
+int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external);
+u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv,
+                       struct ieee80211_mgmt *frame,
+                      const u8 *ta, const u8 *ie, int ie_len, int left);
+void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv);
+u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
+                             enum ieee80211_band band,
+                             u8 n_probes);
+u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
+                              enum ieee80211_band band,
+                              struct ieee80211_vif *vif);
+void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv);
+void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv);
+
+/* For faster active scanning, the scan will move to the next channel if fewer
+ * than PLCP_QUIET_THRESH packets are heard on this channel within
+ * ACTIVE_QUIET_TIME after sending a probe request.  This shortens the dwell
+ * time if it's a quiet channel (nothing responded to our probe, and there's
+ * no other traffic).
+ * Disable the "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
+#define IWL_ACTIVE_QUIET_TIME       cpu_to_le16(10)  /* msec */
+#define IWL_PLCP_QUIET_THRESH       cpu_to_le16(1)  /* packets */
+
+#define IWL_SCAN_CHECK_WATCHDOG                (HZ * 7)
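Editor's note: a sketch of how these two values would typically land in a scan command.
The quiet_time / quiet_plcp_th field names are assumptions borrowed from the common iwl
scan command layout; they are not defined in this header:

	/* sketch only: `scan` is assumed to point at a scan command with
	 * __le16 quiet_time and __le16 quiet_plcp_th members; both defines
	 * above are already little-endian */
	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;	/* 10 ms max on a quiet channel */
	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;	/* "quiet" = < 1 packet heard */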
+
+/*****************************************************
+ *   S e n d i n g     H o s t     C o m m a n d s   *
+ *****************************************************/
+
+const char *iwl_legacy_get_cmd_string(u8 cmd);
+int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv,
+                                  struct iwl_host_cmd *cmd);
+int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
+int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id,
+                                 u16 len, const void *data);
+int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
+                          const void *data,
+                          void (*callback)(struct iwl_priv *priv,
+                                           struct iwl_device_cmd *cmd,
+                                           struct iwl_rx_packet *pkt));
+
+int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
+
+
+/*****************************************************
+ * PCI                                              *
+ *****************************************************/
+
+static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv)
+{
+       int pos;
+       u16 pci_lnk_ctl;
+       pos = pci_find_capability(priv->pci_dev, PCI_CAP_ID_EXP);
+       pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
+       return pci_lnk_ctl;
+}
+
+void iwl_legacy_bg_watchdog(unsigned long data);
+u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
+                                       u32 usec, u32 beacon_interval);
+__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
+                          u32 addon, u32 beacon_interval);
+
+#ifdef CONFIG_PM
+int iwl_legacy_pci_suspend(struct device *device);
+int iwl_legacy_pci_resume(struct device *device);
+extern const struct dev_pm_ops iwl_legacy_pm_ops;
+
+#define IWL_LEGACY_PM_OPS      (&iwl_legacy_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define IWL_LEGACY_PM_OPS      NULL
+
+#endif /* !CONFIG_PM */
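Editor's note: IWL_LEGACY_PM_OPS is meant to be dropped straight into a pci_driver;
when CONFIG_PM is disabled it evaluates to NULL and the PCI core skips the PM
callbacks. A hedged sketch (the driver name is made up for illustration):

	static struct pci_driver iwl_example_driver = {
		.name      = "iwl-example",
		/* .id_table, .probe, .remove omitted in this sketch */
		.driver.pm = IWL_LEGACY_PM_OPS,	/* NULL when !CONFIG_PM */
	};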
+
+/*****************************************************
+*  Error Handling Debugging
+******************************************************/
+void iwl4965_dump_nic_error_log(struct iwl_priv *priv);
+int iwl4965_dump_nic_event_log(struct iwl_priv *priv,
+                          bool full_log, char **buf, bool display);
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
+                            struct iwl_rxon_context *ctx);
+#else
+static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
+                                          struct iwl_rxon_context *ctx)
+{
+}
+#endif
+
+void iwl_legacy_clear_isr_stats(struct iwl_priv *priv);
+
+/*****************************************************
+*  GEOS
+******************************************************/
+int iwl_legacy_init_geos(struct iwl_priv *priv);
+void iwl_legacy_free_geos(struct iwl_priv *priv);
+
+/*************** DRIVER STATUS FUNCTIONS   *****/
+
+#define STATUS_HCMD_ACTIVE     0       /* host command in progress */
+/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
+#define STATUS_INT_ENABLED     2
+#define STATUS_RF_KILL_HW      3
+#define STATUS_CT_KILL         4
+#define STATUS_INIT            5
+#define STATUS_ALIVE           6
+#define STATUS_READY           7
+#define STATUS_TEMPERATURE     8
+#define STATUS_GEO_CONFIGURED  9
+#define STATUS_EXIT_PENDING    10
+#define STATUS_STATISTICS      12
+#define STATUS_SCANNING                13
+#define STATUS_SCAN_ABORTING   14
+#define STATUS_SCAN_HW         15
+#define STATUS_POWER_PMI       16
+#define STATUS_FW_ERROR                17
+
+
+static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
+{
+       /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
+        * set but EXIT_PENDING is not */
+       return test_bit(STATUS_READY, &priv->status) &&
+              test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
+              !test_bit(STATUS_EXIT_PENDING, &priv->status);
+}
+
+static inline int iwl_legacy_is_alive(struct iwl_priv *priv)
+{
+       return test_bit(STATUS_ALIVE, &priv->status);
+}
+
+static inline int iwl_legacy_is_init(struct iwl_priv *priv)
+{
+       return test_bit(STATUS_INIT, &priv->status);
+}
+
+static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv)
+{
+       return test_bit(STATUS_RF_KILL_HW, &priv->status);
+}
+
+static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv)
+{
+       return iwl_legacy_is_rfkill_hw(priv);
+}
+
+static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv)
+{
+       return test_bit(STATUS_CT_KILL, &priv->status);
+}
+
+static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv)
+{
+
+       if (iwl_legacy_is_rfkill(priv))
+               return 0;
+
+       return iwl_legacy_is_ready(priv);
+}
+
+extern void iwl_legacy_send_bt_config(struct iwl_priv *priv);
+extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv,
+                                      u8 flags, bool clear);
+void iwl_legacy_apm_stop(struct iwl_priv *priv);
+int iwl_legacy_apm_init(struct iwl_priv *priv);
+
+int iwl_legacy_send_rxon_timing(struct iwl_priv *priv,
+                               struct iwl_rxon_context *ctx);
+static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv,
+                                     struct iwl_rxon_context *ctx)
+{
+       return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
+}
+static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv,
+                                     struct iwl_rxon_context *ctx)
+{
+       return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
+}
+static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
+                       struct iwl_priv *priv, enum ieee80211_band band)
+{
+       return priv->hw->wiphy->bands[band];
+}
+
+/* mac80211 handlers */
+int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
+void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw);
+void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
+                                    struct ieee80211_vif *vif,
+                                    struct ieee80211_bss_conf *bss_conf,
+                                    u32 changes);
+void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
+                               struct ieee80211_tx_info *info,
+                               __le16 fc, __le32 *tx_flags);
+
+irqreturn_t iwl_legacy_isr(int irq, void *data);
+
+#endif /* __iwl_legacy_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-csr.h b/drivers/net/wireless/iwlegacy/iwl-csr.h
new file mode 100644 (file)
index 0000000..668a961
--- /dev/null
@@ -0,0 +1,422 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_legacy_csr_h__
+#define __iwl_legacy_csr_h__
+/*
+ * CSR (control and status registers)
+ *
+ * CSR registers are mapped directly into PCI bus space, and are accessible
+ * whenever the platform supplies power to the device, even when the device
+ * is in low power states due to driver-invoked device resets
+ * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
+ *
+ * Use iwl_write32() and iwl_read32() family to access these registers;
+ * these provide simple PCI bus access, without waking up the MAC.
+ * Do not use iwl_legacy_write_direct32() family for these registers;
+ * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
+ * The MAC (uCode processor, etc.) does not need to be powered up for accessing
+ * the CSR registers.
+ *
+ * NOTE:  Device does need to be awake in order to read this memory
+ *        via CSR_EEPROM register
+ */
+#define CSR_BASE    (0x000)
+
+#define CSR_HW_IF_CONFIG_REG    (CSR_BASE+0x000) /* hardware interface config */
+#define CSR_INT_COALESCING      (CSR_BASE+0x004) /* accum ints, 32-usec units */
+#define CSR_INT                 (CSR_BASE+0x008) /* host interrupt status/ack */
+#define CSR_INT_MASK            (CSR_BASE+0x00c) /* host interrupt enable */
+#define CSR_FH_INT_STATUS       (CSR_BASE+0x010) /* busmaster int status/ack*/
+#define CSR_GPIO_IN             (CSR_BASE+0x018) /* read external chip pins */
+#define CSR_RESET               (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
+#define CSR_GP_CNTRL            (CSR_BASE+0x024)
+
+/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */
+#define CSR_INT_PERIODIC_REG   (CSR_BASE+0x005)
+
+/*
+ * Hardware revision info
+ * Bit fields:
+ * 31-8:  Reserved
+ *  7-4:  Type of device:  see CSR_HW_REV_TYPE_xxx definitions
+ *  3-2:  Revision step:  0 = A, 1 = B, 2 = C, 3 = D
+ *  1-0:  "Dash" (-) value, as in A-1, etc.
+ *
+ * NOTE:  Revision step affects calculation of CCK txpower for 4965.
+ * NOTE:  See also CSR_HW_REV_WA_REG (work-around for bug in 4965).
+ */
+#define CSR_HW_REV              (CSR_BASE+0x028)
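Editor's note: given the bit layout above, decoding the revision is plain shifting and
masking. A minimal sketch using the iwl_read32() family that this header recommends for
CSR space (variable names are illustrative):

	u32 hw_rev   = iwl_read32(priv, CSR_HW_REV);
	u8  dev_type = (hw_rev >> 4) & 0xF;	/* CSR_HW_REV_TYPE_xxx */
	u8  step     = (hw_rev >> 2) & 0x3;	/* 0 = A, 1 = B, 2 = C, 3 = D */
	u8  dash     = hw_rev & 0x3;		/* as in A-1 */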
+
+/*
+ * EEPROM memory reads
+ *
+ * NOTE:  Device must be awake, initialized via apm_ops.init(),
+ *        in order to read.
+ */
+#define CSR_EEPROM_REG          (CSR_BASE+0x02c)
+#define CSR_EEPROM_GP           (CSR_BASE+0x030)
+
+#define CSR_GIO_REG            (CSR_BASE+0x03C)
+#define CSR_GP_UCODE_REG       (CSR_BASE+0x048)
+#define CSR_GP_DRIVER_REG      (CSR_BASE+0x050)
+
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox registers.
+ * SET/CLR registers set/clear bit(s) if "1" is written.
+ */
+#define CSR_UCODE_DRV_GP1       (CSR_BASE+0x054)
+#define CSR_UCODE_DRV_GP1_SET   (CSR_BASE+0x058)
+#define CSR_UCODE_DRV_GP1_CLR   (CSR_BASE+0x05c)
+#define CSR_UCODE_DRV_GP2       (CSR_BASE+0x060)
+
+#define CSR_LED_REG             (CSR_BASE+0x094)
+#define CSR_DRAM_INT_TBL_REG   (CSR_BASE+0x0A0)
+
+/* GIO Chicken Bits (PCI Express bus link power management) */
+#define CSR_GIO_CHICKEN_BITS    (CSR_BASE+0x100)
+
+/* Analog phase-lock-loop configuration  */
+#define CSR_ANA_PLL_CFG         (CSR_BASE+0x20c)
+
+/*
+ * CSR Hardware Revision Workaround Register.  Indicates hardware rev;
+ * "step" determines CCK backoff for txpower calculation.  Used for 4965 only.
+ * See also CSR_HW_REV register.
+ * Bit fields:
+ *  3-2:  0 = A, 1 = B, 2 = C, 3 = D step
+ *  1-0:  "Dash" (-) value, as in C-1, etc.
+ */
+#define CSR_HW_REV_WA_REG              (CSR_BASE+0x22C)
+
+#define CSR_DBG_HPET_MEM_REG           (CSR_BASE+0x240)
+#define CSR_DBG_LINK_PWR_MGMT_REG      (CSR_BASE+0x250)
+
+/* Bits for CSR_HW_IF_CONFIG_REG */
+#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R      (0x00000010)
+#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER     (0x00000C00)
+#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI        (0x00000100)
+#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI      (0x00000200)
+
+#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB         (0x00000100)
+#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM         (0x00000200)
+#define CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC            (0x00000400)
+#define CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE         (0x00000800)
+#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A    (0x00000000)
+#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B    (0x00001000)
+
+#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A  (0x00080000)
+#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM        (0x00200000)
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY     (0x00400000) /* PCI_OWN_SEM */
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
+#define CSR_HW_IF_CONFIG_REG_PREPARE             (0x08000000) /* WAKE_ME */
+
+#define CSR_INT_PERIODIC_DIS                   (0x00) /* disable periodic int*/
+#define CSR_INT_PERIODIC_ENA                   (0xFF) /* 255*32 usec ~ 8 msec*/
+
+/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
+ * acknowledged (reset) by host writing "1" to flagged bits. */
+#define CSR_INT_BIT_FH_RX        (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
+#define CSR_INT_BIT_HW_ERR       (1 << 29) /* DMA hardware error FH_INT[31] */
+#define CSR_INT_BIT_RX_PERIODIC         (1 << 28) /* Rx periodic */
+#define CSR_INT_BIT_FH_TX        (1 << 27) /* Tx DMA FH_INT[1:0] */
+#define CSR_INT_BIT_SCD          (1 << 26) /* TXQ pointer advanced */
+#define CSR_INT_BIT_SW_ERR       (1 << 25) /* uCode error */
+#define CSR_INT_BIT_RF_KILL      (1 << 7)  /* HW RFKILL switch GP_CNTRL[27] toggled */
+#define CSR_INT_BIT_CT_KILL      (1 << 6)  /* Critical temp (chip too hot) rfkill */
+#define CSR_INT_BIT_SW_RX        (1 << 3)  /* Rx, command responses, 3945 */
+#define CSR_INT_BIT_WAKEUP       (1 << 1)  /* NIC controller waking up (pwr mgmt) */
+#define CSR_INT_BIT_ALIVE        (1 << 0)  /* uCode interrupts once it initializes */
+
+#define CSR_INI_SET_MASK       (CSR_INT_BIT_FH_RX   | \
+                                CSR_INT_BIT_HW_ERR  | \
+                                CSR_INT_BIT_FH_TX   | \
+                                CSR_INT_BIT_SW_ERR  | \
+                                CSR_INT_BIT_RF_KILL | \
+                                CSR_INT_BIT_SW_RX   | \
+                                CSR_INT_BIT_WAKEUP  | \
+                                CSR_INT_BIT_ALIVE)
+
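Editor's note: per the comment above, the host acks an interrupt cause by writing the
same "1" bits back to CSR_INT. A minimal ISR-style sketch, with error handling and the
FH status register left out:

	u32 inta = iwl_read32(priv, CSR_INT);
	iwl_write32(priv, CSR_INT, inta);	/* ack exactly the bits we saw */
	if (inta & CSR_INT_BIT_FH_RX) {
		/* service Rx / command responses */
	}
	if (inta & CSR_INT_BIT_RF_KILL) {
		/* reflect the HW rfkill switch state */
	}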
+/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
+#define CSR_FH_INT_BIT_ERR       (1 << 31) /* Error */
+#define CSR_FH_INT_BIT_HI_PRIOR  (1 << 30) /* High priority Rx, bypass coalescing */
+#define CSR39_FH_INT_BIT_RX_CHNL2  (1 << 18) /* Rx channel 2 (3945 only) */
+#define CSR_FH_INT_BIT_RX_CHNL1  (1 << 17) /* Rx channel 1 */
+#define CSR_FH_INT_BIT_RX_CHNL0  (1 << 16) /* Rx channel 0 */
+#define CSR39_FH_INT_BIT_TX_CHNL6  (1 << 6)  /* Tx channel 6 (3945 only) */
+#define CSR_FH_INT_BIT_TX_CHNL1  (1 << 1)  /* Tx channel 1 */
+#define CSR_FH_INT_BIT_TX_CHNL0  (1 << 0)  /* Tx channel 0 */
+
+#define CSR39_FH_INT_RX_MASK   (CSR_FH_INT_BIT_HI_PRIOR | \
+                                CSR39_FH_INT_BIT_RX_CHNL2 | \
+                                CSR_FH_INT_BIT_RX_CHNL1 | \
+                                CSR_FH_INT_BIT_RX_CHNL0)
+
+
+#define CSR39_FH_INT_TX_MASK   (CSR39_FH_INT_BIT_TX_CHNL6 | \
+                                CSR_FH_INT_BIT_TX_CHNL1 | \
+                                CSR_FH_INT_BIT_TX_CHNL0)
+
+#define CSR49_FH_INT_RX_MASK   (CSR_FH_INT_BIT_HI_PRIOR | \
+                                CSR_FH_INT_BIT_RX_CHNL1 | \
+                                CSR_FH_INT_BIT_RX_CHNL0)
+
+#define CSR49_FH_INT_TX_MASK   (CSR_FH_INT_BIT_TX_CHNL1 | \
+                                CSR_FH_INT_BIT_TX_CHNL0)
+
+/* GPIO */
+#define CSR_GPIO_IN_BIT_AUX_POWER                   (0x00000200)
+#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC                (0x00000000)
+#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC               (0x00000200)
+
+/* RESET */
+#define CSR_RESET_REG_FLAG_NEVO_RESET                (0x00000001)
+#define CSR_RESET_REG_FLAG_FORCE_NMI                 (0x00000002)
+#define CSR_RESET_REG_FLAG_SW_RESET                  (0x00000080)
+#define CSR_RESET_REG_FLAG_MASTER_DISABLED           (0x00000100)
+#define CSR_RESET_REG_FLAG_STOP_MASTER               (0x00000200)
+#define CSR_RESET_LINK_PWR_MGMT_DISABLED             (0x80000000)
+
+/*
+ * GP (general purpose) CONTROL REGISTER
+ * Bit fields:
+ *    27:  HW_RF_KILL_SW
+ *         Indicates state of (platform's) hardware RF-Kill switch
+ * 26-24:  POWER_SAVE_TYPE
+ *         Indicates current power-saving mode:
+ *         000 -- No power saving
+ *         001 -- MAC power-down
+ *         010 -- PHY (radio) power-down
+ *         011 -- Error
+ *   9-6:  SYS_CONFIG
+ *         Indicates current system configuration, reflecting pins on chip
+ *         as forced high/low by device circuit board.
+ *     4:  GOING_TO_SLEEP
+ *         Indicates MAC is entering a power-saving sleep power-down.
+ *         Not a good time to access device-internal resources.
+ *     3:  MAC_ACCESS_REQ
+ *         Host sets this to request and maintain MAC wakeup, to allow host
+ *         access to device-internal resources.  Host must wait for
+ *         MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
+ *         device registers.
+ *     2:  INIT_DONE
+ *         Host sets this to put device into fully operational D0 power mode.
+ *         Host resets this after SW_RESET to put device into low power mode.
+ *     0:  MAC_CLOCK_READY
+ *         Indicates MAC (ucode processor, etc.) is powered up and can run.
+ *         Internal resources are accessible.
+ *         NOTE:  This does not indicate that the processor is actually running.
+ *         NOTE:  This does not indicate that 4965 or 3945 has completed
+ *                init or post-power-down restore of internal SRAM memory.
+ *                Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
+ *                SRAM is restored and uCode is in normal operation mode.
+ *                Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ *                do not need to save/restore it.
+ *         NOTE:  After device reset, this bit remains "0" until host sets
+ *                INIT_DONE
+ */
+#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY        (0x00000001)
+#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE              (0x00000004)
+#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ         (0x00000008)
+#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP         (0x00000010)
+
+#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN           (0x00000001)
+
+#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE         (0x07000000)
+#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE         (0x04000000)
+#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW          (0x08000000)
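Editor's note: the MAC_ACCESS_REQ / MAC_CLOCK_READY handshake described above is what
the driver's "grab nic access" helpers implement before touching non-CSR registers. A
simplified polling sketch, not the actual helper:

	/* request wakeup, then wait until the MAC clock is ready and the
	 * device is not on its way into a power-saving sleep */
	iwl_write32(priv, CSR_GP_CNTRL,
		    iwl_read32(priv, CSR_GP_CNTRL) |
		    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	while ((iwl_read32(priv, CSR_GP_CNTRL) &
		(CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
		 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) !=
	       CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY)
		udelay(10);	/* a real implementation bounds this loop */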
+
+
+/* EEPROM REG */
+#define CSR_EEPROM_REG_READ_VALID_MSK  (0x00000001)
+#define CSR_EEPROM_REG_BIT_CMD         (0x00000002)
+#define CSR_EEPROM_REG_MSK_ADDR                (0x0000FFFC)
+#define CSR_EEPROM_REG_MSK_DATA                (0xFFFF0000)
+
+/* EEPROM GP */
+#define CSR_EEPROM_GP_VALID_MSK                (0x00000007) /* signature */
+#define CSR_EEPROM_GP_IF_OWNER_MSK     (0x00000180)
+#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K                (0x00000002)
+#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K                (0x00000004)
+
+/* GP REG */
+#define CSR_GP_REG_POWER_SAVE_STATUS_MSK            (0x03000000) /* bit 24/25 */
+#define CSR_GP_REG_NO_POWER_SAVE            (0x00000000)
+#define CSR_GP_REG_MAC_POWER_SAVE           (0x01000000)
+#define CSR_GP_REG_PHY_POWER_SAVE           (0x02000000)
+#define CSR_GP_REG_POWER_SAVE_ERROR         (0x03000000)
+
+
+/* CSR GIO */
+#define CSR_GIO_REG_VAL_L0S_ENABLED    (0x00000002)
+
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox register 1
+ * Host driver and uCode write and/or read this register to communicate with
+ * each other.
+ * Bit fields:
+ *     4:  UCODE_DISABLE
+ *         Host sets this to request permanent halt of uCode, same as
+ *         sending CARD_STATE command with "halt" bit set.
+ *     3:  CT_KILL_EXIT
+ *         Host sets this to request exit from CT_KILL state, i.e. host thinks
+ *         device temperature is low enough to continue normal operation.
+ *     2:  CMD_BLOCKED
+ *         Host sets this during RF KILL power-down sequence (HW, SW, CT KILL)
+ *         to release uCode to clear all Tx and command queues, enter
+ *         unassociated mode, and power down.
+ *         NOTE:  Some devices also use HBUS_TARG_MBX_C register for this bit.
+ *     1:  SW_BIT_RFKILL
+ *         Host sets this when issuing CARD_STATE command to request
+ *         device sleep.
+ *     0:  MAC_SLEEP
+ *         uCode sets this when preparing a power-saving power-down.
+ *         uCode resets this when power-up is complete and SRAM is sane.
+ *         NOTE:  3945/4965 saves internal SRAM data to host when powering down,
+ *                and must restore this data after powering back up.
+ *                MAC_SLEEP is the best indication that restore is complete.
+ *                Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ *                do not need to save/restore it.
+ */
+#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP             (0x00000001)
+#define CSR_UCODE_SW_BIT_RFKILL                     (0x00000002)
+#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED           (0x00000004)
+#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT      (0x00000008)
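Editor's note: because CSR_UCODE_DRV_GP1 has dedicated SET/CLR shadows (see the mailbox
registers earlier in this file), the host flips individual bits without a
read-modify-write. A short example of requesting CT_KILL exit once the temperature has
dropped back into range:

	/* writing a '1' to the SET register sets that bit in CSR_UCODE_DRV_GP1 */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
		    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);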
+
+/* GIO Chicken Bits (PCI Express bus link power management) */
+#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX  (0x00800000)
+#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER  (0x20000000)
+
+/* LED */
+#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF)
+#define CSR_LED_REG_TRUN_ON (0x78)
+#define CSR_LED_REG_TRUN_OFF (0x38)
+
+/* ANA_PLL */
+#define CSR39_ANA_PLL_CFG_VAL        (0x01000000)
+
+/* HPET MEM debug */
+#define CSR_DBG_HPET_MEM_REG_VAL       (0xFFFF0000)
+
+/* DRAM INT TABLE */
+#define CSR_DRAM_INT_TBL_ENABLE                (1 << 31)
+#define CSR_DRAM_INIT_TBL_WRAP_CHECK   (1 << 27)
+
+/*
+ * HBUS (Host-side Bus)
+ *
+ * HBUS registers are mapped directly into PCI bus space, but are used
+ * to indirectly access device's internal memory or registers that
+ * may be powered-down.
+ *
+ * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family
+ * for these registers;
+ * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
+ * to make sure the MAC (uCode processor, etc.) is powered up for accessing
+ * internal resources.
+ *
+ * Do not use iwl_write32()/iwl_read32() family to access these registers;
+ * these provide only simple PCI bus access, without waking up the MAC.
+ */
+#define HBUS_BASE      (0x400)
+
+/*
+ * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
+ * structures, error log, event log, verifying uCode load).
+ * First write to address register, then read from or write to data register
+ * to complete the job.  Once the address register is set up, accesses to
+ * data registers auto-increment the address by one dword.
+ * Bit usage for address registers (read or write):
+ *  0-31:  memory address within device
+ */
+#define HBUS_TARG_MEM_RADDR     (HBUS_BASE+0x00c)
+#define HBUS_TARG_MEM_WADDR     (HBUS_BASE+0x010)
+#define HBUS_TARG_MEM_WDAT      (HBUS_BASE+0x018)
+#define HBUS_TARG_MEM_RDAT      (HBUS_BASE+0x01c)
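Editor's note: a sketch of the indirect SRAM read sequence the comment describes, using
the iwl_legacy_*_direct32() family and assuming nic access has already been grabbed via
MAC_ACCESS_REQ; `addr`, `first` and `second` are illustrative variables:

	/* point the read-address register at device memory, then pull
	 * consecutive dwords; the address auto-increments on each read */
	iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
	first  = iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
	second = iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);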
+
+/* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */
+#define HBUS_TARG_MBX_C         (HBUS_BASE+0x030)
+#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED         (0x00000004)
+
+/*
+ * Registers for accessing device's internal peripheral registers
+ * (e.g. SCD, BSM, etc.).  First write to address register,
+ * then read from or write to data register to complete the job.
+ * Bit usage for address registers (read or write):
+ *  0-15:  register address (offset) within device
+ * 24-25:  (# bytes - 1) to read or write (e.g. 3 for dword)
+ */
+#define HBUS_TARG_PRPH_WADDR    (HBUS_BASE+0x044)
+#define HBUS_TARG_PRPH_RADDR    (HBUS_BASE+0x048)
+#define HBUS_TARG_PRPH_WDAT     (HBUS_BASE+0x04c)
+#define HBUS_TARG_PRPH_RDAT     (HBUS_BASE+0x050)
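Editor's note: a companion sketch for a single peripheral-register dword read; the
(3 << 24) encodes "# bytes - 1" per the bit description above, the address mask follows
the stated 0-15 bit range, and nic access is again assumed to be held:

	iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR,
				  (reg & 0x0000FFFF) | (3 << 24));	/* 4-byte access */
	val = iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT);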
+
+/*
+ * Per-Tx-queue write pointer (index, really!)
+ * Indicates index to next TFD that driver will fill (1 past latest filled).
+ * Bit usage:
+ *  0-7:  queue write index
+ * 11-8:  queue selector
+ */
+#define HBUS_TARG_WRPTR         (HBUS_BASE+0x060)
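Editor's note: composing the write-pointer value follows directly from the bit usage
above: the queue selector goes into bits 11-8 and the TFD index into bits 7-0. A
minimal sketch (write_idx and txq_id are illustrative variables):

	/* tell the scheduler which TFD the driver will fill next */
	iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
				  (write_idx & 0xFF) | (txq_id << 8));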
+
+#endif /* !__iwl_legacy_csr_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-debug.h b/drivers/net/wireless/iwlegacy/iwl-debug.h
new file mode 100644 (file)
index 0000000..ae13112
--- /dev/null
@@ -0,0 +1,198 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_debug_h__
+#define __iwl_legacy_debug_h__
+
+struct iwl_priv;
+extern u32 iwlegacy_debug_level;
+
+#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a)
+#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a)
+#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a)
+#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a)
+
+#define iwl_print_hex_error(priv, p, len)                               \
+do {                                                                   \
+       print_hex_dump(KERN_ERR, "iwl data: ",                          \
+                      DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);           \
+} while (0)
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+#define IWL_DEBUG(__priv, level, fmt, args...)                         \
+do {                                                                   \
+       if (iwl_legacy_get_debug_level(__priv) & (level))                       \
+               dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev),         \
+                        "%c %s " fmt, in_interrupt() ? 'I' : 'U',      \
+                       __func__ , ## args);                            \
+} while (0)
+
+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)                   \
+do {                                                                   \
+       if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit())  \
+               dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev),         \
+                       "%c %s " fmt, in_interrupt() ? 'I' : 'U',       \
+                        __func__ , ## args);                           \
+} while (0)
+
+#define iwl_print_hex_dump(priv, level, p, len)                        \
+do {                                                                   \
+       if (iwl_legacy_get_debug_level(priv) & level)                           \
+               print_hex_dump(KERN_DEBUG, "iwl data: ",                \
+                              DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);   \
+} while (0)
+
+#else
+#define IWL_DEBUG(__priv, level, fmt, args...)
+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
+static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
+                                     const void *p, u32 len)
+{}
+#endif                         /* CONFIG_IWLWIFI_LEGACY_DEBUG */
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name);
+void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv);
+#else
+static inline int
+iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
+{
+       return 0;
+}
+static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
+{
+}
+#endif                         /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
+
+/*
+ * To use the debug system:
+ *
+ * If you are defining a new debug classification, simply add it to the #define
+ * list here in the form of
+ *
+ * #define IWL_DL_xxxx VALUE
+ *
+ * where xxxx should be the name of the classification (for example, WEP).
+ *
+ * You then need to either add an IWL_xxxx_DEBUG() macro definition for your
+ * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
+ * to send output to that classification.
+ *
+ * The active debug levels can be accessed via files
+ *
+ *     /sys/module/iwl4965/parameters/debug{50}
+ *     /sys/module/iwl3945/parameters/debug
+ *     /sys/class/net/wlan0/device/debug_level
+ *
+ * when CONFIG_IWLWIFI_LEGACY_DEBUG=y.
+ */
+
+/* 0x0000000F - 0x00000001 */
+#define IWL_DL_INFO            (1 << 0)
+#define IWL_DL_MAC80211                (1 << 1)
+#define IWL_DL_HCMD            (1 << 2)
+#define IWL_DL_STATE           (1 << 3)
+/* 0x000000F0 - 0x00000010 */
+#define IWL_DL_MACDUMP         (1 << 4)
+#define IWL_DL_HCMD_DUMP       (1 << 5)
+#define IWL_DL_EEPROM          (1 << 6)
+#define IWL_DL_RADIO           (1 << 7)
+/* 0x00000F00 - 0x00000100 */
+#define IWL_DL_POWER           (1 << 8)
+#define IWL_DL_TEMP            (1 << 9)
+#define IWL_DL_NOTIF           (1 << 10)
+#define IWL_DL_SCAN            (1 << 11)
+/* 0x0000F000 - 0x00001000 */
+#define IWL_DL_ASSOC           (1 << 12)
+#define IWL_DL_DROP            (1 << 13)
+#define IWL_DL_TXPOWER         (1 << 14)
+#define IWL_DL_AP              (1 << 15)
+/* 0x000F0000 - 0x00010000 */
+#define IWL_DL_FW              (1 << 16)
+#define IWL_DL_RF_KILL         (1 << 17)
+#define IWL_DL_FW_ERRORS       (1 << 18)
+#define IWL_DL_LED             (1 << 19)
+/* 0x00F00000 - 0x00100000 */
+#define IWL_DL_RATE            (1 << 20)
+#define IWL_DL_CALIB           (1 << 21)
+#define IWL_DL_WEP             (1 << 22)
+#define IWL_DL_TX              (1 << 23)
+/* 0x0F000000 - 0x01000000 */
+#define IWL_DL_RX              (1 << 24)
+#define IWL_DL_ISR             (1 << 25)
+#define IWL_DL_HT              (1 << 26)
+#define IWL_DL_IO              (1 << 27)
+/* 0xF0000000 - 0x10000000 */
+#define IWL_DL_11H             (1 << 28)
+#define IWL_DL_STATS           (1 << 29)
+#define IWL_DL_TX_REPLY                (1 << 30)
+#define IWL_DL_QOS             (1 << 31)
+
+#define IWL_DEBUG_INFO(p, f, a...)     IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
+#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
+#define IWL_DEBUG_MACDUMP(p, f, a...)  IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
+#define IWL_DEBUG_TEMP(p, f, a...)     IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
+#define IWL_DEBUG_SCAN(p, f, a...)     IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
+#define IWL_DEBUG_RX(p, f, a...)       IWL_DEBUG(p, IWL_DL_RX, f, ## a)
+#define IWL_DEBUG_TX(p, f, a...)       IWL_DEBUG(p, IWL_DL_TX, f, ## a)
+#define IWL_DEBUG_ISR(p, f, a...)      IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
+#define IWL_DEBUG_LED(p, f, a...)      IWL_DEBUG(p, IWL_DL_LED, f, ## a)
+#define IWL_DEBUG_WEP(p, f, a...)      IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
+#define IWL_DEBUG_HC(p, f, a...)       IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
+#define IWL_DEBUG_HC_DUMP(p, f, a...)  IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
+#define IWL_DEBUG_EEPROM(p, f, a...)   IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
+#define IWL_DEBUG_CALIB(p, f, a...)    IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
+#define IWL_DEBUG_FW(p, f, a...)       IWL_DEBUG(p, IWL_DL_FW, f, ## a)
+#define IWL_DEBUG_RF_KILL(p, f, a...)  IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
+#define IWL_DEBUG_DROP(p, f, a...)     IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
+#define IWL_DEBUG_DROP_LIMIT(p, f, a...)       \
+               IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
+#define IWL_DEBUG_AP(p, f, a...)       IWL_DEBUG(p, IWL_DL_AP, f, ## a)
+#define IWL_DEBUG_TXPOWER(p, f, a...)  IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
+#define IWL_DEBUG_IO(p, f, a...)       IWL_DEBUG(p, IWL_DL_IO, f, ## a)
+#define IWL_DEBUG_RATE(p, f, a...)     IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
+#define IWL_DEBUG_RATE_LIMIT(p, f, a...)       \
+               IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
+#define IWL_DEBUG_NOTIF(p, f, a...)    IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a)
+#define IWL_DEBUG_ASSOC(p, f, a...)    \
+               IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
+#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...)      \
+               IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
+#define IWL_DEBUG_HT(p, f, a...)       IWL_DEBUG(p, IWL_DL_HT, f, ## a)
+#define IWL_DEBUG_STATS(p, f, a...)    IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
+#define IWL_DEBUG_STATS_LIMIT(p, f, a...)      \
+               IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
+#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
+#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
+               IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
+#define IWL_DEBUG_QOS(p, f, a...)      IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
+#define IWL_DEBUG_RADIO(p, f, a...)    IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
+#define IWL_DEBUG_POWER(p, f, a...)    IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
+#define IWL_DEBUG_11H(p, f, a...)      IWL_DEBUG(p, IWL_DL_11H, f, ## a)
+
+#endif
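Editor's note: for reference, a typical call site of the per-class macros defined
above; the message text is made up, and the class is enabled at runtime through the
debug_level files listed in the comment block (e.g. setting bit 0 enables IWL_DL_INFO):

	IWL_DEBUG_INFO(priv, "alive notification received, uCode type %d\n",
		       priv->ucode_type);
	/* compiled out entirely when CONFIG_IWLWIFI_LEGACY_DEBUG is not set */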
diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
new file mode 100644 (file)
index 0000000..2d32438
--- /dev/null
@@ -0,0 +1,1467 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#include <linux/ieee80211.h>
+#include <net/mac80211.h>
+
+
+#include "iwl-dev.h"
+#include "iwl-debug.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+
+/* create and remove of files */
+#define DEBUGFS_ADD_FILE(name, parent, mode) do {                      \
+       if (!debugfs_create_file(#name, mode, parent, priv,             \
+                        &iwl_legacy_dbgfs_##name##_ops))               \
+               goto err;                                               \
+} while (0)
+
+#define DEBUGFS_ADD_BOOL(name, parent, ptr) do {                       \
+       struct dentry *__tmp;                                           \
+       __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR,           \
+                                   parent, ptr);                       \
+       if (IS_ERR(__tmp) || !__tmp)                                    \
+               goto err;                                               \
+} while (0)
+
+#define DEBUGFS_ADD_X32(name, parent, ptr) do {                                \
+       struct dentry *__tmp;                                           \
+       __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR,            \
+                                  parent, ptr);                        \
+       if (IS_ERR(__tmp) || !__tmp)                                    \
+               goto err;                                               \
+} while (0)
+
+/* file operation */
+#define DEBUGFS_READ_FUNC(name)                                         \
+static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file,               \
+                                       char __user *user_buf,          \
+                                       size_t count, loff_t *ppos);
+
+#define DEBUGFS_WRITE_FUNC(name)                                        \
+static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file,              \
+                                       const char __user *user_buf,    \
+                                       size_t count, loff_t *ppos);
+
+
+static int
+iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file)
+{
+       file->private_data = inode->i_private;
+       return 0;
+}
+
+#define DEBUGFS_READ_FILE_OPS(name)                            \
+       DEBUGFS_READ_FUNC(name);                                        \
+static const struct file_operations iwl_legacy_dbgfs_##name##_ops = {  \
+       .read = iwl_legacy_dbgfs_##name##_read,                         \
+       .open = iwl_legacy_dbgfs_open_file_generic,                     \
+       .llseek = generic_file_llseek,                                  \
+};
+
+#define DEBUGFS_WRITE_FILE_OPS(name)                           \
+       DEBUGFS_WRITE_FUNC(name);                                       \
+static const struct file_operations iwl_legacy_dbgfs_##name##_ops = {  \
+       .write = iwl_legacy_dbgfs_##name##_write,                       \
+       .open = iwl_legacy_dbgfs_open_file_generic,                     \
+       .llseek = generic_file_llseek,                                  \
+};
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name)                           \
+       DEBUGFS_READ_FUNC(name);                                        \
+       DEBUGFS_WRITE_FUNC(name);                                       \
+static const struct file_operations iwl_legacy_dbgfs_##name##_ops = {  \
+       .write = iwl_legacy_dbgfs_##name##_write,                       \
+       .read = iwl_legacy_dbgfs_##name##_read,                         \
+       .open = iwl_legacy_dbgfs_open_file_generic,                     \
+       .llseek = generic_file_llseek,                                  \
+};
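Editor's note: the three *_FILE_OPS macros pair with DEBUGFS_ADD_FILE further up. The
former stamps out a file_operations named iwl_legacy_dbgfs_<name>_ops; the latter
creates the debugfs entry for `priv` and jumps to the caller's err label on failure. A
hedged usage sketch (dir_debug is a hypothetical dentry; the real declarations for the
statistics files follow later in this file):

	DEBUGFS_READ_FILE_OPS(tx_statistics);	/* declares the read op + ops struct */

	/* inside a registration function that owns `priv`, `dir_debug`
	 * and an `err:` cleanup label */
	DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);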
+
+static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file,
+                                               char __user *user_buf,
+                                               size_t count, loff_t *ppos) {
+
+       struct iwl_priv *priv = file->private_data;
+       char *buf;
+       int pos = 0;
+
+       int cnt;
+       ssize_t ret;
+       const size_t bufsz = 100 +
+               sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+       pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+       for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+               pos += scnprintf(buf + pos, bufsz - pos,
+                                "\t%25s\t\t: %u\n",
+                                iwl_legacy_get_mgmt_string(cnt),
+                                priv->tx_stats.mgmt[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
+       for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+               pos += scnprintf(buf + pos, bufsz - pos,
+                                "\t%25s\t\t: %u\n",
+                                iwl_legacy_get_ctrl_string(cnt),
+                                priv->tx_stats.ctrl[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+       pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+                        priv->tx_stats.data_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+                        priv->tx_stats.data_bytes);
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t
+iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file,
+                                       const char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       u32 clear_flag;
+       char buf[8];
+       int buf_size;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%x", &clear_flag) != 1)
+               return -EFAULT;
+       iwl_legacy_clear_traffic_stats(priv);
+
+       return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file,
+                                               char __user *user_buf,
+                                               size_t count, loff_t *ppos) {
+
+       struct iwl_priv *priv = file->private_data;
+       char *buf;
+       int pos = 0;
+       int cnt;
+       ssize_t ret;
+       const size_t bufsz = 100 +
+               sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+       for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+               pos += scnprintf(buf + pos, bufsz - pos,
+                                "\t%25s\t\t: %u\n",
+                                iwl_legacy_get_mgmt_string(cnt),
+                                priv->rx_stats.mgmt[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
+       for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+               pos += scnprintf(buf + pos, bufsz - pos,
+                                "\t%25s\t\t: %u\n",
+                                iwl_legacy_get_ctrl_string(cnt),
+                                priv->rx_stats.ctrl[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+       pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+                        priv->rx_stats.data_cnt);
+       pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+                        priv->rx_stats.data_bytes);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+#define BYTE1_MASK 0x000000ff
+#define BYTE2_MASK 0x0000ffff
+#define BYTE3_MASK 0x00ffffff
+static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       u32 val;
+       char *buf;
+       ssize_t ret;
+       int i;
+       int pos = 0;
+       struct iwl_priv *priv = file->private_data;
+       size_t bufsz;
+
+       /* default is to dump the entire data segment */
+       if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
+               priv->dbgfs_sram_offset = 0x800000;
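+               /* 0x800000 is the start of the device SRAM data segment */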
+               if (priv->ucode_type == UCODE_INIT)
+                       priv->dbgfs_sram_len = priv->ucode_init_data.len;
+               else
+                       priv->dbgfs_sram_len = priv->ucode_data.len;
+       }
+       bufsz =  30 + priv->dbgfs_sram_len * sizeof(char) * 10;
+       buf = kmalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+       pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
+                       priv->dbgfs_sram_len);
+       pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
+                       priv->dbgfs_sram_offset);
+       for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
+               val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset +
+                                       priv->dbgfs_sram_len - i);
+               if (i < 4) {
+                       switch (i) {
+                       case 1:
+                               val &= BYTE1_MASK;
+                               break;
+                       case 2:
+                               val &= BYTE2_MASK;
+                               break;
+                       case 3:
+                               val &= BYTE3_MASK;
+                               break;
+                       }
+               }
+               if (!(i % 16))
+                       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+               pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file,
+                                       const char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[64];
+       int buf_size;
+       u32 offset, len;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
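+       /* expected input: "<offset>,<len>" in hex; anything else resets to a full dump */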
+       if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
+               priv->dbgfs_sram_offset = offset;
+               priv->dbgfs_sram_len = len;
+       } else {
+               priv->dbgfs_sram_offset = 0;
+               priv->dbgfs_sram_len = 0;
+       }
+
+       return count;
+}
+
+static ssize_t
+iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       struct iwl_station_entry *station;
+       int max_sta = priv->hw_params.max_stations;
+       char *buf;
+       int i, j, pos = 0;
+       ssize_t ret;
+       /* Add 30 for initial string */
+       const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);
+
+       buf = kmalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
+                       priv->num_stations);
+
+       for (i = 0; i < max_sta; i++) {
+               station = &priv->stations[i];
+               if (!station->used)
+                       continue;
+               pos += scnprintf(buf + pos, bufsz - pos,
+                                "station %d - addr: %pM, flags: %#x\n",
+                                i, station->sta.sta.addr,
+                                station->sta.station_flags_msk);
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "TID\tseq_num\ttxq_id\tframes\ttfds\t");
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "start_idx\tbitmap\t\t\trate_n_flags\n");
+
+               for (j = 0; j < MAX_TID_COUNT; j++) {
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                               "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
+                               j, station->tid[j].seq_number,
+                               station->tid[j].agg.txq_id,
+                               station->tid[j].agg.frame_count,
+                               station->tid[j].tfds_in_queue,
+                               station->tid[j].agg.start_idx,
+                               station->tid[j].agg.bitmap,
+                               station->tid[j].agg.rate_n_flags);
+
+                       if (station->tid[j].agg.wait_for_ba)
+                               pos += scnprintf(buf + pos, bufsz - pos,
+                                                " - waitforba");
+                       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+               }
+
+               pos += scnprintf(buf + pos, bufsz - pos, "\n");
+       }
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file,
+                                      char __user *user_buf,
+                                      size_t count,
+                                      loff_t *ppos)
+{
+       ssize_t ret;
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0, ofs = 0, buf_size = 0;
+       const u8 *ptr;
+       char *buf;
+       u16 eeprom_ver;
+       size_t eeprom_len = priv->cfg->base_params->eeprom_size;
+       buf_size = 4 * eeprom_len + 256;
+
+       if (eeprom_len % 16) {
+               IWL_ERR(priv, "NVM size is not multiple of 16.\n");
+               return -ENODATA;
+       }
+
+       ptr = priv->eeprom;
+       if (!ptr) {
+               IWL_ERR(priv, "Invalid EEPROM memory\n");
+               return -ENOMEM;
+       }
+
+       /* 4 characters for byte 0xYY */
+       buf = kzalloc(buf_size, GFP_KERNEL);
+       if (!buf) {
+               IWL_ERR(priv, "Cannot allocate buffer\n");
+               return -ENOMEM;
+       }
+       eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
+       pos += scnprintf(buf + pos, buf_size - pos,
+                       "EEPROM version: 0x%x\n", eeprom_ver);
+       for (ofs = 0; ofs < eeprom_len; ofs += 16) {
+               pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
+               hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos,
+                                  buf_size - pos, 0);
+               pos += strlen(buf + pos);
+               if (buf_size - pos > 0)
+                       buf[pos++] = '\n';
+       }
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_log_event_read(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char *buf;
+       int pos = 0;
+       ssize_t ret = -ENOMEM;
+
+       ret = pos = priv->cfg->ops->lib->dump_nic_event_log(
+                                       priv, true, &buf, true);
+       if (buf) {
+               ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+               kfree(buf);
+       }
+       return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_log_event_write(struct file *file,
+                                       const char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       u32 event_log_flag;
+       char buf[8];
+       int buf_size;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%u", &event_log_flag) != 1)
+               return -EFAULT;
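+       /* writing 1 asks the driver to dump the uCode event log */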
+       if (event_log_flag == 1)
+               priv->cfg->ops->lib->dump_nic_event_log(priv, true,
+                                                       NULL, false);
+
+       return count;
+}
+
+
+
+static ssize_t
+iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf,
+                                      size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       struct ieee80211_channel *channels = NULL;
+       const struct ieee80211_supported_band *supp_band = NULL;
+       int pos = 0, i, bufsz = PAGE_SIZE;
+       char *buf;
+       ssize_t ret;
+
+       if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
+               return -EAGAIN;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IWL_ERR(priv, "Cannot allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
+       if (supp_band) {
+               channels = supp_band->channels;
+
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "Displaying %d channels in 2.4GHz band (802.11bg):\n",
+                               supp_band->n_channels);
+
+               for (i = 0; i < supp_band->n_channels; i++)
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                               "%d: %ddBm: BSS%s%s, %s.\n",
+                               channels[i].hw_value,
+                               channels[i].max_power,
+                               channels[i].flags & IEEE80211_CHAN_RADAR ?
+                               " (IEEE 802.11h required)" : "",
+                               ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
+                               || (channels[i].flags &
+                               IEEE80211_CHAN_RADAR)) ? "" :
+                               ", IBSS",
+                               channels[i].flags &
+                               IEEE80211_CHAN_PASSIVE_SCAN ?
+                               "passive only" : "active/passive");
+       }
+       supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
+       if (supp_band) {
+               channels = supp_band->channels;
+
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "Displaying %d channels in 5.2GHz band (802.11a):\n",
+                               supp_band->n_channels);
+
+               for (i = 0; i < supp_band->n_channels; i++)
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                               "%d: %ddBm: BSS%s%s, %s.\n",
+                               channels[i].hw_value,
+                               channels[i].max_power,
+                               channels[i].flags & IEEE80211_CHAN_RADAR ?
+                               " (IEEE 802.11h required)" : "",
+                               ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
+                               || (channels[i].flags &
+                               IEEE80211_CHAN_RADAR)) ? "" :
+                               ", IBSS",
+                               channels[i].flags &
+                               IEEE80211_CHAN_PASSIVE_SCAN ?
+                               "passive only" : "active/passive");
+       }
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_status_read(struct file *file,
+                                               char __user *user_buf,
+                                               size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[512];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
+               test_bit(STATUS_HCMD_ACTIVE, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
+               test_bit(STATUS_INT_ENABLED, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
+               test_bit(STATUS_RF_KILL_HW, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
+               test_bit(STATUS_CT_KILL, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
+               test_bit(STATUS_INIT, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
+               test_bit(STATUS_ALIVE, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
+               test_bit(STATUS_READY, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
+               test_bit(STATUS_TEMPERATURE, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
+               test_bit(STATUS_GEO_CONFIGURED, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
+               test_bit(STATUS_EXIT_PENDING, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
+               test_bit(STATUS_STATISTICS, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
+               test_bit(STATUS_SCANNING, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
+               test_bit(STATUS_SCAN_ABORTING, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
+               test_bit(STATUS_SCAN_HW, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
+               test_bit(STATUS_POWER_PMI, &priv->status));
+       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
+               test_bit(STATUS_FW_ERROR, &priv->status));
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0;
+       int cnt = 0;
+       char *buf;
+       int bufsz = 24 * 64; /* 24 items * 64 char per item */
+       ssize_t ret;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IWL_ERR(priv, "Cannot allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       pos += scnprintf(buf + pos, bufsz - pos,
+                       "Interrupt Statistics Report:\n");
+
+       pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
+               priv->isr_stats.hw);
+       pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
+               priv->isr_stats.sw);
+       if (priv->isr_stats.sw || priv->isr_stats.hw) {
+               pos += scnprintf(buf + pos, bufsz - pos,
+                       "\tLast Restarting Code:  0x%X\n",
+                       priv->isr_stats.err_code);
+       }
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
+               priv->isr_stats.sch);
+       pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
+               priv->isr_stats.alive);
+#endif
+       pos += scnprintf(buf + pos, bufsz - pos,
+               "HW RF KILL switch toggled:\t %u\n",
+               priv->isr_stats.rfkill);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
+               priv->isr_stats.ctkill);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
+               priv->isr_stats.wakeup);
+
+       pos += scnprintf(buf + pos, bufsz - pos,
+               "Rx command responses:\t\t %u\n",
+               priv->isr_stats.rx);
+       for (cnt = 0; cnt < REPLY_MAX; cnt++) {
+               if (priv->isr_stats.rx_handlers[cnt] > 0)
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                               "\tRx handler[%36s]:\t\t %u\n",
+                               iwl_legacy_get_cmd_string(cnt),
+                               priv->isr_stats.rx_handlers[cnt]);
+       }
+
+       pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
+               priv->isr_stats.tx);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
+               priv->isr_stats.unhandled);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file,
+                                        const char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+       int buf_size;
+       u32 reset_flag;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%x", &reset_flag) != 1)
+               return -EFAULT;
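+       /* writing 0 clears the accumulated interrupt statistics */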
+       if (reset_flag == 0)
+               iwl_legacy_clear_isr_stats(priv);
+
+       return count;
+}
+
+static ssize_t
+iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf,
+                                      size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       struct iwl_rxon_context *ctx;
+       int pos = 0, i;
+       char buf[256 * NUM_IWL_RXON_CTX];
+       const size_t bufsz = sizeof(buf);
+
+       for_each_context(priv, ctx) {
+               pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
+                                ctx->ctxid);
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "\tcw_min\tcw_max\taifsn\ttxop\n");
+               for (i = 0; i < AC_NUM; i++) {
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                               "AC[%d]\t%u\t%u\t%u\t%u\n", i,
+                               ctx->qos_data.def_qos_parm.ac[i].cw_min,
+                               ctx->qos_data.def_qos_parm.ac[i].cw_max,
+                               ctx->qos_data.def_qos_parm.ac[i].aifsn,
+                               ctx->qos_data.def_qos_parm.ac[i].edca_txop);
+               }
+               pos += scnprintf(buf + pos, bufsz - pos, "\n");
+       }
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file,
+                                        const char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+       int buf_size;
+       int ht40;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &ht40) != 1)
+               return -EFAULT;
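+       /* a nonzero value disables HT40; only honoured while not associated */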
+       if (!iwl_legacy_is_any_associated(priv)) {
+               priv->disable_ht40 = ht40 ? true : false;
+       } else {
+               IWL_ERR(priv,
+                       "Sta associated with AP - changing 40MHz channel support is not allowed\n");
+               return -EINVAL;
+       }
+
+       return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[100];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+
+       pos += scnprintf(buf + pos, bufsz - pos,
+                       "11n 40MHz Mode: %s\n",
+                       priv->disable_ht40 ? "Disabled" : "Enabled");
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+DEBUGFS_READ_WRITE_FILE_OPS(sram);
+DEBUGFS_READ_WRITE_FILE_OPS(log_event);
+DEBUGFS_READ_FILE_OPS(nvm);
+DEBUGFS_READ_FILE_OPS(stations);
+DEBUGFS_READ_FILE_OPS(channels);
+DEBUGFS_READ_FILE_OPS(status);
+DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+DEBUGFS_READ_FILE_OPS(qos);
+DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
+
+static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0, ofs = 0;
+       int cnt = 0, entry;
+       struct iwl_tx_queue *txq;
+       struct iwl_queue *q;
+       struct iwl_rx_queue *rxq = &priv->rxq;
+       char *buf;
+       int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
+               (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
+       const u8 *ptr;
+       ssize_t ret;
+
+       if (!priv->txq) {
+               IWL_ERR(priv, "txq not ready\n");
+               return -EAGAIN;
+       }
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IWL_ERR(priv, "Cannot allocate buffer\n");
+               return -ENOMEM;
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
+       for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
+               txq = &priv->txq[cnt];
+               q = &txq->q;
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "q[%d]: read_ptr: %u, write_ptr: %u\n",
+                               cnt, q->read_ptr, q->write_ptr);
+       }
+       if (priv->tx_traffic && (iwlegacy_debug_level & IWL_DL_TX)) {
+               ptr = priv->tx_traffic;
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
+               for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
+                       for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
+                            entry++,  ofs += 16) {
+                               pos += scnprintf(buf + pos, bufsz - pos,
+                                               "0x%.4x ", ofs);
+                               hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
+                                                  buf + pos, bufsz - pos, 0);
+                               pos += strlen(buf + pos);
+                               if (bufsz - pos > 0)
+                                       buf[pos++] = '\n';
+                       }
+               }
+       }
+
+       pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
+       pos += scnprintf(buf + pos, bufsz - pos,
+                       "read: %u, write: %u\n",
+                        rxq->read, rxq->write);
+
+       if (priv->rx_traffic && (iwlegacy_debug_level & IWL_DL_RX)) {
+               ptr = priv->rx_traffic;
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
+               for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
+                       for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
+                            entry++,  ofs += 16) {
+                               pos += scnprintf(buf + pos, bufsz - pos,
+                                               "0x%.4x ", ofs);
+                               hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
+                                                  buf + pos, bufsz - pos, 0);
+                               pos += strlen(buf + pos);
+                               if (bufsz - pos > 0)
+                                       buf[pos++] = '\n';
+                       }
+               }
+       }
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file,
+                                        const char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+       int buf_size;
+       int traffic_log;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &traffic_log) != 1)
+               return -EFAULT;
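+       /* writing 0 resets the Tx/Rx traffic log buffers */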
+       if (traffic_log == 0)
+               iwl_legacy_reset_traffic_log(priv);
+
+       return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file,
+                                               char __user *user_buf,
+                                               size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       struct iwl_tx_queue *txq;
+       struct iwl_queue *q;
+       char *buf;
+       int pos = 0;
+       int cnt;
+       int ret;
+       const size_t bufsz = sizeof(char) * 64 *
+                               priv->cfg->base_params->num_of_queues;
+
+       if (!priv->txq) {
+               IWL_ERR(priv, "txq not ready\n");
+               return -EAGAIN;
+       }
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
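+       /* swq_id packs the AC in bits 0-1 and the HW queue number in bits 2-6 */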
+       for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
+               txq = &priv->txq[cnt];
+               q = &txq->q;
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "hwq %.2d: read=%u write=%u stop=%d"
+                               " swq_id=%#.2x (ac %d/hwq %d)\n",
+                               cnt, q->read_ptr, q->write_ptr,
+                               !!test_bit(cnt, priv->queue_stopped),
+                               txq->swq_id, txq->swq_id & 3,
+                               (txq->swq_id >> 2) & 0x1f);
+               if (cnt >= 4)
+                       continue;
+               /* for the ACs, display the stop count too */
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "        stop-count: %d\n",
+                               atomic_read(&priv->queue_stop_count[cnt]));
+       }
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file,
+                                               char __user *user_buf,
+                                               size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       struct iwl_rx_queue *rxq = &priv->rxq;
+       char buf[256];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
+                                               rxq->read);
+       pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
+                                               rxq->write);
+       pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
+                                               rxq->free_count);
+       if (rxq->rb_stts) {
+               pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
+                        le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF);
+       } else {
+               pos += scnprintf(buf + pos, bufsz - pos,
+                                       "closed_rb_num: Not Allocated\n");
+       }
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
+                       user_buf, count, ppos);
+}
+
+static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
+                       user_buf, count, ppos);
+}
+
+static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
+                       user_buf, count, ppos);
+}
+
+static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0;
+       int cnt = 0;
+       char *buf;
+       int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
+       ssize_t ret;
+       struct iwl_sensitivity_data *data;
+
+       data = &priv->sensitivity_data;
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IWL_ERR(priv, "Cannot allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
+                       data->auto_corr_ofdm);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                       "auto_corr_ofdm_mrc:\t\t %u\n",
+                       data->auto_corr_ofdm_mrc);
+       pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
+                       data->auto_corr_ofdm_x1);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                       "auto_corr_ofdm_mrc_x1:\t\t %u\n",
+                       data->auto_corr_ofdm_mrc_x1);
+       pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
+                       data->auto_corr_cck);
+       pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
+                       data->auto_corr_cck_mrc);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                       "last_bad_plcp_cnt_ofdm:\t\t %u\n",
+                       data->last_bad_plcp_cnt_ofdm);
+       pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
+                       data->last_fa_cnt_ofdm);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                       "last_bad_plcp_cnt_cck:\t\t %u\n",
+                       data->last_bad_plcp_cnt_cck);
+       pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
+                       data->last_fa_cnt_cck);
+       pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
+                       data->nrg_curr_state);
+       pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
+                       data->nrg_prev_state);
+       pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
+       for (cnt = 0; cnt < 10; cnt++) {
+               pos += scnprintf(buf + pos, bufsz - pos, " %u",
+                               data->nrg_value[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+       pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
+       for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
+               pos += scnprintf(buf + pos, bufsz - pos, " %u",
+                               data->nrg_silence_rssi[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+       pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
+                       data->nrg_silence_ref);
+       pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
+                       data->nrg_energy_idx);
+       pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
+                       data->nrg_silence_idx);
+       pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
+                       data->nrg_th_cck);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                       "nrg_auto_corr_silence_diff:\t %u\n",
+                       data->nrg_auto_corr_silence_diff);
+       pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
+                       data->num_in_cck_no_fa);
+       pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
+                       data->nrg_th_ofdm);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+
+static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0;
+       int cnt = 0;
+       char *buf;
+       int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
+       ssize_t ret;
+       struct iwl_chain_noise_data *data;
+
+       data = &priv->chain_noise_data;
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IWL_ERR(priv, "Cannot allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
+                       data->active_chains);
+       pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
+                       data->chain_noise_a);
+       pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
+                       data->chain_noise_b);
+       pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
+                       data->chain_noise_c);
+       pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
+                       data->chain_signal_a);
+       pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
+                       data->chain_signal_b);
+       pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
+                       data->chain_signal_c);
+       pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
+                       data->beacon_count);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
+       for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+               pos += scnprintf(buf + pos, bufsz - pos, " %u",
+                               data->disconn_array[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+       pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
+       for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+               pos += scnprintf(buf + pos, bufsz - pos, " %u",
+                               data->delta_gain_code[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+       pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
+                       data->radio_write);
+       pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
+                       data->state);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file,
+                                                   char __user *user_buf,
+                                                   size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[60];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+       u32 pwrsave_status;
+
+       pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
+                       CSR_GP_REG_POWER_SAVE_STATUS_MSK;
+
+       pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
+       pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
+               (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
+               (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
+               (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
+               "error");
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file,
+                                        const char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+       int buf_size;
+       int clear;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &clear) != 1)
+               return -EFAULT;
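+       /* the parsed value is ignored; any valid write refreshes the statistics */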
+
+       /* make request to uCode to retrieve statistics information */
+       mutex_lock(&priv->mutex);
+       iwl_legacy_send_statistics_request(priv, CMD_SYNC, true);
+       mutex_unlock(&priv->mutex);
+
+       return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_ucode_tracing_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0;
+       char buf[128];
+       const size_t bufsz = sizeof(buf);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
+                       priv->event_log.ucode_trace ? "On" : "Off");
+       pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n",
+                       priv->event_log.non_wraps_count);
+       pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n",
+                       priv->event_log.wraps_once_count);
+       pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
+                       priv->event_log.wraps_more_count);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_ucode_tracing_write(struct file *file,
+                                        const char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+       int buf_size;
+       int trace;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &trace) != 1)
+               return -EFAULT;
+
+       if (trace) {
+               priv->event_log.ucode_trace = true;
+               /* schedule the ucode timer to occur in UCODE_TRACE_PERIOD */
+               mod_timer(&priv->ucode_trace,
+                       jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
+       } else {
+               priv->event_log.ucode_trace = false;
+               del_timer_sync(&priv->ucode_trace);
+       }
+
+       return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       int len = 0;
+       char buf[20];
+
+       len = sprintf(buf, "0x%04X\n",
+               le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file,
+                                               char __user *user_buf,
+                                               size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       int len = 0;
+       char buf[20];
+
+       len = sprintf(buf, "0x%04X\n",
+       le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char *buf;
+       int pos = 0;
+       ssize_t ret = -EFAULT;
+
+       if (priv->cfg->ops->lib->dump_fh) {
+               ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
+               if (buf) {
+                       ret = simple_read_from_buffer(user_buf,
+                                                     count, ppos, buf, pos);
+                       kfree(buf);
+               }
+       }
+
+       return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0;
+       char buf[12];
+       const size_t bufsz = sizeof(buf);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
+                       priv->missed_beacon_threshold);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file,
+                                        const char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+       int buf_size;
+       int missed;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &missed) != 1)
+               return -EINVAL;
+
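+       /* out-of-range values fall back to the default threshold */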
+       if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
+           missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
+               priv->missed_beacon_threshold =
+                       IWL_MISSED_BEACON_THRESHOLD_DEF;
+       else
+               priv->missed_beacon_threshold = missed;
+
+       return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_plcp_delta_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       int pos = 0;
+       char buf[12];
+       const size_t bufsz = sizeof(buf);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
+                       priv->cfg->base_params->plcp_delta_threshold);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_plcp_delta_write(struct file *file,
+                                       const char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+       int buf_size;
+       int plcp;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &plcp) != 1)
+               return -EINVAL;
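+       /* out-of-range values disable the PLCP error-rate check */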
+       if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
+               (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
+               priv->cfg->base_params->plcp_delta_threshold =
+                       IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
+       else
+               priv->cfg->base_params->plcp_delta_threshold = plcp;
+       return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       int i, pos = 0;
+       char buf[300];
+       const size_t bufsz = sizeof(buf);
+       struct iwl_force_reset *force_reset;
+
+       for (i = 0; i < IWL_MAX_FORCE_RESET; i++) {
+               force_reset = &priv->force_reset[i];
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "Force reset method %d\n", i);
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "\tnumber of reset requests: %d\n",
+                               force_reset->reset_request_count);
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "\tnumber of successful reset requests: %d\n",
+                               force_reset->reset_success_count);
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "\tnumber of rejected reset requests: %d\n",
+                               force_reset->reset_reject_count);
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "\treset duration: %lu\n",
+                               force_reset->reset_duration);
+       }
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file,
+                                       const char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+       int buf_size;
+       int reset, ret;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &reset) != 1)
+               return -EINVAL;
+       switch (reset) {
+       case IWL_RF_RESET:
+       case IWL_FW_RESET:
+               ret = iwl_legacy_force_reset(priv, reset, true);
+               break;
+       default:
+               return -EINVAL;
+       }
+       return ret ? ret : count;
+}
+
+static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file,
+                                       const char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_priv *priv = file->private_data;
+       char buf[8];
+       int buf_size;
+       int timeout;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) -  1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &timeout) != 1)
+               return -EINVAL;
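+       /* values outside 0..IWL_MAX_WD_TIMEOUT revert to IWL_DEF_WD_TIMEOUT */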
+       if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
+               timeout = IWL_DEF_WD_TIMEOUT;
+
+       priv->cfg->base_params->wd_timeout = timeout;
+       iwl_legacy_setup_watchdog(priv);
+       return count;
+}
+
+DEBUGFS_READ_FILE_OPS(rx_statistics);
+DEBUGFS_READ_FILE_OPS(tx_statistics);
+DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
+DEBUGFS_READ_FILE_OPS(rx_queue);
+DEBUGFS_READ_FILE_OPS(tx_queue);
+DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_general_stats);
+DEBUGFS_READ_FILE_OPS(sensitivity);
+DEBUGFS_READ_FILE_OPS(chain_noise);
+DEBUGFS_READ_FILE_OPS(power_save_status);
+DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
+DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
+DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
+DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
+DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
+DEBUGFS_READ_FILE_OPS(rxon_flags);
+DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
+DEBUGFS_WRITE_FILE_OPS(wd_timeout);
+
+/*
+ * Create the debugfs files and directories
+ *
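+ * The files are created under the per-phy mac80211 debugfs directory
+ * (typically <debugfs>/ieee80211/phyN/<name>/), split into "data", "rf"
+ * and "debug" subdirectories.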
+ */
+int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
+{
+       struct dentry *phyd = priv->hw->wiphy->debugfsdir;
+       struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
+
+       dir_drv = debugfs_create_dir(name, phyd);
+       if (!dir_drv)
+               return -ENOMEM;
+
+       priv->debugfs_dir = dir_drv;
+
+       dir_data = debugfs_create_dir("data", dir_drv);
+       if (!dir_data)
+               goto err;
+       dir_rf = debugfs_create_dir("rf", dir_drv);
+       if (!dir_rf)
+               goto err;
+       dir_debug = debugfs_create_dir("debug", dir_drv);
+       if (!dir_debug)
+               goto err;
+
+       DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
+       DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
+       DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
+       DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
+       DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
+       DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
+       DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
+       DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
+
+       if (priv->cfg->base_params->sensitivity_calib_by_driver)
+               DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
+       if (priv->cfg->base_params->chain_noise_calib_by_driver)
+               DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
+       if (priv->cfg->base_params->ucode_tracing)
+               DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
+       if (priv->cfg->base_params->sensitivity_calib_by_driver)
+               DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
+                                &priv->disable_sens_cal);
+       if (priv->cfg->base_params->chain_noise_calib_by_driver)
+               DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
+                                &priv->disable_chain_noise_cal);
+       DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
+                               &priv->disable_tx_power_cal);
+       return 0;
+
+err:
+       IWL_ERR(priv, "Can't create the debugfs directory\n");
+       iwl_legacy_dbgfs_unregister(priv);
+       return -ENOMEM;
+}
+EXPORT_SYMBOL(iwl_legacy_dbgfs_register);
+
+/*
+ * Remove the debugfs files and directories
+ */
+void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
+{
+       if (!priv->debugfs_dir)
+               return;
+
+       debugfs_remove_recursive(priv->debugfs_dir);
+       priv->debugfs_dir = NULL;
+}
+EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
new file mode 100644
index 0000000..9ee849d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-dev.h
@@ -0,0 +1,1426 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+/*
+ * Please use this file (iwl-dev.h) for driver implementation definitions.
+ * Please use iwl-commands.h for uCode API definitions.
+ * Please use iwl-4965-hw.h for hardware-related definitions.
+ */
+
+#ifndef __iwl_legacy_dev_h__
+#define __iwl_legacy_dev_h__
+
+#include <linux/pci.h> /* for struct pci_device_id */
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/wait.h>
+#include <net/ieee80211_radiotap.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-csr.h"
+#include "iwl-prph.h"
+#include "iwl-fh.h"
+#include "iwl-debug.h"
+#include "iwl-4965-hw.h"
+#include "iwl-3945-hw.h"
+#include "iwl-led.h"
+#include "iwl-power.h"
+#include "iwl-legacy-rs.h"
+
+struct iwl_tx_queue;
+
+/* CT-KILL constants */
+#define CT_KILL_THRESHOLD_LEGACY   110 /* in Celsius */
+
+/* Default noise level to report when noise measurement is not available.
+ *   This may be because we're:
+ *   1)  Not associated (4965, no beacon statistics being sent to driver)
+ *   2)  Scanning (noise measurement does not apply to associated channel)
+ *   3)  Receiving CCK (3945 delivers noise info only for OFDM frames)
+ * Use default noise value of -127 ... this is below the range of measurable
+ *   Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ *   Also, -127 works better than 0 when averaging frames with/without
+ *   noise info (e.g. averaging might be done in app); measured dBm values are
+ *   always negative ... using a negative value as the default keeps all
+ *   averages within an s8's (used in some apps) range of negative values. */
+#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
+
+/*
+ * RTS threshold here is total size [2347] minus 4 FCS bytes
+ * Per spec:
+ *   a value of 0 means RTS on all data/management packets
+ *   a value > max MSDU size means no RTS
+ * else RTS for data/management frames where MPDU is larger
+ *   than RTS value.
+ */
+#define DEFAULT_RTS_THRESHOLD     2347U
+#define MIN_RTS_THRESHOLD         0U
+#define MAX_RTS_THRESHOLD         2347U
+#define MAX_MSDU_SIZE            2304U
+#define MAX_MPDU_SIZE            2346U
+#define DEFAULT_BEACON_INTERVAL   100U
+#define        DEFAULT_SHORT_RETRY_LIMIT 7U
+#define        DEFAULT_LONG_RETRY_LIMIT  4U
+
+struct iwl_rx_mem_buffer {
+       dma_addr_t page_dma;
+       struct page *page;
+       struct list_head list;
+};
+
+#define rxb_addr(r) page_address(r->page)
+
+/* defined below */
+struct iwl_device_cmd;
+
+struct iwl_cmd_meta {
+       /* only for SYNC commands, iff the reply skb is wanted */
+       struct iwl_host_cmd *source;
+       /*
+        * only for ASYNC commands
+        * (which is somewhat stupid -- look at iwl-sta.c for instance
+        * which duplicates a bunch of code because the callback isn't
+        * invoked for SYNC commands, if it were and its result passed
+        * through it would be simpler...)
+        */
+       void (*callback)(struct iwl_priv *priv,
+                        struct iwl_device_cmd *cmd,
+                        struct iwl_rx_packet *pkt);
+
+       /* The CMD_SIZE_HUGE flag bit indicates that the command
+        * structure is stored at the end of the shared queue memory. */
+       u32 flags;
+
+       DEFINE_DMA_UNMAP_ADDR(mapping);
+       DEFINE_DMA_UNMAP_LEN(len);
+};
+
+/*
+ * Generic queue structure
+ *
+ * Contains common data for Rx and Tx queues
+ */
+struct iwl_queue {
+       int n_bd;              /* number of BDs in this queue */
+       int write_ptr;         /* first empty entry (index), host write pointer */
+       int read_ptr;          /* last used entry (index), host read pointer */
+       /* used for monitoring and recovering a stuck queue */
+       dma_addr_t dma_addr;   /* physical addr of the BDs */
+       int n_window;          /* safe queue window */
+       u32 id;
+       int low_mark;          /* low watermark, resume queue if free
+                               * space more than this */
+       int high_mark;         /* high watermark, stop queue if free
+                               * space less than this */
+} __packed;
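The read/write pointer bookkeeping above is the usual circular-ring arithmetic. The sketch below is only an assumption about how free space can be derived from these fields (the real helper is iwl_legacy_queue_space(), declared later in this header); it is not copied from the driver:

    /* Illustrative only: free slots in a ring of n_bd entries, where write_ptr
     * is the next entry the host will fill and read_ptr the last one consumed. */
    static int example_ring_free_slots(int n_bd, int write_ptr, int read_ptr)
    {
            int used = write_ptr - read_ptr;

            if (used < 0)                   /* write pointer has wrapped around */
                    used += n_bd;
            return n_bd - used - 1;         /* reserve one slot: full != empty */
    }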
+
+/* One for each TFD */
+struct iwl_tx_info {
+       struct sk_buff *skb;
+       struct iwl_rxon_context *ctx;
+};
+
+/**
+ * struct iwl_tx_queue - Tx Queue for DMA
+ * @q: generic Rx/Tx queue descriptor
+ * @bd: base of circular buffer of TFDs
+ * @cmd: array of command/TX buffer pointers
+ * @meta: array of meta data for each command/tx buffer
+ * @dma_addr_cmd: physical address of cmd/tx buffer array
+ * @txb: array of per-TFD driver data
+ * @time_stamp: time (in jiffies) of last read_ptr change
+ * @need_update: indicates need to update read/write index
+ * @sched_retry: indicates the queue is used for HT aggregation (HT AGG)
+ *
+ * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
+ * descriptors) and required locking structures.
+ */
+#define TFD_TX_CMD_SLOTS 256
+#define TFD_CMD_SLOTS 32
+
+struct iwl_tx_queue {
+       struct iwl_queue q;
+       void *tfds;
+       struct iwl_device_cmd **cmd;
+       struct iwl_cmd_meta *meta;
+       struct iwl_tx_info *txb;
+       unsigned long time_stamp;
+       u8 need_update;
+       u8 sched_retry;
+       u8 active;
+       u8 swq_id;
+};
+
+#define IWL_NUM_SCAN_RATES         (2)
+
+struct iwl4965_channel_tgd_info {
+       u8 type;
+       s8 max_power;
+};
+
+struct iwl4965_channel_tgh_info {
+       s64 last_radar_time;
+};
+
+#define IWL4965_MAX_RATE (33)
+
+struct iwl3945_clip_group {
+       /* maximum power level to prevent clipping for each rate, derived by
+        *   us from this band's saturation power in EEPROM */
+       const s8 clip_powers[IWL_MAX_RATES];
+};
+
+/* current Tx power values to use, one for each rate for each channel.
+ * requested power is limited by:
+ * -- regulatory EEPROM limits for this channel
+ * -- hardware capabilities (clip-powers)
+ * -- spectrum management
+ * -- user preference (e.g. iwconfig)
+ * when requested power is set, base power index must also be set. */
+struct iwl3945_channel_power_info {
+       struct iwl3945_tx_power tpc;    /* actual radio and DSP gain settings */
+       s8 power_table_index;   /* actual (compensated) index into gain table */
+       s8 base_power_index;    /* gain index for power at factory temp. */
+       s8 requested_power;     /* power (dBm) requested for this chnl/rate */
+};
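The comment above lists four independent caps on the requested per-channel/per-rate power. A hedged sketch of that clamping, with invented parameter names (dBm throughout):

    /* Illustrative only: the request is limited by every cap named above. */
    static int example_limit_tx_power(int user_dbm, int eeprom_dbm,
                                      int clip_dbm, int spectrum_dbm)
    {
            int p = user_dbm;

            if (p > eeprom_dbm)             /* regulatory EEPROM limit */
                    p = eeprom_dbm;
            if (p > clip_dbm)               /* hardware clip-power limit */
                    p = clip_dbm;
            if (p > spectrum_dbm)           /* spectrum management limit */
                    p = spectrum_dbm;
            return p;
    }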
+
+/* current scan Tx power values to use, one for each scan rate for each
+ * channel. */
+struct iwl3945_scan_power_info {
+       struct iwl3945_tx_power tpc;    /* actual radio and DSP gain settings */
+       s8 power_table_index;   /* actual (compensated) index into gain table */
+       s8 requested_power;     /* scan pwr (dBm) requested for chnl/rate */
+};
+
+/*
+ * One for each channel, holds all channel setup data
+ * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
+ *     with one another!
+ */
+struct iwl_channel_info {
+       struct iwl4965_channel_tgd_info tgd;
+       struct iwl4965_channel_tgh_info tgh;
+       struct iwl_eeprom_channel eeprom;       /* EEPROM regulatory limit */
+       struct iwl_eeprom_channel ht40_eeprom;  /* EEPROM regulatory limit for
+                                                * HT40 channel */
+
+       u8 channel;       /* channel number */
+       u8 flags;         /* flags copied from EEPROM */
+       s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
+       s8 curr_txpow;    /* (dBm) regulatory/spectrum/user (not h/w) limit */
+       s8 min_power;     /* always 0 */
+       s8 scan_power;    /* (dBm) regul. eeprom, direct scans, any rate */
+
+       u8 group_index;   /* 0-4, maps channel to group1/2/3/4/5 */
+       u8 band_index;    /* 0-4, maps channel to band1/2/3/4/5 */
+       enum ieee80211_band band;
+
+       /* HT40 channel info */
+       s8 ht40_max_power_avg;  /* (dBm) regul. eeprom, normal Tx, any rate */
+       u8 ht40_flags;          /* flags copied from EEPROM */
+       u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
+
+       /* Radio/DSP gain settings for each "normal" data Tx rate.
+        * These include, in addition to RF and DSP gain, a few fields for
+        *   remembering/modifying gain settings (indexes). */
+       struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE];
+
+       /* Radio/DSP gain settings for each scan rate, for directed scans. */
+       struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
+};
+
+#define IWL_TX_FIFO_BK         0       /* shared */
+#define IWL_TX_FIFO_BE         1
+#define IWL_TX_FIFO_VI         2       /* shared */
+#define IWL_TX_FIFO_VO         3
+#define IWL_TX_FIFO_UNUSED     -1
+
+/* Minimum number of queues. MAX_NUM is defined in hw specific files.
+ * Set the minimum to accommodate the 4 standard TX queues, 1 command
+ * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
+#define IWL_MIN_NUM_QUEUES     10
+
+#define IWL_DEFAULT_CMD_QUEUE_NUM      4
+
+#define IEEE80211_DATA_LEN              2304
+#define IEEE80211_4ADDR_LEN             30
+#define IEEE80211_HLEN                  (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN             (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+struct iwl_frame {
+       union {
+               struct ieee80211_hdr frame;
+               struct iwl_tx_beacon_cmd beacon;
+               u8 raw[IEEE80211_FRAME_LEN];
+               u8 cmd[360];
+       } u;
+       struct list_head list;
+};
+
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
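SEQ_TO_SN()/SN_TO_SEQ() simply move the 12-bit sequence number in and out of the 802.11 sequence-control field; assuming mac80211's IEEE80211_SCTL_SEQ mask of 0xFFF0, a quick illustrative round trip:

    /* Illustrative only. */
    u16 seq_ctrl = (1234 << 4) | 0x3;   /* sequence number 1234, fragment 3 */
    u16 sn       = SEQ_TO_SN(seq_ctrl); /* -> 1234 */
    u16 rebuilt  = SN_TO_SEQ(sn);       /* -> 1234 << 4, fragment bits cleared */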
+
+enum {
+       CMD_SYNC = 0,
+       CMD_SIZE_NORMAL = 0,
+       CMD_NO_SKB = 0,
+       CMD_SIZE_HUGE = (1 << 0),
+       CMD_ASYNC = (1 << 1),
+       CMD_WANT_SKB = (1 << 2),
+};
+
+#define DEF_CMD_PAYLOAD_SIZE 320
+
+/**
+ * struct iwl_device_cmd
+ *
+ * For allocation of the command and tx queues, this establishes the overall
+ * size of the largest command we send to uCode, except for a scan command
+ * (which is relatively huge; space is allocated separately).
+ */
+struct iwl_device_cmd {
+       struct iwl_cmd_header hdr;      /* uCode API */
+       union {
+               u32 flags;
+               u8 val8;
+               u16 val16;
+               u32 val32;
+               struct iwl_tx_cmd tx;
+               u8 payload[DEF_CMD_PAYLOAD_SIZE];
+       } __packed cmd;
+} __packed;
+
+#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
+
+
+struct iwl_host_cmd {
+       const void *data;
+       unsigned long reply_page;
+       void (*callback)(struct iwl_priv *priv,
+                        struct iwl_device_cmd *cmd,
+                        struct iwl_rx_packet *pkt);
+       u32 flags;
+       u16 len;
+       u8 id;
+};
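A hedged sketch of how a caller might fill struct iwl_host_cmd using the CMD_* flags defined above; the command id and payload are hypothetical:

    /* Illustrative only: an asynchronous command with no reply skb wanted. */
    u8 payload[4] = { 0 };
    struct iwl_host_cmd cmd = {
            .id    = 0x1c,          /* hypothetical command id */
            .data  = payload,
            .len   = sizeof(payload),
            .flags = CMD_ASYNC,     /* don't block waiting for completion */
    };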
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS  12
+
+/**
+ * struct iwl_rx_queue - Rx queue
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @read: Shared index to newest available Rx buffer
+ * @write: Shared index to oldest written Rx packet
+ * @free_count: Number of pre-allocated buffers in rx_free
+ * @rx_free: list of free SKBs for use
+ * @rx_used: List of Rx buffers with no SKB
+ * @need_update: flag to indicate we need to update read/write index
+ * @rb_stts: driver's pointer to receive buffer status
+ * @rb_stts_dma: bus address of receive buffer status
+ *
+ * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
+ */
+struct iwl_rx_queue {
+       __le32 *bd;
+       dma_addr_t bd_dma;
+       struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+       struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+       u32 read;
+       u32 write;
+       u32 free_count;
+       u32 write_actual;
+       struct list_head rx_free;
+       struct list_head rx_used;
+       int need_update;
+       struct iwl_rb_status *rb_stts;
+       dma_addr_t rb_stts_dma;
+       spinlock_t lock;
+};
+
+#define IWL_SUPPORTED_RATES_IE_LEN         8
+
+#define MAX_TID_COUNT        9
+
+#define IWL_INVALID_RATE     0xFF
+#define IWL_INVALID_VALUE    -1
+
+/**
+ * struct iwl_ht_agg -- aggregation status while waiting for block-ack
+ * @txq_id: Tx queue used for Tx attempt
+ * @frame_count: # frames attempted by Tx command
+ * @wait_for_ba: Expect block-ack before next Tx reply
+ * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
+ * @bitmap: bitmap of frames pending ACK in Tx window, one bit per frame
+ * @rate_n_flags: Rate at which Tx was attempted
+ *
+ * If REPLY_TX indicates that aggregation was attempted, driver must wait
+ * for block ack (REPLY_COMPRESSED_BA).  This struct stores tx reply info
+ * until block ack arrives.
+ */
+struct iwl_ht_agg {
+       u16 txq_id;
+       u16 frame_count;
+       u16 wait_for_ba;
+       u16 start_idx;
+       u64 bitmap;
+       u32 rate_n_flags;
+#define IWL_AGG_OFF 0
+#define IWL_AGG_ON 1
+#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
+#define IWL_EMPTYING_HW_QUEUE_DELBA 3
+       u8 state;
+};
+
+
+struct iwl_tid_data {
+       u16 seq_number; /* 4965 only */
+       u16 tfds_in_queue;
+       struct iwl_ht_agg agg;
+};
+
+struct iwl_hw_key {
+       u32 cipher;
+       int keylen;
+       u8 keyidx;
+       u8 key[32];
+};
+
+union iwl_ht_rate_supp {
+       u16 rates;
+       struct {
+               u8 siso_rate;
+               u8 mimo_rate;
+       };
+};
+
+#define CFG_HT_RX_AMPDU_FACTOR_8K   (0x0)
+#define CFG_HT_RX_AMPDU_FACTOR_16K  (0x1)
+#define CFG_HT_RX_AMPDU_FACTOR_32K  (0x2)
+#define CFG_HT_RX_AMPDU_FACTOR_64K  (0x3)
+#define CFG_HT_RX_AMPDU_FACTOR_DEF  CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MAX  CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MIN  CFG_HT_RX_AMPDU_FACTOR_8K
+
+/*
+ * Maximal MPDU density for TX aggregation
+ * 4 - 2us density
+ * 5 - 4us density
+ * 6 - 8us density
+ * 7 - 16us density
+ */
+#define CFG_HT_MPDU_DENSITY_2USEC   (0x4)
+#define CFG_HT_MPDU_DENSITY_4USEC   (0x5)
+#define CFG_HT_MPDU_DENSITY_8USEC   (0x6)
+#define CFG_HT_MPDU_DENSITY_16USEC  (0x7)
+#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
+#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
+#define CFG_HT_MPDU_DENSITY_MIN     (0x1)
+
+struct iwl_ht_config {
+       bool single_chain_sufficient;
+       enum ieee80211_smps_mode smps; /* current smps mode */
+};
+
+/* QoS structures */
+struct iwl_qos_info {
+       int qos_active;
+       struct iwl_qosparam_cmd def_qos_parm;
+};
+
+/*
+ * Structure should be accessed with sta_lock held. When station addition
+ * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
+ * the commands (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) without
+ * sta_lock held.
+ */
+struct iwl_station_entry {
+       struct iwl_legacy_addsta_cmd sta;
+       struct iwl_tid_data tid[MAX_TID_COUNT];
+       u8 used, ctxid;
+       struct iwl_hw_key keyinfo;
+       struct iwl_link_quality_cmd *lq;
+};
+
+struct iwl_station_priv_common {
+       struct iwl_rxon_context *ctx;
+       u8 sta_id;
+};
+
+/*
+ * iwl_station_priv: Driver's private station information
+ *
+ * When mac80211 creates a station, it reserves some space (hw->sta_data_size)
+ * in the structure for use by the driver. This structure is placed in that
+ * space.
+ *
+ * The common struct MUST be first because it is shared between
+ * 3945 and 4965!
+ */
+struct iwl_station_priv {
+       struct iwl_station_priv_common common;
+       struct iwl_lq_sta lq_sta;
+       atomic_t pending_frames;
+       bool client;
+       bool asleep;
+};
+
+/**
+ * struct iwl_vif_priv - driver's private per-interface information
+ *
+ * When mac80211 allocates a virtual interface, it can allocate
+ * space for us to put data into.
+ */
+struct iwl_vif_priv {
+       struct iwl_rxon_context *ctx;
+       u8 ibss_bssid_sta_id;
+};
+
+/* one for each uCode image (inst/data, boot/init/runtime) */
+struct fw_desc {
+       void *v_addr;           /* access by driver */
+       dma_addr_t p_addr;      /* access by card's busmaster DMA */
+       u32 len;                /* bytes */
+};
+
+/* uCode file layout */
+struct iwl_ucode_header {
+       __le32 ver;     /* major/minor/API/serial */
+       struct {
+               __le32 inst_size;       /* bytes of runtime code */
+               __le32 data_size;       /* bytes of runtime data */
+               __le32 init_size;       /* bytes of init code */
+               __le32 init_data_size;  /* bytes of init data */
+               __le32 boot_size;       /* bytes of bootstrap code */
+               u8 data[0];             /* in same order as sizes */
+       } v1;
+};
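Because the v1 layout stores the five section sizes followed by the section bodies "in same order as sizes", the sections can be located by pointer arithmetic. A hedged sketch (not the driver's loader) that derives only the runtime code and data pointers:

    /* Illustrative only: walk the v1 layout described above. */
    static void example_locate_v1_sections(const struct iwl_ucode_header *hdr,
                                           const u8 **inst, const u8 **data)
    {
            const u8 *p = hdr->v1.data;                 /* section bodies start here */

            *inst = p;                                  /* runtime code comes first */
            *data = p + le32_to_cpu(hdr->v1.inst_size); /* then runtime data */
            /* init code, init data and bootstrap code follow in the same order */
    }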
+
+struct iwl4965_ibss_seq {
+       u8 mac[ETH_ALEN];
+       u16 seq_num;
+       u16 frag_num;
+       unsigned long packet_time;
+       struct list_head list;
+};
+
+struct iwl_sensitivity_ranges {
+       u16 min_nrg_cck;
+       u16 max_nrg_cck;
+
+       u16 nrg_th_cck;
+       u16 nrg_th_ofdm;
+
+       u16 auto_corr_min_ofdm;
+       u16 auto_corr_min_ofdm_mrc;
+       u16 auto_corr_min_ofdm_x1;
+       u16 auto_corr_min_ofdm_mrc_x1;
+
+       u16 auto_corr_max_ofdm;
+       u16 auto_corr_max_ofdm_mrc;
+       u16 auto_corr_max_ofdm_x1;
+       u16 auto_corr_max_ofdm_mrc_x1;
+
+       u16 auto_corr_max_cck;
+       u16 auto_corr_max_cck_mrc;
+       u16 auto_corr_min_cck;
+       u16 auto_corr_min_cck_mrc;
+
+       u16 barker_corr_th_min;
+       u16 barker_corr_th_min_mrc;
+       u16 nrg_th_cca;
+};
+
+
+#define KELVIN_TO_CELSIUS(x) ((x)-273)
+#define CELSIUS_TO_KELVIN(x) ((x)+273)
+
+
+/**
+ * struct iwl_hw_params
+ * @max_txq_num: Max # Tx queues supported
+ * @dma_chnl_num: Number of Tx DMA/FIFO channels
+ * @scd_bc_tbls_size: size of scheduler byte count tables
+ * @tfd_size: TFD size
+ * @tx/rx_chains_num: Number of TX/RX chains
+ * @valid_tx/rx_ant: usable antennas
+ * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
+ * @max_rxq_log: Log-base-2 of max_rxq_size
+ * @rx_page_order: Rx buffer page order
+ * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
+ * @max_stations:
+ * @ht40_channel: bitmap of bands where 40 MHz channels are possible:
+ * BIT(IEEE80211_BAND_2GHZ) and/or BIT(IEEE80211_BAND_5GHZ)
+ * @sw_crypto: 0 for hw, 1 for sw
+ * @max_xxx_size: for ucode uses
+ * @ct_kill_threshold: temperature threshold
+ * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
+ * @sens: range of sensitivity values (struct iwl_sensitivity_ranges)
+ */
+struct iwl_hw_params {
+       u8 max_txq_num;
+       u8 dma_chnl_num;
+       u16 scd_bc_tbls_size;
+       u32 tfd_size;
+       u8  tx_chains_num;
+       u8  rx_chains_num;
+       u8  valid_tx_ant;
+       u8  valid_rx_ant;
+       u16 max_rxq_size;
+       u16 max_rxq_log;
+       u32 rx_page_order;
+       u32 rx_wrt_ptr_reg;
+       u8  max_stations;
+       u8  ht40_channel;
+       u8  max_beacon_itrvl;   /* in 1024 ms */
+       u32 max_inst_size;
+       u32 max_data_size;
+       u32 max_bsm_size;
+       u32 ct_kill_threshold; /* value in hw-dependent units */
+       u16 beacon_time_tsf_bits;
+       const struct iwl_sensitivity_ranges *sens;
+};
+
+
+/******************************************************************************
+ *
+ * Functions implemented in core module which are forward declared here
+ * for use by iwl-[4-5].c
+ *
+ * NOTE:  The implementation of these functions are not hardware specific
+ * which is why they are in the core module files.
+ *
+ * Naming convention --
+ * iwl_         <-- Is part of iwlwifi
+ * iwlXXXX_     <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
+ * iwl4965_bg_      <-- Called from work queue context
+ * iwl4965_mac_     <-- mac80211 callback
+ *
+ ****************************************************************************/
+extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
+extern const u8 iwlegacy_bcast_addr[ETH_ALEN];
+extern int iwl_legacy_queue_space(const struct iwl_queue *q);
+static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i)
+{
+       return q->write_ptr >= q->read_ptr ?
+               (i >= q->read_ptr && i < q->write_ptr) :
+               !(i < q->read_ptr && i >= q->write_ptr);
+}
+
+
+static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index,
+                                                               int is_huge)
+{
+       /*
+        * This is for the init calibration result and scan commands, which
+        * require a buffer larger than TFD_MAX_PAYLOAD_SIZE: they use the
+        * big buffer at the end of the command array.
+        */
+       if (is_huge)
+               return q->n_window;     /* must be power of 2 */
+
+       /* Otherwise, use normal size buffers */
+       return index & (q->n_window - 1);
+}
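A hedged usage sketch: normal commands wrap within the queue window, while the one oversized (scan / init-calibration) command always maps to the extra slot at n_window. The locals here are hypothetical:

    /* Illustrative only. */
    int idx = iwl_legacy_get_cmd_index(&txq->q, txq->q.write_ptr,
                                       cmd->flags & CMD_SIZE_HUGE);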
+
+
+struct iwl_dma_ptr {
+       dma_addr_t dma;
+       void *addr;
+       size_t size;
+};
+
+#define IWL_OPERATION_MODE_AUTO     0
+#define IWL_OPERATION_MODE_HT_ONLY  1
+#define IWL_OPERATION_MODE_MIXED    2
+#define IWL_OPERATION_MODE_20MHZ    3
+
+#define IWL_TX_CRC_SIZE 4
+#define IWL_TX_DELIMITER_SIZE 4
+
+#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
+
+/* Sensitivity and chain noise calibration */
+#define INITIALIZATION_VALUE           0xFFFF
+#define IWL4965_CAL_NUM_BEACONS                20
+#define IWL_CAL_NUM_BEACONS            16
+#define MAXIMUM_ALLOWED_PATHLOSS       15
+
+#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
+
+#define MAX_FA_OFDM  50
+#define MIN_FA_OFDM  5
+#define MAX_FA_CCK   50
+#define MIN_FA_CCK   5
+
+#define AUTO_CORR_STEP_OFDM       1
+
+#define AUTO_CORR_STEP_CCK     3
+#define AUTO_CORR_MAX_TH_CCK   160
+
+#define NRG_DIFF               2
+#define NRG_STEP_CCK           2
+#define NRG_MARGIN             8
+#define MAX_NUMBER_CCK_NO_FA 100
+
+#define AUTO_CORR_CCK_MIN_VAL_DEF    (125)
+
+#define CHAIN_A             0
+#define CHAIN_B             1
+#define CHAIN_C             2
+#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
+#define ALL_BAND_FILTER                        0xFF00
+#define IN_BAND_FILTER                 0xFF
+#define MIN_AVERAGE_NOISE_MAX_VALUE    0xFFFFFFFF
+
+#define NRG_NUM_PREV_STAT_L     20
+#define NUM_RX_CHAINS           3
+
+enum iwl4965_false_alarm_state {
+       IWL_FA_TOO_MANY = 0,
+       IWL_FA_TOO_FEW = 1,
+       IWL_FA_GOOD_RANGE = 2,
+};
+
+enum iwl4965_chain_noise_state {
+       IWL_CHAIN_NOISE_ALIVE = 0,  /* must be 0 */
+       IWL_CHAIN_NOISE_ACCUMULATE,
+       IWL_CHAIN_NOISE_CALIBRATED,
+       IWL_CHAIN_NOISE_DONE,
+};
+
+enum iwl4965_calib_enabled_state {
+       IWL_CALIB_DISABLED = 0,  /* must be 0 */
+       IWL_CALIB_ENABLED = 1,
+};
+
+/*
+ * enum iwl_calib
+ * defines the order in which results of initial calibrations
+ * should be sent to the runtime uCode
+ */
+enum iwl_calib {
+       IWL_CALIB_MAX,
+};
+
+/* Opaque calibration results */
+struct iwl_calib_result {
+       void *buf;
+       size_t buf_len;
+};
+
+enum ucode_type {
+       UCODE_NONE = 0,
+       UCODE_INIT,
+       UCODE_RT
+};
+
+/* Sensitivity calib data */
+struct iwl_sensitivity_data {
+       u32 auto_corr_ofdm;
+       u32 auto_corr_ofdm_mrc;
+       u32 auto_corr_ofdm_x1;
+       u32 auto_corr_ofdm_mrc_x1;
+       u32 auto_corr_cck;
+       u32 auto_corr_cck_mrc;
+
+       u32 last_bad_plcp_cnt_ofdm;
+       u32 last_fa_cnt_ofdm;
+       u32 last_bad_plcp_cnt_cck;
+       u32 last_fa_cnt_cck;
+
+       u32 nrg_curr_state;
+       u32 nrg_prev_state;
+       u32 nrg_value[10];
+       u8  nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
+       u32 nrg_silence_ref;
+       u32 nrg_energy_idx;
+       u32 nrg_silence_idx;
+       u32 nrg_th_cck;
+       s32 nrg_auto_corr_silence_diff;
+       u32 num_in_cck_no_fa;
+       u32 nrg_th_ofdm;
+
+       u16 barker_corr_th_min;
+       u16 barker_corr_th_min_mrc;
+       u16 nrg_th_cca;
+};
+
+/* Chain noise (differential Rx gain) calib data */
+struct iwl_chain_noise_data {
+       u32 active_chains;
+       u32 chain_noise_a;
+       u32 chain_noise_b;
+       u32 chain_noise_c;
+       u32 chain_signal_a;
+       u32 chain_signal_b;
+       u32 chain_signal_c;
+       u16 beacon_count;
+       u8 disconn_array[NUM_RX_CHAINS];
+       u8 delta_gain_code[NUM_RX_CHAINS];
+       u8 radio_write;
+       u8 state;
+};
+
+#define        EEPROM_SEM_TIMEOUT 10           /* milliseconds */
+#define EEPROM_SEM_RETRY_LIMIT 1000    /* number of attempts (not time) */
+
+#define IWL_TRAFFIC_ENTRIES    (256)
+#define IWL_TRAFFIC_ENTRY_SIZE  (64)
+
+enum {
+       MEASUREMENT_READY = (1 << 0),
+       MEASUREMENT_ACTIVE = (1 << 1),
+};
+
+/* interrupt statistics */
+struct isr_statistics {
+       u32 hw;
+       u32 sw;
+       u32 err_code;
+       u32 sch;
+       u32 alive;
+       u32 rfkill;
+       u32 ctkill;
+       u32 wakeup;
+       u32 rx;
+       u32 rx_handlers[REPLY_MAX];
+       u32 tx;
+       u32 unhandled;
+};
+
+/* management statistics */
+enum iwl_mgmt_stats {
+       MANAGEMENT_ASSOC_REQ = 0,
+       MANAGEMENT_ASSOC_RESP,
+       MANAGEMENT_REASSOC_REQ,
+       MANAGEMENT_REASSOC_RESP,
+       MANAGEMENT_PROBE_REQ,
+       MANAGEMENT_PROBE_RESP,
+       MANAGEMENT_BEACON,
+       MANAGEMENT_ATIM,
+       MANAGEMENT_DISASSOC,
+       MANAGEMENT_AUTH,
+       MANAGEMENT_DEAUTH,
+       MANAGEMENT_ACTION,
+       MANAGEMENT_MAX,
+};
+/* control statistics */
+enum iwl_ctrl_stats {
+       CONTROL_BACK_REQ =  0,
+       CONTROL_BACK,
+       CONTROL_PSPOLL,
+       CONTROL_RTS,
+       CONTROL_CTS,
+       CONTROL_ACK,
+       CONTROL_CFEND,
+       CONTROL_CFENDACK,
+       CONTROL_MAX,
+};
+
+struct traffic_stats {
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+       u32 mgmt[MANAGEMENT_MAX];
+       u32 ctrl[CONTROL_MAX];
+       u32 data_cnt;
+       u64 data_bytes;
+#endif
+};
+
+/*
+ * iwl_switch_rxon: "channel switch" structure
+ *
+ * @switch_in_progress: channel switch in progress
+ * @channel: new channel
+ */
+struct iwl_switch_rxon {
+       bool switch_in_progress;
+       __le16 channel;
+};
+
+/*
+ * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
+ * to perform continuous uCode event logging operation if enabled
+ */
+#define UCODE_TRACE_PERIOD (100)
+
+/*
+ * iwl_event_log: current uCode event log position
+ *
+ * @ucode_trace: enable/disable ucode continuous trace timer
+ * @num_wraps: how many times the event buffer wraps
+ * @next_entry:  the entry just before the next one that uCode would fill
+ * @non_wraps_count: counter for no wrap detected when dumping uCode events
+ * @wraps_once_count: counter for wrap once detected when dumping uCode events
+ * @wraps_more_count: counter for wrap more than once detected
+ *                   when dumping uCode events
+ */
+struct iwl_event_log {
+       bool ucode_trace;
+       u32 num_wraps;
+       u32 next_entry;
+       int non_wraps_count;
+       int wraps_once_count;
+       int wraps_more_count;
+};
+
+/*
+ * Host interrupt timeout values, used to set the interrupt coalescing timer.
+ * CSR_INT_COALESCING is an 8-bit register in 32-usec units.
+ *
+ * default interrupt coalescing timer is 64 x 32 = 2048 usecs
+ * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
+ */
+#define IWL_HOST_INT_TIMEOUT_MAX       (0xFF)
+#define IWL_HOST_INT_TIMEOUT_DEF       (0x40)
+#define IWL_HOST_INT_TIMEOUT_MIN       (0x0)
+#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
+#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
+#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
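Since CSR_INT_COALESCING counts 32-usec ticks, the defaults above correspond to 0x40 * 32 = 2048 usecs and 0x10 * 32 = 512 usecs. A small hedged helper for the conversion (name invented, not part of the driver):

    /* Illustrative only: microseconds -> 8-bit register value in 32-usec ticks. */
    static u8 example_usecs_to_coalesce_ticks(u32 usecs)
    {
            u32 ticks = usecs / 32;

            if (ticks > IWL_HOST_INT_TIMEOUT_MAX)
                    ticks = IWL_HOST_INT_TIMEOUT_MAX;
            return ticks;
    }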
+
+/*
+ * This is the threshold value of plcp error rate per 100mSecs.  It is
+ * used to set and check for the validity of plcp_delta.
+ */
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (1)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50)
+#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF    (100)
+#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF        (200)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE     (0)
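The thresholds above bound the tolerated PLCP error rate per 100 ms, with 0 disabling the check. A hedged sketch of the comparison, with invented names:

    /* Illustrative only: plcp_delta errors accumulated over 'msecs' of elapsed
     * time, checked against a per-100-ms threshold; 0 disables the check. */
    static bool example_plcp_rate_exceeded(u32 plcp_delta, u32 msecs, u32 threshold)
    {
            if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE || msecs == 0)
                    return false;
            return plcp_delta * 100 > threshold * msecs;
    }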
+
+#define IWL_DELAY_NEXT_FORCE_RF_RESET  (HZ*3)
+#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
+
+/* TX queue watchdog timeouts in mSecs */
+#define IWL_DEF_WD_TIMEOUT     (2000)
+#define IWL_LONG_WD_TIMEOUT    (10000)
+#define IWL_MAX_WD_TIMEOUT     (120000)
+
+enum iwl_reset {
+       IWL_RF_RESET = 0,
+       IWL_FW_RESET,
+       IWL_MAX_FORCE_RESET,
+};
+
+struct iwl_force_reset {
+       int reset_request_count;
+       int reset_success_count;
+       int reset_reject_count;
+       unsigned long reset_duration;
+       unsigned long last_force_reset_jiffies;
+};
+
+/* extend beacon time format bit shifting  */
+/*
+ * for _3945 devices
+ * bits 31:24 - extended
+ * bits 23:0  - interval
+ */
+#define IWL3945_EXT_BEACON_TIME_POS    24
+/*
+ * for _4965 devices
+ * bits 31:22 - extended
+ * bits 21:0  - interval
+ */
+#define IWL4965_EXT_BEACON_TIME_POS    22
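The two _EXT_BEACON_TIME_POS values split a 32-bit beacon time into an "extended" part and an "interval" part at device-specific bit positions. A hedged sketch using the 4965 layout (bits 31:22 / 21:0); the macro and helper names are invented:

    /* Illustrative only: pack and unpack with the 4965 split. */
    #define EXAMPLE_BEACON_INTERVAL_MASK ((1U << IWL4965_EXT_BEACON_TIME_POS) - 1)

    static u32 example_pack_beacon_time(u32 extended, u32 interval)
    {
            return (extended << IWL4965_EXT_BEACON_TIME_POS) |
                   (interval & EXAMPLE_BEACON_INTERVAL_MASK);
    }

    static u32 example_beacon_interval(u32 packed)
    {
            return packed & EXAMPLE_BEACON_INTERVAL_MASK;
    }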
+
+enum iwl_rxon_context_id {
+       IWL_RXON_CTX_BSS,
+
+       NUM_IWL_RXON_CTX
+};
+
+struct iwl_rxon_context {
+       struct ieee80211_vif *vif;
+
+       const u8 *ac_to_fifo;
+       const u8 *ac_to_queue;
+       u8 mcast_queue;
+
+       /*
+        * We could use the vif to indicate active, but we
+        * also need it to be active during disabling when
+        * we already removed the vif for type setting.
+        */
+       bool always_active, is_active;
+
+       bool ht_need_multiple_chains;
+
+       enum iwl_rxon_context_id ctxid;
+
+       u32 interface_modes, exclusive_interface_modes;
+       u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
+
+       /*
+        * We declare this const so it can only be
+        * changed via explicit cast within the
+        * routines that actually update the physical
+        * hardware.
+        */
+       const struct iwl_legacy_rxon_cmd active;
+       struct iwl_legacy_rxon_cmd staging;
+
+       struct iwl_rxon_time_cmd timing;
+
+       struct iwl_qos_info qos_data;
+
+       u8 bcast_sta_id, ap_sta_id;
+
+       u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
+       u8 qos_cmd;
+       u8 wep_key_cmd;
+
+       struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
+       u8 key_mapping_keys;
+
+       __le32 station_flags;
+
+       struct {
+               bool non_gf_sta_present;
+               u8 protection;
+               bool enabled, is_40mhz;
+               u8 extension_chan_offset;
+       } ht;
+};
+
+struct iwl_priv {
+
+       /* ieee device used by generic ieee processing code */
+       struct ieee80211_hw *hw;
+       struct ieee80211_channel *ieee_channels;
+       struct ieee80211_rate *ieee_rates;
+       struct iwl_cfg *cfg;
+
+       /* temporary frame storage list */
+       struct list_head free_frames;
+       int frames_count;
+
+       enum ieee80211_band band;
+       int alloc_rxb_page;
+
+       void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
+                                      struct iwl_rx_mem_buffer *rxb);
+
+       struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+
+       /* spectrum measurement report caching */
+       struct iwl_spectrum_notification measure_report;
+       u8 measurement_status;
+
+       /* ucode beacon time */
+       u32 ucode_beacon_time;
+       int missed_beacon_threshold;
+
+       /* track IBSS manager (last beacon) status */
+       u32 ibss_manager;
+
+       /* storing the jiffies when the plcp error rate is received */
+       unsigned long plcp_jiffies;
+
+       /* force reset */
+       struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
+
+       /* we allocate array of iwl_channel_info for NIC's valid channels.
+        *    Access via channel # using indirect index array */
+       struct iwl_channel_info *channel_info;  /* channel info array */
+       u8 channel_count;       /* # of channels */
+
+       /* thermal calibration */
+       s32 temperature;        /* degrees Kelvin */
+       s32 last_temperature;
+
+       /* init calibration results */
+       struct iwl_calib_result calib_results[IWL_CALIB_MAX];
+
+       /* Scan related variables */
+       unsigned long scan_start;
+       unsigned long scan_start_tsf;
+       void *scan_cmd;
+       enum ieee80211_band scan_band;
+       struct cfg80211_scan_request *scan_request;
+       struct ieee80211_vif *scan_vif;
+       bool is_internal_short_scan;
+       u8 scan_tx_ant[IEEE80211_NUM_BANDS];
+       u8 mgmt_tx_ant;
+
+       /* spinlock */
+       spinlock_t lock;        /* protect general shared data */
+       spinlock_t hcmd_lock;   /* protect hcmd */
+       spinlock_t reg_lock;    /* protect hw register access */
+       struct mutex mutex;
+       struct mutex sync_cmd_mutex; /* enable serialization of sync commands */
+
+       /* basic pci-network driver stuff */
+       struct pci_dev *pci_dev;
+
+       /* pci hardware address support */
+       void __iomem *hw_base;
+       u32  hw_rev;
+       u32  hw_wa_rev;
+       u8   rev_id;
+
+       /* microcode/device supports multiple contexts */
+       u8 valid_contexts;
+
+       /* command queue number */
+       u8 cmd_queue;
+
+       /* max number of station keys */
+       u8 sta_key_max_num;
+
+       /* EEPROM MAC addresses */
+       struct mac_address addresses[1];
+
+       /* uCode images, save to reload in case of failure */
+       int fw_index;                   /* firmware we're trying to load */
+       u32 ucode_ver;                  /* version of ucode, copy of
+                                          iwl_ucode.ver */
+       struct fw_desc ucode_code;      /* runtime inst */
+       struct fw_desc ucode_data;      /* runtime data original */
+       struct fw_desc ucode_data_backup;       /* runtime data save/restore */
+       struct fw_desc ucode_init;      /* initialization inst */
+       struct fw_desc ucode_init_data; /* initialization data */
+       struct fw_desc ucode_boot;      /* bootstrap inst */
+       enum ucode_type ucode_type;
+       u8 ucode_write_complete;        /* the image write is complete */
+       char firmware_name[25];
+
+       struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
+
+       struct iwl_switch_rxon switch_rxon;
+
+       /* 1st responses from initialize and runtime uCode images.
+        * _4965's initialize alive response contains some calibration data. */
+       struct iwl_init_alive_resp card_alive_init;
+       struct iwl_alive_resp card_alive;
+
+       u16 active_rate;
+
+       u8 start_calib;
+       struct iwl_sensitivity_data sensitivity_data;
+       struct iwl_chain_noise_data chain_noise_data;
+       __le16 sensitivity_tbl[HD_TABLE_SIZE];
+
+       struct iwl_ht_config current_ht_config;
+
+       /* Rate scaling data */
+       u8 retry_rate;
+
+       wait_queue_head_t wait_command_queue;
+
+       int activity_timer_active;
+
+       /* Rx and Tx DMA processing queues */
+       struct iwl_rx_queue rxq;
+       struct iwl_tx_queue *txq;
+       unsigned long txq_ctx_active_msk;
+       struct iwl_dma_ptr  kw; /* keep warm address */
+       struct iwl_dma_ptr  scd_bc_tbls;
+
+       u32 scd_base_addr;      /* scheduler sram base address */
+
+       unsigned long status;
+
+       /* counts mgmt, ctl, and data packets */
+       struct traffic_stats tx_stats;
+       struct traffic_stats rx_stats;
+
+       /* counts interrupts */
+       struct isr_statistics isr_stats;
+
+       struct iwl_power_mgr power_data;
+
+       /* context information */
+       u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
+
+       /* station table variables */
+
+       /* Note: if lock and sta_lock are needed, lock must be acquired first */
+       spinlock_t sta_lock;
+       int num_stations;
+       struct iwl_station_entry stations[IWL_STATION_COUNT];
+       unsigned long ucode_key_table;
+
+       /* queue refcounts */
+#define IWL_MAX_HW_QUEUES      32
+       unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
+       /* for each AC */
+       atomic_t queue_stop_count[4];
+
+       /* Indication if ieee80211_ops->open has been called */
+       u8 is_open;
+
+       u8 mac80211_registered;
+
+       /* eeprom -- this is in the card's little endian byte order */
+       u8 *eeprom;
+       struct iwl_eeprom_calib_info *calib_info;
+
+       enum nl80211_iftype iw_mode;
+
+       /* Last Rx'd beacon timestamp */
+       u64 timestamp;
+
+       union {
+#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
+               struct {
+                       void *shared_virt;
+                       dma_addr_t shared_phys;
+
+                       struct delayed_work thermal_periodic;
+                       struct delayed_work rfkill_poll;
+
+                       struct iwl3945_notif_statistics statistics;
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+                       struct iwl3945_notif_statistics accum_statistics;
+                       struct iwl3945_notif_statistics delta_statistics;
+                       struct iwl3945_notif_statistics max_delta;
+#endif
+
+                       u32 sta_supp_rates;
+                       int last_rx_rssi;       /* From Rx packet statistics */
+
+                       /* Rx'd packet timing information */
+                       u32 last_beacon_time;
+                       u64 last_tsf;
+
+                       /*
+                        * each calibration channel group in the
+                        * EEPROM has a derived clip setting for
+                        * each rate.
+                        */
+                       const struct iwl3945_clip_group clip_groups[5];
+
+               } _3945;
+#endif
+#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
+               struct {
+                       /*
+                        * number of TIDs with aggregation (AGG) enabled;
+                        * 0 means no aggregation
+                        */
+                       u8 agg_tids_count;
+
+                       struct iwl_rx_phy_res last_phy_res;
+                       bool last_phy_res_valid;
+
+                       struct completion firmware_loading_complete;
+
+                       /*
+                        * chain noise reset and gain commands are the
+                        * two extra calibration commands that follow the
+                        * standard PHY calibration commands
+                        */
+                       u8 phy_calib_chain_noise_reset_cmd;
+                       u8 phy_calib_chain_noise_gain_cmd;
+
+                       struct iwl_notif_statistics statistics;
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+                       struct iwl_notif_statistics accum_statistics;
+                       struct iwl_notif_statistics delta_statistics;
+                       struct iwl_notif_statistics max_delta;
+#endif
+
+               } _4965;
+#endif
+       };
+
+       struct iwl_hw_params hw_params;
+
+       u32 inta_mask;
+
+       struct workqueue_struct *workqueue;
+
+       struct work_struct restart;
+       struct work_struct scan_completed;
+       struct work_struct rx_replenish;
+       struct work_struct abort_scan;
+
+       struct iwl_rxon_context *beacon_ctx;
+       struct sk_buff *beacon_skb;
+
+       struct work_struct start_internal_scan;
+       struct work_struct tx_flush;
+
+       struct tasklet_struct irq_tasklet;
+
+       struct delayed_work init_alive_start;
+       struct delayed_work alive_start;
+       struct delayed_work scan_check;
+
+       /* TX Power */
+       s8 tx_power_user_lmt;
+       s8 tx_power_device_lmt;
+       s8 tx_power_next;
+
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       /* debugging info */
+       u32 debug_level; /* per device debugging will override global
+                           iwlegacy_debug_level if set */
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+       /* debugfs */
+       u16 tx_traffic_idx;
+       u16 rx_traffic_idx;
+       u8 *tx_traffic;
+       u8 *rx_traffic;
+       struct dentry *debugfs_dir;
+       u32 dbgfs_sram_offset, dbgfs_sram_len;
+       bool disable_ht40;
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
+
+       struct work_struct txpower_work;
+       u32 disable_sens_cal;
+       u32 disable_chain_noise_cal;
+       u32 disable_tx_power_cal;
+       struct work_struct run_time_calib_work;
+       struct timer_list statistics_periodic;
+       struct timer_list ucode_trace;
+       struct timer_list watchdog;
+       bool hw_ready;
+
+       struct iwl_event_log event_log;
+
+       struct led_classdev led;
+       unsigned long blink_on, blink_off;
+       bool led_registered;
+}; /*iwl_priv */
+
+static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
+{
+       set_bit(txq_id, &priv->txq_ctx_active_msk);
+}
+
+static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
+{
+       clear_bit(txq_id, &priv->txq_ctx_active_msk);
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+/*
+ * iwl_legacy_get_debug_level: Return active debug level for device
+ *
+ * Using sysfs it is possible to set per device debug level. This debug
+ * level will be used if set, otherwise the global debug level which can be
+ * set via module parameter is used.
+ */
+static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
+{
+       if (priv->debug_level)
+               return priv->debug_level;
+       else
+               return iwlegacy_debug_level;
+}
+#else
+static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
+{
+       return iwlegacy_debug_level;
+}
+#endif
+
+
+static inline struct ieee80211_hdr *
+iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv,
+                                                int txq_id, int idx)
+{
+       if (priv->txq[txq_id].txb[idx].skb)
+               return (struct ieee80211_hdr *)priv->txq[txq_id].
+                               txb[idx].skb->data;
+       return NULL;
+}
+
+static inline struct iwl_rxon_context *
+iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif)
+{
+       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+       return vif_priv->ctx;
+}
+
+#define for_each_context(priv, ctx)                            \
+       for (ctx = &priv->contexts[IWL_RXON_CTX_BSS];           \
+            ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++)    \
+               if (priv->valid_contexts & BIT(ctx->ctxid))
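for_each_context() visits only the contexts whose bit is set in priv->valid_contexts. A hedged usage sketch, assuming a priv pointer is in scope:

    /* Illustrative only: count the currently active RXON contexts. */
    struct iwl_rxon_context *ctx;
    int active = 0;

    for_each_context(priv, ctx)
            if (ctx->is_active)
                    active++;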
+
+static inline int iwl_legacy_is_associated(struct iwl_priv *priv,
+                                   enum iwl_rxon_context_id ctxid)
+{
+       return (priv->contexts[ctxid].active.filter_flags &
+                       RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv)
+{
+       return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
+}
+
+static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx)
+{
+       return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info)
+{
+       if (ch_info == NULL)
+               return 0;
+       return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
+}
+
+static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info)
+{
+       return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
+}
+
+static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info)
+{
+       return ch_info->band == IEEE80211_BAND_5GHZ;
+}
+
+static inline int
+iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
+{
+       return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
+}
+
+static inline void
+__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
+{
+       __free_pages(page, priv->hw_params.rx_page_order);
+       priv->alloc_rxb_page--;
+}
+
+static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page)
+{
+       free_pages(page, priv->hw_params.rx_page_order);
+       priv->alloc_rxb_page--;
+}
+#endif                         /* __iwl_legacy_dev_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
new file mode 100644 (file)
index 0000000..080b852
--- /dev/null
@@ -0,0 +1,45 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+
+/* sparse doesn't like tracepoint macros */
+#ifndef __CHECKER__
+#include "iwl-dev.h"
+
+#define CREATE_TRACE_POINTS
+#include "iwl-devtrace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_event);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_cont_event);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_wrap_event);
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
new file mode 100644 (file)
index 0000000..9612aa0
--- /dev/null
@@ -0,0 +1,270 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#define __IWLWIFI_LEGACY_DEVICE_TRACE
+
+#include <linux/tracepoint.h>
+
+#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif
+
+
+#define PRIV_ENTRY     __field(struct iwl_priv *, priv)
+#define PRIV_ASSIGN    (__entry->priv = priv)
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_legacy_io
+
+TRACE_EVENT(iwlwifi_legacy_dev_ioread32,
+       TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
+       TP_ARGS(priv, offs, val),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+               __field(u32, offs)
+               __field(u32, val)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->offs = offs;
+               __entry->val = val;
+       ),
+       TP_printk("[%p] read io[%#x] = %#x", __entry->priv,
+                                       __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_iowrite8,
+       TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
+       TP_ARGS(priv, offs, val),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+               __field(u32, offs)
+               __field(u8, val)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->offs = offs;
+               __entry->val = val;
+       ),
+       TP_printk("[%p] write io[%#x] = %#x", __entry->priv,
+                                       __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_iowrite32,
+       TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
+       TP_ARGS(priv, offs, val),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+               __field(u32, offs)
+               __field(u32, val)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->offs = offs;
+               __entry->val = val;
+       ),
+       TP_printk("[%p] write io[%#x] = %#x", __entry->priv,
+                                       __entry->offs, __entry->val)
+);
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_legacy_ucode
+
+TRACE_EVENT(iwlwifi_legacy_dev_ucode_cont_event,
+       TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+       TP_ARGS(priv, time, data, ev),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+
+               __field(u32, time)
+               __field(u32, data)
+               __field(u32, ev)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->time = time;
+               __entry->data = data;
+               __entry->ev = ev;
+       ),
+       TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
+                 __entry->priv, __entry->time, __entry->data, __entry->ev)
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_ucode_wrap_event,
+       TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
+       TP_ARGS(priv, wraps, n_entry, p_entry),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+
+               __field(u32, wraps)
+               __field(u32, n_entry)
+               __field(u32, p_entry)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->wraps = wraps;
+               __entry->n_entry = n_entry;
+               __entry->p_entry = p_entry;
+       ),
+       TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X",
+                 __entry->priv, __entry->wraps, __entry->n_entry,
+                 __entry->p_entry)
+);
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi
+
+TRACE_EVENT(iwlwifi_legacy_dev_hcmd,
+       TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
+       TP_ARGS(priv, hcmd, len, flags),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+               __dynamic_array(u8, hcmd, len)
+               __field(u32, flags)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               memcpy(__get_dynamic_array(hcmd), hcmd, len);
+               __entry->flags = flags;
+       ),
+       TP_printk("[%p] hcmd %#.2x (%ssync)",
+                 __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
+                 __entry->flags & CMD_ASYNC ? "a" : "")
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_rx,
+       TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
+       TP_ARGS(priv, rxbuf, len),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+               __dynamic_array(u8, rxbuf, len)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
+       ),
+       TP_printk("[%p] RX cmd %#.2x",
+                 __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_tx,
+       TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
+                void *buf0, size_t buf0_len,
+                void *buf1, size_t buf1_len),
+       TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+
+               __field(size_t, framelen)
+               __dynamic_array(u8, tfd, tfdlen)
+
+               /*
+                * Do not insert between or below these items,
+                * we want to keep the frame together (except
+                * for the possible padding).
+                */
+               __dynamic_array(u8, buf0, buf0_len)
+               __dynamic_array(u8, buf1, buf1_len)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->framelen = buf0_len + buf1_len;
+               memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
+               memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
+               memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
+       ),
+       TP_printk("[%p] TX %.2x (%zu bytes)",
+                 __entry->priv,
+                 ((u8 *)__get_dynamic_array(buf0))[0],
+                 __entry->framelen)
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_ucode_error,
+       TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
+                u32 data1, u32 data2, u32 line, u32 blink1,
+                u32 blink2, u32 ilink1, u32 ilink2),
+       TP_ARGS(priv, desc, time, data1, data2, line,
+               blink1, blink2, ilink1, ilink2),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+               __field(u32, desc)
+               __field(u32, time)
+               __field(u32, data1)
+               __field(u32, data2)
+               __field(u32, line)
+               __field(u32, blink1)
+               __field(u32, blink2)
+               __field(u32, ilink1)
+               __field(u32, ilink2)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->desc = desc;
+               __entry->time = time;
+               __entry->data1 = data1;
+               __entry->data2 = data2;
+               __entry->line = line;
+               __entry->blink1 = blink1;
+               __entry->blink2 = blink2;
+               __entry->ilink1 = ilink1;
+               __entry->ilink2 = ilink2;
+       ),
+       TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
+                 "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
+                 __entry->priv, __entry->desc, __entry->time, __entry->data1,
+                 __entry->data2, __entry->line, __entry->blink1,
+                 __entry->blink2, __entry->ilink1, __entry->ilink2)
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_ucode_event,
+       TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+       TP_ARGS(priv, time, data, ev),
+       TP_STRUCT__entry(
+               PRIV_ENTRY
+
+               __field(u32, time)
+               __field(u32, data)
+               __field(u32, ev)
+       ),
+       TP_fast_assign(
+               PRIV_ASSIGN;
+               __entry->time = time;
+               __entry->data = data;
+               __entry->ev = ev;
+       ),
+       TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
+                 __entry->priv, __entry->time, __entry->data, __entry->ev)
+);
+#endif /* __IWLWIFI_LEGACY_DEVICE_TRACE */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE iwl-devtrace
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
new file mode 100644 (file)
index 0000000..04c5648
--- /dev/null
@@ -0,0 +1,561 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-commands.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-debug.h"
+#include "iwl-eeprom.h"
+#include "iwl-io.h"
+
+/************************** EEPROM BANDS ****************************
+ *
+ * The iwlegacy_eeprom_band definitions below provide the mapping from the
+ * EEPROM contents to the specific channel number supported for each
+ * band.
+ *
+ * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
+ * definition below maps to physical channel 42 in the 5.2GHz spectrum.
+ * The specific geography and calibration information for that channel
+ * is contained in the eeprom map itself.
+ *
+ * During init, we copy the eeprom information and channel map
+ * information into priv->channel_info_24/52 and priv->channel_map_24/52
+ *
+ * channel_map_24/52 provides the index in the channel_info array for a
+ * given channel.  We have to have two separate maps as there is channel
+ * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
+ * band_2
+ *
+ * A value of 0xff stored in the channel_map indicates that the channel
+ * is not supported by the hardware at all.
+ *
+ * A value of 0xfe in the channel_map indicates that the channel is not
+ * valid for Tx with the current hardware.  This means that
+ * while the system can tune and receive on a given channel, it may not
+ * be able to associate or transmit any frames on that
+ * channel.  There is no corresponding channel information for that
+ * entry.
+ *
+ *********************************************************************/
+
+/* 2.4 GHz */
+const u8 iwlegacy_eeprom_band_1[14] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+};
+
+/* 5.2 GHz bands */
+static const u8 iwlegacy_eeprom_band_2[] = {   /* 4915-5080MHz */
+       183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
+};
+
+static const u8 iwlegacy_eeprom_band_3[] = {   /* 5170-5320MHz */
+       34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+};
+
+static const u8 iwlegacy_eeprom_band_4[] = {   /* 5500-5700MHz */
+       100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+};
+
+static const u8 iwlegacy_eeprom_band_5[] = {   /* 5725-5825MHz */
+       145, 149, 153, 157, 161, 165
+};
+
+static const u8 iwlegacy_eeprom_band_6[] = {       /* 2.4 ht40 channel */
+       1, 2, 3, 4, 5, 6, 7
+};
+
+static const u8 iwlegacy_eeprom_band_7[] = {       /* 5.2 ht40 channel */
+       36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
+};
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+******************************************************************************/
+
+static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv)
+{
+       u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+       int ret = 0;
+
+       IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
+       switch (gp) {
+       case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
+       case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
+               break;
+       default:
+               IWL_ERR(priv, "bad EEPROM signature, "
+                       "EEPROM_GP=0x%08x\n", gp);
+               ret = -ENOENT;
+               break;
+       }
+       return ret;
+}
+
+const u8
+*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
+{
+       BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
+       return &priv->eeprom[offset];
+}
+EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr);
+
+u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset)
+{
+       if (!priv->eeprom)
+               return 0;
+       return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
+}
+EXPORT_SYMBOL(iwl_legacy_eeprom_query16);
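+
+/*
+ * For example (illustrative only), once iwl_legacy_eeprom_init() has
+ * populated priv->eeprom, individual fields can be read through the
+ * accessors above using the offsets from iwl-eeprom.h; hw_addr stands
+ * for a caller-provided u8[ETH_ALEN] buffer:
+ *
+ *     u16 version = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
+ *     const u8 *mac = iwl_legacy_eeprom_query_addr(priv, EEPROM_MAC_ADDRESS);
+ *     memcpy(hw_addr, mac, ETH_ALEN);
+ */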
+
+/**
+ * iwl_legacy_eeprom_init - read EEPROM contents
+ *
+ * Load the EEPROM contents from adapter into priv->eeprom
+ *
+ * NOTE:  This routine uses the non-debug IO access functions.
+ */
+int iwl_legacy_eeprom_init(struct iwl_priv *priv)
+{
+       __le16 *e;
+       u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
+       int sz;
+       int ret;
+       u16 addr;
+
+       /* allocate eeprom */
+       sz = priv->cfg->base_params->eeprom_size;
+       IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
+       priv->eeprom = kzalloc(sz, GFP_KERNEL);
+       if (!priv->eeprom) {
+               ret = -ENOMEM;
+               goto alloc_err;
+       }
+       e = (__le16 *)priv->eeprom;
+
+       priv->cfg->ops->lib->apm_ops.init(priv);
+
+       ret = iwl_legacy_eeprom_verify_signature(priv);
+       if (ret < 0) {
+               IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
+               ret = -ENOENT;
+               goto err;
+       }
+
+       /* Make sure driver (instead of uCode) is allowed to read EEPROM */
+       ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
+       if (ret < 0) {
+               IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
+               ret = -ENOENT;
+               goto err;
+       }
+
+       /* eeprom is an array of 16bit values */
+       for (addr = 0; addr < sz; addr += sizeof(u16)) {
+               u32 r;
+
+               _iwl_legacy_write32(priv, CSR_EEPROM_REG,
+                            CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+
+               ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
+                                         CSR_EEPROM_REG_READ_VALID_MSK,
+                                         CSR_EEPROM_REG_READ_VALID_MSK,
+                                         IWL_EEPROM_ACCESS_TIMEOUT);
+               if (ret < 0) {
+                       IWL_ERR(priv, "Time out reading EEPROM[%d]\n",
+                                                       addr);
+                       goto done;
+               }
+               r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG);
+               e[addr / 2] = cpu_to_le16(r >> 16);
+       }
+
+       IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
+                      "EEPROM",
+                      iwl_legacy_eeprom_query16(priv, EEPROM_VERSION));
+
+       ret = 0;
+done:
+       priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
+
+err:
+       if (ret)
+               iwl_legacy_eeprom_free(priv);
+       /* Reset chip to save power until we load uCode during "up". */
+       iwl_legacy_apm_stop(priv);
+alloc_err:
+       return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_eeprom_init);
+
+void iwl_legacy_eeprom_free(struct iwl_priv *priv)
+{
+       kfree(priv->eeprom);
+       priv->eeprom = NULL;
+}
+EXPORT_SYMBOL(iwl_legacy_eeprom_free);
+
+static void iwl_legacy_init_band_reference(const struct iwl_priv *priv,
+                       int eep_band, int *eeprom_ch_count,
+                       const struct iwl_eeprom_channel **eeprom_ch_info,
+                       const u8 **eeprom_ch_index)
+{
+       u32 offset = priv->cfg->ops->lib->
+                       eeprom_ops.regulatory_bands[eep_band - 1];
+       switch (eep_band) {
+       case 1:         /* 2.4GHz band */
+               *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_1);
+               *eeprom_ch_info = (struct iwl_eeprom_channel *)
+                               iwl_legacy_eeprom_query_addr(priv, offset);
+               *eeprom_ch_index = iwlegacy_eeprom_band_1;
+               break;
+       case 2:         /* 4.9GHz band */
+               *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_2);
+               *eeprom_ch_info = (struct iwl_eeprom_channel *)
+                               iwl_legacy_eeprom_query_addr(priv, offset);
+               *eeprom_ch_index = iwlegacy_eeprom_band_2;
+               break;
+       case 3:         /* 5.2GHz band */
+               *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_3);
+               *eeprom_ch_info = (struct iwl_eeprom_channel *)
+                               iwl_legacy_eeprom_query_addr(priv, offset);
+               *eeprom_ch_index = iwlegacy_eeprom_band_3;
+               break;
+       case 4:         /* 5.5GHz band */
+               *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_4);
+               *eeprom_ch_info = (struct iwl_eeprom_channel *)
+                               iwl_legacy_eeprom_query_addr(priv, offset);
+               *eeprom_ch_index = iwlegacy_eeprom_band_4;
+               break;
+       case 5:         /* 5.7GHz band */
+               *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_5);
+               *eeprom_ch_info = (struct iwl_eeprom_channel *)
+                               iwl_legacy_eeprom_query_addr(priv, offset);
+               *eeprom_ch_index = iwlegacy_eeprom_band_5;
+               break;
+       case 6:         /* 2.4GHz ht40 channels */
+               *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_6);
+               *eeprom_ch_info = (struct iwl_eeprom_channel *)
+                               iwl_legacy_eeprom_query_addr(priv, offset);
+               *eeprom_ch_index = iwlegacy_eeprom_band_6;
+               break;
+       case 7:         /* 5 GHz ht40 channels */
+               *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_7);
+               *eeprom_ch_info = (struct iwl_eeprom_channel *)
+                               iwl_legacy_eeprom_query_addr(priv, offset);
+               *eeprom_ch_index = iwlegacy_eeprom_band_7;
+               break;
+       default:
+               BUG();
+               return;
+       }
+}
+
+#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
+                           ? # x " " : "")
+/**
+ * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
+ *
+ * Does not set up a command, or touch hardware.
+ */
+static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv,
+                             enum ieee80211_band band, u16 channel,
+                             const struct iwl_eeprom_channel *eeprom_ch,
+                             u8 clear_ht40_extension_channel)
+{
+       struct iwl_channel_info *ch_info;
+
+       ch_info = (struct iwl_channel_info *)
+                       iwl_legacy_get_channel_info(priv, band, channel);
+
+       if (!iwl_legacy_is_channel_valid(ch_info))
+               return -1;
+
+       IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
+                       " Ad-Hoc %ssupported\n",
+                       ch_info->channel,
+                       iwl_legacy_is_channel_a_band(ch_info) ?
+                       "5.2" : "2.4",
+                       CHECK_AND_PRINT(IBSS),
+                       CHECK_AND_PRINT(ACTIVE),
+                       CHECK_AND_PRINT(RADAR),
+                       CHECK_AND_PRINT(WIDE),
+                       CHECK_AND_PRINT(DFS),
+                       eeprom_ch->flags,
+                       eeprom_ch->max_power_avg,
+                       ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
+                        && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
+                       "" : "not ");
+
+       ch_info->ht40_eeprom = *eeprom_ch;
+       ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
+       ch_info->ht40_flags = eeprom_ch->flags;
+       if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
+               ch_info->ht40_extension_channel &=
+                                       ~clear_ht40_extension_channel;
+
+       return 0;
+}
+
+#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
+                           ? # x " " : "")
+
+/**
+ * iwl_legacy_init_channel_map - Set up driver's info for all possible channels
+ */
+int iwl_legacy_init_channel_map(struct iwl_priv *priv)
+{
+       int eeprom_ch_count = 0;
+       const u8 *eeprom_ch_index = NULL;
+       const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
+       int band, ch;
+       struct iwl_channel_info *ch_info;
+
+       if (priv->channel_count) {
+               IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
+               return 0;
+       }
+
+       IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
+
+       priv->channel_count =
+           ARRAY_SIZE(iwlegacy_eeprom_band_1) +
+           ARRAY_SIZE(iwlegacy_eeprom_band_2) +
+           ARRAY_SIZE(iwlegacy_eeprom_band_3) +
+           ARRAY_SIZE(iwlegacy_eeprom_band_4) +
+           ARRAY_SIZE(iwlegacy_eeprom_band_5);
+
+       IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
+                       priv->channel_count);
+
+       priv->channel_info = kcalloc(priv->channel_count,
+                                    sizeof(struct iwl_channel_info),
+                                    GFP_KERNEL);
+       if (!priv->channel_info) {
+               IWL_ERR(priv, "Could not allocate channel_info\n");
+               priv->channel_count = 0;
+               return -ENOMEM;
+       }
+
+       ch_info = priv->channel_info;
+
+       /* Loop through the 5 EEPROM bands and add them in order to the
+        * channel map we maintain (which contains additional information
+        * beyond what is in the EEPROM itself) */
+       for (band = 1; band <= 5; band++) {
+
+               iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
+                                       &eeprom_ch_info, &eeprom_ch_index);
+
+               /* Loop through each band adding each of the channels */
+               for (ch = 0; ch < eeprom_ch_count; ch++) {
+                       ch_info->channel = eeprom_ch_index[ch];
+                       ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
+                           IEEE80211_BAND_5GHZ;
+
+                       /* permanently store EEPROM's channel regulatory flags
+                        *   and max power in channel info database. */
+                       ch_info->eeprom = eeprom_ch_info[ch];
+
+                       /* Copy the run-time flags so they are there even on
+                        * invalid channels */
+                       ch_info->flags = eeprom_ch_info[ch].flags;
+                       /* First mark HT40 as not enabled, then enable
+                        * extension channels one by one */
+                       ch_info->ht40_extension_channel =
+                                       IEEE80211_CHAN_NO_HT40;
+
+                       if (!(iwl_legacy_is_channel_valid(ch_info))) {
+                               IWL_DEBUG_EEPROM(priv,
+                                              "Ch. %d Flags %x [%sGHz] - "
+                                              "No traffic\n",
+                                              ch_info->channel,
+                                              ch_info->flags,
+                                              iwl_legacy_is_channel_a_band(ch_info) ?
+                                              "5.2" : "2.4");
+                               ch_info++;
+                               continue;
+                       }
+
+                       /* Initialize regulatory-based run-time data */
+                       ch_info->max_power_avg = ch_info->curr_txpow =
+                           eeprom_ch_info[ch].max_power_avg;
+                       ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
+                       ch_info->min_power = 0;
+
+                       IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
+                                      "%s%s%s%s%s%s(0x%02x %ddBm):"
+                                      " Ad-Hoc %ssupported\n",
+                                      ch_info->channel,
+                                      iwl_legacy_is_channel_a_band(ch_info) ?
+                                      "5.2" : "2.4",
+                                      CHECK_AND_PRINT_I(VALID),
+                                      CHECK_AND_PRINT_I(IBSS),
+                                      CHECK_AND_PRINT_I(ACTIVE),
+                                      CHECK_AND_PRINT_I(RADAR),
+                                      CHECK_AND_PRINT_I(WIDE),
+                                      CHECK_AND_PRINT_I(DFS),
+                                      eeprom_ch_info[ch].flags,
+                                      eeprom_ch_info[ch].max_power_avg,
+                                      ((eeprom_ch_info[ch].
+                                        flags & EEPROM_CHANNEL_IBSS)
+                                       && !(eeprom_ch_info[ch].
+                                            flags & EEPROM_CHANNEL_RADAR))
+                                      ? "" : "not ");
+
+                       /* Set the tx_power_user_lmt to the highest power
+                        * supported by any channel */
+                       if (eeprom_ch_info[ch].max_power_avg >
+                                               priv->tx_power_user_lmt)
+                               priv->tx_power_user_lmt =
+                                   eeprom_ch_info[ch].max_power_avg;
+
+                       ch_info++;
+               }
+       }
+
+       /* Check if we do have HT40 channels */
+       if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
+           EEPROM_REGULATORY_BAND_NO_HT40 &&
+           priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
+           EEPROM_REGULATORY_BAND_NO_HT40)
+               return 0;
+
+       /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
+       for (band = 6; band <= 7; band++) {
+               enum ieee80211_band ieeeband;
+
+               iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
+                                       &eeprom_ch_info, &eeprom_ch_index);
+
+               /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
+               ieeeband =
+                       (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+
+               /* Loop through each band adding each of the channels */
+               for (ch = 0; ch < eeprom_ch_count; ch++) {
+                       /* Set up driver's info for lower half */
+                       iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
+                                               eeprom_ch_index[ch],
+                                               &eeprom_ch_info[ch],
+                                               IEEE80211_CHAN_NO_HT40PLUS);
+
+                       /* Set up driver's info for upper half */
+                       iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
+                                               eeprom_ch_index[ch] + 4,
+                                               &eeprom_ch_info[ch],
+                                               IEEE80211_CHAN_NO_HT40MINUS);
+               }
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_init_channel_map);
+
+/*
+ * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map
+ */
+void iwl_legacy_free_channel_map(struct iwl_priv *priv)
+{
+       kfree(priv->channel_info);
+       priv->channel_count = 0;
+}
+EXPORT_SYMBOL(iwl_legacy_free_channel_map);
+
+/**
+ * iwl_legacy_get_channel_info - Find driver's private channel info
+ *
+ * Based on band and channel number.
+ */
+const struct
+iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv,
+                                       enum ieee80211_band band, u16 channel)
+{
+       int i;
+
+       switch (band) {
+       case IEEE80211_BAND_5GHZ:
+               for (i = 14; i < priv->channel_count; i++) {
+                       if (priv->channel_info[i].channel == channel)
+                               return &priv->channel_info[i];
+               }
+               break;
+       case IEEE80211_BAND_2GHZ:
+               if (channel >= 1 && channel <= 14)
+                       return &priv->channel_info[channel - 1];
+               break;
+       default:
+               BUG();
+       }
+
+       return NULL;
+}
+EXPORT_SYMBOL(iwl_legacy_get_channel_info);
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
new file mode 100644 (file)
index 0000000..c59c810
--- /dev/null
@@ -0,0 +1,344 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_eeprom_h__
+#define __iwl_legacy_eeprom_h__
+
+#include <net/mac80211.h>
+
+struct iwl_priv;
+
+/*
+ * EEPROM access time values:
+ *
+ * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
+ * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
+ * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
+ * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
+ */
+#define IWL_EEPROM_ACCESS_TIMEOUT      5000 /* uSec */
+
+#define IWL_EEPROM_SEM_TIMEOUT         10   /* microseconds */
+#define IWL_EEPROM_SEM_RETRY_LIMIT     1000 /* number of attempts (not time) */
+
+
+/*
+ * Regulatory channel usage flags in EEPROM struct iwl_eeprom_channel.flags.
+ *
+ * IBSS and/or AP operation is allowed *only* on those channels with
+ * (VALID && IBSS && ACTIVE && !RADAR).  This restriction is in place because
+ * RADAR detection is not supported by the 4965 driver, but is a
+ * requirement for establishing a new network for legal operation on channels
+ * requiring RADAR detection or restricting ACTIVE scanning.
+ *
+ * NOTE:  "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
+ *        It only indicates that 20 MHz channel use is supported; HT40 channel
+ *        usage is indicated by a separate set of regulatory flags for each
+ *        HT40 channel pair.
+ *
+ * NOTE:  Using a channel inappropriately will result in a uCode error!
+ */
+#define IWL_NUM_TX_CALIB_GROUPS 5
+enum {
+       EEPROM_CHANNEL_VALID = (1 << 0),        /* usable for this SKU/geo */
+       EEPROM_CHANNEL_IBSS = (1 << 1),         /* usable as an IBSS channel */
+       /* Bit 2 Reserved */
+       EEPROM_CHANNEL_ACTIVE = (1 << 3),       /* active scanning allowed */
+       EEPROM_CHANNEL_RADAR = (1 << 4),        /* radar detection required */
+       EEPROM_CHANNEL_WIDE = (1 << 5),         /* 20 MHz channel okay */
+       /* Bit 6 Reserved (was Narrow Channel) */
+       EEPROM_CHANNEL_DFS = (1 << 7),  /* dynamic freq selection candidate */
+};
+
+/* SKU Capabilities */
+/* 3945 only */
+#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE                (1 << 0)
+#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE                (1 << 1)
+
+/* *regulatory* channel data format in eeprom, one for each channel.
+ * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
+struct iwl_eeprom_channel {
+       u8 flags;               /* EEPROM_CHANNEL_* flags copied from EEPROM */
+       s8 max_power_avg;       /* max power (dBm) on this chnl, limit 31 */
+} __packed;
+
+/* 3945 Specific */
+#define EEPROM_3945_EEPROM_VERSION     (0x2f)
+
+/* 4965 has two radio transmitters (and 3 radio receivers) */
+#define EEPROM_TX_POWER_TX_CHAINS      (2)
+
+/* 4965 has room for up to 8 sets of txpower calibration data */
+#define EEPROM_TX_POWER_BANDS          (8)
+
+/* 4965 factory calibration measures txpower gain settings for
+ * each of 3 target output levels */
+#define EEPROM_TX_POWER_MEASUREMENTS   (3)
+
+/* 4965 Specific */
+/* 4965 driver does not work with txpower calibration version < 5 */
+#define EEPROM_4965_TX_POWER_VERSION    (5)
+#define EEPROM_4965_EEPROM_VERSION     (0x2f)
+#define EEPROM_4965_CALIB_VERSION_OFFSET       (2*0xB6) /* 2 bytes */
+#define EEPROM_4965_CALIB_TXPOWER_OFFSET       (2*0xE8) /* 48  bytes */
+#define EEPROM_4965_BOARD_REVISION             (2*0x4F) /* 2 bytes */
+#define EEPROM_4965_BOARD_PBA                  (2*0x56+1) /* 9 bytes */
+
+/* 2.4 GHz */
+extern const u8 iwlegacy_eeprom_band_1[14];
+
+/*
+ * factory calibration data for one txpower level, on one channel,
+ * measured on one of the 2 tx chains (radio transmitter and associated
+ * antenna).  EEPROM contains:
+ *
+ * 1)  Temperature (degrees Celsius) of device when measurement was made.
+ *
+ * 2)  Gain table index used to achieve the target measurement power.
+ *     This refers to the "well-known" gain tables (see iwl-4965-hw.h).
+ *
+ * 3)  Actual measured output power, in half-dBm ("34" = 17 dBm).
+ *
+ * 4)  RF power amplifier detector level measurement (not used).
+ */
+struct iwl_eeprom_calib_measure {
+       u8 temperature;         /* Device temperature (Celsius) */
+       u8 gain_idx;            /* Index into gain table */
+       u8 actual_pow;          /* Measured RF output power, half-dBm */
+       s8 pa_det;              /* Power amp detector level (not used) */
+} __packed;
+
+
+/*
+ * measurement set for one channel.  EEPROM contains:
+ *
+ * 1)  Channel number measured
+ *
+ * 2)  Measurements for each of 3 power levels for each of 2 radio transmitters
+ *     (a.k.a. "tx chains") (6 measurements altogether)
+ */
+struct iwl_eeprom_calib_ch_info {
+       u8 ch_num;
+       struct iwl_eeprom_calib_measure
+               measurements[EEPROM_TX_POWER_TX_CHAINS]
+                       [EEPROM_TX_POWER_MEASUREMENTS];
+} __packed;
+
+/*
+ * txpower subband info.
+ *
+ * For each frequency subband, EEPROM contains the following:
+ *
+ * 1)  First and last channels within range of the subband.  "0" values
+ *     indicate that this sample set is not being used.
+ *
+ * 2)  Sample measurement sets for 2 channels close to the range endpoints.
+ */
+struct iwl_eeprom_calib_subband_info {
+       u8 ch_from;     /* channel number of lowest channel in subband */
+       u8 ch_to;       /* channel number of highest channel in subband */
+       struct iwl_eeprom_calib_ch_info ch1;
+       struct iwl_eeprom_calib_ch_info ch2;
+} __packed;
+
+
+/*
+ * txpower calibration info.  EEPROM contains:
+ *
+ * 1)  Factory-measured saturation power levels (maximum levels at which
+ *     tx power amplifier can output a signal without too much distortion).
+ *     There is one level for 2.4 GHz band and one for 5 GHz band.  These
+ *     values apply to all channels within each of the bands.
+ *
+ * 2)  Factory-measured power supply voltage level.  This is assumed to be
+ *     constant (i.e. same value applies to all channels/bands) while the
+ *     factory measurements are being made.
+ *
+ * 3)  Up to 8 sets of factory-measured txpower calibration values.
+ *     These are for different frequency ranges, since txpower gain
+ *     characteristics of the analog radio circuitry vary with frequency.
+ *
+ *     Not all sets need to be filled with data;
+ *     struct iwl_eeprom_calib_subband_info contains range of channels
+ *     (0 if unused) for each set of data.
+ */
+struct iwl_eeprom_calib_info {
+       u8 saturation_power24;  /* half-dBm (e.g. "34" = 17 dBm) */
+       u8 saturation_power52;  /* half-dBm */
+       __le16 voltage;         /* signed */
+       struct iwl_eeprom_calib_subband_info
+               band_info[EEPROM_TX_POWER_BANDS];
+} __packed;
+
+
+/* General */
+#define EEPROM_DEVICE_ID                    (2*0x08)   /* 2 bytes */
+#define EEPROM_MAC_ADDRESS                  (2*0x15)   /* 6  bytes */
+#define EEPROM_BOARD_REVISION               (2*0x35)   /* 2  bytes */
+#define EEPROM_BOARD_PBA_NUMBER             (2*0x3B+1) /* 9  bytes */
+#define EEPROM_VERSION                      (2*0x44)   /* 2  bytes */
+#define EEPROM_SKU_CAP                      (2*0x45)   /* 2  bytes */
+#define EEPROM_OEM_MODE                     (2*0x46)   /* 2  bytes */
+#define EEPROM_WOWLAN_MODE                  (2*0x47)   /* 2  bytes */
+#define EEPROM_RADIO_CONFIG                 (2*0x48)   /* 2  bytes */
+#define EEPROM_NUM_MAC_ADDRESS              (2*0x4C)   /* 2  bytes */
+
+/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
+#define EEPROM_RF_CFG_TYPE_MSK(x)   (x & 0x3)         /* bits 0-1   */
+#define EEPROM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
+#define EEPROM_RF_CFG_DASH_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
+#define EEPROM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
+#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
+#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
+
+#define EEPROM_3945_RF_CFG_TYPE_MAX  0x0
+#define EEPROM_4965_RF_CFG_TYPE_MAX  0x1
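+
+/*
+ * For example (sketch only), the radio configuration word can be read with
+ * iwl_legacy_eeprom_query16() and decoded with the masks above:
+ *
+ *     u16 radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+ *     u8 rf_type = EEPROM_RF_CFG_TYPE_MSK(radio_cfg);
+ *     u8 tx_ant  = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
+ *     u8 rx_ant  = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
+ */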
+
+/*
+ * Per-channel regulatory data.
+ *
+ * Each channel that *might* be supported by iwl has a fixed location
+ * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
+ * txpower (MSB).
+ *
+ * Entries immediately below are for 20 MHz channel width.  HT40 (40 MHz)
+ * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
+ *
+ * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ */
+#define EEPROM_REGULATORY_SKU_ID            (2*0x60)    /* 4  bytes */
+#define EEPROM_REGULATORY_BAND_1            (2*0x62)   /* 2  bytes */
+#define EEPROM_REGULATORY_BAND_1_CHANNELS   (2*0x63)   /* 28 bytes */
+
+/*
+ * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
+ * 5.0 GHz channels 7, 8, 11, 12, 16
+ * (4915-5080MHz) (none of these is ever supported)
+ */
+#define EEPROM_REGULATORY_BAND_2            (2*0x71)   /* 2  bytes */
+#define EEPROM_REGULATORY_BAND_2_CHANNELS   (2*0x72)   /* 26 bytes */
+
+/*
+ * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+ * (5170-5320MHz)
+ */
+#define EEPROM_REGULATORY_BAND_3            (2*0x7F)   /* 2  bytes */
+#define EEPROM_REGULATORY_BAND_3_CHANNELS   (2*0x80)   /* 24 bytes */
+
+/*
+ * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+ * (5500-5700MHz)
+ */
+#define EEPROM_REGULATORY_BAND_4            (2*0x8C)   /* 2  bytes */
+#define EEPROM_REGULATORY_BAND_4_CHANNELS   (2*0x8D)   /* 22 bytes */
+
+/*
+ * 5.7 GHz channels 145, 149, 153, 157, 161, 165
+ * (5725-5825MHz)
+ */
+#define EEPROM_REGULATORY_BAND_5            (2*0x98)   /* 2  bytes */
+#define EEPROM_REGULATORY_BAND_5_CHANNELS   (2*0x99)   /* 12 bytes */
+
+/*
+ * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
+ *
+ * The channel listed is the center of the lower 20 MHz half of the channel.
+ * The overall center frequency is actually 2 channels (10 MHz) above that,
+ * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
+ * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
+ * and the overall HT40 channel width centers on channel 3.
+ *
+ * NOTE:  The RXON command uses 20 MHz channel numbers to specify the
+ *        control channel to which to tune.  RXON also specifies whether the
+ *        control channel is the upper or lower half of a HT40 channel.
+ *
+ * NOTE:  4965 does not support HT40 channels on 2.4 GHz.
+ */
+#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0)  /* 14 bytes */
+
+/*
+ * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
+ * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
+ */
+#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8)  /* 22 bytes */
+
+#define EEPROM_REGULATORY_BAND_NO_HT40                 (0)
+
+struct iwl_eeprom_ops {
+       const u32 regulatory_bands[7];
+       int (*acquire_semaphore) (struct iwl_priv *priv);
+       void (*release_semaphore) (struct iwl_priv *priv);
+};
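+
+/*
+ * A device configuration would typically point the seven regulatory_bands[]
+ * entries at the per-band channel lists defined above (EEPROM bands 1-5 plus
+ * the two HT40 bands), using EEPROM_REGULATORY_BAND_NO_HT40 for HT40 entries
+ * the device does not support.  For example (sketch only):
+ *
+ *     .regulatory_bands = {
+ *             EEPROM_REGULATORY_BAND_1_CHANNELS,
+ *             EEPROM_REGULATORY_BAND_2_CHANNELS,
+ *             EEPROM_REGULATORY_BAND_3_CHANNELS,
+ *             EEPROM_REGULATORY_BAND_4_CHANNELS,
+ *             EEPROM_REGULATORY_BAND_5_CHANNELS,
+ *             EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
+ *             EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
+ *     },
+ */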
+
+
+int iwl_legacy_eeprom_init(struct iwl_priv *priv);
+void iwl_legacy_eeprom_free(struct iwl_priv *priv);
+const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv,
+                                       size_t offset);
+u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset);
+int iwl_legacy_init_channel_map(struct iwl_priv *priv);
+void iwl_legacy_free_channel_map(struct iwl_priv *priv);
+const struct iwl_channel_info *iwl_legacy_get_channel_info(
+               const struct iwl_priv *priv,
+               enum ieee80211_band band, u16 channel);
+
+#endif  /* __iwl_legacy_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h
new file mode 100644 (file)
index 0000000..4e20c7e
--- /dev/null
@@ -0,0 +1,513 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_legacy_fh_h__
+#define __iwl_legacy_fh_h__
+
+/****************************/
+/* Flow Handler Definitions */
+/****************************/
+
+/**
+ * This I/O area is directly read/writable by the driver (e.g. Linux uses writel()).
+ * Addresses are offsets from device's PCI hardware base address.
+ */
+#define FH_MEM_LOWER_BOUND                   (0x1000)
+#define FH_MEM_UPPER_BOUND                   (0x2000)
+
+/**
+ * Keep-Warm (KW) buffer base address.
+ *
+ * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
+ * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
+ * DRAM access when 4965 is Txing or Rxing.  The dummy accesses prevent host
+ * from going into a power-savings mode that would cause higher DRAM latency,
+ * and possible data over/under-runs, before all Tx/Rx is complete.
+ *
+ * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
+ * of the buffer, which must be 4K aligned.  Once this is set up, the 4965
+ * automatically invokes keep-warm accesses when normal accesses might not
+ * be sufficient to maintain fast DRAM response.
+ *
+ * Bit fields:
+ *  31-0:  Keep-warm buffer physical base address [35:4], must be 4K aligned
+ */
+#define FH_KW_MEM_ADDR_REG                  (FH_MEM_LOWER_BOUND + 0x97C)
+
+
+/**
+ * TFD Circular Buffers Base (CBBC) addresses
+ *
+ * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
+ * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
+ * (see struct iwl_tfd_frame).  These 16 pointer registers are offset by 0x04
+ * bytes from one another.  Each TFD circular buffer in DRAM must be 256-byte
+ * aligned (address bits 0-7 must be 0).
+ *
+ * Bit fields in each pointer register:
+ *  27-0: TFD CB physical base address [35:8], must be 256-byte aligned
+ */
+#define FH_MEM_CBBC_LOWER_BOUND          (FH_MEM_LOWER_BOUND + 0x9D0)
+#define FH_MEM_CBBC_UPPER_BOUND          (FH_MEM_LOWER_BOUND + 0xA10)
+
+/* Find TFD CB base pointer for given queue (range 0-15). */
+#define FH_MEM_CBBC_QUEUE(x)  (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
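+
+/*
+ * Example (sketch): the driver points the device at a Tx queue's TFD
+ * circular buffer by writing the 256-byte-aligned DRAM address so that
+ * address bits [35:8] land in bits [27:0] of the register.  Assuming
+ * tfd_dma holds the queue's TFD array DMA address and
+ * iwl_legacy_write_direct32() is the usual direct-write helper from
+ * iwl-io.h:
+ *
+ *     iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
+ *                               tfd_dma >> 8);
+ */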
+
+
+/**
+ * Rx SRAM Control and Status Registers (RSCSR)
+ *
+ * These registers provide handshake between driver and 4965 for the Rx queue
+ * (this queue handles *all* command responses, notifications, Rx data, etc.
+ * sent from 4965 uCode to host driver).  Unlike Tx, there is only one Rx
+ * queue, and only one Rx DMA/FIFO channel.  Also unlike Tx, which can
+ * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
+ * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
+ * mapping between RBDs and RBs.
+ *
+ * Driver must allocate host DRAM memory for the following, and set the
+ * physical address of each into 4965 registers:
+ *
+ * 1)  Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
+ *     entries (although any power of 2, up to 4096, is selectable by driver).
+ *     Each entry (1 dword) points to a receive buffer (RB) of consistent size
+ *     (typically 4K, although 8K or 16K are also selectable by driver).
+ *     Driver sets up RB size and number of RBDs in the CB via Rx config
+ *     register FH_MEM_RCSR_CHNL0_CONFIG_REG.
+ *
+ *     Bit fields within one RBD:
+ *     27-0:  Receive Buffer physical address bits [35:8], 256-byte aligned
+ *
+ *     Driver sets physical address [35:8] of base of RBD circular buffer
+ *     into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
+ *
+ * 2)  Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
+ *     (RBs) have been filled, via a "write pointer", actually the index of
+ *     the RB's corresponding RBD within the circular buffer.  Driver sets
+ *     physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
+ *
+ *     Bit fields in lower dword of Rx status buffer (upper dword not used
+ *     by driver; see struct iwl4965_shared, val0):
+ *     31-12:  Not used by driver
+ *     11- 0:  Index of last filled Rx buffer descriptor
+ *             (4965 writes, driver reads this value)
+ *
+ * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
+ * enter pointers to these RBs into contiguous RBD circular buffer entries,
+ * and update the 4965's "write" index register,
+ * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
+ *
+ * This "write" index corresponds to the *next* RBD that the driver will make
+ * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
+ * the circular buffer.  This value should initially be 0 (before preparing any
+ * RBs), should be 8 after preparing the first 8 RBs (for example), and must
+ * wrap back to 0 at the end of the circular buffer (but don't wrap before
+ * "read" index has advanced past 1!  See below).
+ * NOTE:  4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
+ *
+ * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
+ * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
+ * to tell the driver the index of the latest filled RBD.  The driver must
+ * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
+ *
+ * The driver must also internally keep track of a third index, which is the
+ * next RBD to process.  When receiving an Rx interrupt, driver should process
+ * all filled but unprocessed RBs up to, but not including, the RB
+ * corresponding to the "read" index.  For example, if "read" index becomes "1",
+ * driver may process the RB pointed to by RBD 0.  Depending on volume of
+ * traffic, there may be many RBs to process.
+ *
+ * If read index == write index, 4965 thinks there is no room to put new data.
+ * Due to this, the maximum number of filled RBs is 255, instead of 256.  To
+ * be safe, make sure that there is a gap of at least 2 RBDs between "write"
+ * and "read" indexes; that is, make sure that there are no more than 254
+ * buffers waiting to be filled.
+ */
+#define FH_MEM_RSCSR_LOWER_BOUND       (FH_MEM_LOWER_BOUND + 0xBC0)
+#define FH_MEM_RSCSR_UPPER_BOUND       (FH_MEM_LOWER_BOUND + 0xC00)
+#define FH_MEM_RSCSR_CHNL0             (FH_MEM_RSCSR_LOWER_BOUND)
+
+/**
+ * Physical base address of 8-byte Rx Status buffer.
+ * Bit fields:
+ *  31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
+ */
+#define FH_RSCSR_CHNL0_STTS_WPTR_REG   (FH_MEM_RSCSR_CHNL0)
+
+/**
+ * Physical base address of Rx Buffer Descriptor Circular Buffer.
+ * Bit fields:
+ *  27-0:  RBD CB physical base address [35:8], must be 256-byte aligned.
+ */
+#define FH_RSCSR_CHNL0_RBDCB_BASE_REG  (FH_MEM_RSCSR_CHNL0 + 0x004)
+
+/**
+ * Rx write pointer (index, really!).
+ * Bit fields:
+ *  11-0:  Index of driver's most recent prepared-to-be-filled RBD, + 1.
+ *         NOTE:  For 256-entry circular buffer, use only bits [7:0].
+ */
+#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG  (FH_MEM_RSCSR_CHNL0 + 0x008)
+#define FH_RSCSR_CHNL0_WPTR        (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
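+
+/*
+ * Example (sketch): after refilling receive buffers, the driver advances
+ * its "write" index in multiples of 8, wraps it within the 256-entry
+ * circular buffer (RX_QUEUE_MASK, defined later in this file) and hands
+ * it to the device; rxq->write and the write helper are illustrative:
+ *
+ *     rxq->write = (rxq->write + 8) & RX_QUEUE_MASK;
+ *     iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
+ *                               rxq->write & ~0x7);
+ */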
+
+
+/**
+ * Rx Config/Status Registers (RCSR)
+ * Rx Config Reg for channel 0 (only channel used)
+ *
+ * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
+ * normal operation (see bit fields).
+ *
+ * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
+ * Driver should poll FH_MEM_RSSR_RX_STATUS_REG        for
+ * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
+ *
+ * Bit fields:
+ * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ *        '10' operate normally
+ * 29-24: reserved
+ * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
+ *        min "5" for 32 RBDs, max "12" for 4096 RBDs.
+ * 19-18: reserved
+ * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
+ *        '10' 12K, '11' 16K.
+ * 15-14: reserved
+ * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
+ * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
+ *        typical value 0x10 (about 1/2 msec)
+ *  3- 0: reserved
+ */
+#define FH_MEM_RCSR_LOWER_BOUND      (FH_MEM_LOWER_BOUND + 0xC00)
+#define FH_MEM_RCSR_UPPER_BOUND      (FH_MEM_LOWER_BOUND + 0xCC0)
+#define FH_MEM_RCSR_CHNL0            (FH_MEM_RCSR_LOWER_BOUND)
+
+#define FH_MEM_RCSR_CHNL0_CONFIG_REG   (FH_MEM_RCSR_CHNL0)
+
+#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
+#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK   (0x00001000) /* bits 12 */
+#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
+#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK   (0x00030000) /* bits 16-17 */
+#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
+#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
+
+#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS       (20)
+#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS     (4)
+#define RX_RB_TIMEOUT  (0x10)
+
+#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL         (0x00000000)
+#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL     (0x40000000)
+#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL        (0x80000000)
+
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K    (0x00000000)
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K    (0x00010000)
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K   (0x00020000)
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K   (0x00030000)
+
+#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY              (0x00000004)
+#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL    (0x00000000)
+#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL  (0x00001000)
+
+#define FH_RSCSR_FRAME_SIZE_MSK        (0x00003FFF)    /* bits 0-13 */
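+
+/*
+ * Example (sketch): a typical channel 0 Rx DMA setup combines the values
+ * above as described in the FH_MEM_RCSR_CHNL0_CONFIG_REG bit fields --
+ * DMA enabled, 4K buffers, a 256-entry RBD circular buffer
+ * (2^RX_QUEUE_SIZE_LOG, defined later in this file) and interrupts
+ * directed to the host (write helper as assumed above):
+ *
+ *     iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ *             FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ *             FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ *             FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
+ *             (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+ *             (RX_QUEUE_SIZE_LOG << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+ */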
+
+/**
+ * Rx Shared Status Registers (RSSR)
+ *
+ * After stopping Rx DMA channel (writing 0 to
+ * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
+ * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
+ *
+ * Bit fields:
+ *  24:  1 = Channel 0 is idle
+ *
+ * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
+ * contain default values that should not be altered by the driver.
+ */
+#define FH_MEM_RSSR_LOWER_BOUND           (FH_MEM_LOWER_BOUND + 0xC40)
+#define FH_MEM_RSSR_UPPER_BOUND           (FH_MEM_LOWER_BOUND + 0xD00)
+
+#define FH_MEM_RSSR_SHARED_CTRL_REG       (FH_MEM_RSSR_LOWER_BOUND)
+#define FH_MEM_RSSR_RX_STATUS_REG      (FH_MEM_RSSR_LOWER_BOUND + 0x004)
+#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
+                                       (FH_MEM_RSSR_LOWER_BOUND + 0x008)
+
+#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE      (0x01000000)
+
+#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT        28
+
+/* TFDB  Area - TFDs buffer table */
+#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK      (0xFFFFFFFF)
+#define FH_TFDIB_LOWER_BOUND       (FH_MEM_LOWER_BOUND + 0x900)
+#define FH_TFDIB_UPPER_BOUND       (FH_MEM_LOWER_BOUND + 0x958)
+#define FH_TFDIB_CTRL0_REG(_chnl)  (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
+#define FH_TFDIB_CTRL1_REG(_chnl)  (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
+
+/**
+ * Transmit DMA Channel Control/Status Registers (TCSR)
+ *
+ * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
+ * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
+ * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
+ *
+ * To use a Tx DMA channel, driver must initialize its
+ * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
+ *
+ * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
+ *
+ * All other bits should be 0.
+ *
+ * Bit fields:
+ * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ *        '10' operate normally
+ * 29- 4: Reserved, set to "0"
+ *     3: Enable internal DMA requests (1, normal operation), disable (0)
+ *  2- 0: Reserved, set to "0"
+ */
+#define FH_TCSR_LOWER_BOUND  (FH_MEM_LOWER_BOUND + 0xD00)
+#define FH_TCSR_UPPER_BOUND  (FH_MEM_LOWER_BOUND + 0xE60)
+
+/* Find Control/Status reg for given Tx DMA/FIFO channel */
+#define FH49_TCSR_CHNL_NUM                            (7)
+#define FH50_TCSR_CHNL_NUM                            (8)
+
+/* TCSR: tx_config register values */
+#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl)      \
+               (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
+#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl)      \
+               (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
+#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl)     \
+               (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF         (0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV         (0x00000001)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE   (0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE    (0x00000008)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT      (0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD     (0x00100000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD      (0x00200000)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT       (0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD      (0x00400000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD       (0x00800000)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE       (0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF   (0x40000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE      (0x80000000)
+
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY     (0x00000000)
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT      (0x00002000)
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID     (0x00000003)
+
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM         (20)
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX         (12)
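+
+/*
+ * Example (sketch): per the FH_TCSR_CHNL_TX_CONFIG_REG description above,
+ * enabling a Tx DMA channel for normal operation comes down to
+ * (write helper as assumed above):
+ *
+ *     iwl_legacy_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
+ *             FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ *             FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+ */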
+
+/**
+ * Tx Shared Status Registers (TSSR)
+ *
+ * After stopping Tx DMA channel (writing 0 to
+ * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
+ * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
+ * (channel's buffers empty | no pending requests).
+ *
+ * Bit fields:
+ * 31-24:  1 = Channel buffers empty (channel 7:0)
+ * 23-16:  1 = No pending requests (channel 7:0)
+ */
+#define FH_TSSR_LOWER_BOUND            (FH_MEM_LOWER_BOUND + 0xEA0)
+#define FH_TSSR_UPPER_BOUND            (FH_MEM_LOWER_BOUND + 0xEC0)
+
+#define FH_TSSR_TX_STATUS_REG          (FH_TSSR_LOWER_BOUND + 0x010)
+
+/**
+ * Bit fields for TSSR(Tx Shared Status & Control) error status register:
+ * 31:  Indicates an address error when accessing internal memory
+ *     uCode/driver must write "1" in order to clear this flag
+ * 30:  Indicates that Host did not send the expected number of dwords to FH
+ *     uCode/driver must write "1" in order to clear this flag
+ * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
+ *     command was received from the scheduler while the TRB was already full
+ *     with previous command
+ *     uCode/driver must write "1" in order to clear this flag
+ * 7-0: Each status bit indicates a channel's TxCredit error. When an error
+ *     bit is set, it indicates that the FH has received a full indication
+ *     from the RTC TxFIFO and the current value of the TxCredit counter was
+ *     not equal to zero. This means that the credit mechanism was not
+ *     synchronized to the TxFIFO status
+ *     uCode/driver must write "1" in order to clear this flag
+ */
+#define FH_TSSR_TX_ERROR_REG           (FH_TSSR_LOWER_BOUND + 0x018)
+
+#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
+
+/* Tx service channels */
+#define FH_SRVC_CHNL           (9)
+#define FH_SRVC_LOWER_BOUND    (FH_MEM_LOWER_BOUND + 0x9C8)
+#define FH_SRVC_UPPER_BOUND    (FH_MEM_LOWER_BOUND + 0x9D0)
+#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
+               (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
+
+#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
+/* Instruct FH to increment the retry count of a packet when
+ * it is brought from the memory to TX-FIFO
+ */
+#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN   (0x00000002)
+
+#define RX_QUEUE_SIZE                         256
+#define RX_QUEUE_MASK                         255
+#define RX_QUEUE_SIZE_LOG                     8
+
+/*
+ * RX related structures and functions
+ */
+#define RX_FREE_BUFFERS 64
+#define RX_LOW_WATERMARK 8
+
+/* Size of one Rx buffer in host DRAM */
+#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
+#define IWL_RX_BUF_SIZE_4K (4 * 1024)
+#define IWL_RX_BUF_SIZE_8K (8 * 1024)
+
+/**
+ * struct iwl_rb_status - receive buffer status
+ *     host memory mapped FH registers
+ * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
+ * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
+ * @finished_rb_num [0:11] - Indicates the index of the current RB
+ *     in which the last frame was written to
+ * @finished_fr_num [0:11] - Indicates the index of the RX Frame
+ *     which was transferred
+ */
+struct iwl_rb_status {
+       __le16 closed_rb_num;
+       __le16 closed_fr_num;
+       __le16 finished_rb_num;
+       __le16 finished_fr_nam;
+       __le32 __unused; /* 3945 only */
+} __packed;
+
+
+#define TFD_QUEUE_SIZE_MAX      (256)
+#define TFD_QUEUE_SIZE_BC_DUP  (64)
+#define TFD_QUEUE_BC_SIZE      (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
+#define IWL_TX_DMA_MASK        DMA_BIT_MASK(36)
+#define IWL_NUM_OF_TBS         20
+
+static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr)
+{
+       return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
+}
+/**
+ * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor
+ *
+ * This structure contains the DMA address and length of one Tx buffer.
+ *
+ * @lo: low [31:0] portion of the DMA address of the Tx buffer;
+ *     every even-numbered entry is unaligned on a 16-bit boundary
+ * @hi_n_len: 0-3 [35:32] portion of the DMA address,
+ *          4-15 length of the Tx buffer
+ */
+struct iwl_tfd_tb {
+       __le32 lo;
+       __le16 hi_n_len;
+} __packed;
+
+/**
+ * struct iwl_tfd - Transmit Frame Descriptor (TFD)
+ *
+ * @__reserved1: reserved
+ * @num_tbs: 0-4 number of active tbs
+ *           5   reserved
+ *           6-7 padding (not used)
+ * @tbs: transmit frame buffer descriptors (up to 20)
+ * @__pad: padding
+ *
+ * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
+ * Both driver and device share these circular buffers, each of which must be
+ * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
+ *
+ * Driver must indicate the physical address of the base of each
+ * circular buffer via the FH_MEM_CBBC_QUEUE registers.
+ *
+ * Each TFD contains pointer/size information for up to 20 data buffers
+ * in host DRAM.  These buffers collectively contain the (one) frame described
+ * by the TFD.  Each buffer must be a single contiguous block of memory within
+ * itself, but buffers may be scattered in host DRAM.  Each buffer has max size
+ * of (4K - 4).  The 4965 concatenates all of a TFD's buffers into a single
+ * Tx frame, up to 8 KBytes in size.
+ *
+ * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
+ */
+struct iwl_tfd {
+       u8 __reserved1[3];
+       u8 num_tbs;
+       struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
+       __le32 __pad;
+} __packed;
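+
+/*
+ * Example helper (illustrative sketch, not referenced elsewhere): fill one
+ * transmit buffer descriptor according to the iwl_tfd_tb layout above --
+ * low 32 address bits in @lo, address bits [35:32] in bits 0-3 of
+ * @hi_n_len, and the buffer length in bits 4-15.
+ */
+static inline void
+iwl_legacy_tfd_tb_fill(struct iwl_tfd_tb *tb, dma_addr_t addr, u16 len)
+{
+       u16 hi_n_len = iwl_legacy_get_dma_hi_addr(addr); /* bits [35:32] */
+
+       hi_n_len |= len << 4;                            /* bits [15:4]  */
+       tb->lo = cpu_to_le32((u32)addr);
+       tb->hi_n_len = cpu_to_le16(hi_n_len);
+}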
+
+/* Keep Warm Size */
+#define IWL_KW_SIZE 0x1000     /* 4k */
+
+#endif /* !__iwl_legacy_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
new file mode 100644 (file)
index 0000000..9d721cb
--- /dev/null
@@ -0,0 +1,271 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <net/mac80211.h>
+
+#include "iwl-dev.h"
+#include "iwl-debug.h"
+#include "iwl-eeprom.h"
+#include "iwl-core.h"
+
+
+const char *iwl_legacy_get_cmd_string(u8 cmd)
+{
+       switch (cmd) {
+               IWL_CMD(REPLY_ALIVE);
+               IWL_CMD(REPLY_ERROR);
+               IWL_CMD(REPLY_RXON);
+               IWL_CMD(REPLY_RXON_ASSOC);
+               IWL_CMD(REPLY_QOS_PARAM);
+               IWL_CMD(REPLY_RXON_TIMING);
+               IWL_CMD(REPLY_ADD_STA);
+               IWL_CMD(REPLY_REMOVE_STA);
+               IWL_CMD(REPLY_WEPKEY);
+               IWL_CMD(REPLY_3945_RX);
+               IWL_CMD(REPLY_TX);
+               IWL_CMD(REPLY_RATE_SCALE);
+               IWL_CMD(REPLY_LEDS_CMD);
+               IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
+               IWL_CMD(REPLY_CHANNEL_SWITCH);
+               IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
+               IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
+               IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
+               IWL_CMD(POWER_TABLE_CMD);
+               IWL_CMD(PM_SLEEP_NOTIFICATION);
+               IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
+               IWL_CMD(REPLY_SCAN_CMD);
+               IWL_CMD(REPLY_SCAN_ABORT_CMD);
+               IWL_CMD(SCAN_START_NOTIFICATION);
+               IWL_CMD(SCAN_RESULTS_NOTIFICATION);
+               IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
+               IWL_CMD(BEACON_NOTIFICATION);
+               IWL_CMD(REPLY_TX_BEACON);
+               IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
+               IWL_CMD(REPLY_BT_CONFIG);
+               IWL_CMD(REPLY_STATISTICS_CMD);
+               IWL_CMD(STATISTICS_NOTIFICATION);
+               IWL_CMD(CARD_STATE_NOTIFICATION);
+               IWL_CMD(MISSED_BEACONS_NOTIFICATION);
+               IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
+               IWL_CMD(SENSITIVITY_CMD);
+               IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
+               IWL_CMD(REPLY_RX_PHY_CMD);
+               IWL_CMD(REPLY_RX_MPDU_CMD);
+               IWL_CMD(REPLY_RX);
+               IWL_CMD(REPLY_COMPRESSED_BA);
+       default:
+               return "UNKNOWN";
+
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_get_cmd_string);
+
+#define HOST_COMPLETE_TIMEOUT (HZ / 2)
+
+static void iwl_legacy_generic_cmd_callback(struct iwl_priv *priv,
+                                    struct iwl_device_cmd *cmd,
+                                    struct iwl_rx_packet *pkt)
+{
+       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+               IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
+               iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+               return;
+       }
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       switch (cmd->hdr.cmd) {
+       case REPLY_TX_LINK_QUALITY_CMD:
+       case SENSITIVITY_CMD:
+               IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
+               iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+               break;
+       default:
+               IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
+               iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+       }
+#endif
+}
+
+static int
+iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+       int ret;
+
+       BUG_ON(!(cmd->flags & CMD_ASYNC));
+
+       /* An asynchronous command can not expect an SKB to be set. */
+       BUG_ON(cmd->flags & CMD_WANT_SKB);
+
+       /* Assign a generic callback if one is not provided */
+       if (!cmd->callback)
+               cmd->callback = iwl_legacy_generic_cmd_callback;
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return -EBUSY;
+
+       ret = iwl_legacy_enqueue_hcmd(priv, cmd);
+       if (ret < 0) {
+               IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
+                         iwl_legacy_get_cmd_string(cmd->id), ret);
+               return ret;
+       }
+       return 0;
+}
+
+int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+       int cmd_idx;
+       int ret;
+
+       BUG_ON(cmd->flags & CMD_ASYNC);
+
+        /* A synchronous command can not have a callback set. */
+       BUG_ON(cmd->callback);
+
+       IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
+                       iwl_legacy_get_cmd_string(cmd->id));
+       mutex_lock(&priv->sync_cmd_mutex);
+
+       set_bit(STATUS_HCMD_ACTIVE, &priv->status);
+       IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
+                       iwl_legacy_get_cmd_string(cmd->id));
+
+       cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd);
+       if (cmd_idx < 0) {
+               ret = cmd_idx;
+               IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
+                         iwl_legacy_get_cmd_string(cmd->id), ret);
+               goto out;
+       }
+
+       ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+                       !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
+                       HOST_COMPLETE_TIMEOUT);
+       if (!ret) {
+               if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
+                       IWL_ERR(priv,
+                               "Error sending %s: time out after %dms.\n",
+                               iwl_legacy_get_cmd_string(cmd->id),
+                               jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+                       clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+                       IWL_DEBUG_INFO(priv,
+                               "Clearing HCMD_ACTIVE for command %s\n",
+                                      iwl_legacy_get_cmd_string(cmd->id));
+                       ret = -ETIMEDOUT;
+                       goto cancel;
+               }
+       }
+
+       if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
+               IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
+                              iwl_legacy_get_cmd_string(cmd->id));
+               ret = -ECANCELED;
+               goto fail;
+       }
+       if (test_bit(STATUS_FW_ERROR, &priv->status)) {
+               IWL_ERR(priv, "Command %s failed: FW Error\n",
+                              iwl_legacy_get_cmd_string(cmd->id));
+               ret = -EIO;
+               goto fail;
+       }
+       if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
+               IWL_ERR(priv, "Error: Response NULL in '%s'\n",
+                         iwl_legacy_get_cmd_string(cmd->id));
+               ret = -EIO;
+               goto cancel;
+       }
+
+       ret = 0;
+       goto out;
+
+cancel:
+       if (cmd->flags & CMD_WANT_SKB) {
+               /*
+                * Cancel the CMD_WANT_SKB flag for the cmd in the
+                * TX cmd queue. Otherwise in case the cmd comes
+                * in later, it will possibly set an invalid
+                * address (cmd->meta.source).
+                */
+               priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
+                                                       ~CMD_WANT_SKB;
+       }
+fail:
+       if (cmd->reply_page) {
+               iwl_legacy_free_pages(priv, cmd->reply_page);
+               cmd->reply_page = 0;
+       }
+out:
+       mutex_unlock(&priv->sync_cmd_mutex);
+       return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_send_cmd_sync);
+
+int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+       if (cmd->flags & CMD_ASYNC)
+               return iwl_legacy_send_cmd_async(priv, cmd);
+
+       return iwl_legacy_send_cmd_sync(priv, cmd);
+}
+EXPORT_SYMBOL(iwl_legacy_send_cmd);
+
+int
+iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
+{
+       struct iwl_host_cmd cmd = {
+               .id = id,
+               .len = len,
+               .data = data,
+       };
+
+       return iwl_legacy_send_cmd_sync(priv, &cmd);
+}
+EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu);
+
+int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv,
+                          u8 id, u16 len, const void *data,
+                          void (*callback)(struct iwl_priv *priv,
+                                           struct iwl_device_cmd *cmd,
+                                           struct iwl_rx_packet *pkt))
+{
+       struct iwl_host_cmd cmd = {
+               .id = id,
+               .len = len,
+               .data = data,
+       };
+
+       cmd.flags |= CMD_ASYNC;
+       cmd.callback = callback;
+
+       return iwl_legacy_send_cmd_async(priv, &cmd);
+}
+EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async);
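A brief usage sketch (editorial, not part of this patch) of how other iwlegacy code would typically issue a host command through the helpers above; the wrapper names and the choice of REPLY_BT_CONFIG as the command ID are illustrative only.

/* Synchronous: blocks (under priv->sync_cmd_mutex) until the uCode replies
 * or HOST_COMPLETE_TIMEOUT expires. */
static int example_send_config(struct iwl_priv *priv, const void *buf, u16 len)
{
	return iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG, len, buf);
}

/* Asynchronous: returns as soon as the command is queued; a NULL callback
 * gets replaced by the generic completion callback. */
static int example_send_config_async(struct iwl_priv *priv, const void *buf, u16 len)
{
	return iwl_legacy_send_cmd_pdu_async(priv, REPLY_BT_CONFIG, len, buf, NULL);
}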
diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h
new file mode 100644 (file)
index 0000000..02132e7
--- /dev/null
@@ -0,0 +1,181 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_helpers_h__
+#define __iwl_legacy_helpers_h__
+
+#include <linux/ctype.h>
+#include <net/mac80211.h>
+
+#include "iwl-io.h"
+
+#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
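An editorial note: the macro above builds an inclusive bit mask covering bits lo through hi, for example:

/* IWL_MASK(1, 3) = (1 << 3) | ((1 << 3) - (1 << 1)) = 0x8 | 0x6 = 0xE,
 * i.e. bits 1, 2 and 3 set. */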
+
+
+static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf(
+       struct ieee80211_hw *hw)
+{
+       return &hw->conf;
+}
+
+/**
+ * iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning
+ * @index -- current index
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd)
+{
+       return ++index & (n_bd - 1);
+}
+
+/**
+ * iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end
+ * @index -- current index
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd)
+{
+       return --index & (n_bd - 1);
+}
+
+/* TODO: Move fw_desc functions to iwl-pci.ko */
+static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev,
+                                   struct fw_desc *desc)
+{
+       if (desc->v_addr)
+               dma_free_coherent(&pci_dev->dev, desc->len,
+                                 desc->v_addr, desc->p_addr);
+       desc->v_addr = NULL;
+       desc->len = 0;
+}
+
+static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev,
+                                   struct fw_desc *desc)
+{
+       if (!desc->len) {
+               desc->v_addr = NULL;
+               return -EINVAL;
+       }
+
+       desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
+                                         &desc->p_addr, GFP_KERNEL);
+       return (desc->v_addr != NULL) ? 0 : -ENOMEM;
+}
+
+/*
+ * we have 8 bits used like this:
+ *
+ * 7 6 5 4 3 2 1 0
+ * | | | | | | | |
+ * | | | | | | +-+-------- AC queue (0-3)
+ * | | | | | |
+ * | +-+-+-+-+------------ HW queue ID
+ * |
+ * +---------------------- unused
+ */
+static inline void
+iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
+{
+       BUG_ON(ac > 3);   /* only have 2 bits */
+       BUG_ON(hwq > 31); /* only use 5 bits */
+
+       txq->swq_id = (hwq << 2) | ac;
+}
+
+static inline void iwl_legacy_wake_queue(struct iwl_priv *priv,
+                                 struct iwl_tx_queue *txq)
+{
+       u8 queue = txq->swq_id;
+       u8 ac = queue & 3;
+       u8 hwq = (queue >> 2) & 0x1f;
+
+       if (test_and_clear_bit(hwq, priv->queue_stopped))
+               if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
+                       ieee80211_wake_queue(priv->hw, ac);
+}
+
+static inline void iwl_legacy_stop_queue(struct iwl_priv *priv,
+                                 struct iwl_tx_queue *txq)
+{
+       u8 queue = txq->swq_id;
+       u8 ac = queue & 3;
+       u8 hwq = (queue >> 2) & 0x1f;
+
+       if (!test_and_set_bit(hwq, priv->queue_stopped))
+               if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
+                       ieee80211_stop_queue(priv->hw, ac);
+}
+
+#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
+#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
+
+static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv)
+{
+       clear_bit(STATUS_INT_ENABLED, &priv->status);
+
+       /* disable interrupts from uCode/NIC to host */
+       iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+       /* acknowledge/clear/reset any interrupts still pending
+        * from uCode or flow handler (Rx/Tx DMA) */
+       iwl_write32(priv, CSR_INT, 0xffffffff);
+       iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
+       IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
+}
+
+static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv)
+{
+       IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
+       set_bit(STATUS_INT_ENABLED, &priv->status);
+       iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
+}
+
+/**
+ * iwl_legacy_beacon_time_mask_low - mask of the low bits of the beacon time
+ * @priv -- pointer to iwl_priv data structure
+ * @tsf_bits -- number of bits to shift for masking
+ */
+static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv,
+                                          u16 tsf_bits)
+{
+       return (1 << tsf_bits) - 1;
+}
+
+/**
+ * iwl_legacy_beacon_time_mask_high - mask of the high bits of the beacon time
+ * @priv -- pointer to iwl_priv data structure
+ * @tsf_bits -- number of bits to shift for masking
+ */
+static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv,
+                                           u16 tsf_bits)
+{
+       return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
+}
+
+#endif                         /* __iwl_legacy_helpers_h__ */
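An illustrative sketch (editorial, not part of the patch): splitting a 32-bit beacon-time value into its high (TSF-unit) and low (remainder) fields with the two mask helpers above; the function name is hypothetical.

static inline void example_split_beacon_time(struct iwl_priv *priv, u32 time,
					     u16 tsf_bits, u32 *hi, u32 *lo)
{
	*lo = time & iwl_legacy_beacon_time_mask_low(priv, tsf_bits);
	*hi = (time & iwl_legacy_beacon_time_mask_high(priv, tsf_bits)) >> tsf_bits;
}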
diff --git a/drivers/net/wireless/iwlegacy/iwl-io.h b/drivers/net/wireless/iwlegacy/iwl-io.h
new file mode 100644 (file)
index 0000000..5cc5d34
--- /dev/null
@@ -0,0 +1,545 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_io_h__
+#define __iwl_legacy_io_h__
+
+#include <linux/io.h>
+
+#include "iwl-dev.h"
+#include "iwl-debug.h"
+#include "iwl-devtrace.h"
+
+/*
+ * IO, register, and NIC memory access functions
+ *
+ * NOTE on naming convention and macro usage for these
+ *
+ * A single _ prefix before an access function means that no state
+ * check or debug information is printed when that function is called.
+ *
+ * A double __ prefix before an access function means that state is checked
+ * and the current line number and caller function name are printed in addition
+ * to any other debug output.
+ *
+ * The non-prefixed name is a #define that maps the caller onto the double-
+ * prefix version, supplying the caller's file name (__FILE__) and line
+ * number (__LINE__) automatically.
+ *
+ * If you wish to call the function without any debug or state checking,
+ * you should use the single _ prefix version (as is used by dependent IO
+ * routines, for example _iwl_legacy_read_direct32 calls the non-check version of
+ * _iwl_legacy_read32.)
+ *
+ * These declarations are *extremely* useful in quickly isolating code deltas
+ * which result in misconfiguration of the hardware I/O.  In combination with
+ * git-bisect and the IO debug level you can quickly determine the specific
+ * commit which breaks the IO sequence to the hardware.
+ *
+ */
+
+static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val)
+{
+       trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val);
+       iowrite8(val, priv->hw_base + ofs);
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline void
+__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv,
+                                u32 ofs, u8 val)
+{
+       IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
+       _iwl_legacy_write8(priv, ofs, val);
+}
+#define iwl_write8(priv, ofs, val) \
+       __iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val)
+#else
+#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val)
+#endif
+
+
+static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val)
+{
+       trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val);
+       iowrite32(val, priv->hw_base + ofs);
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline void
+__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv,
+                                u32 ofs, u32 val)
+{
+       IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
+       _iwl_legacy_write32(priv, ofs, val);
+}
+#define iwl_write32(priv, ofs, val) \
+       __iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val)
+#else
+#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val)
+#endif
+
+static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs)
+{
+       u32 val = ioread32(priv->hw_base + ofs);
+       trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val);
+       return val;
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline u32
+__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
+{
+       IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l);
+       return _iwl_legacy_read32(priv, ofs);
+}
+#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs)
+#else
+#define iwl_read32(p, o) _iwl_legacy_read32(p, o)
+#endif
+
+#define IWL_POLL_INTERVAL 10   /* microseconds */
+static inline int
+_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr,
+                               u32 bits, u32 mask, int timeout)
+{
+       int t = 0;
+
+       do {
+               if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask))
+                       return t;
+               udelay(IWL_POLL_INTERVAL);
+               t += IWL_POLL_INTERVAL;
+       } while (t < timeout);
+
+       return -ETIMEDOUT;
+}
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline int __iwl_legacy_poll_bit(const char *f, u32 l,
+                                struct iwl_priv *priv, u32 addr,
+                                u32 bits, u32 mask, int timeout)
+{
+       int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout);
+       IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
+                    addr, bits, mask,
+                    unlikely(ret  == -ETIMEDOUT) ? "timeout" : "", f, l);
+       return ret;
+}
+#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
+       __iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \
+       bits, mask, timeout)
+#else
+#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t)
+#endif
+
+static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
+{
+       _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask);
+}
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline void __iwl_legacy_set_bit(const char *f, u32 l,
+                                struct iwl_priv *priv, u32 reg, u32 mask)
+{
+       u32 val = _iwl_legacy_read32(priv, reg) | mask;
+       IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg,
+                                                       mask, val);
+       _iwl_legacy_write32(priv, reg, val);
+}
+static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&p->reg_lock, reg_flags);
+       __iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m);
+       spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+#else
+static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&p->reg_lock, reg_flags);
+       _iwl_legacy_set_bit(p, r, m);
+       spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+#endif
+
+static inline void
+_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
+{
+       _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask);
+}
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline void
+__iwl_legacy_clear_bit(const char *f, u32 l,
+                                  struct iwl_priv *priv, u32 reg, u32 mask)
+{
+       u32 val = _iwl_legacy_read32(priv, reg) & ~mask;
+       IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
+       _iwl_legacy_write32(priv, reg, val);
+}
+static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&p->reg_lock, reg_flags);
+       __iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m);
+       spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+#else
+static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&p->reg_lock, reg_flags);
+       _iwl_legacy_clear_bit(p, r, m);
+       spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+#endif
+
+static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv)
+{
+       int ret;
+       u32 val;
+
+       /* this bit wakes up the NIC */
+       _iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
+                               CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+       /*
+        * These bits say the device is running, and should keep running for
+        * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
+        * but they do not indicate that embedded SRAM is restored yet;
+        * 3945 and 4965 have volatile SRAM, and must save/restore contents
+        * to/from host DRAM when sleeping/waking for power-saving.
+        * Each direction takes approximately 1/4 millisecond; with this
+        * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
+        * series of register accesses are expected (e.g. reading Event Log),
+        * to keep device from sleeping.
+        *
+        * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
+        * SRAM is okay/restored.  We don't check that here because this call
+        * is just for hardware register access; but GP1 MAC_SLEEP check is a
+        * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
+        *
+        */
+       ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL,
+                          CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+                          (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+                           CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
+       if (ret < 0) {
+               val = _iwl_legacy_read32(priv, CSR_GP_CNTRL);
+               IWL_ERR(priv,
+                       "MAC is in deep sleep!.  CSR_GP_CNTRL = 0x%08X\n", val);
+               _iwl_legacy_write32(priv, CSR_RESET,
+                               CSR_RESET_REG_FLAG_FORCE_NMI);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l,
+                                              struct iwl_priv *priv)
+{
+       IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
+       return _iwl_legacy_grab_nic_access(priv);
+}
+#define iwl_grab_nic_access(priv) \
+       __iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv)
+#else
+#define iwl_grab_nic_access(priv) \
+       _iwl_legacy_grab_nic_access(priv)
+#endif
+
+static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv)
+{
+       _iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
+                       CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline void __iwl_legacy_release_nic_access(const char *f, u32 l,
+                                           struct iwl_priv *priv)
+{
+
+       IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
+       _iwl_legacy_release_nic_access(priv);
+}
+#define iwl_release_nic_access(priv) \
+       __iwl_legacy_release_nic_access(__FILE__, __LINE__, priv)
+#else
+#define iwl_release_nic_access(priv) \
+       _iwl_legacy_release_nic_access(priv)
+#endif
+
+static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
+{
+       return _iwl_legacy_read32(priv, reg);
+}
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l,
+                                       struct iwl_priv *priv, u32 reg)
+{
+       u32 value = _iwl_legacy_read_direct32(priv, reg);
+       IWL_DEBUG_IO(priv,
+                       "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
+                    f, l);
+       return value;
+}
+static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
+{
+       u32 value;
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       iwl_grab_nic_access(priv);
+       value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg);
+       iwl_release_nic_access(priv);
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+       return value;
+}
+
+#else
+static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
+{
+       u32 value;
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       iwl_grab_nic_access(priv);
+       value = _iwl_legacy_read_direct32(priv, reg);
+       iwl_release_nic_access(priv);
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+       return value;
+
+}
+#endif
+
+static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv,
+                                        u32 reg, u32 value)
+{
+       _iwl_legacy_write32(priv, reg, value);
+}
+static inline void
+iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       if (!iwl_grab_nic_access(priv)) {
+               _iwl_legacy_write_direct32(priv, reg, value);
+               iwl_release_nic_access(priv);
+       }
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv,
+                                              u32 reg, u32 len, u32 *values)
+{
+       u32 count = sizeof(u32);
+
+       if ((priv != NULL) && (values != NULL)) {
+               for (; 0 < len; len -= count, reg += count, values++)
+                       iwl_legacy_write_direct32(priv, reg, *values);
+       }
+}
+
+static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr,
+                                      u32 mask, int timeout)
+{
+       int t = 0;
+
+       do {
+               if ((iwl_legacy_read_direct32(priv, addr) & mask) == mask)
+                       return t;
+               udelay(IWL_POLL_INTERVAL);
+               t += IWL_POLL_INTERVAL;
+       } while (t < timeout);
+
+       return -ETIMEDOUT;
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l,
+                                           struct iwl_priv *priv,
+                                           u32 addr, u32 mask, int timeout)
+{
+       int ret  = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout);
+
+       if (unlikely(ret == -ETIMEDOUT))
+               IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - "
+                            "timedout - %s %d\n", addr, mask, f, l);
+       else
+               IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
+                            "- %s %d\n", addr, mask, ret, f, l);
+       return ret;
+}
+#define iwl_poll_direct_bit(priv, addr, mask, timeout) \
+__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
+#else
+#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit
+#endif
+
+static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
+{
+       _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
+       rmb();
+       return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
+}
+static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
+{
+       unsigned long reg_flags;
+       u32 val;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       iwl_grab_nic_access(priv);
+       val = _iwl_legacy_read_prph(priv, reg);
+       iwl_release_nic_access(priv);
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+       return val;
+}
+
+static inline void _iwl_legacy_write_prph(struct iwl_priv *priv,
+                                            u32 addr, u32 val)
+{
+       _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
+                             ((addr & 0x0000FFFF) | (3 << 24)));
+       wmb();
+       _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
+}
+
+static inline void
+iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       if (!iwl_grab_nic_access(priv)) {
+               _iwl_legacy_write_prph(priv, addr, val);
+               iwl_release_nic_access(priv);
+       }
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+#define _iwl_legacy_set_bits_prph(priv, reg, mask) \
+_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask))
+
+static inline void
+iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       iwl_grab_nic_access(priv);
+       _iwl_legacy_set_bits_prph(priv, reg, mask);
+       iwl_release_nic_access(priv);
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \
+_iwl_legacy_write_prph(priv, reg,                              \
+                ((_iwl_legacy_read_prph(priv, reg) & mask) | bits))
+
+static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
+                               u32 bits, u32 mask)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       iwl_grab_nic_access(priv);
+       _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask);
+       iwl_release_nic_access(priv);
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static inline void iwl_legacy_clear_bits_prph(struct iwl_priv
+                                                *priv, u32 reg, u32 mask)
+{
+       unsigned long reg_flags;
+       u32 val;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       iwl_grab_nic_access(priv);
+       val = _iwl_legacy_read_prph(priv, reg);
+       _iwl_legacy_write_prph(priv, reg, (val & ~mask));
+       iwl_release_nic_access(priv);
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr)
+{
+       unsigned long reg_flags;
+       u32 value;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       iwl_grab_nic_access(priv);
+
+       _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
+       rmb();
+       value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+
+       iwl_release_nic_access(priv);
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+       return value;
+}
+
+static inline void
+iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       if (!iwl_grab_nic_access(priv)) {
+               _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
+               wmb();
+               _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
+               iwl_release_nic_access(priv);
+       }
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static inline void
+iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
+                                         u32 len, u32 *values)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       if (!iwl_grab_nic_access(priv)) {
+               _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
+               wmb();
+               for (; 0 < len; len -= sizeof(u32), values++)
+                       _iwl_legacy_write_direct32(priv,
+                                       HBUS_TARG_MEM_WDAT, *values);
+
+               iwl_release_nic_access(priv);
+       }
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+#endif
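An illustrative sketch (editorial, not part of the patch): using the iwl_poll_bit helper defined above to wait for the MAC clock-ready bit; the 25000 microsecond budget and the wrapper name are arbitrary.

static int example_wait_clock_ready(struct iwl_priv *priv)
{
	/* Poll CSR_GP_CNTRL until the clock-ready flag is set, for up to 25 ms;
	 * iwl_poll_bit() returns the elapsed microseconds or -ETIMEDOUT. */
	int ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
			       CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			       CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);

	return (ret < 0) ? ret : 0;
}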
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c
new file mode 100644 (file)
index 0000000..15eb8b7
--- /dev/null
@@ -0,0 +1,188 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+
+/* default: IWL_LED_BLINK(0) using blinking index table */
+static int led_mode;
+module_param(led_mode, int, S_IRUGO);
+MODULE_PARM_DESC(led_mode, "0=system default, "
+               "1=On(RF On)/Off(RF Off), 2=blinking");
+
+static const struct ieee80211_tpt_blink iwl_blink[] = {
+       { .throughput = 0 * 1024 - 1, .blink_time = 334 },
+       { .throughput = 1 * 1024 - 1, .blink_time = 260 },
+       { .throughput = 5 * 1024 - 1, .blink_time = 220 },
+       { .throughput = 10 * 1024 - 1, .blink_time = 190 },
+       { .throughput = 20 * 1024 - 1, .blink_time = 170 },
+       { .throughput = 50 * 1024 - 1, .blink_time = 150 },
+       { .throughput = 70 * 1024 - 1, .blink_time = 130 },
+       { .throughput = 100 * 1024 - 1, .blink_time = 110 },
+       { .throughput = 200 * 1024 - 1, .blink_time = 80 },
+       { .throughput = 300 * 1024 - 1, .blink_time = 50 },
+};
+
+/*
+ * Adjust the LED blink rate to compensate for a MAC clock difference on each HW.
+ * LED blink rate analysis showed an average deviation of 0% on 3945 and
+ * 5% on 4965 HW.
+ * The LED on/off time must be compensated per HW according to this deviation
+ * to achieve the desired LED frequency.
+ * The calculation is: (100-averageDeviation)/100 * blinkTime
+ * For code efficiency the calculation will be:
+ *     compensation = (100 - averageDeviation) * 64 / 100
+ *     NewBlinkTime = (compensation * BlinkTime) / 64
+ */
+static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv,
+                                   u8 time, u16 compensation)
+{
+       if (!compensation) {
+               IWL_ERR(priv, "undefined blink compensation: "
+                       "use pre-defined blinking time\n");
+               return time;
+       }
+
+       return (u8)((time * compensation) >> 6);
+}
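A worked example (editorial) of the fixed-point formula above, assuming the 5% average deviation quoted for 4965 and a nominal 110 ms blink time:

/* compensation   = (100 - 5) * 64 / 100 = 60          (integer division)
 * new blink time = (110 * 60) >> 6      = 103 ms      (about 6% shorter)
 */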
+
+/* Set led pattern command */
+static int iwl_legacy_led_cmd(struct iwl_priv *priv,
+                      unsigned long on,
+                      unsigned long off)
+{
+       struct iwl_led_cmd led_cmd = {
+               .id = IWL_LED_LINK,
+               .interval = IWL_DEF_LED_INTRVL
+       };
+       int ret;
+
+       if (!test_bit(STATUS_READY, &priv->status))
+               return -EBUSY;
+
+       if (priv->blink_on == on && priv->blink_off == off)
+               return 0;
+
+       IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
+                       priv->cfg->base_params->led_compensation);
+       led_cmd.on = iwl_legacy_blink_compensation(priv, on,
+                               priv->cfg->base_params->led_compensation);
+       led_cmd.off = iwl_legacy_blink_compensation(priv, off,
+                               priv->cfg->base_params->led_compensation);
+
+       ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
+       if (!ret) {
+               priv->blink_on = on;
+               priv->blink_off = off;
+       }
+       return ret;
+}
+
+static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev,
+                                  enum led_brightness brightness)
+{
+       struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
+       unsigned long on = 0;
+
+       if (brightness > 0)
+               on = IWL_LED_SOLID;
+
+       iwl_legacy_led_cmd(priv, on, 0);
+}
+
+static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev,
+                            unsigned long *delay_on,
+                            unsigned long *delay_off)
+{
+       struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
+
+       return iwl_legacy_led_cmd(priv, *delay_on, *delay_off);
+}
+
+void iwl_legacy_leds_init(struct iwl_priv *priv)
+{
+       int mode = led_mode;
+       int ret;
+
+       if (mode == IWL_LED_DEFAULT)
+               mode = priv->cfg->led_mode;
+
+       priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
+                                  wiphy_name(priv->hw->wiphy));
+       priv->led.brightness_set = iwl_legacy_led_brightness_set;
+       priv->led.blink_set = iwl_legacy_led_blink_set;
+       priv->led.max_brightness = 1;
+
+       switch (mode) {
+       case IWL_LED_DEFAULT:
+               WARN_ON(1);
+               break;
+       case IWL_LED_BLINK:
+               priv->led.default_trigger =
+                       ieee80211_create_tpt_led_trigger(priv->hw,
+                                       IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
+                                       iwl_blink, ARRAY_SIZE(iwl_blink));
+               break;
+       case IWL_LED_RF_STATE:
+               priv->led.default_trigger =
+                       ieee80211_get_radio_led_name(priv->hw);
+               break;
+       }
+
+       ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
+       if (ret) {
+               kfree(priv->led.name);
+               return;
+       }
+
+       priv->led_registered = true;
+}
+EXPORT_SYMBOL(iwl_legacy_leds_init);
+
+void iwl_legacy_leds_exit(struct iwl_priv *priv)
+{
+       if (!priv->led_registered)
+               return;
+
+       led_classdev_unregister(&priv->led);
+       kfree(priv->led.name);
+}
+EXPORT_SYMBOL(iwl_legacy_leds_exit);
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.h b/drivers/net/wireless/iwlegacy/iwl-led.h
new file mode 100644 (file)
index 0000000..f0791f7
--- /dev/null
@@ -0,0 +1,56 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_leds_h__
+#define __iwl_legacy_leds_h__
+
+
+struct iwl_priv;
+
+#define IWL_LED_SOLID 11
+#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
+
+#define IWL_LED_ACTIVITY       (0<<1)
+#define IWL_LED_LINK           (1<<1)
+
+/*
+ * LED mode
+ *    IWL_LED_DEFAULT:  use device default
+ *    IWL_LED_RF_STATE: turn LED on/off based on RF state
+ *                     LED ON  = RF ON
+ *                     LED OFF = RF OFF
+ *    IWL_LED_BLINK:    adjust led blink rate based on blink table
+ */
+enum iwl_led_mode {
+       IWL_LED_DEFAULT,
+       IWL_LED_RF_STATE,
+       IWL_LED_BLINK,
+};
+
+void iwl_legacy_leds_init(struct iwl_priv *priv);
+void iwl_legacy_leds_exit(struct iwl_priv *priv);
+
+#endif /* __iwl_legacy_leds_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
new file mode 100644 (file)
index 0000000..38647e4
--- /dev/null
@@ -0,0 +1,456 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_rs_h__
+#define __iwl_legacy_rs_h__
+
+struct iwl_rate_info {
+       u8 plcp;        /* uCode API:  IWL_RATE_6M_PLCP, etc. */
+       u8 plcp_siso;   /* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
+       u8 plcp_mimo2;  /* uCode API:  IWL_RATE_MIMO2_6M_PLCP, etc. */
+       u8 ieee;        /* MAC header:  IWL_RATE_6M_IEEE, etc. */
+       u8 prev_ieee;    /* previous rate in IEEE speeds */
+       u8 next_ieee;    /* next rate in IEEE speeds */
+       u8 prev_rs;      /* previous rate used in rs algo */
+       u8 next_rs;      /* next rate used in rs algo */
+       u8 prev_rs_tgg;  /* previous rate used in TGG rs algo */
+       u8 next_rs_tgg;  /* next rate used in TGG rs algo */
+};
+
+struct iwl3945_rate_info {
+       u8 plcp;                /* uCode API:  IWL_RATE_6M_PLCP, etc. */
+       u8 ieee;                /* MAC header:  IWL_RATE_6M_IEEE, etc. */
+       u8 prev_ieee;           /* previous rate in IEEE speeds */
+       u8 next_ieee;           /* next rate in IEEE speeds */
+       u8 prev_rs;             /* previous rate used in rs algo */
+       u8 next_rs;             /* next rate used in rs algo */
+       u8 prev_rs_tgg;         /* previous rate used in TGG rs algo */
+       u8 next_rs_tgg;         /* next rate used in TGG rs algo */
+       u8 table_rs_index;      /* index in rate scale table cmd */
+       u8 prev_table_rs;       /* prev in rate table cmd */
+};
+
+
+/*
+ * These serve as indexes into
+ * struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
+ */
+enum {
+       IWL_RATE_1M_INDEX = 0,
+       IWL_RATE_2M_INDEX,
+       IWL_RATE_5M_INDEX,
+       IWL_RATE_11M_INDEX,
+       IWL_RATE_6M_INDEX,
+       IWL_RATE_9M_INDEX,
+       IWL_RATE_12M_INDEX,
+       IWL_RATE_18M_INDEX,
+       IWL_RATE_24M_INDEX,
+       IWL_RATE_36M_INDEX,
+       IWL_RATE_48M_INDEX,
+       IWL_RATE_54M_INDEX,
+       IWL_RATE_60M_INDEX,
+       IWL_RATE_COUNT,
+       IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1,     /* Excluding 60M */
+       IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1,
+       IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
+       IWL_RATE_INVALID = IWL_RATE_COUNT,
+};
+
+enum {
+       IWL_RATE_6M_INDEX_TABLE = 0,
+       IWL_RATE_9M_INDEX_TABLE,
+       IWL_RATE_12M_INDEX_TABLE,
+       IWL_RATE_18M_INDEX_TABLE,
+       IWL_RATE_24M_INDEX_TABLE,
+       IWL_RATE_36M_INDEX_TABLE,
+       IWL_RATE_48M_INDEX_TABLE,
+       IWL_RATE_54M_INDEX_TABLE,
+       IWL_RATE_1M_INDEX_TABLE,
+       IWL_RATE_2M_INDEX_TABLE,
+       IWL_RATE_5M_INDEX_TABLE,
+       IWL_RATE_11M_INDEX_TABLE,
+       IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
+};
+
+enum {
+       IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
+       IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
+       IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
+       IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
+       IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
+};
+
+/* #define vs. enum to keep from defaulting to 'large integer' */
+#define        IWL_RATE_6M_MASK   (1 << IWL_RATE_6M_INDEX)
+#define        IWL_RATE_9M_MASK   (1 << IWL_RATE_9M_INDEX)
+#define        IWL_RATE_12M_MASK  (1 << IWL_RATE_12M_INDEX)
+#define        IWL_RATE_18M_MASK  (1 << IWL_RATE_18M_INDEX)
+#define        IWL_RATE_24M_MASK  (1 << IWL_RATE_24M_INDEX)
+#define        IWL_RATE_36M_MASK  (1 << IWL_RATE_36M_INDEX)
+#define        IWL_RATE_48M_MASK  (1 << IWL_RATE_48M_INDEX)
+#define        IWL_RATE_54M_MASK  (1 << IWL_RATE_54M_INDEX)
+#define IWL_RATE_60M_MASK  (1 << IWL_RATE_60M_INDEX)
+#define        IWL_RATE_1M_MASK   (1 << IWL_RATE_1M_INDEX)
+#define        IWL_RATE_2M_MASK   (1 << IWL_RATE_2M_INDEX)
+#define        IWL_RATE_5M_MASK   (1 << IWL_RATE_5M_INDEX)
+#define        IWL_RATE_11M_MASK  (1 << IWL_RATE_11M_INDEX)
+
+/* uCode API values for legacy bit rates, both OFDM and CCK */
+enum {
+       IWL_RATE_6M_PLCP  = 13,
+       IWL_RATE_9M_PLCP  = 15,
+       IWL_RATE_12M_PLCP = 5,
+       IWL_RATE_18M_PLCP = 7,
+       IWL_RATE_24M_PLCP = 9,
+       IWL_RATE_36M_PLCP = 11,
+       IWL_RATE_48M_PLCP = 1,
+       IWL_RATE_54M_PLCP = 3,
+       IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
+       IWL_RATE_1M_PLCP  = 10,
+       IWL_RATE_2M_PLCP  = 20,
+       IWL_RATE_5M_PLCP  = 55,
+       IWL_RATE_11M_PLCP = 110,
+       /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
+};
+
+/* uCode API values for OFDM high-throughput (HT) bit rates */
+enum {
+       IWL_RATE_SISO_6M_PLCP = 0,
+       IWL_RATE_SISO_12M_PLCP = 1,
+       IWL_RATE_SISO_18M_PLCP = 2,
+       IWL_RATE_SISO_24M_PLCP = 3,
+       IWL_RATE_SISO_36M_PLCP = 4,
+       IWL_RATE_SISO_48M_PLCP = 5,
+       IWL_RATE_SISO_54M_PLCP = 6,
+       IWL_RATE_SISO_60M_PLCP = 7,
+       IWL_RATE_MIMO2_6M_PLCP  = 0x8,
+       IWL_RATE_MIMO2_12M_PLCP = 0x9,
+       IWL_RATE_MIMO2_18M_PLCP = 0xa,
+       IWL_RATE_MIMO2_24M_PLCP = 0xb,
+       IWL_RATE_MIMO2_36M_PLCP = 0xc,
+       IWL_RATE_MIMO2_48M_PLCP = 0xd,
+       IWL_RATE_MIMO2_54M_PLCP = 0xe,
+       IWL_RATE_MIMO2_60M_PLCP = 0xf,
+       IWL_RATE_SISO_INVM_PLCP,
+       IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
+};
+
+/* MAC header values for bit rates */
+enum {
+       IWL_RATE_6M_IEEE  = 12,
+       IWL_RATE_9M_IEEE  = 18,
+       IWL_RATE_12M_IEEE = 24,
+       IWL_RATE_18M_IEEE = 36,
+       IWL_RATE_24M_IEEE = 48,
+       IWL_RATE_36M_IEEE = 72,
+       IWL_RATE_48M_IEEE = 96,
+       IWL_RATE_54M_IEEE = 108,
+       IWL_RATE_60M_IEEE = 120,
+       IWL_RATE_1M_IEEE  = 2,
+       IWL_RATE_2M_IEEE  = 4,
+       IWL_RATE_5M_IEEE  = 11,
+       IWL_RATE_11M_IEEE = 22,
+};
+
+#define IWL_CCK_BASIC_RATES_MASK    \
+       (IWL_RATE_1M_MASK          | \
+       IWL_RATE_2M_MASK)
+
+#define IWL_CCK_RATES_MASK          \
+       (IWL_CCK_BASIC_RATES_MASK  | \
+       IWL_RATE_5M_MASK          | \
+       IWL_RATE_11M_MASK)
+
+#define IWL_OFDM_BASIC_RATES_MASK   \
+       (IWL_RATE_6M_MASK         | \
+       IWL_RATE_12M_MASK         | \
+       IWL_RATE_24M_MASK)
+
+#define IWL_OFDM_RATES_MASK         \
+       (IWL_OFDM_BASIC_RATES_MASK | \
+       IWL_RATE_9M_MASK          | \
+       IWL_RATE_18M_MASK         | \
+       IWL_RATE_36M_MASK         | \
+       IWL_RATE_48M_MASK         | \
+       IWL_RATE_54M_MASK)
+
+#define IWL_BASIC_RATES_MASK         \
+       (IWL_OFDM_BASIC_RATES_MASK | \
+        IWL_CCK_BASIC_RATES_MASK)
+
+#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
+#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
+
+#define IWL_INVALID_VALUE    -1
+
+#define IWL_MIN_RSSI_VAL                 -100
+#define IWL_MAX_RSSI_VAL                    0
+
+/* These values specify how many Tx frame attempts before
+ * searching for a new modulation mode */
+#define IWL_LEGACY_FAILURE_LIMIT       160
+#define IWL_LEGACY_SUCCESS_LIMIT       480
+#define IWL_LEGACY_TABLE_COUNT         160
+
+#define IWL_NONE_LEGACY_FAILURE_LIMIT  400
+#define IWL_NONE_LEGACY_SUCCESS_LIMIT  4500
+#define IWL_NONE_LEGACY_TABLE_COUNT    1500
+
+/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
+#define IWL_RS_GOOD_RATIO              12800   /* 100% */
+#define IWL_RATE_SCALE_SWITCH          10880   /*  85% */
+#define IWL_RATE_HIGH_TH               10880   /*  85% */
+#define IWL_RATE_INCREASE_TH           6400    /*  50% */
+#define IWL_RATE_DECREASE_TH           1920    /*  15% */
+
+/* possible actions when in legacy mode */
+#define IWL_LEGACY_SWITCH_ANTENNA1      0
+#define IWL_LEGACY_SWITCH_ANTENNA2      1
+#define IWL_LEGACY_SWITCH_SISO          2
+#define IWL_LEGACY_SWITCH_MIMO2_AB      3
+#define IWL_LEGACY_SWITCH_MIMO2_AC      4
+#define IWL_LEGACY_SWITCH_MIMO2_BC      5
+
+/* possible actions when in siso mode */
+#define IWL_SISO_SWITCH_ANTENNA1        0
+#define IWL_SISO_SWITCH_ANTENNA2        1
+#define IWL_SISO_SWITCH_MIMO2_AB        2
+#define IWL_SISO_SWITCH_MIMO2_AC        3
+#define IWL_SISO_SWITCH_MIMO2_BC        4
+#define IWL_SISO_SWITCH_GI              5
+
+/* possible actions when in mimo mode */
+#define IWL_MIMO2_SWITCH_ANTENNA1       0
+#define IWL_MIMO2_SWITCH_ANTENNA2       1
+#define IWL_MIMO2_SWITCH_SISO_A         2
+#define IWL_MIMO2_SWITCH_SISO_B         3
+#define IWL_MIMO2_SWITCH_SISO_C         4
+#define IWL_MIMO2_SWITCH_GI             5
+
+#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
+
+#define IWL_ACTION_LIMIT               3       /* # possible actions */
+
+#define LQ_SIZE                2       /* 2 mode tables:  "Active" and "Search" */
+
+/* load per tid defines for A-MPDU activation */
+#define IWL_AGG_TPT_THREHOLD   0
+#define IWL_AGG_LOAD_THRESHOLD 10
+#define IWL_AGG_ALL_TID                0xff
+#define TID_QUEUE_CELL_SPACING 50      /*mS */
+#define TID_QUEUE_MAX_SIZE     20
+#define TID_ROUND_VALUE                5       /* mS */
+#define TID_MAX_LOAD_COUNT     8
+
+#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
+#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
+
+extern const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
+
+enum iwl_table_type {
+       LQ_NONE,
+       LQ_G,           /* legacy types */
+       LQ_A,
+       LQ_SISO,        /* high-throughput types */
+       LQ_MIMO2,
+       LQ_MAX,
+};
+
+#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
+#define is_siso(tbl) ((tbl) == LQ_SISO)
+#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
+#define is_mimo(tbl) (is_mimo2(tbl))
+#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
+#define is_a_band(tbl) ((tbl) == LQ_A)
+#define is_g_and(tbl) ((tbl) == LQ_G)
+
+#define        ANT_NONE        0x0
+#define        ANT_A           BIT(0)
+#define        ANT_B           BIT(1)
+#define        ANT_AB          (ANT_A | ANT_B)
+#define ANT_C          BIT(2)
+#define        ANT_AC          (ANT_A | ANT_C)
+#define ANT_BC         (ANT_B | ANT_C)
+#define ANT_ABC                (ANT_AB | ANT_C)
+
+#define IWL_MAX_MCS_DISPLAY_SIZE       12
+
+struct iwl_rate_mcs_info {
+       char    mbps[IWL_MAX_MCS_DISPLAY_SIZE];
+       char    mcs[IWL_MAX_MCS_DISPLAY_SIZE];
+};
+
+/**
+ * struct iwl_rate_scale_data -- tx success history for one rate
+ */
+struct iwl_rate_scale_data {
+       u64 data;               /* bitmap of successful frames */
+       s32 success_counter;    /* number of frames successful */
+       s32 success_ratio;      /* per-cent * 128  */
+       s32 counter;            /* number of frames attempted */
+       s32 average_tpt;        /* success ratio * expected throughput */
+       unsigned long stamp;
+};
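A minimal sketch (editorial, not part of the patch) of how the fixed-point success_ratio above would typically be derived from a window; the helper name is hypothetical.

static inline s32 example_success_ratio(const struct iwl_rate_scale_data *win)
{
	/* "percent * 128" fixed point: 12800 (IWL_RS_GOOD_RATIO) means 100% */
	if (win->counter <= 0)
		return IWL_INVALID_VALUE;

	return 128 * (100 * win->success_counter) / win->counter;
}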
+
+/**
+ * struct iwl_scale_tbl_info -- tx params and success history for all rates
+ *
+ * There are two of these in struct iwl_lq_sta,
+ * one for "active", and one for "search".
+ */
+struct iwl_scale_tbl_info {
+       enum iwl_table_type lq_type;
+       u8 ant_type;
+       u8 is_SGI;      /* 1 = short guard interval */
+       u8 is_ht40;     /* 1 = 40 MHz channel width */
+       u8 is_dup;      /* 1 = duplicated data streams */
+       u8 action;      /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
+       u8 max_search;  /* maximum number of tables we can search */
+       s32 *expected_tpt;      /* throughput metrics; expected_tpt_G, etc. */
+       u32 current_rate;  /* rate_n_flags, uCode API format */
+       struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
+};
+
+struct iwl_traffic_load {
+       unsigned long time_stamp;       /* age of the oldest statistics */
+       u32 packet_count[TID_QUEUE_MAX_SIZE];   /* packet count in this time
+                                                * slice */
+       u32 total;                      /* total num of packets during the
+                                        * last TID_MAX_TIME_DIFF */
+       u8 queue_count;                 /* number of queues that have
+                                        * been used since the last cleanup */
+       u8 head;                        /* start of the circular buffer */
+};
+
+/**
+ * struct iwl_lq_sta -- driver's rate scaling private structure
+ *
+ * Pointer to this gets passed back and forth between driver and mac80211.
+ */
+struct iwl_lq_sta {
+       u8 active_tbl;          /* index of active table, range 0-1 */
+       u8 enable_counter;      /* indicates HT mode */
+       u8 stay_in_tbl;         /* 1: disallow, 0: allow search for new mode */
+       u8 search_better_tbl;   /* 1: currently trying alternate mode */
+       s32 last_tpt;
+
+       /* The following determine when to search for a new mode */
+       u32 table_count_limit;
+       u32 max_failure_limit;  /* # failed frames before new search */
+       u32 max_success_limit;  /* # successful frames before new search */
+       u32 table_count;
+       u32 total_failed;       /* total failed frames, any/all rates */
+       u32 total_success;      /* total successful frames, any/all rates */
+       u64 flush_timer;        /* time staying in mode before new search */
+
+       u8 action_counter;      /* # mode-switch actions tried */
+       u8 is_green;
+       u8 is_dup;
+       enum ieee80211_band band;
+
+       /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
+       u32 supp_rates;
+       u16 active_legacy_rate;
+       u16 active_siso_rate;
+       u16 active_mimo2_rate;
+       s8 max_rate_idx;     /* Max rate set by user */
+       u8 missed_rate_counter;
+
+       struct iwl_link_quality_cmd lq;
+       struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
+       struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
+       u8 tx_agg_tid_en;
+#ifdef CONFIG_MAC80211_DEBUGFS
+       struct dentry *rs_sta_dbgfs_scale_table_file;
+       struct dentry *rs_sta_dbgfs_stats_table_file;
+       struct dentry *rs_sta_dbgfs_rate_scale_data_file;
+       struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+       u32 dbg_fixed_rate;
+#endif
+       struct iwl_priv *drv;
+
+       /* used to be in sta_info */
+       int last_txrate_idx;
+       /* last tx rate_n_flags */
+       u32 last_rate_n_flags;
+       /* packets destined for this STA are aggregated */
+       u8 is_agg;
+};
+
+static inline u8 iwl4965_num_of_ant(u8 mask)
+{
+       return  !!((mask) & ANT_A) +
+               !!((mask) & ANT_B) +
+               !!((mask) & ANT_C);
+}
+
+static inline u8 iwl4965_first_antenna(u8 mask)
+{
+       if (mask & ANT_A)
+               return ANT_A;
+       if (mask & ANT_B)
+               return ANT_B;
+       return ANT_C;
+}
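
A small usage sketch of the two helpers above (hypothetical function, values
chosen only to illustrate the ANT_* masks):

static void antenna_helpers_example(void)
{
        u8 valid_ant = ANT_AB;

        /* counts the set ANT_* bits: 2 for A + B */
        u8 n_ant = iwl4965_num_of_ant(valid_ant);
        /* lowest-numbered antenna present in the mask: ANT_A */
        u8 first_ant = iwl4965_first_antenna(valid_ant);

        (void)n_ant;
        (void)first_ant;
}
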
+
+
+/**
+ * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
+ *
+ * The specific throughput table used is based on the type of network
+ * we are associated with, including A, B, G, and G w/ TGG protection
+ */
+extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+
+/* Initialize station's rate scaling information after adding station */
+extern void iwl4965_rs_rate_init(struct iwl_priv *priv,
+                            struct ieee80211_sta *sta, u8 sta_id);
+extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
+                                struct ieee80211_sta *sta, u8 sta_id);
+
+/**
+ * iwl_rate_control_register - Register the rate control algorithm callbacks
+ *
+ * Since the rate control algorithm is hardware specific, there is no need
+ * or reason to place it as a stand-alone module.  The driver can call
+ * iwl_rate_control_register in order to register the rate control callbacks
+ * with the mac80211 subsystem.  This should be performed prior to calling
+ * ieee80211_register_hw().
+ *
+ */
+extern int iwl4965_rate_control_register(void);
+extern int iwl3945_rate_control_register(void);
+
+/**
+ * iwl_rate_control_unregister - Unregister the rate control callbacks
+ *
+ * This should be called after calling ieee80211_unregister_hw, but before
+ * the driver is unloaded.
+ */
+extern void iwl4965_rate_control_unregister(void);
+extern void iwl3945_rate_control_unregister(void);
+
+#endif /* __iwl_legacy_rs__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.c b/drivers/net/wireless/iwlegacy/iwl-power.c
new file mode 100644 (file)
index 0000000..903ef0d
--- /dev/null
@@ -0,0 +1,165 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-commands.h"
+#include "iwl-debug.h"
+#include "iwl-power.h"
+
+/*
+ * Setting power level allows the card to go to sleep when not busy.
+ *
+ * We calculate a sleep command based on the required latency, which
+ * we get from mac80211. In order to handle thermal throttling, we can
+ * also use pre-defined power levels.
+ */
+
+/*
+ * This defines the old power levels. They are still used by default
+ * (level 1) and for thermal throttle (levels 3 through 5)
+ */
+
+struct iwl_power_vec_entry {
+       struct iwl_powertable_cmd cmd;
+       u8 no_dtim;     /* number of DTIM periods to skip */
+};
+
+static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv,
+                                   struct iwl_powertable_cmd *cmd)
+{
+       memset(cmd, 0, sizeof(*cmd));
+
+       if (priv->power_data.pci_pm)
+               cmd->flags |= IWL_POWER_PCI_PM_MSK;
+
+       IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
+}
+
+static int
+iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
+{
+       IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
+       IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
+       IWL_DEBUG_POWER(priv, "Tx timeout = %u\n",
+                                       le32_to_cpu(cmd->tx_data_timeout));
+       IWL_DEBUG_POWER(priv, "Rx timeout = %u\n",
+                                       le32_to_cpu(cmd->rx_data_timeout));
+       IWL_DEBUG_POWER(priv,
+                       "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
+                       le32_to_cpu(cmd->sleep_interval[0]),
+                       le32_to_cpu(cmd->sleep_interval[1]),
+                       le32_to_cpu(cmd->sleep_interval[2]),
+                       le32_to_cpu(cmd->sleep_interval[3]),
+                       le32_to_cpu(cmd->sleep_interval[4]));
+
+       return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD,
+                               sizeof(struct iwl_powertable_cmd), cmd);
+}
+
+int
+iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
+                      bool force)
+{
+       int ret;
+       bool update_chains;
+
+       lockdep_assert_held(&priv->mutex);
+
+       /* Don't update the RX chain when chain noise calibration is running */
+       update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
+                       priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
+
+       if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
+               return 0;
+
+       if (!iwl_legacy_is_ready_rf(priv))
+               return -EIO;
+
+       /* scan complete uses sleep_cmd_next, so keep it updated */
+       memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
+       if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
+               IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
+               return 0;
+       }
+
+       if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
+               set_bit(STATUS_POWER_PMI, &priv->status);
+
+       ret = iwl_legacy_set_power(priv, cmd);
+       if (!ret) {
+               if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
+                       clear_bit(STATUS_POWER_PMI, &priv->status);
+
+               if (priv->cfg->ops->lib->update_chain_flags && update_chains)
+                       priv->cfg->ops->lib->update_chain_flags(priv);
+               else if (priv->cfg->ops->lib->update_chain_flags)
+                       IWL_DEBUG_POWER(priv,
+                                       "Cannot update the power, chain noise "
+                                       "calibration running: %d\n",
+                                       priv->chain_noise_data.state);
+
+               memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
+       } else
+               IWL_ERR(priv, "set power fail, ret = %d\n", ret);
+
+       return ret;
+}
+
+int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force)
+{
+       struct iwl_powertable_cmd cmd;
+
+       iwl_legacy_power_sleep_cam_cmd(priv, &cmd);
+       return iwl_legacy_power_set_mode(priv, &cmd, force);
+}
+EXPORT_SYMBOL(iwl_legacy_power_update_mode);
+
+/* initialize to default */
+void iwl_legacy_power_initialize(struct iwl_priv *priv)
+{
+       u16 lctl = iwl_legacy_pcie_link_ctl(priv);
+
+       priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
+
+       priv->power_data.debug_sleep_level_override = -1;
+
+       memset(&priv->power_data.sleep_cmd, 0,
+               sizeof(priv->power_data.sleep_cmd));
+}
+EXPORT_SYMBOL(iwl_legacy_power_initialize);
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.h b/drivers/net/wireless/iwlegacy/iwl-power.h
new file mode 100644 (file)
index 0000000..d30b36a
--- /dev/null
@@ -0,0 +1,55 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#ifndef __iwl_legacy_power_setting_h__
+#define __iwl_legacy_power_setting_h__
+
+#include "iwl-commands.h"
+
+enum iwl_power_level {
+       IWL_POWER_INDEX_1,
+       IWL_POWER_INDEX_2,
+       IWL_POWER_INDEX_3,
+       IWL_POWER_INDEX_4,
+       IWL_POWER_INDEX_5,
+       IWL_POWER_NUM
+};
+
+struct iwl_power_mgr {
+       struct iwl_powertable_cmd sleep_cmd;
+       struct iwl_powertable_cmd sleep_cmd_next;
+       int debug_sleep_level_override;
+       bool pci_pm;
+};
+
+int
+iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
+                      bool force);
+int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force);
+void iwl_legacy_power_initialize(struct iwl_priv *priv);
+
+#endif  /* __iwl_legacy_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-prph.h b/drivers/net/wireless/iwlegacy/iwl-prph.h
new file mode 100644 (file)
index 0000000..30a4930
--- /dev/null
@@ -0,0 +1,523 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef        __iwl_legacy_prph_h__
+#define __iwl_legacy_prph_h__
+
+/*
+ * Registers in this file are internal, not PCI bus memory mapped.
+ * Driver accesses these via HBUS_TARG_PRPH_* registers.
+ */
+#define PRPH_BASE      (0x00000)
+#define PRPH_END       (0xFFFFF)
+
+/* APMG (power management) constants */
+#define APMG_BASE                      (PRPH_BASE + 0x3000)
+#define APMG_CLK_CTRL_REG              (APMG_BASE + 0x0000)
+#define APMG_CLK_EN_REG                        (APMG_BASE + 0x0004)
+#define APMG_CLK_DIS_REG               (APMG_BASE + 0x0008)
+#define APMG_PS_CTRL_REG               (APMG_BASE + 0x000c)
+#define APMG_PCIDEV_STT_REG            (APMG_BASE + 0x0010)
+#define APMG_RFKILL_REG                        (APMG_BASE + 0x0014)
+#define APMG_RTC_INT_STT_REG           (APMG_BASE + 0x001c)
+#define APMG_RTC_INT_MSK_REG           (APMG_BASE + 0x0020)
+#define APMG_DIGITAL_SVR_REG           (APMG_BASE + 0x0058)
+#define APMG_ANALOG_SVR_REG            (APMG_BASE + 0x006C)
+
+#define APMS_CLK_VAL_MRB_FUNC_MODE     (0x00000001)
+#define APMG_CLK_VAL_DMA_CLK_RQT       (0x00000200)
+#define APMG_CLK_VAL_BSM_CLK_RQT       (0x00000800)
+
+#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS   (0x00400000)
+#define APMG_PS_CTRL_VAL_RESET_REQ             (0x04000000)
+#define APMG_PS_CTRL_MSK_PWR_SRC               (0x03000000)
+#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN         (0x00000000)
+#define APMG_PS_CTRL_VAL_PWR_SRC_MAX           (0x01000000) /* 3945 only */
+#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX          (0x02000000)
+#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK        (0x000001E0) /* bit 8:5 */
+#define APMG_SVR_DIGITAL_VOLTAGE_1_32          (0x00000060)
+
+#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS         (0x00000800)
+
+/**
+ * BSM (Bootstrap State Machine)
+ *
+ * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
+ * in special SRAM that does not power down when the embedded control
+ * processor is sleeping (e.g. for periodic power-saving shutdowns of radio).
+ *
+ * When powering back up after sleeps (or during initial uCode load), the BSM
+ * internally loads the short bootstrap program from the special SRAM into the
+ * embedded processor's instruction SRAM, and starts the processor so it runs
+ * the bootstrap program.
+ *
+ * This bootstrap program loads (via PCI busmaster DMA) instructions and data
+ * images for a uCode program from host DRAM locations.  The host driver
+ * indicates DRAM locations and sizes for instruction and data images via the
+ * four BSM_DRAM_* registers.  Once the bootstrap program loads the new program,
+ * the new program starts automatically.
+ *
+ * The uCode used for open-source drivers includes two programs:
+ *
+ * 1)  Initialization -- performs hardware calibration and sets up some
+ *     internal data, then notifies host via "initialize alive" notification
+ *     (struct iwl_init_alive_resp) that it has completed all of its work.
+ *     After signal from host, it then loads and starts the runtime program.
+ *     The initialization program must be used when initially setting up the
+ *     NIC after loading the driver.
+ *
+ * 2)  Runtime/Protocol -- performs all normal runtime operations.  This
+ *     notifies host via "alive" notification (struct iwl_alive_resp) that it
+ *     is ready to be used.
+ *
+ * When initializing the NIC, the host driver does the following procedure:
+ *
+ * 1)  Load bootstrap program (instructions only, no data image for bootstrap)
+ *     into bootstrap memory.  Use dword writes starting at BSM_SRAM_LOWER_BOUND
+ *
+ * 2)  Point (via BSM_DRAM_*) to the "initialize" uCode data and instruction
+ *     images in host DRAM.
+ *
+ * 3)  Set up BSM to copy from BSM SRAM into uCode instruction SRAM when asked:
+ *     BSM_WR_MEM_SRC_REG = 0
+ *     BSM_WR_MEM_DST_REG = RTC_INST_LOWER_BOUND
+ *     BSM_WR_DWCOUNT_REG = # dwords in bootstrap instruction image
+ *
+ * 4)  Load bootstrap into instruction SRAM:
+ *     BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START
+ *
+ * 5)  Wait for load completion:
+ *     Poll BSM_WR_CTRL_REG for BSM_WR_CTRL_REG_BIT_START = 0
+ *
+ * 6)  Enable future boot loads whenever NIC's power management triggers it:
+ *     BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START_EN
+ *
+ * 7)  Start the NIC by removing all reset bits:
+ *     CSR_RESET = 0
+ *
+ *     The bootstrap uCode (already in instruction SRAM) loads initialization
+ *     uCode.  Initialization uCode performs data initialization, sends
+ *     "initialize alive" notification to host, and waits for a signal from
+ *     host to load runtime code.
+ *
+ * 8)  Point (via BSM_DRAM_*) to the "runtime" uCode data and instruction
+ *     images in host DRAM.  The last register loaded must be the instruction
+ *     byte count register ("1" in MSbit tells initialization uCode to load
+ *     the runtime uCode):
+ *     BSM_DRAM_INST_BYTECOUNT_REG = byte count | BSM_DRAM_INST_LOAD
+ *
+ * 9)  Wait for "alive" notification, then issue normal runtime commands.
+ *
+ * Data caching during power-downs:
+ *
+ * Just before the embedded controller powers down (e.g. for automatic
+ * power-saving modes, or for RFKILL), uCode stores (via PCI busmaster DMA)
+ * a current snapshot of the embedded processor's data SRAM into host DRAM.
+ * This caches the data while the embedded processor's memory is powered down.
+ * Location and size are controlled by BSM_DRAM_DATA_* registers.
+ *
+ * NOTE:  Instruction SRAM does not need to be saved, since that doesn't
+ *        change during operation; the original image (from uCode distribution
+ *        file) can be used for reload.
+ *
+ * When powering back up, the BSM loads the bootstrap program.  Bootstrap looks
+ * at the BSM_DRAM_* registers, which now point to the runtime instruction
+ * image and the cached (modified) runtime data (*not* the initialization
+ * uCode).  Bootstrap reloads these runtime images into SRAM, and restarts the
+ * uCode from where it left off before the power-down.
+ *
+ * NOTE:  Initialization uCode does *not* run as part of the save/restore
+ *        procedure.
+ *
+ * This save/restore method is mostly for autonomous power management during
+ * normal operation (result of POWER_TABLE_CMD).  Platform suspend/resume and
+ * RFKILL should use complete restarts (with total re-initialization) of uCode,
+ * allowing total shutdown (including BSM memory).
+ *
+ * Note that, during normal operation, the host DRAM that held the initial
+ * startup data for the runtime code is now being used as a backup data cache
+ * for modified data!  If you need to completely re-initialize the NIC, make
+ * sure that you use the runtime data image from the uCode distribution file,
+ * not the modified/saved runtime data.  You may want to store a separate
+ * "clean" runtime data image in DRAM to avoid disk reads of distribution file.
+ */
+
+/* BSM bit fields */
+#define BSM_WR_CTRL_REG_BIT_START     (0x80000000) /* start boot load now */
+#define BSM_WR_CTRL_REG_BIT_START_EN  (0x40000000) /* enable boot after pwrup*/
+#define BSM_DRAM_INST_LOAD            (0x80000000) /* start program load now */
+
+/* BSM addresses */
+#define BSM_BASE                     (PRPH_BASE + 0x3400)
+#define BSM_END                      (PRPH_BASE + 0x3800)
+
+#define BSM_WR_CTRL_REG              (BSM_BASE + 0x000) /* ctl and status */
+#define BSM_WR_MEM_SRC_REG           (BSM_BASE + 0x004) /* source in BSM mem */
+#define BSM_WR_MEM_DST_REG           (BSM_BASE + 0x008) /* dest in SRAM mem */
+#define BSM_WR_DWCOUNT_REG           (BSM_BASE + 0x00C) /* bytes */
+#define BSM_WR_STATUS_REG            (BSM_BASE + 0x010) /* bit 0:  1 == done */
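
A sketch of steps 3) through 6) of the procedure described above, using the
BSM_WR_* registers just defined.  This is illustrative only: it assumes PRPH
accessors along the lines of iwl_legacy_write_prph()/iwl_legacy_read_prph(),
udelay(), and an RTC_INST_LOWER_BOUND constant, all defined elsewhere in the
driver, and is not presented as the driver's actual load routine.

static int bsm_load_bootstrap_sketch(struct iwl_priv *priv, u32 inst_dwords)
{
        int i;

        /* step 3: copy from offset 0 of BSM SRAM into instruction SRAM */
        iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
        iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
        iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, inst_dwords);

        /* step 4: start the load; step 5: poll until the START bit clears */
        iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
                              BSM_WR_CTRL_REG_BIT_START);
        for (i = 0; i < 100; i++) {
                if (!(iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG) &
                      BSM_WR_CTRL_REG_BIT_START))
                        break;
                udelay(10);
        }
        if (i == 100)
                return -EIO;

        /* step 6: re-arm so the BSM reloads bootstrap after power-save sleep */
        iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
                              BSM_WR_CTRL_REG_BIT_START_EN);
        return 0;
}
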
+
+/*
+ * Pointers and size regs for bootstrap load and data SRAM save/restore.
+ * NOTE:  3945 pointers use bits 31:0 of DRAM address.
+ *        4965 pointers use bits 35:4 of DRAM address.
+ */
+#define BSM_DRAM_INST_PTR_REG        (BSM_BASE + 0x090)
+#define BSM_DRAM_INST_BYTECOUNT_REG  (BSM_BASE + 0x094)
+#define BSM_DRAM_DATA_PTR_REG        (BSM_BASE + 0x098)
+#define BSM_DRAM_DATA_BYTECOUNT_REG  (BSM_BASE + 0x09C)
+
+/*
+ * BSM special memory, stays powered on during power-save sleeps.
+ * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
+ */
+#define BSM_SRAM_LOWER_BOUND         (PRPH_BASE + 0x3800)
+#define BSM_SRAM_SIZE                  (1024) /* bytes */
+
+
+/* 3945 Tx scheduler registers */
+#define ALM_SCD_BASE                        (PRPH_BASE + 0x2E00)
+#define ALM_SCD_MODE_REG                    (ALM_SCD_BASE + 0x000)
+#define ALM_SCD_ARASTAT_REG                 (ALM_SCD_BASE + 0x004)
+#define ALM_SCD_TXFACT_REG                  (ALM_SCD_BASE + 0x010)
+#define ALM_SCD_TXF4MF_REG                  (ALM_SCD_BASE + 0x014)
+#define ALM_SCD_TXF5MF_REG                  (ALM_SCD_BASE + 0x020)
+#define ALM_SCD_SBYP_MODE_1_REG             (ALM_SCD_BASE + 0x02C)
+#define ALM_SCD_SBYP_MODE_2_REG             (ALM_SCD_BASE + 0x030)
+
+/**
+ * Tx Scheduler
+ *
+ * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
+ * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in
+ * host DRAM.  It steers each frame's Tx command (which contains the frame
+ * data) into one of up to 7 prioritized Tx DMA FIFO channels within the
+ * device.  A queue maps to only one (selectable by driver) Tx DMA channel,
+ * but one DMA channel may take input from several queues.
+ *
+ * Tx DMA FIFOs have dedicated purposes.  For 4965, they are used as follows
+ * (cf. default_queue_to_tx_fifo in iwl-4965.c):
+ *
+ * 0 -- EDCA BK (background) frames, lowest priority
+ * 1 -- EDCA BE (best effort) frames, normal priority
+ * 2 -- EDCA VI (video) frames, higher priority
+ * 3 -- EDCA VO (voice) and management frames, highest priority
+ * 4 -- Commands (e.g. RXON, etc.)
+ * 5 -- unused (HCCA)
+ * 6 -- unused (HCCA)
+ * 7 -- not used by driver (device-internal only)
+ *
+ *
+ * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
+ * In addition, driver can map the remaining queues to Tx DMA/FIFO
+ * channels 0-3 to support 11n aggregation via EDCA DMA channels.
+ *
+ * The driver sets up each queue to work in one of two modes:
+ *
+ * 1)  Scheduler-Ack, in which the scheduler automatically supports a
+ *     block-ack (BA) window of up to 64 TFDs.  In this mode, each queue
+ *     contains TFDs for a unique combination of Recipient Address (RA)
+ *     and Traffic Identifier (TID), that is, traffic of a given
+ *     Quality-Of-Service (QOS) priority, destined for a single station.
+ *
+ *     In scheduler-ack mode, the scheduler keeps track of the Tx status of
+ *     each frame within the BA window, including whether it's been transmitted,
+ *     and whether it's been acknowledged by the receiving station.  The device
+ *     automatically processes block-acks received from the receiving STA,
+ *     and reschedules un-acked frames to be retransmitted (successful
+ *     Tx completion may end up being out-of-order).
+ *
+ *     The driver must maintain the queue's Byte Count table in host DRAM
+ *     (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode.
+ *     This mode does not support fragmentation.
+ *
+ * 2)  FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
+ *     The device may automatically retry Tx, but will retry only one frame
+ *     at a time, until receiving ACK from receiving station, or reaching
+ *     retry limit and giving up.
+ *
+ *     The command queue (#4/#9) must use this mode!
+ *     This mode does not require use of the Byte Count table in host DRAM.
+ *
+ * Driver controls scheduler operation via 3 means:
+ * 1)  Scheduler registers
+ * 2)  Shared scheduler database in internal 4965 SRAM
+ * 3)  Shared data in host DRAM
+ *
+ * Initialization:
+ *
+ * When loading, driver should allocate memory for:
+ * 1)  16 TFD circular buffers, each with space for (typically) 256 TFDs.
+ * 2)  16 Byte Count circular buffers in 16 KBytes contiguous memory
+ *     (1024 bytes for each queue).
+ *
+ * After receiving "Alive" response from uCode, driver must initialize
+ * the scheduler (especially for queue #4/#9, the command queue, otherwise
+ * the driver can't issue commands!):
+ */
+
+/**
+ * Max Tx window size is the max number of contiguous TFDs that the scheduler
+ * can keep track of at one time when creating block-ack chains of frames.
+ * Note that "64" matches the number of ack bits in a block-ack packet.
+ * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
+ * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
+ */
+#define SCD_WIN_SIZE                           64
+#define SCD_FRAME_LIMIT                                64
+
+/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
+#define IWL49_SCD_START_OFFSET         0xa02c00
+
+/*
+ * 4965 tells driver SRAM address for internal scheduler structs via this reg.
+ * Value is valid only after "Alive" response from uCode.
+ */
+#define IWL49_SCD_SRAM_BASE_ADDR           (IWL49_SCD_START_OFFSET + 0x0)
+
+/*
+ * Driver may need to update queue-empty bits after changing queue's
+ * write and read pointers (indexes) during (re-)initialization (i.e. when
+ * scheduler is not tracking what's happening).
+ * Bit fields:
+ * 31-16:  Write mask -- 1: update empty bit, 0: don't change empty bit
+ * 15-00:  Empty state, one for each queue -- 1: empty, 0: non-empty
+ * NOTE:  This register is not used by Linux driver.
+ */
+#define IWL49_SCD_EMPTY_BITS               (IWL49_SCD_START_OFFSET + 0x4)
+
+/*
+ * Physical base address of array of byte count (BC) circular buffers (CBs).
+ * Each Tx queue has a BC CB in host DRAM to support Scheduler-ACK mode.
+ * This register points to BC CB for queue 0, must be on 1024-byte boundary.
+ * Others are spaced by 1024 bytes.
+ * Each BC CB is 2 bytes * (256 + 64) = 640 bytes, followed by 384 bytes pad.
+ * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
+ * Bit fields:
+ * 25-00:  Byte Count CB physical address [35:10], must be 1024-byte aligned.
+ */
+#define IWL49_SCD_DRAM_BASE_ADDR           (IWL49_SCD_START_OFFSET + 0x10)
+
+/*
+ * Enables any/all Tx DMA/FIFO channels.
+ * Scheduler generates requests for only the active channels.
+ * Set this to 0xff to enable all 8 channels (normal usage).
+ * Bit fields:
+ *  7- 0:  Enable (1), disable (0), one bit for each channel 0-7
+ */
+#define IWL49_SCD_TXFACT                   (IWL49_SCD_START_OFFSET + 0x1c)
+/*
+ * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
+ * Initialized and updated by driver as new TFDs are added to queue.
+ * NOTE:  If using Block Ack, index must correspond to frame's
+ *        Start Sequence Number; index = (SSN & 0xff)
+ * NOTE:  Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
+ */
+#define IWL49_SCD_QUEUE_WRPTR(x)  (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4)
+
+/*
+ * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
+ * For FIFO mode, index indicates next frame to transmit.
+ * For Scheduler-ACK mode, index indicates first frame in Tx window.
+ * Initialized by driver, updated by scheduler.
+ */
+#define IWL49_SCD_QUEUE_RDPTR(x)  (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4)
+
+/*
+ * Select which queues work in chain mode (1) vs. not (0).
+ * Use chain mode to build chains of aggregated frames.
+ * Bit fields:
+ * 31-16:  Reserved
+ * 15-00:  Mode, one bit for each queue -- 1: Chain mode, 0: one-at-a-time
+ * NOTE:  If driver sets up a queue for chain mode, it should also set up
+ *        Scheduler-ACK mode for that queue, via SCD_QUEUE_STATUS_BITS(x).
+ */
+#define IWL49_SCD_QUEUECHAIN_SEL  (IWL49_SCD_START_OFFSET + 0xd0)
+
+/*
+ * Select which queues interrupt driver when scheduler increments
+ * a queue's read pointer (index).
+ * Bit fields:
+ * 31-16:  Reserved
+ * 15-00:  Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
+ * NOTE:  This functionality is apparently a no-op; driver relies on interrupts
+ *        from Rx queue to read Tx command responses and update Tx queues.
+ */
+#define IWL49_SCD_INTERRUPT_MASK  (IWL49_SCD_START_OFFSET + 0xe4)
+
+/*
+ * Queue search status registers.  One for each queue.
+ * Sets up queue mode and assigns queue to Tx DMA channel.
+ * Bit fields:
+ * 19-10: Write mask/enable bits for bits 0-9
+ *     9: Driver should init to "0"
+ *     8: Scheduler-ACK mode (1), non-Scheduler-ACK (i.e. FIFO) mode (0).
+ *        Driver should init to "1" for aggregation mode, or "0" otherwise.
+ *   7-6: Driver should init to "0"
+ *     5: Window Size Left; indicates whether scheduler can request
+ *        another TFD, based on window size, etc.  Driver should init
+ *        this bit to "1" for aggregation mode, or "0" for non-agg.
+ *   4-1: Tx FIFO to use (range 0-7).
+ *     0: Queue is active (1), not active (0).
+ * Other bits should be written as "0"
+ *
+ * NOTE:  If enabling Scheduler-ACK mode, chain mode should also be enabled
+ *        via SCD_QUEUECHAIN_SEL.
+ */
+#define IWL49_SCD_QUEUE_STATUS_BITS(x)\
+       (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4)
+
+/* Bit field positions */
+#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE    (0)
+#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF       (1)
+#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL       (5)
+#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK   (8)
+
+/* Write masks */
+#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN        (10)
+#define IWL49_SCD_QUEUE_STTS_REG_MSK           (0x0007FC00)
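
A hedged sketch of the bit layout described above: activating queue 'txq_id'
on Tx FIFO 'fifo' in Scheduler-ACK mode, with the write-mask bits (19-10)
enabling every field being written.  The PRPH write helper is assumed to
exist elsewhere in the driver; this is not presented as the driver's code.

static void scd_activate_queue_sketch(struct iwl_priv *priv,
                                      int txq_id, int fifo)
{
        u32 val = (1 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
                  (fifo << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
                  (1 << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
                  (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
                  IWL49_SCD_QUEUE_STTS_REG_MSK;

        iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id), val);
}
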
+
+/**
+ * 4965 internal SRAM structures for scheduler, shared with driver ...
+ *
+ * Driver should clear and initialize the following areas after receiving
+ * "Alive" response from 4965 uCode, i.e. after initial
+ * uCode load, or after a uCode load done for error recovery:
+ *
+ * SCD_CONTEXT_DATA_OFFSET (size 128 bytes)
+ * SCD_TX_STTS_BITMAP_OFFSET (size 256 bytes)
+ * SCD_TRANSLATE_TBL_OFFSET (size 32 bytes)
+ *
+ * Driver accesses SRAM via HBUS_TARG_MEM_* registers.
+ * Driver reads base address of this scheduler area from SCD_SRAM_BASE_ADDR.
+ * All OFFSET values must be added to this base address.
+ */
+
+/*
+ * Queue context.  One 8-byte entry for each of 16 queues.
+ *
+ * Driver should clear this entire area (size 0x80) to 0 after receiving
+ * "Alive" notification from uCode.  Additionally, driver should init
+ * each queue's entry as follows:
+ *
+ * LS Dword bit fields:
+ *  0-06:  Max Tx window size for Scheduler-ACK.  Driver should init to 64.
+ *
+ * MS Dword bit fields:
+ * 16-22:  Frame limit.  Driver should init to 10 (0xa).
+ *
+ * Driver should init all other bits to 0.
+ *
+ * Init must be done after driver receives "Alive" response from 4965 uCode,
+ * and when setting up queue for aggregation.
+ */
+#define IWL49_SCD_CONTEXT_DATA_OFFSET                  0x380
+#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
+                       (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
+
+#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS          (0)
+#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK          (0x0000007F)
+#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS       (16)
+#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK       (0x007F0000)
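
A sketch of the per-queue context init described above, writing the window
size into the LS dword and the frame limit into the MS dword.  It assumes an
SRAM write helper (iwl_legacy_write_targ_mem()) and that scd_base_addr was
previously read from IWL49_SCD_SRAM_BASE_ADDR; illustrative only.

static void scd_context_init_sketch(struct iwl_priv *priv,
                                    u32 scd_base_addr, int txq_id)
{
        u32 addr = scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id);

        /* LS dword: max Tx window size for Scheduler-ACK */
        iwl_legacy_write_targ_mem(priv, addr,
                (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
                IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

        /* MS dword: frame limit */
        iwl_legacy_write_targ_mem(priv, addr + sizeof(u32),
                (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
                IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
}
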
+
+/*
+ * Tx Status Bitmap
+ *
+ * Driver should clear this entire area (size 0x100) to 0 after receiving
+ * "Alive" notification from uCode.  Area is used only by device itself;
+ * no other support (besides clearing) is required from driver.
+ */
+#define IWL49_SCD_TX_STTS_BITMAP_OFFSET                0x400
+
+/*
+ * RAxTID to queue translation mapping.
+ *
+ * When a queue is in Scheduler-ACK mode, frames placed in that queue must be
+ * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
+ * one QOS priority level destined for one station (for this wireless link,
+ * not final destination).  The SCD_TRANSLATE_TABLE area provides 16 16-bit
+ * mappings, one for each of the 16 queues.  If queue is not in Scheduler-ACK
+ * mode, the device ignores the mapping value.
+ *
+ * Bit fields, for each 16-bit map:
+ * 15-9:  Reserved, set to 0
+ *  8-4:  Index into device's station table for recipient station
+ *  3-0:  Traffic ID (tid), range 0-15
+ *
+ * Driver should clear this entire area (size 32 bytes) to 0 after receiving
+ * "Alive" notification from uCode.  To update a 16-bit map value, driver
+ * must read a dword-aligned value from device SRAM, replace the 16-bit map
+ * value of interest, and write the dword value back into device SRAM.
+ */
+#define IWL49_SCD_TRANSLATE_TBL_OFFSET         0x500
+
+/* Find translation table dword to read/write for given queue */
+#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
+       ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
+
+#define IWL_SCD_TXFIFO_POS_TID                 (0)
+#define IWL_SCD_TXFIFO_POS_RA                  (4)
+#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK     (0x01FF)
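
A sketch of the read-modify-write rule described above: mapping a (station,
TID) pair to Tx queue 'txq_id' in the 16-bit translate table.  SRAM accessors
such as iwl_legacy_read_targ_mem()/iwl_legacy_write_targ_mem() are assumed to
exist elsewhere; this is not claimed to be the driver's routine.

static void scd_set_ratid_map_sketch(struct iwl_priv *priv,
                                     u32 scd_base_addr,
                                     u8 sta_id, u8 tid, int txq_id)
{
        /* 8-4: station table index, 3-0: traffic ID, per the comment above */
        u16 ra_tid = (sta_id << IWL_SCD_TXFIFO_POS_RA) |
                     (tid << IWL_SCD_TXFIFO_POS_TID);
        u32 dw_addr = scd_base_addr +
                      IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
        u32 dw = iwl_legacy_read_targ_mem(priv, dw_addr);

        /* even queues live in the low half-word, odd queues in the high */
        if (txq_id & 0x1)
                dw = (ra_tid << 16) | (dw & 0x0000FFFF);
        else
                dw = ra_tid | (dw & 0xFFFF0000);

        iwl_legacy_write_targ_mem(priv, dw_addr, dw);
}
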
+
+/*********************** END TX SCHEDULER *************************************/
+
+#endif                         /* __iwl_legacy_prph_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c
new file mode 100644 (file)
index 0000000..654cf23
--- /dev/null
@@ -0,0 +1,302 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+#include <asm/unaligned.h>
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+/************************** RX-FUNCTIONS ****************************/
+/*
+ * Rx theory of operation
+ *
+ * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
+ * each of which points to a Receive Buffer to be filled by the NIC.  These get
+ * used not only for Rx frames, but for any command response or notification
+ * from the NIC.  The driver and NIC manage the Rx buffers by means
+ * of indexes into the circular buffer.
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two index registers for managing the Rx buffers.
+ *
+ * The READ index maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ index is managed by the firmware once the card is enabled.
+ *
+ * The WRITE index maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot where the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * INDEX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ index
+ * and fire the RX interrupt.  The driver can then query the READ index and
+ * process as many packets as possible, moving the WRITE index forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
+ *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ *   to replenish the iwl->rxq->rx_free.
+ * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
+ *   iwl->rxq is replenished and the READ INDEX is updated (updating the
+ *   'processed' and 'read' driver indexes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ *   detached from the iwl->rxq.  The driver 'processed' index is updated.
+ * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
+ *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
+ *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
+ *   were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * iwl_legacy_rx_queue_alloc()   Allocates rx_free
+ * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
+ *                            iwl_rx_queue_restock
+ * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
+ *                            queue, updates firmware pointers, and updates
+ *                            the WRITE index.  If insufficient rx_free buffers
+ *                            are available, schedules iwl_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - iwl_rx()         Detach iwl_rx_mem_buffers from pool up to the
+ *                            READ INDEX, detaching the SKB from the pool.
+ *                            Moves the packet buffer from queue to rx_used.
+ *                            Calls iwl_rx_queue_restock to refill any empty
+ *                            slots.
+ * ...
+ *
+ */
+
+/**
+ * iwl_legacy_rx_queue_space - Return number of free slots available in queue.
+ */
+int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q)
+{
+       int s = q->read - q->write;
+       if (s <= 0)
+               s += RX_QUEUE_SIZE;
+       /* keep some buffer to not confuse full and empty queue */
+       s -= 2;
+       if (s < 0)
+               s = 0;
+       return s;
+}
+EXPORT_SYMBOL(iwl_legacy_rx_queue_space);
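
A worked example of the arithmetic above (illustrative; it assumes the usual
RX_QUEUE_SIZE of 256 defined elsewhere in the driver):

/* With read = 10 and write = 250:
 *   s = 10 - 250 = -240  ->  -240 + 256 = 16  ->  16 - 2 = 14 free slots.
 * The "- 2" keeps a small gap between write and read so a full queue is not
 * mistaken for an empty one, as the in-line comment notes. */
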
+
+/**
+ * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+ */
+void
+iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
+                                       struct iwl_rx_queue *q)
+{
+       unsigned long flags;
+       u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
+       u32 reg;
+
+       spin_lock_irqsave(&q->lock, flags);
+
+       if (q->need_update == 0)
+               goto exit_unlock;
+
+       /* If power-saving is in use, make sure device is awake */
+       if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+               reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
+
+               if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+                       IWL_DEBUG_INFO(priv,
+                               "Rx queue requesting wakeup,"
+                               " GP1 = 0x%x\n", reg);
+                       iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
+                               CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+                       goto exit_unlock;
+               }
+
+               q->write_actual = (q->write & ~0x7);
+               iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
+                               q->write_actual);
+
+       /* Else device is assumed to be awake */
+       } else {
+               /* Device expects a multiple of 8 */
+               q->write_actual = (q->write & ~0x7);
+               iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
+                       q->write_actual);
+       }
+
+       q->need_update = 0;
+
+ exit_unlock:
+       spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr);
+
+int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv)
+{
+       struct iwl_rx_queue *rxq = &priv->rxq;
+       struct device *dev = &priv->pci_dev->dev;
+       int i;
+
+       spin_lock_init(&rxq->lock);
+       INIT_LIST_HEAD(&rxq->rx_free);
+       INIT_LIST_HEAD(&rxq->rx_used);
+
+       /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
+       rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
+                                    GFP_KERNEL);
+       if (!rxq->bd)
+               goto err_bd;
+
+       rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
+                                         &rxq->rb_stts_dma, GFP_KERNEL);
+       if (!rxq->rb_stts)
+               goto err_rb;
+
+       /* Fill the rx_used queue with _all_ of the Rx buffers */
+       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+               list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+
+       /* Set us so that we have processed and used all buffers, but have
+        * not restocked the Rx queue with fresh buffers */
+       rxq->read = rxq->write = 0;
+       rxq->write_actual = 0;
+       rxq->free_count = 0;
+       rxq->need_update = 0;
+       return 0;
+
+err_rb:
+       dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+                         rxq->bd_dma);
+err_bd:
+       return -ENOMEM;
+}
+EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc);
+
+
+void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
+                                         struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
+
+       if (!report->state) {
+               IWL_DEBUG_11H(priv,
+                       "Spectrum Measure Notification: Start\n");
+               return;
+       }
+
+       memcpy(&priv->measure_report, report, sizeof(*report));
+       priv->measurement_status |= MEASUREMENT_READY;
+}
+EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif);
+
+void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
+                               struct iwl_rx_packet *pkt)
+{
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+       if (iwl_legacy_is_any_associated(priv)) {
+               if (priv->cfg->ops->lib->check_plcp_health) {
+                       if (!priv->cfg->ops->lib->check_plcp_health(
+                           priv, pkt)) {
+                               /*
+                                * high plcp error detected
+                                * reset Radio
+                                */
+                               iwl_legacy_force_reset(priv,
+                                                       IWL_RF_RESET, false);
+                       }
+               }
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_recover_from_statistics);
+
+/*
+ * returns non-zero if packet should be dropped
+ */
+int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
+                          struct ieee80211_hdr *hdr,
+                          u32 decrypt_res,
+                          struct ieee80211_rx_status *stats)
+{
+       u16 fc = le16_to_cpu(hdr->frame_control);
+
+       /*
+        * All contexts have the same setting here due to it being
+        * a module parameter, so OK to check any context.
+        */
+       if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
+                                               RXON_FILTER_DIS_DECRYPT_MSK)
+               return 0;
+
+       if (!(fc & IEEE80211_FCTL_PROTECTED))
+               return 0;
+
+       IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
+       switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
+       case RX_RES_STATUS_SEC_TYPE_TKIP:
+               /* The uCode got a bad phase 1 key and pushes the packet up;
+                * decryption will be done in SW. */
+               if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+                   RX_RES_STATUS_BAD_KEY_TTAK)
+                       break;
+
+       case RX_RES_STATUS_SEC_TYPE_WEP:
+               if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+                   RX_RES_STATUS_BAD_ICV_MIC) {
+                       /* bad ICV, the packet is destroyed since the
+                        * decryption is inplace, drop it */
+                       IWL_DEBUG_RX(priv, "Packet destroyed\n");
+                       return -1;
+               }
+       case RX_RES_STATUS_SEC_TYPE_CCMP:
+               if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+                   RX_RES_STATUS_DECRYPT_OK) {
+                       IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
+                       stats->flag |= RX_FLAG_DECRYPTED;
+               }
+               break;
+
+       default:
+               break;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag);
diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c
new file mode 100644 (file)
index 0000000..60f597f
--- /dev/null
@@ -0,0 +1,625 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+
+/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
+ * sending probe req.  This should be set long enough to hear probe responses
+ * from more than one AP.  */
+#define IWL_ACTIVE_DWELL_TIME_24    (30)       /* all times in msec */
+#define IWL_ACTIVE_DWELL_TIME_52    (20)
+
+#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
+#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
+
+/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
+ * Must be set longer than active dwell time.
+ * For the most reliable scan, set > AP beacon interval (typically 100msec). */
+#define IWL_PASSIVE_DWELL_TIME_24   (20)       /* all times in msec */
+#define IWL_PASSIVE_DWELL_TIME_52   (10)
+#define IWL_PASSIVE_DWELL_BASE      (100)
+#define IWL_CHANNEL_TUNE_TIME       5
+
+static int iwl_legacy_send_scan_abort(struct iwl_priv *priv)
+{
+       int ret;
+       struct iwl_rx_packet *pkt;
+       struct iwl_host_cmd cmd = {
+               .id = REPLY_SCAN_ABORT_CMD,
+               .flags = CMD_WANT_SKB,
+       };
+
+       /* Exit instantly with error when device is not ready
+        * to receive scan abort command or it does not perform
+        * hardware scan currently */
+       if (!test_bit(STATUS_READY, &priv->status) ||
+           !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
+           !test_bit(STATUS_SCAN_HW, &priv->status) ||
+           test_bit(STATUS_FW_ERROR, &priv->status) ||
+           test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return -EIO;
+
+       ret = iwl_legacy_send_cmd_sync(priv, &cmd);
+       if (ret)
+               return ret;
+
+       pkt = (struct iwl_rx_packet *)cmd.reply_page;
+       if (pkt->u.status != CAN_ABORT_STATUS) {
+               /* The scan abort will return 1 for success or
+                * 2 for "failure".  A failure condition can be
+                * due to simply not being in an active scan which
+                * can occur if we send the scan abort before
+                * the microcode has notified us that a scan is
+                * completed. */
+               IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
+               ret = -EIO;
+       }
+
+       iwl_legacy_free_pages(priv, cmd.reply_page);
+       return ret;
+}
+
+static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted)
+{
+       /* check if scan was requested from mac80211 */
+       if (priv->scan_request) {
+               IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
+               ieee80211_scan_completed(priv->hw, aborted);
+       }
+
+       priv->is_internal_short_scan = false;
+       priv->scan_vif = NULL;
+       priv->scan_request = NULL;
+}
+
+void iwl_legacy_force_scan_end(struct iwl_priv *priv)
+{
+       lockdep_assert_held(&priv->mutex);
+
+       if (!test_bit(STATUS_SCANNING, &priv->status)) {
+               IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
+               return;
+       }
+
+       IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
+       clear_bit(STATUS_SCANNING, &priv->status);
+       clear_bit(STATUS_SCAN_HW, &priv->status);
+       clear_bit(STATUS_SCAN_ABORTING, &priv->status);
+       iwl_legacy_complete_scan(priv, true);
+}
+
+static void iwl_legacy_do_scan_abort(struct iwl_priv *priv)
+{
+       int ret;
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (!test_bit(STATUS_SCANNING, &priv->status)) {
+               IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
+               return;
+       }
+
+       if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+               IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
+               return;
+       }
+
+       ret = iwl_legacy_send_scan_abort(priv);
+       if (ret) {
+               IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
+               iwl_legacy_force_scan_end(priv);
+       } else
+               IWL_DEBUG_SCAN(priv, "Successfully sent scan abort\n");
+}
+
+/**
+ * iwl_legacy_scan_cancel - Cancel any currently executing HW scan
+ */
+int iwl_legacy_scan_cancel(struct iwl_priv *priv)
+{
+       IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
+       queue_work(priv->workqueue, &priv->abort_scan);
+       return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_scan_cancel);
+
+/**
+ * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan
+ * @ms: amount of time to wait (in milliseconds) for scan to abort
+ *
+ */
+int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
+{
+       unsigned long timeout = jiffies + msecs_to_jiffies(ms);
+
+       lockdep_assert_held(&priv->mutex);
+
+       IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
+
+       iwl_legacy_do_scan_abort(priv);
+
+       while (time_before_eq(jiffies, timeout)) {
+               if (!test_bit(STATUS_SCAN_HW, &priv->status))
+                       break;
+               msleep(20);
+       }
+
+       return test_bit(STATUS_SCAN_HW, &priv->status);
+}
+EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout);
+
+/* Service response to REPLY_SCAN_CMD (0x80) */
+static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv,
+                             struct iwl_rx_mem_buffer *rxb)
+{
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_scanreq_notification *notif =
+           (struct iwl_scanreq_notification *)pkt->u.raw;
+
+       IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
+#endif
+}
+
+/* Service SCAN_START_NOTIFICATION (0x82) */
+static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv,
+                                   struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_scanstart_notification *notif =
+           (struct iwl_scanstart_notification *)pkt->u.raw;
+       priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
+       IWL_DEBUG_SCAN(priv, "Scan start: "
+                      "%d [802.11%s] "
+                      "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
+                      notif->channel,
+                      notif->band ? "bg" : "a",
+                      le32_to_cpu(notif->tsf_high),
+                      le32_to_cpu(notif->tsf_low),
+                      notif->status, notif->beacon_timer);
+}
+
+/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
+static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv,
+                                     struct iwl_rx_mem_buffer *rxb)
+{
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_scanresults_notification *notif =
+           (struct iwl_scanresults_notification *)pkt->u.raw;
+
+       IWL_DEBUG_SCAN(priv, "Scan ch.res: "
+                      "%d [802.11%s] "
+                      "(TSF: 0x%08X:%08X) - %d "
+                      "elapsed=%lu usec\n",
+                      notif->channel,
+                      notif->band ? "bg" : "a",
+                      le32_to_cpu(notif->tsf_high),
+                      le32_to_cpu(notif->tsf_low),
+                      le32_to_cpu(notif->statistics[0]),
+                      le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
+#endif
+}
+
+/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
+static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv,
+                                      struct iwl_rx_mem_buffer *rxb)
+{
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
+#endif
+
+       IWL_DEBUG_SCAN(priv,
+                       "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
+                      scan_notif->scanned_channels,
+                      scan_notif->tsf_low,
+                      scan_notif->tsf_high, scan_notif->status);
+
+       /* The HW is no longer scanning */
+       clear_bit(STATUS_SCAN_HW, &priv->status);
+
+       IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
+                      (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
+                      jiffies_to_msecs(jiffies - priv->scan_start));
+
+       queue_work(priv->workqueue, &priv->scan_completed);
+}
+
+void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv)
+{
+       /* scan handlers */
+       priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan;
+       priv->rx_handlers[SCAN_START_NOTIFICATION] =
+                                       iwl_legacy_rx_scan_start_notif;
+       priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
+                                       iwl_legacy_rx_scan_results_notif;
+       priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
+                                       iwl_legacy_rx_scan_complete_notif;
+}
+EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers);
+
+inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
+                                    enum ieee80211_band band,
+                                    u8 n_probes)
+{
+       if (band == IEEE80211_BAND_5GHZ)
+               return IWL_ACTIVE_DWELL_TIME_52 +
+                       IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
+       else
+               return IWL_ACTIVE_DWELL_TIME_24 +
+                       IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
+}
+EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time);
+
+u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
+                              enum ieee80211_band band,
+                              struct ieee80211_vif *vif)
+{
+       struct iwl_rxon_context *ctx;
+       u16 passive = (band == IEEE80211_BAND_2GHZ) ?
+           IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
+           IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
+
+       if (iwl_legacy_is_any_associated(priv)) {
+               /*
+                * If we're associated, we clamp the maximum passive
+                * dwell time to be 98% of the smallest beacon interval
+                * (minus 2 * channel tune time)
+                */
+               for_each_context(priv, ctx) {
+                       u16 value;
+
+                       if (!iwl_legacy_is_associated_ctx(ctx))
+                               continue;
+                       value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
+                       if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
+                               value = IWL_PASSIVE_DWELL_BASE;
+                       value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
+                       passive = min(value, passive);
+               }
+       }
+
+       return passive;
+}
+EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time);
+
+void iwl_legacy_init_scan_params(struct iwl_priv *priv)
+{
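+       /* Default to the highest-numbered valid TX antenna for scanning on
+        * each band, unless an antenna has already been selected. */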
+       u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
+       if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
+               priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
+       if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
+               priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
+}
+EXPORT_SYMBOL(iwl_legacy_init_scan_params);
+
+static int __must_check iwl_legacy_scan_initiate(struct iwl_priv *priv,
+                                         struct ieee80211_vif *vif,
+                                         bool internal,
+                                         enum ieee80211_band band)
+{
+       int ret;
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (WARN_ON(!priv->cfg->ops->utils->request_scan))
+               return -EOPNOTSUPP;
+
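+       /* Cancel any pending scan watchdog before starting a new scan;
+        * it is re-armed below once the scan request succeeds. */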
+       cancel_delayed_work(&priv->scan_check);
+
+       if (!iwl_legacy_is_ready_rf(priv)) {
+               IWL_WARN(priv, "Request scan called when driver not ready.\n");
+               return -EIO;
+       }
+
+       if (test_bit(STATUS_SCAN_HW, &priv->status)) {
+               IWL_DEBUG_SCAN(priv,
+                       "Multiple concurrent scan requests.\n");
+               return -EBUSY;
+       }
+
+       if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+               IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
+               return -EBUSY;
+       }
+
+       IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
+                       internal ? "internal short " : "");
+
+       set_bit(STATUS_SCANNING, &priv->status);
+       priv->is_internal_short_scan = internal;
+       priv->scan_start = jiffies;
+       priv->scan_band = band;
+
+       ret = priv->cfg->ops->utils->request_scan(priv, vif);
+       if (ret) {
+               clear_bit(STATUS_SCANNING, &priv->status);
+               priv->is_internal_short_scan = false;
+               return ret;
+       }
+
+       queue_delayed_work(priv->workqueue, &priv->scan_check,
+                          IWL_SCAN_CHECK_WATCHDOG);
+
+       return 0;
+}
+
+int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
+                   struct ieee80211_vif *vif,
+                   struct cfg80211_scan_request *req)
+{
+       struct iwl_priv *priv = hw->priv;
+       int ret;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       if (req->n_channels == 0)
+               return -EINVAL;
+
+       mutex_lock(&priv->mutex);
+
+       if (test_bit(STATUS_SCANNING, &priv->status) &&
+           !priv->is_internal_short_scan) {
+               IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
+               ret = -EAGAIN;
+               goto out_unlock;
+       }
+
+       /* mac80211 will only ask for one band at a time */
+       priv->scan_request = req;
+       priv->scan_vif = vif;
+
+       /*
+        * If an internal scan is in progress, just set
+        * up the scan_request as per above.
+        */
+       if (priv->is_internal_short_scan) {
+               IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n");
+               ret = 0;
+       } else
+               ret = iwl_legacy_scan_initiate(priv, vif, false,
+                                       req->channels[0]->band);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+
+out_unlock:
+       mutex_unlock(&priv->mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_hw_scan);
+
+/*
+ * Internal short scan; this function should only be called while associated.
+ * It will reset and tune the radio to prevent possible RF-related problems.
+ */
+void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv)
+{
+       queue_work(priv->workqueue, &priv->start_internal_scan);
+}
+
+static void iwl_legacy_bg_start_internal_scan(struct work_struct *work)
+{
+       struct iwl_priv *priv =
+               container_of(work, struct iwl_priv, start_internal_scan);
+
+       IWL_DEBUG_SCAN(priv, "Start internal scan\n");
+
+       mutex_lock(&priv->mutex);
+
+       if (priv->is_internal_short_scan == true) {
+               IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
+               goto unlock;
+       }
+
+       if (test_bit(STATUS_SCANNING, &priv->status)) {
+               IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
+               goto unlock;
+       }
+
+       if (iwl_legacy_scan_initiate(priv, NULL, true, priv->band))
+               IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
+ unlock:
+       mutex_unlock(&priv->mutex);
+}
+
+static void iwl_legacy_bg_scan_check(struct work_struct *data)
+{
+       struct iwl_priv *priv =
+           container_of(data, struct iwl_priv, scan_check.work);
+
+       IWL_DEBUG_SCAN(priv, "Scan check work\n");
+
+       /* Since we got here, the firmware has not finished the scan and
+        * is most likely in bad shape, so don't bother sending an abort
+        * command; just force scan complete to mac80211 */
+       mutex_lock(&priv->mutex);
+       iwl_legacy_force_scan_end(priv);
+       mutex_unlock(&priv->mutex);
+}
+
+/**
+ * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request
+ */
+
+u16
+iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
+                      const u8 *ta, const u8 *ies, int ie_len, int left)
+{
+       int len = 0;
+       u8 *pos = NULL;
+
+       /* Make sure there is enough space for the probe request,
+        * two mandatory IEs and the data */
+       left -= 24;
+       if (left < 0)
+               return 0;
+
+       frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+       memcpy(frame->da, iwlegacy_bcast_addr, ETH_ALEN);
+       memcpy(frame->sa, ta, ETH_ALEN);
+       memcpy(frame->bssid, iwlegacy_bcast_addr, ETH_ALEN);
+       frame->seq_ctrl = 0;
+
+       len += 24;
+
+       /* ...next IE... */
+       pos = &frame->u.probe_req.variable[0];
+
+       /* fill in our indirect SSID IE */
+       left -= 2;
+       if (left < 0)
+               return 0;
+       *pos++ = WLAN_EID_SSID;
+       *pos++ = 0;
+
+       len += 2;
+
+       if (WARN_ON(left < ie_len))
+               return len;
+
+       if (ies && ie_len) {
+               memcpy(pos, ies, ie_len);
+               len += ie_len;
+       }
+
+       return (u16)len;
+}
+EXPORT_SYMBOL(iwl_legacy_fill_probe_req);
+
+static void iwl_legacy_bg_abort_scan(struct work_struct *work)
+{
+       struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
+
+       IWL_DEBUG_SCAN(priv, "Abort scan work\n");
+
+       /* Keep the scan_check work queued in case the firmware does not
+        * report back with a scan complete notification */
+       mutex_lock(&priv->mutex);
+       iwl_legacy_scan_cancel_timeout(priv, 200);
+       mutex_unlock(&priv->mutex);
+}
+
+static void iwl_legacy_bg_scan_completed(struct work_struct *work)
+{
+       struct iwl_priv *priv =
+           container_of(work, struct iwl_priv, scan_completed);
+       bool aborted;
+
+       IWL_DEBUG_SCAN(priv, "Completed %sscan.\n",
+                      priv->is_internal_short_scan ? "internal short " : "");
+
+       cancel_delayed_work(&priv->scan_check);
+
+       mutex_lock(&priv->mutex);
+
+       aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
+       if (aborted)
+               IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
+
+       if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
+               IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
+               goto out_settings;
+       }
+
+       if (priv->is_internal_short_scan && !aborted) {
+               int err;
+
+               /* Check if mac80211 requested scan during our internal scan */
+               if (priv->scan_request == NULL)
+                       goto out_complete;
+
+               /* If so request a new scan */
+               err = iwl_legacy_scan_initiate(priv, priv->scan_vif, false,
+                                       priv->scan_request->channels[0]->band);
+               if (err) {
+                       IWL_DEBUG_SCAN(priv,
+                               "failed to initiate pending scan: %d\n", err);
+                       aborted = true;
+                       goto out_complete;
+               }
+
+               goto out;
+       }
+
+out_complete:
+       iwl_legacy_complete_scan(priv, aborted);
+
+out_settings:
+       /* Can we still talk to firmware ? */
+       if (!iwl_legacy_is_ready_rf(priv))
+               goto out;
+
+       /*
+        * We do not commit power settings while scan is pending,
+        * do it now if the settings changed.
+        */
+       iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next,
+                                                               false);
+       iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
+
+       priv->cfg->ops->utils->post_scan(priv);
+
+out:
+       mutex_unlock(&priv->mutex);
+}
+
+void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv)
+{
+       INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed);
+       INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan);
+       INIT_WORK(&priv->start_internal_scan,
+                               iwl_legacy_bg_start_internal_scan);
+       INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check);
+}
+EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work);
+
+void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv)
+{
+       cancel_work_sync(&priv->start_internal_scan);
+       cancel_work_sync(&priv->abort_scan);
+       cancel_work_sync(&priv->scan_completed);
+
+       if (cancel_delayed_work_sync(&priv->scan_check)) {
+               mutex_lock(&priv->mutex);
+               iwl_legacy_force_scan_end(priv);
+               mutex_unlock(&priv->mutex);
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
new file mode 100644 (file)
index 0000000..9f70a47
--- /dev/null
@@ -0,0 +1,92 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_spectrum_h__
+#define __iwl_legacy_spectrum_h__
+enum {                         /* ieee80211_basic_report.map */
+       IEEE80211_BASIC_MAP_BSS = (1 << 0),
+       IEEE80211_BASIC_MAP_OFDM = (1 << 1),
+       IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2),
+       IEEE80211_BASIC_MAP_RADAR = (1 << 3),
+       IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4),
+       /* Bits 5-7 are reserved */
+
+};
+struct ieee80211_basic_report {
+       u8 channel;
+       __le64 start_time;
+       __le16 duration;
+       u8 map;
+} __packed;
+
+enum {                         /* ieee80211_measurement_request.mode */
+       /* Bit 0 is reserved */
+       IEEE80211_MEASUREMENT_ENABLE = (1 << 1),
+       IEEE80211_MEASUREMENT_REQUEST = (1 << 2),
+       IEEE80211_MEASUREMENT_REPORT = (1 << 3),
+       /* Bits 4-7 are reserved */
+};
+
+enum {
+       IEEE80211_REPORT_BASIC = 0,     /* required */
+       IEEE80211_REPORT_CCA = 1,       /* optional */
+       IEEE80211_REPORT_RPI = 2,       /* optional */
+       /* 3-255 reserved */
+};
+
+struct ieee80211_measurement_params {
+       u8 channel;
+       __le64 start_time;
+       __le16 duration;
+} __packed;
+
+struct ieee80211_info_element {
+       u8 id;
+       u8 len;
+       u8 data[0];
+} __packed;
+
+struct ieee80211_measurement_request {
+       struct ieee80211_info_element ie;
+       u8 token;
+       u8 mode;
+       u8 type;
+       struct ieee80211_measurement_params params[0];
+} __packed;
+
+struct ieee80211_measurement_report {
+       struct ieee80211_info_element ie;
+       u8 token;
+       u8 mode;
+       u8 type;
+       union {
+               struct ieee80211_basic_report basic[0];
+       } u;
+} __packed;
+
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.c b/drivers/net/wireless/iwlegacy/iwl-sta.c
new file mode 100644 (file)
index 0000000..47c9da3
--- /dev/null
@@ -0,0 +1,816 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <linux/lockdep.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+
+/* priv->sta_lock must be held */
+static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
+{
+
+       if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
+               IWL_ERR(priv,
+                       "ACTIVATE a non DRIVER active station id %u addr %pM\n",
+                       sta_id, priv->stations[sta_id].sta.sta.addr);
+
+       if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
+               IWL_DEBUG_ASSOC(priv,
+                       "STA id %u addr %pM already present"
+                       " in uCode (according to driver)\n",
+                       sta_id, priv->stations[sta_id].sta.sta.addr);
+       } else {
+               priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
+               IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
+                               sta_id, priv->stations[sta_id].sta.sta.addr);
+       }
+}
+
+static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv,
+                                   struct iwl_legacy_addsta_cmd *addsta,
+                                   struct iwl_rx_packet *pkt,
+                                   bool sync)
+{
+       u8 sta_id = addsta->sta.sta_id;
+       unsigned long flags;
+       int ret = -EIO;
+
+       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+               IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
+                       pkt->hdr.flags);
+               return ret;
+       }
+
+       IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
+                      sta_id);
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+
+       switch (pkt->u.add_sta.status) {
+       case ADD_STA_SUCCESS_MSK:
+               IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
+               iwl_legacy_sta_ucode_activate(priv, sta_id);
+               ret = 0;
+               break;
+       case ADD_STA_NO_ROOM_IN_TABLE:
+               IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
+                       sta_id);
+               break;
+       case ADD_STA_NO_BLOCK_ACK_RESOURCE:
+               IWL_ERR(priv,
+                       "Adding station %d failed, no block ack resource.\n",
+                       sta_id);
+               break;
+       case ADD_STA_MODIFY_NON_EXIST_STA:
+               IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
+                       sta_id);
+               break;
+       default:
+               IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
+                               pkt->u.add_sta.status);
+               break;
+       }
+
+       IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
+                      priv->stations[sta_id].sta.mode ==
+                      STA_CONTROL_MODIFY_MSK ?  "Modified" : "Added",
+                      sta_id, priv->stations[sta_id].sta.sta.addr);
+
+       /*
+        * XXX: The MAC address in the command buffer is often changed from
+        * the original sent to the device. That is, the MAC address
+        * written to the command buffer often is not the same MAC address
+        * read from the command buffer when the command returns. This
+        * issue has not yet been resolved and this debugging is left to
+        * observe the problem.
+        */
+       IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
+                      priv->stations[sta_id].sta.mode ==
+                      STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
+                      addsta->sta.addr);
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return ret;
+}
+
+static void iwl_legacy_add_sta_callback(struct iwl_priv *priv,
+                                struct iwl_device_cmd *cmd,
+                                struct iwl_rx_packet *pkt)
+{
+       struct iwl_legacy_addsta_cmd *addsta =
+               (struct iwl_legacy_addsta_cmd *)cmd->cmd.payload;
+
+       iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false);
+
+}
+
+int iwl_legacy_send_add_sta(struct iwl_priv *priv,
+                    struct iwl_legacy_addsta_cmd *sta, u8 flags)
+{
+       struct iwl_rx_packet *pkt = NULL;
+       int ret = 0;
+       u8 data[sizeof(*sta)];
+       struct iwl_host_cmd cmd = {
+               .id = REPLY_ADD_STA,
+               .flags = flags,
+               .data = data,
+       };
+       u8 sta_id __maybe_unused = sta->sta.sta_id;
+
+       IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
+                      sta_id, sta->sta.addr, flags & CMD_ASYNC ?  "a" : "");
+
+       if (flags & CMD_ASYNC)
+               cmd.callback = iwl_legacy_add_sta_callback;
+       else {
+               cmd.flags |= CMD_WANT_SKB;
+               might_sleep();
+       }
+
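+       /* The device-specific build_addsta_hcmd hook assembles the final
+        * ADD_STA command into 'data' and returns its length. */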
+       cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
+       ret = iwl_legacy_send_cmd(priv, &cmd);
+
+       if (ret || (flags & CMD_ASYNC))
+               return ret;
+
+       if (ret == 0) {
+               pkt = (struct iwl_rx_packet *)cmd.reply_page;
+               ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true);
+       }
+       iwl_legacy_free_pages(priv, cmd.reply_page);
+
+       return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_send_add_sta);
+
+static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
+                                  struct ieee80211_sta *sta,
+                                  struct iwl_rxon_context *ctx)
+{
+       struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
+       __le32 sta_flags;
+       u8 mimo_ps_mode;
+
+       if (!sta || !sta_ht_inf->ht_supported)
+               goto done;
+
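+       /* Extract the spatial multiplexing (SM) power save field from the
+        * HT capability info. */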
+       mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
+       IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
+                       (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
+                       "static" :
+                       (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
+                       "dynamic" : "disabled");
+
+       sta_flags = priv->stations[index].sta.station_flags;
+
+       sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
+
+       switch (mimo_ps_mode) {
+       case WLAN_HT_CAP_SM_PS_STATIC:
+               sta_flags |= STA_FLG_MIMO_DIS_MSK;
+               break;
+       case WLAN_HT_CAP_SM_PS_DYNAMIC:
+               sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
+               break;
+       case WLAN_HT_CAP_SM_PS_DISABLED:
+               break;
+       default:
+               IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
+               break;
+       }
+
+       sta_flags |= cpu_to_le32(
+             (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
+
+       sta_flags |= cpu_to_le32(
+             (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
+
+       if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+               sta_flags |= STA_FLG_HT40_EN_MSK;
+       else
+               sta_flags &= ~STA_FLG_HT40_EN_MSK;
+
+       priv->stations[index].sta.station_flags = sta_flags;
+ done:
+       return;
+}
+
+/**
+ * iwl_legacy_prep_station - Prepare station information for addition
+ *
+ * should be called with sta_lock held
+ */
+u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+                   const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
+{
+       struct iwl_station_entry *station;
+       int i;
+       u8 sta_id = IWL_INVALID_STATION;
+       u16 rate;
+
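+       /* Resolve the station index: the AP and broadcast stations use fixed
+        * slots; otherwise reuse an existing entry for this address or take
+        * the first unused slot. */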
+       if (is_ap)
+               sta_id = ctx->ap_sta_id;
+       else if (is_broadcast_ether_addr(addr))
+               sta_id = ctx->bcast_sta_id;
+       else
+               for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
+                       if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
+                                               addr)) {
+                               sta_id = i;
+                               break;
+                       }
+
+                       if (!priv->stations[i].used &&
+                           sta_id == IWL_INVALID_STATION)
+                               sta_id = i;
+               }
+
+       /* No usable station slot could be determined */
+       if (unlikely(sta_id == IWL_INVALID_STATION))
+               return sta_id;
+
+       /*
+        * uCode is not able to deal with multiple requests to add a
+        * station. Keep track if one is in progress so that we do not send
+        * another.
+        */
+       if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+               IWL_DEBUG_INFO(priv,
+                               "STA %d already in process of being added.\n",
+                               sta_id);
+               return sta_id;
+       }
+
+       if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+           (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
+           !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
+               IWL_DEBUG_ASSOC(priv,
+                               "STA %d (%pM) already added, not adding again.\n",
+                               sta_id, addr);
+               return sta_id;
+       }
+
+       station = &priv->stations[sta_id];
+       station->used = IWL_STA_DRIVER_ACTIVE;
+       IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
+                       sta_id, addr);
+       priv->num_stations++;
+
+       /* Set up the REPLY_ADD_STA command to send to device */
+       memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd));
+       memcpy(station->sta.sta.addr, addr, ETH_ALEN);
+       station->sta.mode = 0;
+       station->sta.sta.sta_id = sta_id;
+       station->sta.station_flags = ctx->station_flags;
+       station->ctxid = ctx->ctxid;
+
+       if (sta) {
+               struct iwl_station_priv_common *sta_priv;
+
+               sta_priv = (void *)sta->drv_priv;
+               sta_priv->ctx = ctx;
+       }
+
+       /*
+        * OK to call unconditionally, since local stations (IBSS BSSID
+        * STA and broadcast STA) pass in a NULL sta, and mac80211
+        * doesn't allow HT IBSS.
+        */
+       iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx);
+
+       /* 3945 only */
+       rate = (priv->band == IEEE80211_BAND_5GHZ) ?
+               IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP;
+       /* Turn on both antennas for the station... */
+       station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
+
+       return sta_id;
+
+}
+EXPORT_SYMBOL_GPL(iwl_legacy_prep_station);
+
+#define STA_WAIT_TIMEOUT (HZ/2)
+
+/**
+ * iwl_legacy_add_station_common - add a station to the driver and uCode tables
+ */
+int
+iwl_legacy_add_station_common(struct iwl_priv *priv,
+                       struct iwl_rxon_context *ctx,
+                          const u8 *addr, bool is_ap,
+                          struct ieee80211_sta *sta, u8 *sta_id_r)
+{
+       unsigned long flags_spin;
+       int ret = 0;
+       u8 sta_id;
+       struct iwl_legacy_addsta_cmd sta_cmd;
+
+       *sta_id_r = 0;
+       spin_lock_irqsave(&priv->sta_lock, flags_spin);
+       sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta);
+       if (sta_id == IWL_INVALID_STATION) {
+               IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
+                       addr);
+               spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+               return -EINVAL;
+       }
+
+       /*
+        * uCode is not able to deal with multiple requests to add a
+        * station. Keep track if one is in progress so that we do not send
+        * another.
+        */
+       if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+               IWL_DEBUG_INFO(priv,
+                       "STA %d already in process of being added.\n",
+                      sta_id);
+               spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+               return -EEXIST;
+       }
+
+       if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+           (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
+               IWL_DEBUG_ASSOC(priv,
+                       "STA %d (%pM) already added, not adding again.\n",
+                       sta_id, addr);
+               spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+               return -EEXIST;
+       }
+
+       priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
+       memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+                               sizeof(struct iwl_legacy_addsta_cmd));
+       spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+
+       /* Add station to device's station table */
+       ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+       if (ret) {
+               spin_lock_irqsave(&priv->sta_lock, flags_spin);
+               IWL_ERR(priv, "Adding station %pM failed.\n",
+                       priv->stations[sta_id].sta.sta.addr);
+               priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+               priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+               spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+       }
+       *sta_id_r = sta_id;
+       return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_add_station_common);
+
+/**
+ * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station
+ *
+ * priv->sta_lock must be held
+ */
+static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
+{
+       /* uCode must be active and driver must be non-active */
+       if ((priv->stations[sta_id].used &
+            (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
+                                               IWL_STA_UCODE_ACTIVE)
+               IWL_ERR(priv, "removed non active STA %u\n", sta_id);
+
+       priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
+
+       memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
+       IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
+}
+
+static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
+                                  const u8 *addr, int sta_id,
+                                  bool temporary)
+{
+       struct iwl_rx_packet *pkt;
+       int ret;
+
+       unsigned long flags_spin;
+       struct iwl_rem_sta_cmd rm_sta_cmd;
+
+       struct iwl_host_cmd cmd = {
+               .id = REPLY_REMOVE_STA,
+               .len = sizeof(struct iwl_rem_sta_cmd),
+               .flags = CMD_SYNC,
+               .data = &rm_sta_cmd,
+       };
+
+       memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
+       rm_sta_cmd.num_sta = 1;
+       memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
+
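+       /* Request the response packet so the removal status can be
+        * checked below */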
+       cmd.flags |= CMD_WANT_SKB;
+
+       ret = iwl_legacy_send_cmd(priv, &cmd);
+
+       if (ret)
+               return ret;
+
+       pkt = (struct iwl_rx_packet *)cmd.reply_page;
+       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+               IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
+                         pkt->hdr.flags);
+               ret = -EIO;
+       }
+
+       if (!ret) {
+               switch (pkt->u.rem_sta.status) {
+               case REM_STA_SUCCESS_MSK:
+                       if (!temporary) {
+                               spin_lock_irqsave(&priv->sta_lock, flags_spin);
+                               iwl_legacy_sta_ucode_deactivate(priv, sta_id);
+                               spin_unlock_irqrestore(&priv->sta_lock,
+                                                               flags_spin);
+                       }
+                       IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
+                       break;
+               default:
+                       ret = -EIO;
+                       IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
+                       break;
+               }
+       }
+       iwl_legacy_free_pages(priv, cmd.reply_page);
+
+       return ret;
+}
+
+/**
+ * iwl_legacy_remove_station - Remove driver's knowledge of station.
+ */
+int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id,
+                      const u8 *addr)
+{
+       unsigned long flags;
+
+       if (!iwl_legacy_is_ready(priv)) {
+               IWL_DEBUG_INFO(priv,
+                       "Unable to remove station %pM, device not ready.\n",
+                       addr);
+               /*
+                * It is typical for stations to be removed when we are
+                * going down. Return success since device will be down
+                * soon anyway
+                */
+               return 0;
+       }
+
+       IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d  %pM\n",
+                       sta_id, addr);
+
+       if (WARN_ON(sta_id == IWL_INVALID_STATION))
+               return -EINVAL;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+
+       if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
+               IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
+                               addr);
+               goto out_err;
+       }
+
+       if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
+               IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
+                               addr);
+               goto out_err;
+       }
+
+       if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
+               kfree(priv->stations[sta_id].lq);
+               priv->stations[sta_id].lq = NULL;
+       }
+
+       priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+
+       priv->num_stations--;
+
+       BUG_ON(priv->num_stations < 0);
+
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return iwl_legacy_send_remove_station(priv, addr, sta_id, false);
+out_err:
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+       return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(iwl_legacy_remove_station);
+
+/**
+ * iwl_legacy_clear_ucode_stations - clear ucode station table bits
+ *
+ * This function clears all the bits in the driver indicating
+ * which stations are active in the ucode. Call when something
+ * other than explicit station management would cause this in
+ * the ucode, e.g. unassociated RXON.
+ */
+void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
+                             struct iwl_rxon_context *ctx)
+{
+       int i;
+       unsigned long flags_spin;
+       bool cleared = false;
+
+       IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
+
+       spin_lock_irqsave(&priv->sta_lock, flags_spin);
+       for (i = 0; i < priv->hw_params.max_stations; i++) {
+               if (ctx && ctx->ctxid != priv->stations[i].ctxid)
+                       continue;
+
+               if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
+                       IWL_DEBUG_INFO(priv,
+                               "Clearing ucode active for station %d\n", i);
+                       priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+                       cleared = true;
+               }
+       }
+       spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+
+       if (!cleared)
+               IWL_DEBUG_INFO(priv,
+                       "No active stations found to be cleared\n");
+}
+EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations);
+
+/**
+ * iwl_legacy_restore_stations() - Restore driver known stations to device
+ *
+ * All stations considered active by the driver, but not present in the
+ * ucode, are restored.
+ *
+ * Function sleeps.
+ */
+void
+iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+       struct iwl_legacy_addsta_cmd sta_cmd;
+       struct iwl_link_quality_cmd lq;
+       unsigned long flags_spin;
+       int i;
+       bool found = false;
+       int ret;
+       bool send_lq;
+
+       if (!iwl_legacy_is_ready(priv)) {
+               IWL_DEBUG_INFO(priv,
+                       "Not ready yet, not restoring any stations.\n");
+               return;
+       }
+
+       IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
+       spin_lock_irqsave(&priv->sta_lock, flags_spin);
+       for (i = 0; i < priv->hw_params.max_stations; i++) {
+               if (ctx->ctxid != priv->stations[i].ctxid)
+                       continue;
+               if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
+                           !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
+                       IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
+                                       priv->stations[i].sta.sta.addr);
+                       priv->stations[i].sta.mode = 0;
+                       priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
+                       found = true;
+               }
+       }
+
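+       /* Second pass: send REPLY_ADD_STA (and any saved link quality
+        * command) for each station flagged above, dropping sta_lock
+        * across the synchronous commands. */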
+       for (i = 0; i < priv->hw_params.max_stations; i++) {
+               if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
+                       memcpy(&sta_cmd, &priv->stations[i].sta,
+                              sizeof(struct iwl_legacy_addsta_cmd));
+                       send_lq = false;
+                       if (priv->stations[i].lq) {
+                               memcpy(&lq, priv->stations[i].lq,
+                                      sizeof(struct iwl_link_quality_cmd));
+                               send_lq = true;
+                       }
+                       spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+                       ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+                       if (ret) {
+                               spin_lock_irqsave(&priv->sta_lock, flags_spin);
+                               IWL_ERR(priv, "Adding station %pM failed.\n",
+                                       priv->stations[i].sta.sta.addr);
+                               priv->stations[i].used &=
+                                               ~IWL_STA_DRIVER_ACTIVE;
+                               priv->stations[i].used &=
+                                               ~IWL_STA_UCODE_INPROGRESS;
+                               spin_unlock_irqrestore(&priv->sta_lock,
+                                                               flags_spin);
+                       }
+                       /*
+                        * Rate scaling has already been initialized, send
+                        * current LQ command
+                        */
+                       if (send_lq)
+                               iwl_legacy_send_lq_cmd(priv, ctx, &lq,
+                                                               CMD_SYNC, true);
+                       spin_lock_irqsave(&priv->sta_lock, flags_spin);
+                       priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
+               }
+       }
+
+       spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+       if (!found)
+               IWL_DEBUG_INFO(priv, "Restoring all known stations"
+                               " .... no stations to be restored.\n");
+       else
+               IWL_DEBUG_INFO(priv, "Restoring all known stations"
+                               " .... complete.\n");
+}
+EXPORT_SYMBOL(iwl_legacy_restore_stations);
+
+int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv)
+{
+       int i;
+
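+       /* Atomically claim the first free slot in the uCode key table */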
+       for (i = 0; i < priv->sta_key_max_num; i++)
+               if (!test_and_set_bit(i, &priv->ucode_key_table))
+                       return i;
+
+       return WEP_INVALID_OFFSET;
+}
+EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index);
+
+void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv)
+{
+       unsigned long flags;
+       int i;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       for (i = 0; i < priv->hw_params.max_stations; i++) {
+               if (!(priv->stations[i].used & IWL_STA_BCAST))
+                       continue;
+
+               priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+               priv->num_stations--;
+               BUG_ON(priv->num_stations < 0);
+               kfree(priv->stations[i].lq);
+               priv->stations[i].lq = NULL;
+       }
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+}
+EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
+                          struct iwl_link_quality_cmd *lq)
+{
+       int i;
+       IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
+       IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
+                      lq->general_params.single_stream_ant_msk,
+                      lq->general_params.dual_stream_ant_msk);
+
+       for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+               IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
+                              i, lq->rs_table[i].rate_n_flags);
+}
+#else
+static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
+                                  struct iwl_link_quality_cmd *lq)
+{
+}
+#endif
+
+/**
+ * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity
+ *
+ * It sometimes happens that we lose connectivity with the AP while an HT
+ * rate has been in use; mac80211 will then tell us that the current channel
+ * is no longer HT before removing the station. In such a scenario the RXON
+ * flags will be updated to indicate we are not communicating HT anymore,
+ * but the LQ command may still contain HT rates. Test for this to prevent
+ * the driver from sending an LQ command between the time the RXON flags are
+ * updated and the time the LQ command is updated.
+ */
+static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
+                             struct iwl_rxon_context *ctx,
+                             struct iwl_link_quality_cmd *lq)
+{
+       int i;
+
+       if (ctx->ht.enabled)
+               return true;
+
+       IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
+                      ctx->active.channel);
+       for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+               if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
+                                               RATE_MCS_HT_MSK) {
+                       IWL_DEBUG_INFO(priv,
+                                      "index %d of LQ expects HT channel\n",
+                                      i);
+                       return false;
+               }
+       }
+       return true;
+}
+
+/**
+ * iwl_legacy_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization right
+ *        after station has been added.
+ *
+ * The link quality command is sent as the last step of station creation.
+ * This is the special case in which init is set: once the command completes,
+ * we clear the state indicating that station creation is in progress.
+ */
+int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+                   struct iwl_link_quality_cmd *lq, u8 flags, bool init)
+{
+       int ret = 0;
+       unsigned long flags_spin;
+
+       struct iwl_host_cmd cmd = {
+               .id = REPLY_TX_LINK_QUALITY_CMD,
+               .len = sizeof(struct iwl_link_quality_cmd),
+               .flags = flags,
+               .data = lq,
+       };
+
+       if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
+               return -EINVAL;
+
+
+       spin_lock_irqsave(&priv->sta_lock, flags_spin);
+       if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
+               spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+               return -EINVAL;
+       }
+       spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+
+       iwl_legacy_dump_lq_cmd(priv, lq);
+       BUG_ON(init && (cmd.flags & CMD_ASYNC));
+
+       if (iwl_legacy_is_lq_table_valid(priv, ctx, lq))
+               ret = iwl_legacy_send_cmd(priv, &cmd);
+       else
+               ret = -EINVAL;
+
+       if (cmd.flags & CMD_ASYNC)
+               return ret;
+
+       if (init) {
+               IWL_DEBUG_INFO(priv, "init LQ command complete,"
+                               " clearing sta addition status for sta %d\n",
+                              lq->sta_id);
+               spin_lock_irqsave(&priv->sta_lock, flags_spin);
+               priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+               spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+       }
+       return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_send_lq_cmd);
+
+int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
+                      struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
+       int ret;
+
+       IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
+                       sta->addr);
+       mutex_lock(&priv->mutex);
+       IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
+                       sta->addr);
+       ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr);
+       if (ret)
+               IWL_ERR(priv, "Error removing station %pM\n",
+                       sta->addr);
+       mutex_unlock(&priv->mutex);
+       return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.h b/drivers/net/wireless/iwlegacy/iwl-sta.h
new file mode 100644 (file)
index 0000000..67bd75f
--- /dev/null
@@ -0,0 +1,148 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#ifndef __iwl_legacy_sta_h__
+#define __iwl_legacy_sta_h__
+
+#include "iwl-dev.h"
+
+#define HW_KEY_DYNAMIC 0
+#define HW_KEY_DEFAULT 1
+
+#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
+#define IWL_STA_UCODE_ACTIVE  BIT(1) /* ucode entry is active */
+#define IWL_STA_UCODE_INPROGRESS  BIT(2) /* ucode entry is in process of
+                                           being activated */
+#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
+                               (this is for the IBSS BSSID stations) */
+#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
+
+
+void iwl_legacy_restore_stations(struct iwl_priv *priv,
+                               struct iwl_rxon_context *ctx);
+void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
+                             struct iwl_rxon_context *ctx);
+void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv);
+int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv);
+int iwl_legacy_send_add_sta(struct iwl_priv *priv,
+                       struct iwl_legacy_addsta_cmd *sta, u8 flags);
+int iwl_legacy_add_station_common(struct iwl_priv *priv,
+                       struct iwl_rxon_context *ctx,
+                       const u8 *addr, bool is_ap,
+                       struct ieee80211_sta *sta, u8 *sta_id_r);
+int iwl_legacy_remove_station(struct iwl_priv *priv,
+                       const u8 sta_id,
+                       const u8 *addr);
+int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
+                       struct ieee80211_vif *vif,
+                       struct ieee80211_sta *sta);
+
+u8 iwl_legacy_prep_station(struct iwl_priv *priv,
+                       struct iwl_rxon_context *ctx,
+                       const u8 *addr, bool is_ap,
+                       struct ieee80211_sta *sta);
+
+int iwl_legacy_send_lq_cmd(struct iwl_priv *priv,
+                       struct iwl_rxon_context *ctx,
+                       struct iwl_link_quality_cmd *lq,
+                       u8 flags, bool init);
+
+/**
+ * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver
+ * @priv: iwl priv struct
+ *
+ * This is called during iwl_down() to make sure that in the case
+ * we're coming there from a hardware restart mac80211 will be
+ * able to reconfigure stations -- if we're getting there in the
+ * normal down flow then the stations will already be cleared.
+ */
+static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv)
+{
+       unsigned long flags;
+       struct iwl_rxon_context *ctx;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       memset(priv->stations, 0, sizeof(priv->stations));
+       priv->num_stations = 0;
+
+       priv->ucode_key_table = 0;
+
+       for_each_context(priv, ctx) {
+               /*
+                * Remove all key information that is not stored as part
+                * of station information since mac80211 may not have had
+                * a chance to remove all the keys. When device is
+                * reconfigured by mac80211 after an error all keys will
+                * be reconfigured.
+                */
+               memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
+               ctx->key_mapping_keys = 0;
+       }
+
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+}
+
+static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta)
+{
+       if (WARN_ON(!sta))
+               return IWL_INVALID_STATION;
+
+       return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
+}
+
+/**
+ * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta
+ * @priv: iwl priv
+ * @context: the current context
+ * @sta: mac80211 station
+ *
+ * In certain circumstances mac80211 passes a station pointer
+ * that may be %NULL, for example during TX or key setup. In
+ * that case, we need to use the broadcast station, so this
+ * inline wraps that pattern.
+ */
+static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv,
+                                         struct iwl_rxon_context *context,
+                                         struct ieee80211_sta *sta)
+{
+       int sta_id;
+
+       if (!sta)
+               return context->bcast_sta_id;
+
+       sta_id = iwl_legacy_sta_id(sta);
+
+       /*
+        * mac80211 should not be passing a partially
+        * initialised station!
+        */
+       WARN_ON(sta_id == IWL_INVALID_STATION);
+
+       return sta_id;
+}
+#endif /* __iwl_legacy_sta_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
new file mode 100644 (file)
index 0000000..a227773
--- /dev/null
@@ -0,0 +1,660 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+
+/**
+ * iwl_legacy_txq_update_write_ptr - Send new write index to hardware
+ */
+void
+iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+       u32 reg = 0;
+       int txq_id = txq->q.id;
+
+       if (txq->need_update == 0)
+               return;
+
+       /* if we're trying to save power */
+       if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+               /* wake up nic if it's powered down ...
+                * uCode will wake up, and interrupt us again, so next
+                * time we'll skip this part. */
+               reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
+
+               if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+                       IWL_DEBUG_INFO(priv,
+                                       "Tx queue %d requesting wakeup,"
+                                       " GP1 = 0x%x\n", txq_id, reg);
+                       iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
+                                       CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+                       return;
+               }
+
+               iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
+                               txq->q.write_ptr | (txq_id << 8));
+
+               /*
+                * else not in power-save mode,
+                * uCode will never sleep when we're
+                * trying to tx (during RFKILL, we're not trying to tx).
+                */
+       } else
+               iwl_write32(priv, HBUS_TARG_WRPTR,
+                           txq->q.write_ptr | (txq_id << 8));
+       txq->need_update = 0;
+}
+EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr);
+
+/**
+ * iwl_legacy_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
+ */
+void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
+{
+       struct iwl_tx_queue *txq = &priv->txq[txq_id];
+       struct iwl_queue *q = &txq->q;
+
+       if (q->n_bd == 0)
+               return;
+
+       while (q->write_ptr != q->read_ptr) {
+               priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+               q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_tx_queue_unmap);
+
+/**
+ * iwl_legacy_tx_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id)
+{
+       struct iwl_tx_queue *txq = &priv->txq[txq_id];
+       struct device *dev = &priv->pci_dev->dev;
+       int i;
+
+       iwl_legacy_tx_queue_unmap(priv, txq_id);
+
+       /* De-alloc array of command/tx buffers */
+       for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
+               kfree(txq->cmd[i]);
+
+       /* De-alloc circular buffer of TFDs */
+       if (txq->q.n_bd)
+               dma_free_coherent(dev, priv->hw_params.tfd_size *
+                                 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+
+       /* De-alloc array of per-TFD driver data */
+       kfree(txq->txb);
+       txq->txb = NULL;
+
+       /* deallocate arrays */
+       kfree(txq->cmd);
+       kfree(txq->meta);
+       txq->cmd = NULL;
+       txq->meta = NULL;
+
+       /* 0-fill queue descriptor structure */
+       memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(iwl_legacy_tx_queue_free);
+
+/**
+ * iwl_legacy_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
+ */
+void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv)
+{
+       struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+       struct iwl_queue *q = &txq->q;
+       bool huge = false;
+       int i;
+
+       if (q->n_bd == 0)
+               return;
+
+       while (q->read_ptr != q->write_ptr) {
+               /* we have no way to tell if it is a huge cmd ATM */
+               i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0);
+
+               if (txq->meta[i].flags & CMD_SIZE_HUGE)
+                       huge = true;
+               else
+                       pci_unmap_single(priv->pci_dev,
+                                        dma_unmap_addr(&txq->meta[i], mapping),
+                                        dma_unmap_len(&txq->meta[i], len),
+                                        PCI_DMA_BIDIRECTIONAL);
+
+               q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
+       }
+
+       if (huge) {
+               i = q->n_window;
+               pci_unmap_single(priv->pci_dev,
+                                dma_unmap_addr(&txq->meta[i], mapping),
+                                dma_unmap_len(&txq->meta[i], len),
+                                PCI_DMA_BIDIRECTIONAL);
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap);
+
+/**
+ * iwl_legacy_cmd_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void iwl_legacy_cmd_queue_free(struct iwl_priv *priv)
+{
+       struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+       struct device *dev = &priv->pci_dev->dev;
+       int i;
+
+       iwl_legacy_cmd_queue_unmap(priv);
+
+       /* De-alloc array of command/tx buffers */
+       for (i = 0; i <= TFD_CMD_SLOTS; i++)
+               kfree(txq->cmd[i]);
+
+       /* De-alloc circular buffer of TFDs */
+       if (txq->q.n_bd)
+               dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
+                                 txq->tfds, txq->q.dma_addr);
+
+       /* deallocate arrays */
+       kfree(txq->cmd);
+       kfree(txq->meta);
+       txq->cmd = NULL;
+       txq->meta = NULL;
+
+       /* 0-fill queue descriptor structure */
+       memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(iwl_legacy_cmd_queue_free);
+
+/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
+ * of buffer descriptors, each of which points to one or more data buffers for
+ * the device to read from or fill.  Driver and device exchange status of each
+ * queue via "read" and "write" pointers.  Driver keeps a minimum of 2 empty
+ * entries in each circular buffer, to protect against confusing empty and full
+ * queue states.
+ *
+ * The device reads or writes the data in the queues via the device's several
+ * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
+ *
+ * For Tx queues, there are low mark and high mark limits.  If, after queuing
+ * a packet for Tx, the free space becomes < low mark, the Tx queue is
+ * stopped.  When reclaiming packets (on the 'tx done' IRQ), if the free space
+ * becomes > high mark, the Tx queue is resumed.
+ *
+ * See more detailed info in iwl-4965-hw.h.
+ ***************************************************/
+
+int iwl_legacy_queue_space(const struct iwl_queue *q)
+{
+       int s = q->read_ptr - q->write_ptr;
+
+       if (q->read_ptr > q->write_ptr)
+               s -= q->n_bd;
+
+       if (s <= 0)
+               s += q->n_window;
+       /* keep some reserve to not confuse empty and full situations */
+       s -= 2;
+       if (s < 0)
+               s = 0;
+       return s;
+}
+EXPORT_SYMBOL(iwl_legacy_queue_space);
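+
+/*
+ * Worked example (assuming, purely for illustration, n_bd == n_window == 256):
+ * with read_ptr == 10 and write_ptr == 250, 240 entries are in use, so
+ * s = (10 - 250) + 256 = 16; after subtracting the 2-entry reserve this
+ * function reports 14 free slots.
+ */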
+
+
+/**
+ * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes
+ */
+static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
+                         int count, int slots_num, u32 id)
+{
+       q->n_bd = count;
+       q->n_window = slots_num;
+       q->id = id;
+
+       /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap
+        * and iwl_legacy_queue_dec_wrap are broken. */
+       BUG_ON(!is_power_of_2(count));
+
+       /* slots_num must be power-of-two size, otherwise
+        * iwl_legacy_get_cmd_index is broken. */
+       BUG_ON(!is_power_of_2(slots_num));
+
+       q->low_mark = q->n_window / 4;
+       if (q->low_mark < 4)
+               q->low_mark = 4;
+
+       q->high_mark = q->n_window / 8;
+       if (q->high_mark < 2)
+               q->high_mark = 2;
+
+       q->write_ptr = q->read_ptr = 0;
+
+       return 0;
+}
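+
+/*
+ * Example of the resulting marks (assuming slots_num == 64 for illustration):
+ * low_mark = 64 / 4 = 16 and high_mark = 64 / 8 = 8, both above their
+ * respective minimums of 4 and 2.
+ */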
+
+/**
+ * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
+ */
+static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv,
+                             struct iwl_tx_queue *txq, u32 id)
+{
+       struct device *dev = &priv->pci_dev->dev;
+       size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
+
+       /* Driver private data, only for Tx (not command) queues,
+        * not shared with device. */
+       if (id != priv->cmd_queue) {
+               txq->txb = kzalloc(sizeof(txq->txb[0]) *
+                                  TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
+               if (!txq->txb) {
+                       IWL_ERR(priv, "kzalloc for auxiliary BD "
+                                 "structures failed\n");
+                       goto error;
+               }
+       } else {
+               txq->txb = NULL;
+       }
+
+       /* Circular buffer of transmit frame descriptors (TFDs),
+        * shared with device */
+       txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
+                                      GFP_KERNEL);
+       if (!txq->tfds) {
+               IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
+               goto error;
+       }
+       txq->q.id = id;
+
+       return 0;
+
+ error:
+       kfree(txq->txb);
+       txq->txb = NULL;
+
+       return -ENOMEM;
+}
+
+/**
+ * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue
+ */
+int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+                     int slots_num, u32 txq_id)
+{
+       int i, len;
+       int ret;
+       int actual_slots = slots_num;
+
+       /*
+        * Alloc buffer array for commands (Tx or other types of commands).
+        * For the command queue (#4/#9), allocate command space plus one big
+        * command for scan, since the scan command is very large; the system
+        * will not have two scans at the same time, so only one is needed.
+        * For normal Tx queues (all other queues), no super-size command
+        * space is needed.
+        */
+       if (txq_id == priv->cmd_queue)
+               actual_slots++;
+
+       txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
+                           GFP_KERNEL);
+       txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
+                          GFP_KERNEL);
+
+       if (!txq->meta || !txq->cmd)
+               goto out_free_arrays;
+
+       len = sizeof(struct iwl_device_cmd);
+       for (i = 0; i < actual_slots; i++) {
+               /* only happens for cmd queue */
+               if (i == slots_num)
+                       len = IWL_MAX_CMD_SIZE;
+
+               txq->cmd[i] = kmalloc(len, GFP_KERNEL);
+               if (!txq->cmd[i])
+                       goto err;
+       }
+
+       /* Alloc driver data array and TFD circular buffer */
+       ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id);
+       if (ret)
+               goto err;
+
+       txq->need_update = 0;
+
+       /*
+        * For the default queues 0-3, set up the swq_id
+        * already -- all others need to get one later
+        * (if they need one at all).
+        */
+       if (txq_id < 4)
+               iwl_legacy_set_swq_id(txq, txq_id, txq_id);
+
+       /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+        * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. */
+       BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+
+       /* Initialize queue's high/low-water marks, and head/tail indexes */
+       iwl_legacy_queue_init(priv, &txq->q,
+                               TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+       /* Tell device where to find queue */
+       priv->cfg->ops->lib->txq_init(priv, txq);
+
+       return 0;
+err:
+       for (i = 0; i < actual_slots; i++)
+               kfree(txq->cmd[i]);
+out_free_arrays:
+       kfree(txq->meta);
+       kfree(txq->cmd);
+
+       return -ENOMEM;
+}
+EXPORT_SYMBOL(iwl_legacy_tx_queue_init);
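+
+/*
+ * Illustrative call only (the actual queue setup lives in the per-device
+ * code): the command queue would be initialized with TFD_CMD_SLOTS and the
+ * data queues with TFD_TX_CMD_SLOTS, e.g.
+ *
+ *     iwl_legacy_tx_queue_init(priv, &priv->txq[priv->cmd_queue],
+ *                              TFD_CMD_SLOTS, priv->cmd_queue);
+ */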
+
+void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+                       int slots_num, u32 txq_id)
+{
+       int actual_slots = slots_num;
+
+       if (txq_id == priv->cmd_queue)
+               actual_slots++;
+
+       memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
+
+       txq->need_update = 0;
+
+       /* Initialize queue's high/low-water marks, and head/tail indexes */
+       iwl_legacy_queue_init(priv, &txq->q,
+                               TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+       /* Tell device where to find queue */
+       priv->cfg->ops->lib->txq_init(priv, txq);
+}
+EXPORT_SYMBOL(iwl_legacy_tx_queue_reset);
+
+/*************** HOST COMMAND QUEUE FUNCTIONS   *****/
+
+/**
+ * iwl_legacy_enqueue_hcmd - enqueue a uCode command
+ * @priv: pointer to the device private data
+ * @cmd: a pointer to the uCode command structure
+ *
+ * The function returns a value < 0 to indicate that the operation
+ * failed.  On success, it returns the index of the command in the
+ * command queue.
+ */
+int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+       struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+       struct iwl_queue *q = &txq->q;
+       struct iwl_device_cmd *out_cmd;
+       struct iwl_cmd_meta *out_meta;
+       dma_addr_t phys_addr;
+       unsigned long flags;
+       int len;
+       u32 idx;
+       u16 fix_size;
+
+       cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
+       fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
+
+       /* If any of the command structures ends up being larger than
+        * TFD_MAX_PAYLOAD_SIZE and it is sent as a 'small' command, then
+        * we will need to increase the size of the TFD entries.
+        * Also check that the command buffer does not exceed the sizes
+        * of device_cmd and max_cmd_size. */
+       BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
+              !(cmd->flags & CMD_SIZE_HUGE));
+       BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
+
+       if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) {
+               IWL_WARN(priv, "Not sending command - %s KILL\n",
+                        iwl_legacy_is_rfkill(priv) ? "RF" : "CT");
+               return -EIO;
+       }
+
+       if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+               IWL_ERR(priv, "No space in command queue\n");
+               IWL_ERR(priv, "Restarting adapter due to queue full\n");
+               queue_work(priv->workqueue, &priv->restart);
+               return -ENOSPC;
+       }
+
+       spin_lock_irqsave(&priv->hcmd_lock, flags);
+
+       /* If this is a huge cmd, mark the huge flag also on the meta.flags
+        * of the _original_ cmd. This is used for DMA mapping clean up.
+        */
+       if (cmd->flags & CMD_SIZE_HUGE) {
+               idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
+               txq->meta[idx].flags = CMD_SIZE_HUGE;
+       }
+
+       idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
+       out_cmd = txq->cmd[idx];
+       out_meta = &txq->meta[idx];
+
+       memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
+       out_meta->flags = cmd->flags;
+       if (cmd->flags & CMD_WANT_SKB)
+               out_meta->source = cmd;
+       if (cmd->flags & CMD_ASYNC)
+               out_meta->callback = cmd->callback;
+
+       out_cmd->hdr.cmd = cmd->id;
+       memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
+
+       /* At this point, the out_cmd now has all of the incoming cmd
+        * information */
+
+       out_cmd->hdr.flags = 0;
+       out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
+                       INDEX_TO_SEQ(q->write_ptr));
+       if (cmd->flags & CMD_SIZE_HUGE)
+               out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
+       len = sizeof(struct iwl_device_cmd);
+       if (idx == TFD_CMD_SLOTS)
+               len = IWL_MAX_CMD_SIZE;
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       switch (out_cmd->hdr.cmd) {
+       case REPLY_TX_LINK_QUALITY_CMD:
+       case SENSITIVITY_CMD:
+               IWL_DEBUG_HC_DUMP(priv,
+                               "Sending command %s (#%x), seq: 0x%04X, "
+                               "%d bytes at %d[%d]:%d\n",
+                               iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
+                               out_cmd->hdr.cmd,
+                               le16_to_cpu(out_cmd->hdr.sequence), fix_size,
+                               q->write_ptr, idx, priv->cmd_queue);
+               break;
+       default:
+               IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
+                               "%d bytes at %d[%d]:%d\n",
+                               iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
+                               out_cmd->hdr.cmd,
+                               le16_to_cpu(out_cmd->hdr.sequence), fix_size,
+                               q->write_ptr, idx, priv->cmd_queue);
+       }
+#endif
+       txq->need_update = 1;
+
+       if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
+               /* Set up entry in queue's byte count circular buffer */
+               priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
+
+       phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
+                                  fix_size, PCI_DMA_BIDIRECTIONAL);
+       dma_unmap_addr_set(out_meta, mapping, phys_addr);
+       dma_unmap_len_set(out_meta, len, fix_size);
+
+       trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr,
+                                               fix_size, cmd->flags);
+
+       priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+                                                  phys_addr, fix_size, 1,
+                                                  U32_PAD(cmd->len));
+
+       /* Increment and update queue's write index */
+       q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
+       iwl_legacy_txq_update_write_ptr(priv, txq);
+
+       spin_unlock_irqrestore(&priv->hcmd_lock, flags);
+       return idx;
+}
+
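+/*
+ * Callers normally do not invoke iwl_legacy_enqueue_hcmd() directly; they
+ * fill a struct iwl_host_cmd and go through the send helpers, roughly:
+ *
+ *     struct iwl_host_cmd cmd = {
+ *             .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
+ *             .data = (void *)&spectrum,
+ *             .flags = CMD_WANT_SKB,
+ *     };
+ *     cmd.len = sizeof(spectrum);
+ *     rc = iwl_legacy_send_cmd_sync(priv, &cmd);
+ *
+ * (a sketch based on the spectrum-measurement path in iwl3945-base.c)
+ */
+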
+/**
+ * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
+ *
+ * When the FW advances the 'R' index, all entries between the old and new 'R'
+ * index need to be reclaimed.  As a result, some free space becomes
+ * available.  If there is enough free space (> low mark), wake the stack
+ * that feeds us.
+ */
+static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
+                                  int idx, int cmd_idx)
+{
+       struct iwl_tx_queue *txq = &priv->txq[txq_id];
+       struct iwl_queue *q = &txq->q;
+       int nfreed = 0;
+
+       if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) {
+               IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
+                         "is out of range [0-%d] %d %d.\n", txq_id,
+                         idx, q->n_bd, q->write_ptr, q->read_ptr);
+               return;
+       }
+
+       for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+            q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+               if (nfreed++ > 0) {
+                       IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
+                                       q->write_ptr, q->read_ptr);
+                       queue_work(priv->workqueue, &priv->restart);
+               }
+
+       }
+}
+
+/**
+ * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
+ * @rxb: Rx buffer to reclaim
+ *
+ * If an Rx buffer has an async callback associated with it, the callback
+ * will be executed.  The attached skb (if present) will only be freed
+ * if the callback returns 1.
+ */
+void
+iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+       int txq_id = SEQ_TO_QUEUE(sequence);
+       int index = SEQ_TO_INDEX(sequence);
+       int cmd_index;
+       bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
+       struct iwl_device_cmd *cmd;
+       struct iwl_cmd_meta *meta;
+       struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+
+       /* If a Tx command is being handled and it isn't in the actual
+        * command queue, then a command routing bug has been introduced
+        * in the queue management code. */
+       if (WARN(txq_id != priv->cmd_queue,
+                "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
+                 txq_id, priv->cmd_queue, sequence,
+                 priv->txq[priv->cmd_queue].q.read_ptr,
+                 priv->txq[priv->cmd_queue].q.write_ptr)) {
+               iwl_print_hex_error(priv, pkt, 32);
+               return;
+       }
+
+       /* If this is a huge cmd, clear the huge flag on the meta.flags
+        * of the _original_ cmd, so that iwl_legacy_cmd_queue_free won't unmap
+        * the DMA buffer for the scan (huge) command.
+        */
+       if (huge) {
+               cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, 0);
+               txq->meta[cmd_index].flags = 0;
+       }
+       cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
+       cmd = txq->cmd[cmd_index];
+       meta = &txq->meta[cmd_index];
+
+       pci_unmap_single(priv->pci_dev,
+                        dma_unmap_addr(meta, mapping),
+                        dma_unmap_len(meta, len),
+                        PCI_DMA_BIDIRECTIONAL);
+
+       /* Input error checking is done when commands are added to queue. */
+       if (meta->flags & CMD_WANT_SKB) {
+               meta->source->reply_page = (unsigned long)rxb_addr(rxb);
+               rxb->page = NULL;
+       } else if (meta->callback)
+               meta->callback(priv, cmd, pkt);
+
+       iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
+
+       if (!(meta->flags & CMD_ASYNC)) {
+               clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+               IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
+                              iwl_legacy_get_cmd_string(cmd->hdr.cmd));
+               wake_up_interruptible(&priv->wait_command_queue);
+       }
+       meta->flags = 0;
+}
+EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete);
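+
+/*
+ * For CMD_WANT_SKB commands the reply page handed over above is consumed by
+ * the caller, which is then responsible for releasing it, roughly:
+ *
+ *     pkt = (struct iwl_rx_packet *)cmd.reply_page;
+ *     ...
+ *     iwl_legacy_free_pages(priv, cmd.reply_page);
+ *
+ * (a sketch; see iwl3945_get_measurement() in iwl3945-base.c)
+ */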
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
new file mode 100644 (file)
index 0000000..ab87e1b
--- /dev/null
@@ -0,0 +1,4293 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/ieee80211_radiotap.h>
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#define DRV_NAME       "iwl3945"
+
+#include "iwl-fh.h"
+#include "iwl-3945-fh.h"
+#include "iwl-commands.h"
+#include "iwl-sta.h"
+#include "iwl-3945.h"
+#include "iwl-core.h"
+#include "iwl-helpers.h"
+#include "iwl-dev.h"
+#include "iwl-spectrum.h"
+
+/*
+ * module name, copyright, version, etc.
+ */
+
+#define DRV_DESCRIPTION        \
+"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+/*
+ * add "s" to indicate spectrum measurement included.
+ * we add it here to be consistent with previous releases in which
+ * this was configurable.
+ */
+#define DRV_VERSION  IWLWIFI_VERSION VD "s"
+#define DRV_COPYRIGHT  "Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_AUTHOR     "<ilw@linux.intel.com>"
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+/* module parameters */
+struct iwl_mod_params iwl3945_mod_params = {
+       .sw_crypto = 1,
+       .restart_fw = 1,
+       /* the rest are 0 by default */
+};
+
+/**
+ * iwl3945_get_antenna_flags - Get antenna flags for RXON command
+ * @priv: eeprom and antenna fields are used to determine antenna flags
+ *
+ * priv->eeprom is used to determine whether the AUX/MAIN antennas are reversed.
+ * iwl3945_mod_params.antenna specifies the antenna diversity mode:
+ *
+ * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
+ * IWL_ANTENNA_MAIN      - Force MAIN antenna
+ * IWL_ANTENNA_AUX       - Force AUX antenna
+ */
+__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv)
+{
+       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
+
+       switch (iwl3945_mod_params.antenna) {
+       case IWL_ANTENNA_DIVERSITY:
+               return 0;
+
+       case IWL_ANTENNA_MAIN:
+               if (eeprom->antenna_switch_type)
+                       return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
+               return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
+
+       case IWL_ANTENNA_AUX:
+               if (eeprom->antenna_switch_type)
+                       return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
+               return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
+       }
+
+       /* bad antenna selector value */
+       IWL_ERR(priv, "Bad antenna selector value (0x%x)\n",
+               iwl3945_mod_params.antenna);
+
+       return 0;               /* "diversity" is default if error */
+}
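+
+/*
+ * Example: with iwl3945_mod_params.antenna == IWL_ANTENNA_MAIN on a board
+ * whose EEPROM reports a reversed antenna switch, diversity is disabled and
+ * antenna B is forced, i.e. RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK.
+ */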
+
+static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
+                                  struct ieee80211_key_conf *keyconf,
+                                  u8 sta_id)
+{
+       unsigned long flags;
+       __le16 key_flags = 0;
+       int ret;
+
+       key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
+       key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+
+       if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
+               key_flags |= STA_KEY_MULTICAST_MSK;
+
+       keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+       keyconf->hw_key_idx = keyconf->keyidx;
+       key_flags &= ~STA_KEY_FLG_INVALID;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+       priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+       memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
+              keyconf->keylen);
+
+       memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
+              keyconf->keylen);
+
+       if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
+                       == STA_KEY_FLG_NO_ENC)
+               priv->stations[sta_id].sta.key.key_offset =
+                                iwl_legacy_get_free_ucode_key_index(priv);
+       /* else, we are overriding an existing key => no need to allocate room
+        * in uCode. */
+
+       WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+               "no space for a new key");
+
+       priv->stations[sta_id].sta.key.key_flags = key_flags;
+       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+       IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
+
+       ret = iwl_legacy_send_add_sta(priv,
+                               &priv->stations[sta_id].sta, CMD_ASYNC);
+
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return ret;
+}
+
+static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv,
+                                 struct ieee80211_key_conf *keyconf,
+                                 u8 sta_id)
+{
+       return -EOPNOTSUPP;
+}
+
+static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
+                                 struct ieee80211_key_conf *keyconf,
+                                 u8 sta_id)
+{
+       return -EOPNOTSUPP;
+}
+
+static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
+{
+       unsigned long flags;
+       struct iwl_legacy_addsta_cmd sta_cmd;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
+       memset(&priv->stations[sta_id].sta.key, 0,
+               sizeof(struct iwl4965_keyinfo));
+       priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
+       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+       memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
+       return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
+                       struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+       int ret = 0;
+
+       keyconf->hw_key_idx = HW_KEY_DYNAMIC;
+
+       switch (keyconf->cipher) {
+       case WLAN_CIPHER_SUITE_CCMP:
+               ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
+               break;
+       case WLAN_CIPHER_SUITE_TKIP:
+               ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
+               break;
+       case WLAN_CIPHER_SUITE_WEP40:
+       case WLAN_CIPHER_SUITE_WEP104:
+               ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id);
+               break;
+       default:
+               IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__,
+                       keyconf->cipher);
+               ret = -EINVAL;
+       }
+
+       IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
+                     keyconf->cipher, keyconf->keylen, keyconf->keyidx,
+                     sta_id, ret);
+
+       return ret;
+}
+
+static int iwl3945_remove_static_key(struct iwl_priv *priv)
+{
+       int ret = -EOPNOTSUPP;
+
+       return ret;
+}
+
+static int iwl3945_set_static_key(struct iwl_priv *priv,
+                               struct ieee80211_key_conf *key)
+{
+       if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+           key->cipher == WLAN_CIPHER_SUITE_WEP104)
+               return -EOPNOTSUPP;
+
+       IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher);
+       return -EINVAL;
+}
+
+static void iwl3945_clear_free_frames(struct iwl_priv *priv)
+{
+       struct list_head *element;
+
+       IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
+                      priv->frames_count);
+
+       while (!list_empty(&priv->free_frames)) {
+               element = priv->free_frames.next;
+               list_del(element);
+               kfree(list_entry(element, struct iwl3945_frame, list));
+               priv->frames_count--;
+       }
+
+       if (priv->frames_count) {
+               IWL_WARN(priv, "%d frames still in use.  Did we lose one?\n",
+                           priv->frames_count);
+               priv->frames_count = 0;
+       }
+}
+
+static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
+{
+       struct iwl3945_frame *frame;
+       struct list_head *element;
+       if (list_empty(&priv->free_frames)) {
+               frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+               if (!frame) {
+                       IWL_ERR(priv, "Could not allocate frame!\n");
+                       return NULL;
+               }
+
+               priv->frames_count++;
+               return frame;
+       }
+
+       element = priv->free_frames.next;
+       list_del(element);
+       return list_entry(element, struct iwl3945_frame, list);
+}
+
+static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
+{
+       memset(frame, 0, sizeof(*frame));
+       list_add(&frame->list, &priv->free_frames);
+}
+
+unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
+                               struct ieee80211_hdr *hdr,
+                               int left)
+{
+
+       if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
+               return 0;
+
+       if (priv->beacon_skb->len > left)
+               return 0;
+
+       memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
+
+       return priv->beacon_skb->len;
+}
+
+static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
+{
+       struct iwl3945_frame *frame;
+       unsigned int frame_size;
+       int rc;
+       u8 rate;
+
+       frame = iwl3945_get_free_frame(priv);
+
+       if (!frame) {
+               IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
+                         "command.\n");
+               return -ENOMEM;
+       }
+
+       rate = iwl_legacy_get_lowest_plcp(priv,
+                               &priv->contexts[IWL_RXON_CTX_BSS]);
+
+       frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
+
+       rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
+                             &frame->u.cmd[0]);
+
+       iwl3945_free_frame(priv, frame);
+
+       return rc;
+}
+
+static void iwl3945_unset_hw_params(struct iwl_priv *priv)
+{
+       if (priv->_3945.shared_virt)
+               dma_free_coherent(&priv->pci_dev->dev,
+                                 sizeof(struct iwl3945_shared),
+                                 priv->_3945.shared_virt,
+                                 priv->_3945.shared_phys);
+}
+
+static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
+                                     struct ieee80211_tx_info *info,
+                                     struct iwl_device_cmd *cmd,
+                                     struct sk_buff *skb_frag,
+                                     int sta_id)
+{
+       struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
+       struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
+
+       tx_cmd->sec_ctl = 0;
+
+       switch (keyinfo->cipher) {
+       case WLAN_CIPHER_SUITE_CCMP:
+               tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+               memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
+               IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
+               break;
+
+       case WLAN_CIPHER_SUITE_TKIP:
+               break;
+
+       case WLAN_CIPHER_SUITE_WEP104:
+               tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+               /* fall through */
+       case WLAN_CIPHER_SUITE_WEP40:
+               tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
+                   (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
+
+               memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
+
+               IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
+                            "with key %d\n", info->control.hw_key->hw_key_idx);
+               break;
+
+       default:
+               IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher);
+               break;
+       }
+}
+
+/*
+ * Handle building the REPLY_TX command.
+ */
+static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
+                                 struct iwl_device_cmd *cmd,
+                                 struct ieee80211_tx_info *info,
+                                 struct ieee80211_hdr *hdr, u8 std_id)
+{
+       struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
+       __le32 tx_flags = tx_cmd->tx_flags;
+       __le16 fc = hdr->frame_control;
+
+       tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+       if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+               tx_flags |= TX_CMD_FLG_ACK_MSK;
+               if (ieee80211_is_mgmt(fc))
+                       tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+               if (ieee80211_is_probe_resp(fc) &&
+                   !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+                       tx_flags |= TX_CMD_FLG_TSF_MSK;
+       } else {
+               tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+               tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+       }
+
+       tx_cmd->sta_id = std_id;
+       if (ieee80211_has_morefrags(fc))
+               tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+       if (ieee80211_is_data_qos(fc)) {
+               u8 *qc = ieee80211_get_qos_ctl(hdr);
+               tx_cmd->tid_tspec = qc[0] & 0xf;
+               tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+       } else {
+               tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+       }
+
+       iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
+
+       tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+       if (ieee80211_is_mgmt(fc)) {
+               if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+                       tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+               else
+                       tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+       } else {
+               tx_cmd->timeout.pm_frame_timeout = 0;
+       }
+
+       tx_cmd->driver_txop = 0;
+       tx_cmd->tx_flags = tx_flags;
+       tx_cmd->next_frame_len = 0;
+}
+
+/*
+ * start REPLY_TX command process
+ */
+static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct iwl3945_tx_cmd *tx_cmd;
+       struct iwl_tx_queue *txq = NULL;
+       struct iwl_queue *q = NULL;
+       struct iwl_device_cmd *out_cmd;
+       struct iwl_cmd_meta *out_meta;
+       dma_addr_t phys_addr;
+       dma_addr_t txcmd_phys;
+       int txq_id = skb_get_queue_mapping(skb);
+       u16 len, idx, hdr_len;
+       u8 id;
+       u8 unicast;
+       u8 sta_id;
+       u8 tid = 0;
+       __le16 fc;
+       u8 wait_write_ptr = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->lock, flags);
+       if (iwl_legacy_is_rfkill(priv)) {
+               IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
+               goto drop_unlock;
+       }
+
+       if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
+               IWL_ERR(priv, "ERROR: No TX rate available.\n");
+               goto drop_unlock;
+       }
+
+       unicast = !is_multicast_ether_addr(hdr->addr1);
+       id = 0;
+
+       fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (ieee80211_is_auth(fc))
+               IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
+       else if (ieee80211_is_assoc_req(fc))
+               IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
+       else if (ieee80211_is_reassoc_req(fc))
+               IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
+#endif
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       hdr_len = ieee80211_hdrlen(fc);
+
+       /* Find index into station table for destination station */
+       sta_id = iwl_legacy_sta_id_or_broadcast(
+                       priv, &priv->contexts[IWL_RXON_CTX_BSS],
+                       info->control.sta);
+       if (sta_id == IWL_INVALID_STATION) {
+               IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
+                              hdr->addr1);
+               goto drop;
+       }
+
+       IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id);
+
+       if (ieee80211_is_data_qos(fc)) {
+               u8 *qc = ieee80211_get_qos_ctl(hdr);
+               tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+               if (unlikely(tid >= MAX_TID_COUNT))
+                       goto drop;
+       }
+
+       /* Descriptor for chosen Tx queue */
+       txq = &priv->txq[txq_id];
+       q = &txq->q;
+
+       if ((iwl_legacy_queue_space(q) < q->high_mark))
+               goto drop;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
+
+       /* Set up driver data for this TFD */
+       memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
+       txq->txb[q->write_ptr].skb = skb;
+       txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+       /* Init first empty entry in queue's array of Tx/cmd buffers */
+       out_cmd = txq->cmd[idx];
+       out_meta = &txq->meta[idx];
+       tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
+       memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+       memset(tx_cmd, 0, sizeof(*tx_cmd));
+
+       /*
+        * Set up the Tx-command (not MAC!) header.
+        * Store the chosen Tx queue and TFD index within the sequence field;
+        * after Tx, uCode's Tx response will return this value so driver can
+        * locate the frame within the tx queue and do post-tx processing.
+        */
+       out_cmd->hdr.cmd = REPLY_TX;
+       out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+                               INDEX_TO_SEQ(q->write_ptr)));
+
+       /* Copy MAC header from skb into command buffer */
+       memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+
+       if (info->control.hw_key)
+               iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
+
+       /* TODO need this for burst mode later on */
+       iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
+
+       /* set is_hcca to 0; it probably will never be implemented */
+       iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
+
+       /* Total # bytes to be transmitted */
+       len = (u16)skb->len;
+       tx_cmd->len = cpu_to_le16(len);
+
+       iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
+       iwl_legacy_update_stats(priv, true, fc, len);
+       tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
+       tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
+
+       if (!ieee80211_has_morefrags(hdr->frame_control)) {
+               txq->need_update = 1;
+       } else {
+               wait_write_ptr = 1;
+               txq->need_update = 0;
+       }
+
+       IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
+                    le16_to_cpu(out_cmd->hdr.sequence));
+       IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+       iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
+       iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
+                          ieee80211_hdrlen(fc));
+
+       /*
+        * Use the first empty entry in this queue's command buffer array
+        * to contain the Tx command and MAC header concatenated together
+        * (payload data will be in another buffer).
+        * Size of this varies, due to varying MAC header length.
+        * If end is not dword aligned, we'll have 2 extra bytes at the end
+        * of the MAC header (device reads on dword boundaries).
+        * We'll tell device about this padding later.
+        */
+       len = sizeof(struct iwl3945_tx_cmd) +
+                       sizeof(struct iwl_cmd_header) + hdr_len;
+       len = (len + 3) & ~3;
+
+       /* Physical address of this Tx command's header (not MAC header!),
+        * within command buffer array. */
+       txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
+                                   len, PCI_DMA_TODEVICE);
+       /* we do not map meta data ... so we can safely access the address to
+        * provide to the unmap command */
+       dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+       dma_unmap_len_set(out_meta, len, len);
+
+       /* Add buffer containing Tx command and MAC(!) header to TFD's
+        * first entry */
+       priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+                                                  txcmd_phys, len, 1, 0);
+
+
+       /* Set up TFD's 2nd entry to point directly to remainder of skb,
+        * if any (802.11 null frames have no payload). */
+       len = skb->len - hdr_len;
+       if (len) {
+               phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
+                                          len, PCI_DMA_TODEVICE);
+               priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+                                                          phys_addr, len,
+                                                          0, U32_PAD(len));
+       }
+
+
+       /* Tell device the write index *just past* this latest filled TFD */
+       q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
+       iwl_legacy_txq_update_write_ptr(priv, txq);
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       if ((iwl_legacy_queue_space(q) < q->high_mark)
+           && priv->mac80211_registered) {
+               if (wait_write_ptr) {
+                       spin_lock_irqsave(&priv->lock, flags);
+                       txq->need_update = 1;
+                       iwl_legacy_txq_update_write_ptr(priv, txq);
+                       spin_unlock_irqrestore(&priv->lock, flags);
+               }
+
+               iwl_legacy_stop_queue(priv, txq);
+       }
+
+       return 0;
+
+drop_unlock:
+       spin_unlock_irqrestore(&priv->lock, flags);
+drop:
+       return -1;
+}
+
+static int iwl3945_get_measurement(struct iwl_priv *priv,
+                              struct ieee80211_measurement_params *params,
+                              u8 type)
+{
+       struct iwl_spectrum_cmd spectrum;
+       struct iwl_rx_packet *pkt;
+       struct iwl_host_cmd cmd = {
+               .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
+               .data = (void *)&spectrum,
+               .flags = CMD_WANT_SKB,
+       };
+       u32 add_time = le64_to_cpu(params->start_time);
+       int rc;
+       int spectrum_resp_status;
+       int duration = le16_to_cpu(params->duration);
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+       if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
+               add_time = iwl_legacy_usecs_to_beacons(priv,
+                       le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
+                       le16_to_cpu(ctx->timing.beacon_interval));
+
+       memset(&spectrum, 0, sizeof(spectrum));
+
+       spectrum.channel_count = cpu_to_le16(1);
+       spectrum.flags =
+           RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
+       spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
+       cmd.len = sizeof(spectrum);
+       spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
+
+       if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
+               spectrum.start_time =
+                       iwl_legacy_add_beacon_time(priv,
+                               priv->_3945.last_beacon_time, add_time,
+                               le16_to_cpu(ctx->timing.beacon_interval));
+       else
+               spectrum.start_time = 0;
+
+       spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
+       spectrum.channels[0].channel = params->channel;
+       spectrum.channels[0].type = type;
+       if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
+               spectrum.flags |= RXON_FLG_BAND_24G_MSK |
+                   RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
+
+       rc = iwl_legacy_send_cmd_sync(priv, &cmd);
+       if (rc)
+               return rc;
+
+       pkt = (struct iwl_rx_packet *)cmd.reply_page;
+       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+               IWL_ERR(priv, "Bad return from REPLY_SPECTRUM_MEASUREMENT_CMD\n");
+               rc = -EIO;
+       }
+
+       spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
+       switch (spectrum_resp_status) {
+       case 0:         /* Command will be handled */
+               if (pkt->u.spectrum.id != 0xff) {
+                       IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
+                                               pkt->u.spectrum.id);
+                       priv->measurement_status &= ~MEASUREMENT_READY;
+               }
+               priv->measurement_status |= MEASUREMENT_ACTIVE;
+               rc = 0;
+               break;
+
+       case 1:         /* Command will not be handled */
+               rc = -EAGAIN;
+               break;
+       }
+
+       iwl_legacy_free_pages(priv, cmd.reply_page);
+
+       return rc;
+}
+
+static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
+                              struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_alive_resp *palive;
+       struct delayed_work *pwork;
+
+       palive = &pkt->u.alive_frame;
+
+       IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
+                      "0x%01X 0x%01X\n",
+                      palive->is_valid, palive->ver_type,
+                      palive->ver_subtype);
+
+       if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
+               IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
+               memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
+                      sizeof(struct iwl_alive_resp));
+               pwork = &priv->init_alive_start;
+       } else {
+               IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
+               memcpy(&priv->card_alive, &pkt->u.alive_frame,
+                      sizeof(struct iwl_alive_resp));
+               pwork = &priv->alive_start;
+               iwl3945_disable_events(priv);
+       }
+
+       /* We delay the ALIVE response by 5ms to
+        * give the HW RF Kill time to activate... */
+       if (palive->is_valid == UCODE_VALID_OK)
+               queue_delayed_work(priv->workqueue, pwork,
+                                  msecs_to_jiffies(5));
+       else
+               IWL_WARN(priv, "uCode did not respond OK.\n");
+}
+
+static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
+                                struct iwl_rx_mem_buffer *rxb)
+{
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+#endif
+
+       IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
+}
+
+static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
+                               struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       u8 rate = beacon->beacon_notify_hdr.rate;
+
+       IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
+               "tsf %d %d rate %d\n",
+               le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
+               beacon->beacon_notify_hdr.failure_frame,
+               le32_to_cpu(beacon->ibss_mgr_status),
+               le32_to_cpu(beacon->high_tsf),
+               le32_to_cpu(beacon->low_tsf), rate);
+#endif
+
+       priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
+                                   struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
+       unsigned long status = priv->status;
+
+       IWL_WARN(priv, "Card state received: HW:%s SW:%s\n",
+                         (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+                         (flags & SW_CARD_DISABLED) ? "Kill" : "On");
+
+       iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
+                   CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+       if (flags & HW_CARD_DISABLED)
+               set_bit(STATUS_RF_KILL_HW, &priv->status);
+       else
+               clear_bit(STATUS_RF_KILL_HW, &priv->status);
+
+
+       iwl_legacy_scan_cancel(priv);
+
+       if ((test_bit(STATUS_RF_KILL_HW, &status) !=
+            test_bit(STATUS_RF_KILL_HW, &priv->status)))
+               wiphy_rfkill_set_hw_state(priv->hw->wiphy,
+                               test_bit(STATUS_RF_KILL_HW, &priv->status));
+       else
+               wake_up_interruptible(&priv->wait_command_queue);
+}
+
+/**
+ * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks
+ *
+ * Setup the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ *
+ * This function chains into the hardware specific files for them to setup
+ * any hardware specific handlers as well.
+ */
+static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
+{
+       priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
+       priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
+       priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
+       priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
+       priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
+                       iwl_legacy_rx_spectrum_measure_notif;
+       priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
+       priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
+           iwl_legacy_rx_pm_debug_statistics_notif;
+       priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
+
+       /*
+        * The same handler is used for both the REPLY to a discrete
+        * statistics request from the host as well as for the periodic
+        * statistics notifications (after received beacons) from the uCode.
+        */
+       priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
+       priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
+
+       iwl_legacy_setup_rx_scan_handlers(priv);
+       priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
+
+       /* Set up hardware specific Rx handlers */
+       iwl3945_hw_rx_handler_setup(priv);
+}
+
+/************************** RX-FUNCTIONS ****************************/
+/*
+ * Rx theory of operation
+ *
+ * The host allocates 32 DMA target addresses and passes the host address
+ * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
+ * 0 to 31
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two index registers for managing the Rx buffers.
+ *
+ * The READ index maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ index is managed by the firmware once the card is enabled.
+ *
+ * The WRITE index maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * INDEX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ index
+ * and fire the RX interrupt.  The driver can then query the READ index and
+ * process as many packets as possible, moving the WRITE index forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
+ *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ *   to replenish the iwl->rxq->rx_free.
+ * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the
+ *   iwl->rxq is replenished and the READ INDEX is updated (updating the
+ *   'processed' and 'read' driver indexes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ *   detached from the iwl->rxq.  The driver 'processed' index is updated.
+ * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
+ *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
+ *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
+ *   were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * iwl3945_rx_replenish()     Replenishes rx_free list from rx_used, and calls
+ *                            iwl3945_rx_queue_restock
+ * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
+ *                            queue, updates firmware pointers, and updates
+ *                            the WRITE index.  If insufficient rx_free buffers
+ *                            are available, schedules iwl3945_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - iwl3945_rx()         Detach iwl_rx_mem_buffers from pool up to the
+ *                            READ INDEX, detaching the SKB from the pool.
+ *                            Moves the packet buffer from queue to rx_used.
+ *                            Calls iwl3945_rx_queue_restock to refill any empty
+ *                            slots.
+ * ...
+ *
+ */
+
+/**
+ * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
+                                         dma_addr_t dma_addr)
+{
+       return cpu_to_le32((u32)dma_addr);
+}
+
+/**
+ * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
+{
+       struct iwl_rx_queue *rxq = &priv->rxq;
+       struct list_head *element;
+       struct iwl_rx_mem_buffer *rxb;
+       unsigned long flags;
+
+       spin_lock_irqsave(&rxq->lock, flags);
+       while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+               /* Get next free Rx buffer, remove from free list */
+               element = rxq->rx_free.next;
+               rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+               list_del(element);
+
+               /* Point to Rx buffer via next RBD in circular buffer */
+               rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
+               rxq->queue[rxq->write] = rxb;
+               rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+               rxq->free_count--;
+       }
+       spin_unlock_irqrestore(&rxq->lock, flags);
+       /* If the pre-allocated buffer pool is dropping low, schedule to
+        * refill it */
+       if (rxq->free_count <= RX_LOW_WATERMARK)
+               queue_work(priv->workqueue, &priv->rx_replenish);
+
+
+       /* If we've added more space for the firmware to place data, tell it.
+        * Increment device's write pointer in multiples of 8. */
+       if ((rxq->write_actual != (rxq->write & ~0x7))
+           || (abs(rxq->write - rxq->read) > 7)) {
+               spin_lock_irqsave(&rxq->lock, flags);
+               rxq->need_update = 1;
+               spin_unlock_irqrestore(&rxq->lock, flags);
+               iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
+       }
+}
+
+/**
+ * iwl3945_rx_allocate - Move all used packets from rx_used to rx_free
+ *
+ * A page is allocated and mapped for each buffer as it moves to rx_free.
+ *
+ * The callers (iwl3945_rx_replenish and iwl3945_rx_replenish_now) then
+ * restock the Rx queue via iwl3945_rx_queue_restock.
+ * iwl3945_rx_replenish is called as a scheduled work item (except during
+ * initialization).
+ */
+static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
+{
+       struct iwl_rx_queue *rxq = &priv->rxq;
+       struct list_head *element;
+       struct iwl_rx_mem_buffer *rxb;
+       struct page *page;
+       unsigned long flags;
+       gfp_t gfp_mask = priority;
+
+       while (1) {
+               spin_lock_irqsave(&rxq->lock, flags);
+
+               if (list_empty(&rxq->rx_used)) {
+                       spin_unlock_irqrestore(&rxq->lock, flags);
+                       return;
+               }
+               spin_unlock_irqrestore(&rxq->lock, flags);
+
+               if (rxq->free_count > RX_LOW_WATERMARK)
+                       gfp_mask |= __GFP_NOWARN;
+
+               if (priv->hw_params.rx_page_order > 0)
+                       gfp_mask |= __GFP_COMP;
+
+               /* Alloc a new receive buffer */
+               page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
+               if (!page) {
+                       if (net_ratelimit())
+                               IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
+                       if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+                           net_ratelimit())
+                               IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
+                                        priority == GFP_ATOMIC ?  "GFP_ATOMIC" : "GFP_KERNEL",
+                                        rxq->free_count);
+                       /* We don't reschedule replenish work here -- we will
+                        * call the restock method and if it still needs
+                        * more buffers it will schedule replenish */
+                       break;
+               }
+
+               spin_lock_irqsave(&rxq->lock, flags);
+               if (list_empty(&rxq->rx_used)) {
+                       spin_unlock_irqrestore(&rxq->lock, flags);
+                       __free_pages(page, priv->hw_params.rx_page_order);
+                       return;
+               }
+               element = rxq->rx_used.next;
+               rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+               list_del(element);
+               spin_unlock_irqrestore(&rxq->lock, flags);
+
+               rxb->page = page;
+               /* Get DMA address of the RB page */
+               rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
+                               PAGE_SIZE << priv->hw_params.rx_page_order,
+                               PCI_DMA_FROMDEVICE);
+
+               spin_lock_irqsave(&rxq->lock, flags);
+
+               list_add_tail(&rxb->list, &rxq->rx_free);
+               rxq->free_count++;
+               priv->alloc_rxb_page++;
+
+               spin_unlock_irqrestore(&rxq->lock, flags);
+       }
+}
+
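+/* Return every Rx buffer in the pool (unmapping and freeing any page still
+ * attached) to the rx_used list and reset the read/write indexes, so the
+ * queue can be replenished and restocked from scratch. */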
+void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+       unsigned long flags;
+       int i;
+       spin_lock_irqsave(&rxq->lock, flags);
+       INIT_LIST_HEAD(&rxq->rx_free);
+       INIT_LIST_HEAD(&rxq->rx_used);
+       /* Fill the rx_used queue with _all_ of the Rx buffers */
+       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+               /* In the reset function, these buffers may still have a page
+                * attached, so we need to unmap and free any such storage */
+               if (rxq->pool[i].page != NULL) {
+                       pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+                               PAGE_SIZE << priv->hw_params.rx_page_order,
+                               PCI_DMA_FROMDEVICE);
+                       __iwl_legacy_free_pages(priv, rxq->pool[i].page);
+                       rxq->pool[i].page = NULL;
+               }
+               list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+       }
+
+       /* Set indexes so that we have processed and used all buffers, but
+        * have not restocked the Rx queue with fresh buffers */
+       rxq->read = rxq->write = 0;
+       rxq->write_actual = 0;
+       rxq->free_count = 0;
+       spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+void iwl3945_rx_replenish(void *data)
+{
+       struct iwl_priv *priv = data;
+       unsigned long flags;
+
+       iwl3945_rx_allocate(priv, GFP_KERNEL);
+
+       spin_lock_irqsave(&priv->lock, flags);
+       iwl3945_rx_queue_restock(priv);
+       spin_unlock_irqrestore(&priv->lock, flags);
+}
+
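+/* Same as iwl3945_rx_replenish(), but allocates with GFP_ATOMIC so it can
+ * be called from the Rx handling path (tasklet context), where sleeping
+ * is not allowed. */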
+static void iwl3945_rx_replenish_now(struct iwl_priv *priv)
+{
+       iwl3945_rx_allocate(priv, GFP_ATOMIC);
+
+       iwl3945_rx_queue_restock(priv);
+}
+
+
+/* Assumes that the page field of the buffers in 'pool' is kept accurate.
+ * If a page has been detached, the pool entry must have its page set to NULL.
+ * This free routine walks the list of pool entries and, where the page is
+ * non-NULL, unmaps and frees it.
+ */
+static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+       int i;
+       for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+               if (rxq->pool[i].page != NULL) {
+                       pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+                               PAGE_SIZE << priv->hw_params.rx_page_order,
+                               PCI_DMA_FROMDEVICE);
+                       __iwl_legacy_free_pages(priv, rxq->pool[i].page);
+                       rxq->pool[i].page = NULL;
+               }
+       }
+
+       dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+                         rxq->bd_dma);
+       dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
+                         rxq->rb_stts, rxq->rb_stts_dma);
+       rxq->bd = NULL;
+       rxq->rb_stts  = NULL;
+}
+
+
+/* Convert linear signal-to-noise ratio into dB */
+static u8 ratio2dB[100] = {
+/*      0   1   2   3   4   5   6   7   8   9 */
+        0,  0,  6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
+       20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
+       26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
+       29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
+       32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
+       34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
+       36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
+       37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
+       38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
+       39, 39, 39, 39, 39, 40, 40, 40, 40, 40  /* 90 - 99 */
+};
+
+/* Calculates a relative dB value from a ratio of linear
+ *   (i.e. not dB) signal levels.
+ * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
+int iwl3945_calc_db_from_ratio(int sig_ratio)
+{
+       /* 1000:1 or higher just report as 60 dB */
+       if (sig_ratio >= 1000)
+               return 60;
+
+       /* 100:1 or higher, divide by 10 and use table,
+        *   add 20 dB to make up for divide by 10 */
+       if (sig_ratio >= 100)
+               return 20 + (int)ratio2dB[sig_ratio/10];
+
+       /* We shouldn't see this */
+       if (sig_ratio < 1)
+               return 0;
+
+       /* Use table for ratios 1:1 - 99:1 */
+       return (int)ratio2dB[sig_ratio];
+}
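+
+/* Worked example of the conversion above: a 250:1 ratio takes the
+ * ">= 100" branch, giving 20 + ratio2dB[250 / 10] = 20 + 28 = 48 dB,
+ * which matches 20 * log10(250) of roughly 48. */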
+
+/**
+ * iwl3945_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the priv->rx_handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+static void iwl3945_rx_handle(struct iwl_priv *priv)
+{
+       struct iwl_rx_mem_buffer *rxb;
+       struct iwl_rx_packet *pkt;
+       struct iwl_rx_queue *rxq = &priv->rxq;
+       u32 r, i;
+       int reclaim;
+       unsigned long flags;
+       u8 fill_rx = 0;
+       u32 count = 8;
+       int total_empty = 0;
+
+       /* uCode's read index (stored in shared DRAM) indicates the last Rx
+        * buffer that the driver may process (last buffer filled by ucode). */
+       r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
+       i = rxq->read;
+
+       /* calculate how many frames need to be restocked after handling RX */
+       total_empty = r - rxq->write_actual;
+       if (total_empty < 0)
+               total_empty += RX_QUEUE_SIZE;
+
+       if (total_empty > (RX_QUEUE_SIZE / 2))
+               fill_rx = 1;
+       /* Rx interrupt, but nothing sent from uCode */
+       if (i == r)
+               IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
+
+       while (i != r) {
+               int len;
+
+               rxb = rxq->queue[i];
+
+               /* If an RXB doesn't have a Rx queue slot associated with it,
+                * then a bug has been introduced in the queue refilling
+                * routines -- catch it here */
+               BUG_ON(rxb == NULL);
+
+               rxq->queue[i] = NULL;
+
+               pci_unmap_page(priv->pci_dev, rxb->page_dma,
+                              PAGE_SIZE << priv->hw_params.rx_page_order,
+                              PCI_DMA_FROMDEVICE);
+               pkt = rxb_addr(rxb);
+
+               len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+               len += sizeof(u32); /* account for status word */
+               trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
+
+               /* Reclaim a command buffer only if this packet is a response
+                *   to a (driver-originated) command.
+                * If the packet (e.g. Rx frame) originated from uCode,
+                *   there is no command buffer to reclaim.
+                * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+                *   but apparently a few don't get set; catch them here. */
+               reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+                       (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
+                       (pkt->hdr.cmd != REPLY_TX);
+
+               /* Based on type of command response or notification,
+                *   handle those that need handling via function in
+                *   rx_handlers table.  See iwl3945_setup_rx_handlers() */
+               if (priv->rx_handlers[pkt->hdr.cmd]) {
+                       IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
+                       iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+                       priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
+                       priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
+               } else {
+                       /* No handling needed */
+                       IWL_DEBUG_RX(priv,
+                               "r %d i %d No handler needed for %s, 0x%02x\n",
+                               r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
+                               pkt->hdr.cmd);
+               }
+
+               /*
+                * XXX: After here, we should always check rxb->page
+                * against NULL before touching it or its virtual
+                * memory (pkt). Because some rx_handler might have
+                * already taken or freed the pages.
+                */
+
+               if (reclaim) {
+                       /* Invoke any callbacks, transfer the buffer to caller,
+                        * and fire off the (possibly) blocking iwl_legacy_send_cmd()
+                        * as we reclaim the driver command queue */
+                       if (rxb->page)
+                               iwl_legacy_tx_cmd_complete(priv, rxb);
+                       else
+                               IWL_WARN(priv, "Claim null rxb?\n");
+               }
+
+               /* Reuse the page if possible. For notification packets and
+                * SKBs that fail to Rx correctly, add them back into the
+                * rx_free list for reuse later. */
+               spin_lock_irqsave(&rxq->lock, flags);
+               if (rxb->page != NULL) {
+                       rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
+                               0, PAGE_SIZE << priv->hw_params.rx_page_order,
+                               PCI_DMA_FROMDEVICE);
+                       list_add_tail(&rxb->list, &rxq->rx_free);
+                       rxq->free_count++;
+               } else
+                       list_add_tail(&rxb->list, &rxq->rx_used);
+
+               spin_unlock_irqrestore(&rxq->lock, flags);
+
+               i = (i + 1) & RX_QUEUE_MASK;
+               /* If there are a lot of unused frames,
+                * restock the Rx queue so ucode won't assert. */
+               if (fill_rx) {
+                       count++;
+                       if (count >= 8) {
+                               rxq->read = i;
+                               iwl3945_rx_replenish_now(priv);
+                               count = 0;
+                       }
+               }
+       }
+
+       /* Record where we stopped so the next pass resumes here */
+       rxq->read = i;
+       if (fill_rx)
+               iwl3945_rx_replenish_now(priv);
+       else
+               iwl3945_rx_queue_restock(priv);
+}
+
+/* call this function to flush any scheduled tasklet */
+static inline void iwl3945_synchronize_irq(struct iwl_priv *priv)
+{
+       /* wait to make sure we flush any pending tasklet */
+       synchronize_irq(priv->pci_dev->irq);
+       tasklet_kill(&priv->irq_tasklet);
+}
+
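+/* Map a uCode error type number from the error event table to a printable
+ * name for the error log dump below. */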
+static const char *iwl3945_desc_lookup(int i)
+{
+       switch (i) {
+       case 1:
+               return "FAIL";
+       case 2:
+               return "BAD_PARAM";
+       case 3:
+               return "BAD_CHECKSUM";
+       case 4:
+               return "NMI_INTERRUPT";
+       case 5:
+               return "SYSASSERT";
+       case 6:
+               return "FATAL_ERROR";
+       }
+
+       return "UNKNOWN";
+}
+
+#define ERROR_START_OFFSET  (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
+
+void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
+{
+       u32 i;
+       u32 desc, time, count, base, data1;
+       u32 blink1, blink2, ilink1, ilink2;
+
+       base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
+
+       if (!iwl3945_hw_valid_rtc_data_addr(base)) {
+               IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
+               return;
+       }
+
+
+       count = iwl_legacy_read_targ_mem(priv, base);
+
+       if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+               IWL_ERR(priv, "Start IWL Error Log Dump:\n");
+               IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
+                       priv->status, count);
+       }
+
+       IWL_ERR(priv, "Desc       Time       asrtPC  blink2 "
+                 "ilink1  nmiPC   Line\n");
+       for (i = ERROR_START_OFFSET;
+            i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
+            i += ERROR_ELEM_SIZE) {
+               desc = iwl_legacy_read_targ_mem(priv, base + i);
+               time =
+                   iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32));
+               blink1 =
+                   iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32));
+               blink2 =
+                   iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32));
+               ilink1 =
+                   iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32));
+               ilink2 =
+                   iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32));
+               data1 =
+                   iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32));
+
+               IWL_ERR(priv,
+                       "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
+                       iwl3945_desc_lookup(desc), desc, time, blink1, blink2,
+                       ilink1, ilink2, data1);
+               trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0,
+                                       0, blink1, blink2, ilink1, ilink2);
+       }
+}
+
+#define EVENT_START_OFFSET  (6 * sizeof(u32))
+
+/**
+ * iwl3945_print_event_log - Dump error event log to syslog
+ *
+ */
+static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
+                                 u32 num_events, u32 mode,
+                                 int pos, char **buf, size_t bufsz)
+{
+       u32 i;
+       u32 base;       /* SRAM byte address of event log header */
+       u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
+       u32 ptr;        /* SRAM byte address of log data */
+       u32 ev, time, data; /* event log data */
+       unsigned long reg_flags;
+
+       if (num_events == 0)
+               return pos;
+
+       base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+
+       if (mode == 0)
+               event_size = 2 * sizeof(u32);
+       else
+               event_size = 3 * sizeof(u32);
+
+       ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
+
+       /* Make sure device is powered up for SRAM reads */
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       iwl_grab_nic_access(priv);
+
+       /* Set starting address; reads will auto-increment */
+       _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
+       rmb();
+
+       /* "time" is actually "data" for mode 0 (no timestamp).
+        * place event id # at far right for easier visual parsing. */
+       for (i = 0; i < num_events; i++) {
+               ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+               time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+               if (mode == 0) {
+                       /* data, ev */
+                       if (bufsz) {
+                               pos += scnprintf(*buf + pos, bufsz - pos,
+                                               "0x%08x:%04u\n",
+                                               time, ev);
+                       } else {
+                               IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
+                               trace_iwlwifi_legacy_dev_ucode_event(priv, 0,
+                                                             time, ev);
+                       }
+               } else {
+                       data = _iwl_legacy_read_direct32(priv,
+                                                       HBUS_TARG_MEM_RDAT);
+                       if (bufsz) {
+                               pos += scnprintf(*buf + pos, bufsz - pos,
+                                               "%010u:0x%08x:%04u\n",
+                                                time, data, ev);
+                       } else {
+                               IWL_ERR(priv, "%010u\t0x%08x\t%04u\n",
+                                       time, data, ev);
+                               trace_iwlwifi_legacy_dev_ucode_event(priv, time,
+                                                             data, ev);
+                       }
+               }
+       }
+
+       /* Allow device to power down */
+       iwl_release_nic_access(priv);
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+       return pos;
+}
+
+/**
+ * iwl3945_print_last_event_logs - Dump the newest event log entries to syslog
+ */
+static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
+                                     u32 num_wraps, u32 next_entry,
+                                     u32 size, u32 mode,
+                                     int pos, char **buf, size_t bufsz)
+{
+       /*
+        * display the newest DEFAULT_LOG_ENTRIES entries
+        * i.e. the entries just before the next one that uCode would fill.
+        */
+       if (num_wraps) {
+               if (next_entry < size) {
+                       pos = iwl3945_print_event_log(priv,
+                                            capacity - (size - next_entry),
+                                            size - next_entry, mode,
+                                            pos, buf, bufsz);
+                       pos = iwl3945_print_event_log(priv, 0,
+                                                     next_entry, mode,
+                                                     pos, buf, bufsz);
+               } else
+                       pos = iwl3945_print_event_log(priv, next_entry - size,
+                                                     size, mode,
+                                                     pos, buf, bufsz);
+       } else {
+               if (next_entry < size)
+                       pos = iwl3945_print_event_log(priv, 0,
+                                                     next_entry, mode,
+                                                     pos, buf, bufsz);
+               else
+                       pos = iwl3945_print_event_log(priv, next_entry - size,
+                                                     size, mode,
+                                                     pos, buf, bufsz);
+       }
+       return pos;
+}
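+
+/* Worked example of the wrap handling above (illustrative values only):
+ * with capacity = 128, num_wraps = 1, next_entry = 5 and size = 20 the
+ * log has wrapped, so the 20 newest entries are printed in two chunks:
+ * entries 113..127 (start = 128 - (20 - 5), count = 15) followed by
+ * entries 0..4. */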
+
+#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
+
+int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+                           char **buf, bool display)
+{
+       u32 base;       /* SRAM byte address of event log header */
+       u32 capacity;   /* event log capacity in # entries */
+       u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
+       u32 num_wraps;  /* # times uCode wrapped to top of log */
+       u32 next_entry; /* index of next entry to be written by uCode */
+       u32 size;       /* # entries that we'll print */
+       int pos = 0;
+       size_t bufsz = 0;
+
+       base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+       if (!iwl3945_hw_valid_rtc_data_addr(base)) {
+               IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
+               return  -EINVAL;
+       }
+
+       /* event log header */
+       capacity = iwl_legacy_read_targ_mem(priv, base);
+       mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
+       num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32)));
+       next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32)));
+
+       if (capacity > priv->cfg->base_params->max_event_log_size) {
+               IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
+                       capacity, priv->cfg->base_params->max_event_log_size);
+               capacity = priv->cfg->base_params->max_event_log_size;
+       }
+
+       if (next_entry > priv->cfg->base_params->max_event_log_size) {
+               IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
+                       next_entry, priv->cfg->base_params->max_event_log_size);
+               next_entry = priv->cfg->base_params->max_event_log_size;
+       }
+
+       size = num_wraps ? capacity : next_entry;
+
+       /* bail out if nothing in log */
+       if (size == 0) {
+               IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
+               return pos;
+       }
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
+               size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
+                       ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
+#else
+       size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
+               ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
+#endif
+
+       IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
+                 size);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (display) {
+               if (full_log)
+                       bufsz = capacity * 48;
+               else
+                       bufsz = size * 48;
+               *buf = kmalloc(bufsz, GFP_KERNEL);
+               if (!*buf)
+                       return -ENOMEM;
+       }
+       if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
+               /* if uCode has wrapped back to top of log,
+                * start at the oldest entry,
+                * i.e. the next one that uCode would fill.
+                */
+               if (num_wraps)
+                       pos = iwl3945_print_event_log(priv, next_entry,
+                                               capacity - next_entry, mode,
+                                               pos, buf, bufsz);
+
+               /* (then/else) start at top of log */
+               pos = iwl3945_print_event_log(priv, 0, next_entry, mode,
+                                             pos, buf, bufsz);
+       } else
+               pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
+                                                   next_entry, size, mode,
+                                                   pos, buf, bufsz);
+#else
+       pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
+                                           next_entry, size, mode,
+                                           pos, buf, bufsz);
+#endif
+       return pos;
+}
+
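+/* Interrupt bottom half: acknowledges the CSR_INT and CSR_FH_INT_STATUS
+ * bits latched by the ISR, then services them in turn (hardware error,
+ * microcode error, wakeup, Rx, Tx), and finally re-enables interrupts if
+ * they were only disabled by the irq path. */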
+static void iwl3945_irq_tasklet(struct iwl_priv *priv)
+{
+       u32 inta, handled = 0;
+       u32 inta_fh;
+       unsigned long flags;
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       u32 inta_mask;
+#endif
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* Ack/clear/reset pending uCode interrupts.
+        * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+        *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
+       inta = iwl_read32(priv, CSR_INT);
+       iwl_write32(priv, CSR_INT, inta);
+
+       /* Ack/clear/reset pending flow-handler (DMA) interrupts.
+        * Any new interrupts that happen after this, either while we're
+        * in this tasklet, or later, will show up in next ISR/tasklet. */
+       inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+       iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
+               /* just for debug */
+               inta_mask = iwl_read32(priv, CSR_INT_MASK);
+               IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
+                             inta, inta_mask, inta_fh);
+       }
+#endif
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
+        * atomic, make sure that inta covers all the interrupts that
+        * we've discovered, even if FH interrupt came in just after
+        * reading CSR_INT. */
+       if (inta_fh & CSR39_FH_INT_RX_MASK)
+               inta |= CSR_INT_BIT_FH_RX;
+       if (inta_fh & CSR39_FH_INT_TX_MASK)
+               inta |= CSR_INT_BIT_FH_TX;
+
+       /* Now service all interrupt bits discovered above. */
+       if (inta & CSR_INT_BIT_HW_ERR) {
+               IWL_ERR(priv, "Hardware error detected.  Restarting.\n");
+
+               /* Tell the device to stop sending interrupts */
+               iwl_legacy_disable_interrupts(priv);
+
+               priv->isr_stats.hw++;
+               iwl_legacy_irq_handle_error(priv);
+
+               handled |= CSR_INT_BIT_HW_ERR;
+
+               return;
+       }
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
+               /* NIC fires this, but we don't use it, redundant with WAKEUP */
+               if (inta & CSR_INT_BIT_SCD) {
+                       IWL_DEBUG_ISR(priv, "Scheduler finished transmitting "
+                                     "the frame(s).\n");
+                       priv->isr_stats.sch++;
+               }
+
+               /* Alive notification via Rx interrupt will do the real work */
+               if (inta & CSR_INT_BIT_ALIVE) {
+                       IWL_DEBUG_ISR(priv, "Alive interrupt\n");
+                       priv->isr_stats.alive++;
+               }
+       }
+#endif
+       /* Safely ignore these bits for debug checks below */
+       inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+       /* Error detected by uCode */
+       if (inta & CSR_INT_BIT_SW_ERR) {
+               IWL_ERR(priv, "Microcode SW error detected. "
+                       "Restarting 0x%X.\n", inta);
+               priv->isr_stats.sw++;
+               iwl_legacy_irq_handle_error(priv);
+               handled |= CSR_INT_BIT_SW_ERR;
+       }
+
+       /* uCode wakes up after power-down sleep */
+       if (inta & CSR_INT_BIT_WAKEUP) {
+               IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
+               iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
+               iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]);
+               iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]);
+               iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]);
+               iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]);
+               iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]);
+               iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]);
+
+               priv->isr_stats.wakeup++;
+               handled |= CSR_INT_BIT_WAKEUP;
+       }
+
+       /* All uCode command responses, including Tx command responses,
+        * Rx "responses" (frame-received notification), and other
+        * notifications from uCode come through here */
+       if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+               iwl3945_rx_handle(priv);
+               priv->isr_stats.rx++;
+               handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+       }
+
+       if (inta & CSR_INT_BIT_FH_TX) {
+               IWL_DEBUG_ISR(priv, "Tx interrupt\n");
+               priv->isr_stats.tx++;
+
+               iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
+               iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT
+                                       (FH39_SRVC_CHNL), 0x0);
+               handled |= CSR_INT_BIT_FH_TX;
+       }
+
+       if (inta & ~handled) {
+               IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
+               priv->isr_stats.unhandled++;
+       }
+
+       if (inta & ~priv->inta_mask) {
+               IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
+                        inta & ~priv->inta_mask);
+               IWL_WARN(priv, "   with FH_INT = 0x%08x\n", inta_fh);
+       }
+
+       /* Re-enable all interrupts */
+       /* only Re-enable if disabled by irq */
+       if (test_bit(STATUS_INT_ENABLED, &priv->status))
+               iwl_legacy_enable_interrupts(priv);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
+               inta = iwl_read32(priv, CSR_INT);
+               inta_mask = iwl_read32(priv, CSR_INT_MASK);
+               inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+               IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
+                       "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
+       }
+#endif
+}
+
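+/* Fill in a single passive scan channel entry: pick one valid channel for
+ * the band, set dwell times and default Tx power.  Returns the number of
+ * channels added (1, or 0 if no valid channel was found). */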
+static int iwl3945_get_single_channel_for_scan(struct iwl_priv *priv,
+                                              struct ieee80211_vif *vif,
+                                              enum ieee80211_band band,
+                                              struct iwl3945_scan_channel *scan_ch)
+{
+       const struct ieee80211_supported_band *sband;
+       u16 passive_dwell = 0;
+       u16 active_dwell = 0;
+       int added = 0;
+       u8 channel = 0;
+
+       sband = iwl_get_hw_mode(priv, band);
+       if (!sband) {
+               IWL_ERR(priv, "invalid band\n");
+               return added;
+       }
+
+       active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0);
+       passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
+
+       if (passive_dwell <= active_dwell)
+               passive_dwell = active_dwell + 1;
+
+
+       channel = iwl_legacy_get_single_channel_number(priv, band);
+
+       if (channel) {
+               scan_ch->channel = channel;
+               scan_ch->type = 0;      /* passive */
+               scan_ch->active_dwell = cpu_to_le16(active_dwell);
+               scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+               /* Set txpower levels to defaults */
+               scan_ch->tpc.dsp_atten = 110;
+               if (band == IEEE80211_BAND_5GHZ)
+                       scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
+               else
+                       scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
+               added++;
+       } else
+               IWL_ERR(priv, "no valid channel found\n");
+       return added;
+}
+
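+/* Build the scan channel list for one band from the mac80211 scan request:
+ * channels outside the band or invalid for this hardware are skipped, each
+ * remaining entry is marked active or passive and given dwell times, direct
+ * probe bits and default Tx power.  Returns the number of entries written. */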
+static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
+                                        enum ieee80211_band band,
+                                    u8 is_active, u8 n_probes,
+                                    struct iwl3945_scan_channel *scan_ch,
+                                    struct ieee80211_vif *vif)
+{
+       struct ieee80211_channel *chan;
+       const struct ieee80211_supported_band *sband;
+       const struct iwl_channel_info *ch_info;
+       u16 passive_dwell = 0;
+       u16 active_dwell = 0;
+       int added, i;
+
+       sband = iwl_get_hw_mode(priv, band);
+       if (!sband)
+               return 0;
+
+       active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
+       passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
+
+       if (passive_dwell <= active_dwell)
+               passive_dwell = active_dwell + 1;
+
+       for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
+               chan = priv->scan_request->channels[i];
+
+               if (chan->band != band)
+                       continue;
+
+               scan_ch->channel = chan->hw_value;
+
+               ch_info = iwl_legacy_get_channel_info(priv, band,
+                                                       scan_ch->channel);
+               if (!iwl_legacy_is_channel_valid(ch_info)) {
+                       IWL_DEBUG_SCAN(priv,
+                               "Channel %d is INVALID for this band.\n",
+                              scan_ch->channel);
+                       continue;
+               }
+
+               scan_ch->active_dwell = cpu_to_le16(active_dwell);
+               scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+               /* If passive, set up for auto-switch
+                * and use long active_dwell time.
+                */
+               if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
+                   (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
+                       scan_ch->type = 0;      /* passive */
+                       if (IWL_UCODE_API(priv->ucode_ver) == 1)
+                               scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
+               } else {
+                       scan_ch->type = 1;      /* active */
+               }
+
+               /* Set direct probe bits. These may be used both for active
+                * scan channels (probes get sent right away)
+                * and for passive channels (probes get sent only after
+                * hearing a clear Rx packet). */
+               if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
+                       if (n_probes)
+                               scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
+               } else {
+                       /* uCode v1 does not allow setting direct probe bits on
+                        * passive channel. */
+                       if ((scan_ch->type & 1) && n_probes)
+                               scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
+               }
+
+               /* Set txpower levels to defaults */
+               scan_ch->tpc.dsp_atten = 110;
+               /* scan_pwr_info->tpc.dsp_atten; */
+
+               /*scan_pwr_info->tpc.tx_gain; */
+               if (band == IEEE80211_BAND_5GHZ)
+                       scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
+               else {
+                       scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
+                       /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+                        * power level:
+                        * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
+                        */
+               }
+
+               IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n",
+                              scan_ch->channel,
+                              (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
+                              (scan_ch->type & 1) ?
+                              active_dwell : passive_dwell);
+
+               scan_ch++;
+               added++;
+       }
+
+       IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
+       return added;
+}
+
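+/* Translate the driver's legacy rate table into mac80211 ieee80211_rate
+ * entries: bitrate is in 100 kb/s units (the table stores 500 kb/s units,
+ * hence the multiply by 5) and hw_value is simply the table index.  CCK
+ * rates other than 1 Mb/s also get the short-preamble flag. */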
+static void iwl3945_init_hw_rates(struct iwl_priv *priv,
+                             struct ieee80211_rate *rates)
+{
+       int i;
+
+       for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
+               rates[i].bitrate = iwl3945_rates[i].ieee * 5;
+               rates[i].hw_value = i; /* Rate scaling will work on indexes */
+               rates[i].hw_value_short = i;
+               rates[i].flags = 0;
+               if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
+                       /*
+                        * If CCK != 1M then set short preamble rate flag.
+                        */
+                       rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
+                               0 : IEEE80211_RATE_SHORT_PREAMBLE;
+               }
+       }
+}
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
+{
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
+}
+
+/**
+ * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
+ *     looking at all data.
+ */
+static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
+{
+       u32 val;
+       u32 save_len = len;
+       int rc = 0;
+       u32 errcnt;
+
+       IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
+
+       iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+                              IWL39_RTC_INST_LOWER_BOUND);
+
+       errcnt = 0;
+       for (; len > 0; len -= sizeof(u32), image++) {
+               /* read data comes through single port, auto-incr addr */
+               /* NOTE: Use the debugless read so we don't flood kernel log
+                * if IWL_DL_IO is set */
+               val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+               if (val != le32_to_cpu(*image)) {
+                       IWL_ERR(priv, "uCode INST section is invalid at "
+                                 "offset 0x%x, is 0x%x, s/b 0x%x\n",
+                                 save_len - len, val, le32_to_cpu(*image));
+                       rc = -EIO;
+                       errcnt++;
+                       if (errcnt >= 20)
+                               break;
+               }
+       }
+
+
+       if (!errcnt)
+               IWL_DEBUG_INFO(priv,
+                       "ucode image in INSTRUCTION memory is good\n");
+
+       return rc;
+}
+
+
+/**
+ * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ *   using sample data 100 bytes apart.  If these sample points are good,
+ *   it's a pretty good bet that everything between them is good, too.
+ */
+static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
+{
+       u32 val;
+       int rc = 0;
+       u32 errcnt = 0;
+       u32 i;
+
+       IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
+
+       for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
+               /* read data comes through single port, auto-incr addr */
+               /* NOTE: Use the debugless read so we don't flood kernel log
+                * if IWL_DL_IO is set */
+               iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+                       i + IWL39_RTC_INST_LOWER_BOUND);
+               val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+               if (val != le32_to_cpu(*image)) {
+#if 0 /* Enable this if you want to see details */
+                       IWL_ERR(priv, "uCode INST section is invalid at "
+                                 "offset 0x%x, is 0x%x, s/b 0x%x\n",
+                                 i, val, *image);
+#endif
+                       rc = -EIO;
+                       errcnt++;
+                       if (errcnt >= 3)
+                               break;
+               }
+       }
+
+       return rc;
+}
+
+
+/**
+ * iwl3945_verify_ucode - determine which instruction image is in SRAM,
+ *    and verify its contents
+ */
+static int iwl3945_verify_ucode(struct iwl_priv *priv)
+{
+       __le32 *image;
+       u32 len;
+       int rc = 0;
+
+       /* Try bootstrap */
+       image = (__le32 *)priv->ucode_boot.v_addr;
+       len = priv->ucode_boot.len;
+       rc = iwl3945_verify_inst_sparse(priv, image, len);
+       if (rc == 0) {
+               IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
+               return 0;
+       }
+
+       /* Try initialize */
+       image = (__le32 *)priv->ucode_init.v_addr;
+       len = priv->ucode_init.len;
+       rc = iwl3945_verify_inst_sparse(priv, image, len);
+       if (rc == 0) {
+               IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
+               return 0;
+       }
+
+       /* Try runtime/protocol */
+       image = (__le32 *)priv->ucode_code.v_addr;
+       len = priv->ucode_code.len;
+       rc = iwl3945_verify_inst_sparse(priv, image, len);
+       if (rc == 0) {
+               IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
+               return 0;
+       }
+
+       IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
+
+       /* Since nothing seems to match, show first several data entries in
+        * instruction SRAM, so maybe visual inspection will give a clue.
+        * Selection of bootstrap image (vs. other images) is arbitrary. */
+       image = (__le32 *)priv->ucode_boot.v_addr;
+       len = priv->ucode_boot.len;
+       rc = iwl3945_verify_inst_full(priv, image, len);
+
+       return rc;
+}
+
+static void iwl3945_nic_start(struct iwl_priv *priv)
+{
+       /* Remove all resets to allow NIC to operate */
+       iwl_write32(priv, CSR_RESET, 0);
+}
+
+#define IWL3945_UCODE_GET(item)                                                \
+static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
+{                                                                      \
+       return le32_to_cpu(ucode->v1.item);                             \
+}
+
+static u32 iwl3945_ucode_get_header_size(u32 api_ver)
+{
+       return 24;
+}
+
+static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
+{
+       return (u8 *) ucode->v1.data;
+}
+
+IWL3945_UCODE_GET(inst_size);
+IWL3945_UCODE_GET(data_size);
+IWL3945_UCODE_GET(init_size);
+IWL3945_UCODE_GET(init_data_size);
+IWL3945_UCODE_GET(boot_size);
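+
+/* For reference, IWL3945_UCODE_GET(inst_size) above expands to
+ *
+ *     static u32 iwl3945_ucode_get_inst_size(
+ *                             const struct iwl_ucode_header *ucode)
+ *     {
+ *             return le32_to_cpu(ucode->v1.inst_size);
+ *     }
+ *
+ * and likewise for the other v1 header fields used below. */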
+
+/**
+ * iwl3945_read_ucode - Read uCode images from disk file.
+ *
+ * Copy into buffers for card to fetch via bus-mastering
+ */
+static int iwl3945_read_ucode(struct iwl_priv *priv)
+{
+       const struct iwl_ucode_header *ucode;
+       int ret = -EINVAL, index;
+       const struct firmware *ucode_raw;
+       /* firmware file name contains uCode/driver compatibility version */
+       const char *name_pre = priv->cfg->fw_name_pre;
+       const unsigned int api_max = priv->cfg->ucode_api_max;
+       const unsigned int api_min = priv->cfg->ucode_api_min;
+       char buf[25];
+       u8 *src;
+       size_t len;
+       u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
+
+       /* Ask kernel firmware_class module to get the boot firmware off disk.
+        * request_firmware() is synchronous, file is in memory on return. */
+       for (index = api_max; index >= api_min; index--) {
+               sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
+               ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
+               if (ret < 0) {
+                       IWL_ERR(priv, "%s firmware file req failed: %d\n",
+                                 buf, ret);
+                       if (ret == -ENOENT)
+                               continue;
+                       else
+                               goto error;
+               } else {
+                       if (index < api_max)
+                               IWL_ERR(priv, "Loaded firmware %s, "
+                                       "which is deprecated. "
+                                       " Please use API v%u instead.\n",
+                                         buf, api_max);
+                       IWL_DEBUG_INFO(priv, "Got firmware '%s' file "
+                                      "(%zd bytes) from disk\n",
+                                      buf, ucode_raw->size);
+                       break;
+               }
+       }
+
+       if (ret < 0)
+               goto error;
+
+       /* Make sure that we got at least our header! */
+       if (ucode_raw->size <  iwl3945_ucode_get_header_size(1)) {
+               IWL_ERR(priv, "File size way too small!\n");
+               ret = -EINVAL;
+               goto err_release;
+       }
+
+       /* Data from ucode file:  header followed by uCode images */
+       ucode = (struct iwl_ucode_header *)ucode_raw->data;
+
+       priv->ucode_ver = le32_to_cpu(ucode->ver);
+       api_ver = IWL_UCODE_API(priv->ucode_ver);
+       inst_size = iwl3945_ucode_get_inst_size(ucode);
+       data_size = iwl3945_ucode_get_data_size(ucode);
+       init_size = iwl3945_ucode_get_init_size(ucode);
+       init_data_size = iwl3945_ucode_get_init_data_size(ucode);
+       boot_size = iwl3945_ucode_get_boot_size(ucode);
+       src = iwl3945_ucode_get_data(ucode);
+
+       /* api_ver should match the api version forming part of the
+        * firmware filename ... but we don't check for that and only rely
+        * on the API version read from firmware header from here on forward */
+
+       if (api_ver < api_min || api_ver > api_max) {
+               IWL_ERR(priv, "Driver unable to support your firmware API. "
+                         "Driver supports v%u, firmware is v%u.\n",
+                         api_max, api_ver);
+               priv->ucode_ver = 0;
+               ret = -EINVAL;
+               goto err_release;
+       }
+       if (api_ver != api_max)
+               IWL_ERR(priv, "Firmware has old API version. Expected %u, "
+                         "got %u. New firmware can be obtained "
+                         "from http://www.intellinuxwireless.org.\n",
+                         api_max, api_ver);
+
+       IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
+               IWL_UCODE_MAJOR(priv->ucode_ver),
+               IWL_UCODE_MINOR(priv->ucode_ver),
+               IWL_UCODE_API(priv->ucode_ver),
+               IWL_UCODE_SERIAL(priv->ucode_ver));
+
+       snprintf(priv->hw->wiphy->fw_version,
+                sizeof(priv->hw->wiphy->fw_version),
+                "%u.%u.%u.%u",
+                IWL_UCODE_MAJOR(priv->ucode_ver),
+                IWL_UCODE_MINOR(priv->ucode_ver),
+                IWL_UCODE_API(priv->ucode_ver),
+                IWL_UCODE_SERIAL(priv->ucode_ver));
+
+       IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
+                      priv->ucode_ver);
+       IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
+                      inst_size);
+       IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n",
+                      data_size);
+       IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n",
+                      init_size);
+       IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n",
+                      init_data_size);
+       IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
+                      boot_size);
+
+
+       /* Verify size of file vs. image size info in file's header */
+       if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) +
+               inst_size + data_size + init_size +
+               init_data_size + boot_size) {
+
+               IWL_DEBUG_INFO(priv,
+                       "uCode file size %zd does not match expected size\n",
+                       ucode_raw->size);
+               ret = -EINVAL;
+               goto err_release;
+       }
+
+       /* Verify that uCode images will fit in card's SRAM */
+       if (inst_size > IWL39_MAX_INST_SIZE) {
+               IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n",
+                              inst_size);
+               ret = -EINVAL;
+               goto err_release;
+       }
+
+       if (data_size > IWL39_MAX_DATA_SIZE) {
+               IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n",
+                              data_size);
+               ret = -EINVAL;
+               goto err_release;
+       }
+       if (init_size > IWL39_MAX_INST_SIZE) {
+               IWL_DEBUG_INFO(priv,
+                               "uCode init instr len %d too large to fit in\n",
+                               init_size);
+               ret = -EINVAL;
+               goto err_release;
+       }
+       if (init_data_size > IWL39_MAX_DATA_SIZE) {
+               IWL_DEBUG_INFO(priv,
+                               "uCode init data len %d too large to fit in\n",
+                               init_data_size);
+               ret = -EINVAL;
+               goto err_release;
+       }
+       if (boot_size > IWL39_MAX_BSM_SIZE) {
+               IWL_DEBUG_INFO(priv,
+                               "uCode boot instr len %d too large to fit in\n",
+                               boot_size);
+               ret = -EINVAL;
+               goto err_release;
+       }
+
+       /* Allocate ucode buffers for card's bus-master loading ... */
+
+       /* Runtime instructions and 2 copies of data:
+        * 1) unmodified from disk
+        * 2) backup cache for save/restore during power-downs */
+       priv->ucode_code.len = inst_size;
+       iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
+
+       priv->ucode_data.len = data_size;
+       iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
+
+       priv->ucode_data_backup.len = data_size;
+       iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
+
+       if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
+           !priv->ucode_data_backup.v_addr)
+               goto err_pci_alloc;
+
+       /* Initialization instructions and data */
+       if (init_size && init_data_size) {
+               priv->ucode_init.len = init_size;
+               iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
+
+               priv->ucode_init_data.len = init_data_size;
+               iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
+
+               if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
+                       goto err_pci_alloc;
+       }
+
+       /* Bootstrap (instructions only, no data) */
+       if (boot_size) {
+               priv->ucode_boot.len = boot_size;
+               iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
+
+               if (!priv->ucode_boot.v_addr)
+                       goto err_pci_alloc;
+       }
+
+       /* Copy images into buffers for card's bus-master reads ... */
+
+       /* Runtime instructions (first block of data in file) */
+       len = inst_size;
+       IWL_DEBUG_INFO(priv,
+               "Copying (but not loading) uCode instr len %zd\n", len);
+       memcpy(priv->ucode_code.v_addr, src, len);
+       src += len;
+
+       IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
+               priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
+
+       /* Runtime data (2nd block)
+        * NOTE:  Copy into backup buffer will be done in iwl3945_up()  */
+       len = data_size;
+       IWL_DEBUG_INFO(priv,
+               "Copying (but not loading) uCode data len %zd\n", len);
+       memcpy(priv->ucode_data.v_addr, src, len);
+       memcpy(priv->ucode_data_backup.v_addr, src, len);
+       src += len;
+
+       /* Initialization instructions (3rd block) */
+       if (init_size) {
+               len = init_size;
+               IWL_DEBUG_INFO(priv,
+                       "Copying (but not loading) init instr len %zd\n", len);
+               memcpy(priv->ucode_init.v_addr, src, len);
+               src += len;
+       }
+
+       /* Initialization data (4th block) */
+       if (init_data_size) {
+               len = init_data_size;
+               IWL_DEBUG_INFO(priv,
+                       "Copying (but not loading) init data len %zd\n", len);
+               memcpy(priv->ucode_init_data.v_addr, src, len);
+               src += len;
+       }
+
+       /* Bootstrap instructions (5th block) */
+       len = boot_size;
+       IWL_DEBUG_INFO(priv,
+               "Copying (but not loading) boot instr len %zd\n", len);
+       memcpy(priv->ucode_boot.v_addr, src, len);
+
+       /* We have our copies now, allow OS to release its copies */
+       release_firmware(ucode_raw);
+       return 0;
+
+ err_pci_alloc:
+       IWL_ERR(priv, "failed to allocate pci memory\n");
+       ret = -ENOMEM;
+       iwl3945_dealloc_ucode_pci(priv);
+
+ err_release:
+       release_firmware(ucode_raw);
+
+ error:
+       return ret;
+}
+
+
+/**
+ * iwl3945_set_ucode_ptrs - Set uCode address location
+ *
+ * Tell initialization uCode where to find runtime uCode.
+ *
+ * BSM registers initially contain pointers to initialization uCode.
+ * We need to replace them to load runtime uCode inst and data,
+ * and to save runtime data when powering down.
+ */
+static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
+{
+       dma_addr_t pinst;
+       dma_addr_t pdata;
+
+       /* bits 31:0 for 3945 */
+       pinst = priv->ucode_code.p_addr;
+       pdata = priv->ucode_data_backup.p_addr;
+
+       /* Tell bootstrap uCode where to find image to load */
+       iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
+       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
+       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
+                                priv->ucode_data.len);
+
+       /* Inst byte count must be last to set up, bit 31 signals uCode
+        *   that all new ptr/size info is in place */
+       iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
+                                priv->ucode_code.len | BSM_DRAM_INST_LOAD);
+
+       IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
+
+       return 0;
+}
+
+/**
+ * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received
+ *
+ * Called after REPLY_ALIVE notification received from "initialize" uCode.
+ *
+ * Tell "initialize" uCode to go ahead and load the runtime uCode.
+ */
+static void iwl3945_init_alive_start(struct iwl_priv *priv)
+{
+       /* Check alive response for "valid" sign from uCode */
+       if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
+               /* We had an error bringing up the hardware, so take it
+                * all the way back down so we can try again */
+               IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
+               goto restart;
+       }
+
+       /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
+        * This is a paranoid check, because we would not have gotten the
+        * "initialize" alive if code weren't properly loaded.  */
+       if (iwl3945_verify_ucode(priv)) {
+               /* Runtime instruction load was bad;
+                * take it all the way back down so we can try again */
+               IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
+               goto restart;
+       }
+
+       /* Send pointers to protocol/runtime uCode image ... init code will
+        * load and launch runtime uCode, which will send us another "Alive"
+        * notification. */
+       IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
+       if (iwl3945_set_ucode_ptrs(priv)) {
+               /* Runtime instruction load won't happen;
+                * take it all the way back down so we can try again */
+               IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
+               goto restart;
+       }
+       return;
+
+ restart:
+       queue_work(priv->workqueue, &priv->restart);
+}
+
+/**
+ * iwl3945_alive_start - called after REPLY_ALIVE notification received
+ *                   from protocol/runtime uCode (initialization uCode's
+ *                   Alive gets handled by iwl3945_init_alive_start()).
+ */
+static void iwl3945_alive_start(struct iwl_priv *priv)
+{
+       int thermal_spin = 0;
+       u32 rfkill;
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+       IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
+
+       if (priv->card_alive.is_valid != UCODE_VALID_OK) {
+               /* We had an error bringing up the hardware, so take it
+                * all the way back down so we can try again */
+               IWL_DEBUG_INFO(priv, "Alive failed.\n");
+               goto restart;
+       }
+
+       /* Initialize uCode has loaded Runtime uCode ... verify inst image.
+        * This is a paranoid check, because we would not have gotten the
+        * "runtime" alive if code weren't properly loaded.  */
+       if (iwl3945_verify_ucode(priv)) {
+               /* Runtime instruction load was bad;
+                * take it all the way back down so we can try again */
+               IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
+               goto restart;
+       }
+
+       rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG);
+       IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
+
+       if (rfkill & 0x1) {
+               clear_bit(STATUS_RF_KILL_HW, &priv->status);
+               /* if RFKILL is not on, then wait for thermal
+                * sensor in adapter to kick in */
+               while (iwl3945_hw_get_temperature(priv) == 0) {
+                       thermal_spin++;
+                       udelay(10);
+               }
+
+               if (thermal_spin)
+                       IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n",
+                                      thermal_spin * 10);
+       } else
+               set_bit(STATUS_RF_KILL_HW, &priv->status);
+
+       /* After the ALIVE response, we can send commands to 3945 uCode */
+       set_bit(STATUS_ALIVE, &priv->status);
+
+       /* Enable watchdog to monitor the driver tx queues */
+       iwl_legacy_setup_watchdog(priv);
+
+       if (iwl_legacy_is_rfkill(priv))
+               return;
+
+       ieee80211_wake_queues(priv->hw);
+
+       priv->active_rate = IWL_RATES_MASK_3945;
+
+       iwl_legacy_power_update_mode(priv, true);
+
+       if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
+               struct iwl3945_rxon_cmd *active_rxon =
+                               (struct iwl3945_rxon_cmd *)(&ctx->active);
+
+               ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+               active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       } else {
+               /* Initialize our rx_config data */
+               iwl_legacy_connection_init_rx_config(priv, ctx);
+       }
+
+       /* Configure Bluetooth device coexistence support */
+       iwl_legacy_send_bt_config(priv);
+
+       set_bit(STATUS_READY, &priv->status);
+
+       /* Configure the adapter for unassociated operation */
+       iwl3945_commit_rxon(priv, ctx);
+
+       iwl3945_reg_txpower_periodic(priv);
+
+       IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
+       wake_up_interruptible(&priv->wait_command_queue);
+
+       return;
+
+ restart:
+       queue_work(priv->workqueue, &priv->restart);
+}
+
+static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
+
+static void __iwl3945_down(struct iwl_priv *priv)
+{
+       unsigned long flags;
+       int exit_pending;
+
+       IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
+
+       iwl_legacy_scan_cancel_timeout(priv, 200);
+
+       exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
+
+       /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
+        * to prevent the timer from being re-armed */
+       del_timer_sync(&priv->watchdog);
+
+       /* Station information will now be cleared in device */
+       iwl_legacy_clear_ucode_stations(priv, NULL);
+       iwl_legacy_dealloc_bcast_stations(priv);
+       iwl_legacy_clear_driver_stations(priv);
+
+       /* Unblock any waiting calls */
+       wake_up_interruptible_all(&priv->wait_command_queue);
+
+       /* Wipe out the EXIT_PENDING status bit if we are not actually
+        * exiting the module */
+       if (!exit_pending)
+               clear_bit(STATUS_EXIT_PENDING, &priv->status);
+
+       /* stop and reset the on-board processor */
+       iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+       /* tell the device to stop sending interrupts */
+       spin_lock_irqsave(&priv->lock, flags);
+       iwl_legacy_disable_interrupts(priv);
+       spin_unlock_irqrestore(&priv->lock, flags);
+       iwl3945_synchronize_irq(priv);
+
+       if (priv->mac80211_registered)
+               ieee80211_stop_queues(priv->hw);
+
+       /* If we have not previously called iwl3945_init() then
+        * clear all bits but the RF Kill bits and return */
+       if (!iwl_legacy_is_init(priv)) {
+               priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
+                                       STATUS_RF_KILL_HW |
+                              test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
+                                       STATUS_GEO_CONFIGURED |
+                               test_bit(STATUS_EXIT_PENDING, &priv->status) <<
+                                       STATUS_EXIT_PENDING;
+               goto exit;
+       }
+
+       /* ...otherwise clear out all the status bits but the RF Kill
+        * bit and continue taking the NIC down. */
+       priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
+                               STATUS_RF_KILL_HW |
+                       test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
+                               STATUS_GEO_CONFIGURED |
+                       test_bit(STATUS_FW_ERROR, &priv->status) <<
+                               STATUS_FW_ERROR |
+                       test_bit(STATUS_EXIT_PENDING, &priv->status) <<
+                               STATUS_EXIT_PENDING;
+
+       iwl3945_hw_txq_ctx_stop(priv);
+       iwl3945_hw_rxq_stop(priv);
+
+       /* Power-down device's busmaster DMA clocks */
+       iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+       udelay(5);
+
+       /* Stop the device, and put it in low power state */
+       iwl_legacy_apm_stop(priv);
+
+ exit:
+       memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
+
+       if (priv->beacon_skb)
+               dev_kfree_skb(priv->beacon_skb);
+       priv->beacon_skb = NULL;
+
+       /* clear out any free frames */
+       iwl3945_clear_free_frames(priv);
+}
+
+static void iwl3945_down(struct iwl_priv *priv)
+{
+       mutex_lock(&priv->mutex);
+       __iwl3945_down(priv);
+       mutex_unlock(&priv->mutex);
+
+       iwl3945_cancel_deferred_work(priv);
+}
+
+#define MAX_HW_RESTARTS 5
+
+static int iwl3945_alloc_bcast_station(struct iwl_priv *priv)
+{
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       unsigned long flags;
+       u8 sta_id;
+
+       spin_lock_irqsave(&priv->sta_lock, flags);
+       sta_id = iwl_legacy_prep_station(priv, ctx,
+                                       iwlegacy_bcast_addr, false, NULL);
+       if (sta_id == IWL_INVALID_STATION) {
+               IWL_ERR(priv, "Unable to prepare broadcast station\n");
+               spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+               return -EINVAL;
+       }
+
+       priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
+       priv->stations[sta_id].used |= IWL_STA_BCAST;
+       spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+       return 0;
+}
+
+static int __iwl3945_up(struct iwl_priv *priv)
+{
+       int rc, i;
+
+       rc = iwl3945_alloc_bcast_station(priv);
+       if (rc)
+               return rc;
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+               IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
+               return -EIO;
+       }
+
+       if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
+               IWL_ERR(priv, "ucode not available for device bring up\n");
+               return -EIO;
+       }
+
+       /* If platform's RF_KILL switch is NOT set to KILL */
+       if (iwl_read32(priv, CSR_GP_CNTRL) &
+                               CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+               clear_bit(STATUS_RF_KILL_HW, &priv->status);
+       else {
+               set_bit(STATUS_RF_KILL_HW, &priv->status);
+               IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
+               return -ENODEV;
+       }
+
+       iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
+
+       rc = iwl3945_hw_nic_init(priv);
+       if (rc) {
+               IWL_ERR(priv, "Unable to init nic\n");
+               return rc;
+       }
+
+       /* make sure rfkill handshake bits are cleared */
+       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+                   CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+       /* clear (again), then enable host interrupts */
+       iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
+       iwl_legacy_enable_interrupts(priv);
+
+       /* really make sure rfkill handshake bits are cleared */
+       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+       /* Copy original ucode data image from disk into backup cache.
+        * This will be used to initialize the on-board processor's
+        * data SRAM for a clean start when the runtime program first loads. */
+       memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
+              priv->ucode_data.len);
+
+       /* We return success when we resume from suspend and rf_kill is on. */
+       if (test_bit(STATUS_RF_KILL_HW, &priv->status))
+               return 0;
+
+       for (i = 0; i < MAX_HW_RESTARTS; i++) {
+
+               /* load bootstrap state machine,
+                * load bootstrap program into processor's memory,
+                * prepare to load the "initialize" uCode */
+               rc = priv->cfg->ops->lib->load_ucode(priv);
+
+               if (rc) {
+                       IWL_ERR(priv,
+                               "Unable to set up bootstrap uCode: %d\n", rc);
+                       continue;
+               }
+
+               /* start card; "initialize" will load runtime ucode */
+               iwl3945_nic_start(priv);
+
+               IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
+
+               return 0;
+       }
+
+       set_bit(STATUS_EXIT_PENDING, &priv->status);
+       __iwl3945_down(priv);
+       clear_bit(STATUS_EXIT_PENDING, &priv->status);
+
+       /* tried to restart and configure the device for as long as our
+        * patience could withstand */
+       IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
+       return -EIO;
+}
+
+
+/*****************************************************************************
+ *
+ * Workqueue callbacks
+ *
+ *****************************************************************************/
+
+static void iwl3945_bg_init_alive_start(struct work_struct *data)
+{
+       struct iwl_priv *priv =
+           container_of(data, struct iwl_priv, init_alive_start.work);
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       mutex_lock(&priv->mutex);
+       iwl3945_init_alive_start(priv);
+       mutex_unlock(&priv->mutex);
+}
+
+static void iwl3945_bg_alive_start(struct work_struct *data)
+{
+       struct iwl_priv *priv =
+           container_of(data, struct iwl_priv, alive_start.work);
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       mutex_lock(&priv->mutex);
+       iwl3945_alive_start(priv);
+       mutex_unlock(&priv->mutex);
+}
+
+/*
+ * 3945 cannot interrupt driver when hardware rf kill switch toggles;
+ * driver must poll CSR_GP_CNTRL_REG register for change.  This register
+ * *is* readable even when device has been SW_RESET into low power mode
+ * (e.g. during RF KILL).
+ */
+static void iwl3945_rfkill_poll(struct work_struct *data)
+{
+       struct iwl_priv *priv =
+           container_of(data, struct iwl_priv, _3945.rfkill_poll.work);
+       bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
+       bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
+                       & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+
+       if (new_rfkill != old_rfkill) {
+               if (new_rfkill)
+                       set_bit(STATUS_RF_KILL_HW, &priv->status);
+               else
+                       clear_bit(STATUS_RF_KILL_HW, &priv->status);
+
+               wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
+
+               IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
+                               new_rfkill ? "disable radio" : "enable radio");
+       }
+
+       /* Keep this running, even if radio now enabled.  This will be
+        * cancelled in mac_start() if system decides to start again */
+       queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
+                          round_jiffies_relative(2 * HZ));
+
+}
+
+int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
+{
+       struct iwl_host_cmd cmd = {
+               .id = REPLY_SCAN_CMD,
+               .len = sizeof(struct iwl3945_scan_cmd),
+               .flags = CMD_SIZE_HUGE,
+       };
+       struct iwl3945_scan_cmd *scan;
+       u8 n_probes = 0;
+       enum ieee80211_band band;
+       bool is_active = false;
+       int ret;
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (!priv->scan_cmd) {
+               priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
+                                        IWL_MAX_SCAN_SIZE, GFP_KERNEL);
+               if (!priv->scan_cmd) {
+                       IWL_DEBUG_SCAN(priv, "Failed to allocate scan memory\n");
+                       return -ENOMEM;
+               }
+       }
+       scan = priv->scan_cmd;
+       memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
+
+       scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
+       scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
+
+       if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
+               u16 interval = 0;
+               u32 extra;
+               u32 suspend_time = 100;
+               u32 scan_suspend_time = 100;
+
+               IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
+
+               if (priv->is_internal_short_scan)
+                       interval = 0;
+               else
+                       interval = vif->bss_conf.beacon_int;
+
+               scan->suspend_time = 0;
+               scan->max_out_time = cpu_to_le32(200 * 1024);
+               if (!interval)
+                       interval = suspend_time;
+               /*
+                * suspend time format:
+                *  0-19: beacon interval in usec (time before exec.)
+                * 20-23: 0
+                * 24-31: number of beacons (suspend between channels)
+                */
+
+               extra = (suspend_time / interval) << 24;
+               scan_suspend_time = 0xFF0FFFFF &
+                   (extra | ((suspend_time % interval) * 1024));
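+               /*
+                * Illustrative example (hypothetical values): with
+                * suspend_time = 100 TU and a beacon interval of 100 TU,
+                * extra = 1 << 24 and the remainder is 0, so
+                * scan_suspend_time = 0x01000000, i.e. "suspend for one
+                * beacon between channels, plus 0 usec".
+                */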
+
+               scan->suspend_time = cpu_to_le32(scan_suspend_time);
+               IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
+                              scan_suspend_time, interval);
+       }
+
+       if (priv->is_internal_short_scan) {
+               IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
+       } else if (priv->scan_request->n_ssids) {
+               int i, p = 0;
+               IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
+               for (i = 0; i < priv->scan_request->n_ssids; i++) {
+                       /* always does wildcard anyway */
+                       if (!priv->scan_request->ssids[i].ssid_len)
+                               continue;
+                       scan->direct_scan[p].id = WLAN_EID_SSID;
+                       scan->direct_scan[p].len =
+                               priv->scan_request->ssids[i].ssid_len;
+                       memcpy(scan->direct_scan[p].ssid,
+                              priv->scan_request->ssids[i].ssid,
+                              priv->scan_request->ssids[i].ssid_len);
+                       n_probes++;
+                       p++;
+               }
+               is_active = true;
+       } else
+               IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n");
+
+       /* We don't build a direct scan probe request; the uCode will do
+        * that based on the direct_mask added to each channel entry */
+       scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+       scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
+       scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+       /* flags + rate selection */
+
+       switch (priv->scan_band) {
+       case IEEE80211_BAND_2GHZ:
+               scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+               scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
+               band = IEEE80211_BAND_2GHZ;
+               break;
+       case IEEE80211_BAND_5GHZ:
+               scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
+               band = IEEE80211_BAND_5GHZ;
+               break;
+       default:
+               IWL_WARN(priv, "Invalid scan band\n");
+               return -EIO;
+       }
+
+       /*
+        * If active scanning is requested but a certain channel
+        * is marked passive, we can do active scanning if we
+        * detect transmissions.
+        */
+       scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
+                                       IWL_GOOD_CRC_TH_DISABLED;
+
+       if (!priv->is_internal_short_scan) {
+               scan->tx_cmd.len = cpu_to_le16(
+                       iwl_legacy_fill_probe_req(priv,
+                               (struct ieee80211_mgmt *)scan->data,
+                               vif->addr,
+                               priv->scan_request->ie,
+                               priv->scan_request->ie_len,
+                               IWL_MAX_SCAN_SIZE - sizeof(*scan)));
+       } else {
+               /* use bcast addr, will not be transmitted but must be valid */
+               scan->tx_cmd.len = cpu_to_le16(
+                       iwl_legacy_fill_probe_req(priv,
+                               (struct ieee80211_mgmt *)scan->data,
+                               iwlegacy_bcast_addr, NULL, 0,
+                               IWL_MAX_SCAN_SIZE - sizeof(*scan)));
+       }
+       /* select Rx antennas */
+       scan->flags |= iwl3945_get_antenna_flags(priv);
+
+       if (priv->is_internal_short_scan) {
+               scan->channel_count =
+                       iwl3945_get_single_channel_for_scan(priv, vif, band,
+                               (void *)&scan->data[le16_to_cpu(
+                               scan->tx_cmd.len)]);
+       } else {
+               scan->channel_count =
+                       iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
+                               (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)], vif);
+       }
+
+       if (scan->channel_count == 0) {
+               IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
+               return -EIO;
+       }
+
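+       /* Total command size: the fixed scan command header, the probe
+        * request built above, plus one iwl3945_scan_channel per channel */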
+       cmd.len += le16_to_cpu(scan->tx_cmd.len) +
+           scan->channel_count * sizeof(struct iwl3945_scan_channel);
+       cmd.data = scan;
+       scan->len = cpu_to_le16(cmd.len);
+
+       set_bit(STATUS_SCAN_HW, &priv->status);
+       ret = iwl_legacy_send_cmd_sync(priv, &cmd);
+       if (ret)
+               clear_bit(STATUS_SCAN_HW, &priv->status);
+       return ret;
+}
+
+void iwl3945_post_scan(struct iwl_priv *priv)
+{
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+       /*
+        * Since setting the RXON may have been deferred while
+        * performing the scan, fire one off if needed
+        */
+       if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+               iwl3945_commit_rxon(priv, ctx);
+}
+
+static void iwl3945_bg_restart(struct work_struct *data)
+{
+       struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
+               struct iwl_rxon_context *ctx;
+               mutex_lock(&priv->mutex);
+               for_each_context(priv, ctx)
+                       ctx->vif = NULL;
+               priv->is_open = 0;
+               mutex_unlock(&priv->mutex);
+               iwl3945_down(priv);
+               ieee80211_restart_hw(priv->hw);
+       } else {
+               iwl3945_down(priv);
+
+               if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+                       return;
+
+               mutex_lock(&priv->mutex);
+               __iwl3945_up(priv);
+               mutex_unlock(&priv->mutex);
+       }
+}
+
+static void iwl3945_bg_rx_replenish(struct work_struct *data)
+{
+       struct iwl_priv *priv =
+           container_of(data, struct iwl_priv, rx_replenish);
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       mutex_lock(&priv->mutex);
+       iwl3945_rx_replenish(priv);
+       mutex_unlock(&priv->mutex);
+}
+
+void iwl3945_post_associate(struct iwl_priv *priv)
+{
+       int rc = 0;
+       struct ieee80211_conf *conf = NULL;
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+       if (!ctx->vif || !priv->is_open)
+               return;
+
+       IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
+                       ctx->vif->bss_conf.aid, ctx->active.bssid_addr);
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       iwl_legacy_scan_cancel_timeout(priv, 200);
+
+       conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
+
+       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       iwl3945_commit_rxon(priv, ctx);
+
+       rc = iwl_legacy_send_rxon_timing(priv, ctx);
+       if (rc)
+               IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
+                           "Attempting to continue.\n");
+
+       ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+
+       ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
+
+       IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
+                       ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int);
+
+       if (ctx->vif->bss_conf.use_short_preamble)
+               ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+       else
+               ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+       if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+               if (ctx->vif->bss_conf.use_short_slot)
+                       ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+       }
+
+       iwl3945_commit_rxon(priv, ctx);
+
+       switch (ctx->vif->type) {
+       case NL80211_IFTYPE_STATION:
+               iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               iwl3945_send_beacon_cmd(priv);
+               break;
+       default:
+               IWL_ERR(priv, "%s Should not be called in %d mode\n",
+                       __func__, ctx->vif->type);
+               break;
+       }
+}
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
+#define UCODE_READY_TIMEOUT    (2 * HZ)
+
+static int iwl3945_mac_start(struct ieee80211_hw *hw)
+{
+       struct iwl_priv *priv = hw->priv;
+       int ret;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       /* we should be verifying the device is ready to be opened */
+       mutex_lock(&priv->mutex);
+
+       /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
+        * ucode filename and max sizes are card-specific. */
+
+       if (!priv->ucode_code.len) {
+               ret = iwl3945_read_ucode(priv);
+               if (ret) {
+                       IWL_ERR(priv, "Could not read microcode: %d\n", ret);
+                       mutex_unlock(&priv->mutex);
+                       goto out_release_irq;
+               }
+       }
+
+       ret = __iwl3945_up(priv);
+
+       mutex_unlock(&priv->mutex);
+
+       if (ret)
+               goto out_release_irq;
+
+       IWL_DEBUG_INFO(priv, "Start UP work.\n");
+
+       /* Wait for START_ALIVE from ucode. Otherwise callbacks from
+        * mac80211 will not be run successfully. */
+       ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+                       test_bit(STATUS_READY, &priv->status),
+                       UCODE_READY_TIMEOUT);
+       if (!ret) {
+               if (!test_bit(STATUS_READY, &priv->status)) {
+                       IWL_ERR(priv,
+                               "Wait for START_ALIVE timeout after %dms.\n",
+                               jiffies_to_msecs(UCODE_READY_TIMEOUT));
+                       ret = -ETIMEDOUT;
+                       goto out_release_irq;
+               }
+       }
+
+       /* ucode is running and will send rfkill notifications,
+        * no need to poll the killswitch state anymore */
+       cancel_delayed_work(&priv->_3945.rfkill_poll);
+
+       priv->is_open = 1;
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+       return 0;
+
+out_release_irq:
+       priv->is_open = 0;
+       IWL_DEBUG_MAC80211(priv, "leave - failed\n");
+       return ret;
+}
+
+static void iwl3945_mac_stop(struct ieee80211_hw *hw)
+{
+       struct iwl_priv *priv = hw->priv;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       if (!priv->is_open) {
+               IWL_DEBUG_MAC80211(priv, "leave - skip\n");
+               return;
+       }
+
+       priv->is_open = 0;
+
+       iwl3945_down(priv);
+
+       flush_workqueue(priv->workqueue);
+
+       /* start polling the killswitch state again */
+       queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
+                          round_jiffies_relative(2 * HZ));
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       struct iwl_priv *priv = hw->priv;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+                    ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+       if (iwl3945_tx_skb(priv, skb))
+               dev_kfree_skb_any(skb);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+void iwl3945_config_ap(struct iwl_priv *priv)
+{
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       struct ieee80211_vif *vif = ctx->vif;
+       int rc = 0;
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       /* The following should be done only at AP bring up */
+       if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) {
+
+               /* RXON - unassoc (to set timing command) */
+               ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+               iwl3945_commit_rxon(priv, ctx);
+
+               /* RXON Timing */
+               rc = iwl_legacy_send_rxon_timing(priv, ctx);
+               if (rc)
+                       IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
+                                       "Attempting to continue.\n");
+
+               ctx->staging.assoc_id = 0;
+
+               if (vif->bss_conf.use_short_preamble)
+                       ctx->staging.flags |=
+                               RXON_FLG_SHORT_PREAMBLE_MSK;
+               else
+                       ctx->staging.flags &=
+                               ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+               if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+                       if (vif->bss_conf.use_short_slot)
+                               ctx->staging.flags |=
+                                       RXON_FLG_SHORT_SLOT_MSK;
+                       else
+                               ctx->staging.flags &=
+                                       ~RXON_FLG_SHORT_SLOT_MSK;
+               }
+               /* restore RXON assoc */
+               ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+               iwl3945_commit_rxon(priv, ctx);
+       }
+       iwl3945_send_beacon_cmd(priv);
+}
+
+static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+                              struct ieee80211_vif *vif,
+                              struct ieee80211_sta *sta,
+                              struct ieee80211_key_conf *key)
+{
+       struct iwl_priv *priv = hw->priv;
+       int ret = 0;
+       u8 sta_id = IWL_INVALID_STATION;
+       u8 static_key;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       if (iwl3945_mod_params.sw_crypto) {
+               IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
+               return -EOPNOTSUPP;
+       }
+
+       /*
+        * To support IBSS RSN, don't program group keys in IBSS; the
+        * hardware will then not attempt to decrypt the frames.
+        */
+       if (vif->type == NL80211_IFTYPE_ADHOC &&
+           !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+               return -EOPNOTSUPP;
+
+       static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
+
+       if (!static_key) {
+               sta_id = iwl_legacy_sta_id_or_broadcast(
+                               priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
+               if (sta_id == IWL_INVALID_STATION)
+                       return -EINVAL;
+       }
+
+       mutex_lock(&priv->mutex);
+       iwl_legacy_scan_cancel_timeout(priv, 100);
+
+       switch (cmd) {
+       case SET_KEY:
+               if (static_key)
+                       ret = iwl3945_set_static_key(priv, key);
+               else
+                       ret = iwl3945_set_dynamic_key(priv, key, sta_id);
+               IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
+               break;
+       case DISABLE_KEY:
+               if (static_key)
+                       ret = iwl3945_remove_static_key(priv);
+               else
+                       ret = iwl3945_clear_sta_key_info(priv, sta_id);
+               IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
+               break;
+       default:
+               ret = -EINVAL;
+       }
+
+       mutex_unlock(&priv->mutex);
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+
+       return ret;
+}
+
+static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
+                              struct ieee80211_vif *vif,
+                              struct ieee80211_sta *sta)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv;
+       int ret;
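+       /* In station mode the peer being added is our AP, hence is_ap */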
+       bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+       u8 sta_id;
+
+       IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
+                       sta->addr);
+       mutex_lock(&priv->mutex);
+       IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
+                       sta->addr);
+       sta_priv->common.sta_id = IWL_INVALID_STATION;
+
+
+       ret = iwl_legacy_add_station_common(priv,
+                               &priv->contexts[IWL_RXON_CTX_BSS],
+                                    sta->addr, is_ap, sta, &sta_id);
+       if (ret) {
+               IWL_ERR(priv, "Unable to add station %pM (%d)\n",
+                       sta->addr, ret);
+               /* Should we return success if return code is EEXIST ? */
+               mutex_unlock(&priv->mutex);
+               return ret;
+       }
+
+       sta_priv->common.sta_id = sta_id;
+
+       /* Initialize rate scaling */
+       IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
+                      sta->addr);
+       iwl3945_rs_rate_init(priv, sta, sta_id);
+       mutex_unlock(&priv->mutex);
+
+       return 0;
+}
+
+static void iwl3945_configure_filter(struct ieee80211_hw *hw,
+                                    unsigned int changed_flags,
+                                    unsigned int *total_flags,
+                                    u64 multicast)
+{
+       struct iwl_priv *priv = hw->priv;
+       __le32 filter_or = 0, filter_nand = 0;
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
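+/* CHK(test, flag): if mac80211 requested filter flag 'test', schedule the
+ * RXON filter bit 'flag' to be set; otherwise schedule it to be cleared */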
+#define CHK(test, flag)        do { \
+       if (*total_flags & (test))              \
+               filter_or |= (flag);            \
+       else                                    \
+               filter_nand |= (flag);          \
+       } while (0)
+
+       IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
+                       changed_flags, *total_flags);
+
+       CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+       CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
+       CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+       mutex_lock(&priv->mutex);
+
+       ctx->staging.filter_flags &= ~filter_nand;
+       ctx->staging.filter_flags |= filter_or;
+
+       /*
+        * Not committing directly because the hardware may be performing
+        * a scan; even when the hw is ready, committing here breaks for
+        * some reason, so we'll eventually commit the filter flags change
+        * anyway.
+        */
+
+       mutex_unlock(&priv->mutex);
+
+       /*
+        * Receiving all multicast frames is always enabled by the
+        * default flags setup in iwl_legacy_connection_init_rx_config()
+        * since we currently do not support programming multicast
+        * filters into the device.
+        */
+       *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+                       FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+
+/*
+ * The following adds a new attribute to the sysfs representation
+ * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
+ * used for controlling the debug level.
+ *
+ * See the level definitions in iwl for details.
+ *
+ * The debug_level being managed using sysfs below is a per device debug
+ * level that is used instead of the global debug level if it (the per
+ * device debug level) is set.
+ */
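+/*
+ * Example usage from userspace (illustrative only; the exact sysfs path
+ * depends on the device's PCI address):
+ *   cat /sys/bus/pci/devices/<bdf>/debug_level
+ *   echo 0x100 > /sys/bus/pci/devices/<bdf>/debug_level
+ */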
+static ssize_t iwl3945_show_debug_level(struct device *d,
+                               struct device_attribute *attr, char *buf)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+       return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
+}
+static ssize_t iwl3945_store_debug_level(struct device *d,
+                               struct device_attribute *attr,
+                                const char *buf, size_t count)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+       unsigned long val;
+       int ret;
+
+       ret = strict_strtoul(buf, 0, &val);
+       if (ret)
+               IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
+       else {
+               priv->debug_level = val;
+               if (iwl_legacy_alloc_traffic_mem(priv))
+                       IWL_ERR(priv,
+                               "Not enough memory to generate traffic log\n");
+       }
+       return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
+                       iwl3945_show_debug_level, iwl3945_store_debug_level);
+
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
+
+static ssize_t iwl3945_show_temperature(struct device *d,
+                               struct device_attribute *attr, char *buf)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+
+       if (!iwl_legacy_is_alive(priv))
+               return -EAGAIN;
+
+       return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
+}
+
+static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL);
+
+static ssize_t iwl3945_show_tx_power(struct device *d,
+                            struct device_attribute *attr, char *buf)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+       return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
+}
+
+static ssize_t iwl3945_store_tx_power(struct device *d,
+                             struct device_attribute *attr,
+                             const char *buf, size_t count)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+       char *p = (char *)buf;
+       u32 val;
+
+       val = simple_strtoul(p, &p, 10);
+       if (p == buf)
+               IWL_INFO(priv, "%s is not in decimal form.\n", buf);
+       else
+               iwl3945_hw_reg_set_txpower(priv, val);
+
+       return count;
+}
+
+static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power);
+
+static ssize_t iwl3945_show_flags(struct device *d,
+                         struct device_attribute *attr, char *buf)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+       return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.flags));
+}
+
+static ssize_t iwl3945_store_flags(struct device *d,
+                          struct device_attribute *attr,
+                          const char *buf, size_t count)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+       u32 flags = simple_strtoul(buf, NULL, 0);
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+       mutex_lock(&priv->mutex);
+       if (le32_to_cpu(ctx->staging.flags) != flags) {
+               /* Cancel any currently running scans... */
+               if (iwl_legacy_scan_cancel_timeout(priv, 100))
+                       IWL_WARN(priv, "Could not cancel scan.\n");
+               else {
+                       IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
+                                      flags);
+                       ctx->staging.flags = cpu_to_le32(flags);
+                       iwl3945_commit_rxon(priv, ctx);
+               }
+       }
+       mutex_unlock(&priv->mutex);
+
+       return count;
+}
+
+static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags);
+
+static ssize_t iwl3945_show_filter_flags(struct device *d,
+                                struct device_attribute *attr, char *buf)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+       return sprintf(buf, "0x%04X\n",
+               le32_to_cpu(ctx->active.filter_flags));
+}
+
+static ssize_t iwl3945_store_filter_flags(struct device *d,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t count)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       u32 filter_flags = simple_strtoul(buf, NULL, 0);
+
+       mutex_lock(&priv->mutex);
+       if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
+               /* Cancel any currently running scans... */
+               if (iwl_legacy_scan_cancel_timeout(priv, 100))
+                       IWL_WARN(priv, "Could not cancel scan.\n");
+               else {
+                       IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
+                                      "0x%04X\n", filter_flags);
+                       ctx->staging.filter_flags =
+                               cpu_to_le32(filter_flags);
+                       iwl3945_commit_rxon(priv, ctx);
+               }
+       }
+       mutex_unlock(&priv->mutex);
+
+       return count;
+}
+
+static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags,
+                  iwl3945_store_filter_flags);
+
+static ssize_t iwl3945_show_measurement(struct device *d,
+                               struct device_attribute *attr, char *buf)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+       struct iwl_spectrum_notification measure_report;
+       u32 size = sizeof(measure_report), len = 0, ofs = 0;
+       u8 *data = (u8 *)&measure_report;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->lock, flags);
+       if (!(priv->measurement_status & MEASUREMENT_READY)) {
+               spin_unlock_irqrestore(&priv->lock, flags);
+               return 0;
+       }
+       memcpy(&measure_report, &priv->measure_report, size);
+       priv->measurement_status = 0;
+       spin_unlock_irqrestore(&priv->lock, flags);
+
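+       /* Hex-dump the snapshot into the sysfs buffer, 16 bytes per row */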
+       while (size && (PAGE_SIZE - len)) {
+               hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
+                                  PAGE_SIZE - len, 1);
+               len = strlen(buf);
+               if (PAGE_SIZE - len)
+                       buf[len++] = '\n';
+
+               ofs += 16;
+               size -= min(size, 16U);
+       }
+
+       return len;
+}
+
+static ssize_t iwl3945_store_measurement(struct device *d,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       struct ieee80211_measurement_params params = {
+               .channel = le16_to_cpu(ctx->active.channel),
+               .start_time = cpu_to_le64(priv->_3945.last_tsf),
+               .duration = cpu_to_le16(1),
+       };
+       u8 type = IWL_MEASURE_BASIC;
+       u8 buffer[32];
+       u8 channel;
+
+       if (count) {
+               char *p = buffer;
+               size_t len = min(sizeof(buffer) - 1, count);
+
+               strncpy(buffer, buf, len);
+               buffer[len] = '\0';
+               channel = simple_strtoul(p, NULL, 0);
+               if (channel)
+                       params.channel = channel;
+
+               p = buffer;
+               while (*p && *p != ' ')
+                       p++;
+               if (*p)
+                       type = simple_strtoul(p + 1, NULL, 0);
+       }
+
+       IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on "
+                      "channel %d (for '%s')\n", type, params.channel, buf);
+       iwl3945_get_measurement(priv, &params, type);
+
+       return count;
+}
+
+static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
+                  iwl3945_show_measurement, iwl3945_store_measurement);
+
+static ssize_t iwl3945_store_retry_rate(struct device *d,
+                               struct device_attribute *attr,
+                               const char *buf, size_t count)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+
+       priv->retry_rate = simple_strtoul(buf, NULL, 0);
+       if (priv->retry_rate <= 0)
+               priv->retry_rate = 1;
+
+       return count;
+}
+
+static ssize_t iwl3945_show_retry_rate(struct device *d,
+                              struct device_attribute *attr, char *buf)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+       return sprintf(buf, "%d\n", priv->retry_rate);
+}
+
+static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate,
+                  iwl3945_store_retry_rate);
+
+
+static ssize_t iwl3945_show_channels(struct device *d,
+                            struct device_attribute *attr, char *buf)
+{
+       /* none of this belongs in sysfs anyway */
+       return 0;
+}
+
+static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL);
+
+static ssize_t iwl3945_show_antenna(struct device *d,
+                           struct device_attribute *attr, char *buf)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+
+       if (!iwl_legacy_is_alive(priv))
+               return -EAGAIN;
+
+       return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
+}
+
+static ssize_t iwl3945_store_antenna(struct device *d,
+                            struct device_attribute *attr,
+                            const char *buf, size_t count)
+{
+       struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d);
+       int ant;
+
+       if (count == 0)
+               return 0;
+
+       if (sscanf(buf, "%1i", &ant) != 1) {
+               IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n");
+               return count;
+       }
+
+       if ((ant >= 0) && (ant <= 2)) {
+               IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant);
+               iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant;
+       } else
+               IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant);
+
+
+       return count;
+}
+
+static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna);
+
+static ssize_t iwl3945_show_status(struct device *d,
+                          struct device_attribute *attr, char *buf)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+       if (!iwl_legacy_is_alive(priv))
+               return -EAGAIN;
+       return sprintf(buf, "0x%08x\n", (int)priv->status);
+}
+
+static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL);
+
+static ssize_t iwl3945_dump_error_log(struct device *d,
+                             struct device_attribute *attr,
+                             const char *buf, size_t count)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+       char *p = (char *)buf;
+
+       if (p[0] == '1')
+               iwl3945_dump_nic_error_log(priv);
+
+       return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log);
+
+/*****************************************************************************
+ *
+ * driver setup and tear down
+ *
+ *****************************************************************************/
+
+static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
+{
+       priv->workqueue = create_singlethread_workqueue(DRV_NAME);
+
+       init_waitqueue_head(&priv->wait_command_queue);
+
+       INIT_WORK(&priv->restart, iwl3945_bg_restart);
+       INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
+       INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
+       INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
+       INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
+
+       iwl_legacy_setup_scan_deferred_work(priv);
+
+       iwl3945_hw_setup_deferred_work(priv);
+
+       init_timer(&priv->watchdog);
+       priv->watchdog.data = (unsigned long)priv;
+       priv->watchdog.function = iwl_legacy_bg_watchdog;
+
+       tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+                    iwl3945_irq_tasklet, (unsigned long)priv);
+}
+
+static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
+{
+       iwl3945_hw_cancel_deferred_work(priv);
+
+       cancel_delayed_work_sync(&priv->init_alive_start);
+       cancel_delayed_work(&priv->alive_start);
+
+       iwl_legacy_cancel_scan_deferred_work(priv);
+}
+
+static struct attribute *iwl3945_sysfs_entries[] = {
+       &dev_attr_antenna.attr,
+       &dev_attr_channels.attr,
+       &dev_attr_dump_errors.attr,
+       &dev_attr_flags.attr,
+       &dev_attr_filter_flags.attr,
+       &dev_attr_measurement.attr,
+       &dev_attr_retry_rate.attr,
+       &dev_attr_status.attr,
+       &dev_attr_temperature.attr,
+       &dev_attr_tx_power.attr,
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       &dev_attr_debug_level.attr,
+#endif
+       NULL
+};
+
+static struct attribute_group iwl3945_attribute_group = {
+       .name = NULL,           /* put in device directory */
+       .attrs = iwl3945_sysfs_entries,
+};
+
+struct ieee80211_ops iwl3945_hw_ops = {
+       .tx = iwl3945_mac_tx,
+       .start = iwl3945_mac_start,
+       .stop = iwl3945_mac_stop,
+       .add_interface = iwl_legacy_mac_add_interface,
+       .remove_interface = iwl_legacy_mac_remove_interface,
+       .change_interface = iwl_legacy_mac_change_interface,
+       .config = iwl_legacy_mac_config,
+       .configure_filter = iwl3945_configure_filter,
+       .set_key = iwl3945_mac_set_key,
+       .conf_tx = iwl_legacy_mac_conf_tx,
+       .reset_tsf = iwl_legacy_mac_reset_tsf,
+       .bss_info_changed = iwl_legacy_mac_bss_info_changed,
+       .hw_scan = iwl_legacy_mac_hw_scan,
+       .sta_add = iwl3945_mac_sta_add,
+       .sta_remove = iwl_legacy_mac_sta_remove,
+       .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
+};
+
+static int iwl3945_init_drv(struct iwl_priv *priv)
+{
+       int ret;
+       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
+
+       priv->retry_rate = 1;
+       priv->beacon_skb = NULL;
+
+       spin_lock_init(&priv->sta_lock);
+       spin_lock_init(&priv->hcmd_lock);
+
+       INIT_LIST_HEAD(&priv->free_frames);
+
+       mutex_init(&priv->mutex);
+       mutex_init(&priv->sync_cmd_mutex);
+
+       priv->ieee_channels = NULL;
+       priv->ieee_rates = NULL;
+       priv->band = IEEE80211_BAND_2GHZ;
+
+       priv->iw_mode = NL80211_IFTYPE_STATION;
+       priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
+
+       /* initialize force reset */
+       priv->force_reset[IWL_RF_RESET].reset_duration =
+               IWL_DELAY_NEXT_FORCE_RF_RESET;
+       priv->force_reset[IWL_FW_RESET].reset_duration =
+               IWL_DELAY_NEXT_FORCE_FW_RELOAD;
+
+
+       priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
+       priv->tx_power_next = IWL_DEFAULT_TX_POWER;
+
+       if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
+               IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
+                        eeprom->version);
+               ret = -EINVAL;
+               goto err;
+       }
+       ret = iwl_legacy_init_channel_map(priv);
+       if (ret) {
+               IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
+               goto err;
+       }
+
+       /* Set up txpower settings in driver for all channels */
+       if (iwl3945_txpower_set_from_eeprom(priv)) {
+               ret = -EIO;
+               goto err_free_channel_map;
+       }
+
+       ret = iwl_legacy_init_geos(priv);
+       if (ret) {
+               IWL_ERR(priv, "initializing geos failed: %d\n", ret);
+               goto err_free_channel_map;
+       }
+       iwl3945_init_hw_rates(priv, priv->ieee_rates);
+
+       return 0;
+
+err_free_channel_map:
+       iwl_legacy_free_channel_map(priv);
+err:
+       return ret;
+}
+
+#define IWL3945_MAX_PROBE_REQUEST      200
+
+static int iwl3945_setup_mac(struct iwl_priv *priv)
+{
+       int ret;
+       struct ieee80211_hw *hw = priv->hw;
+
+       hw->rate_control_algorithm = "iwl-3945-rs";
+       hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
+       hw->vif_data_size = sizeof(struct iwl_vif_priv);
+
+       /* Tell mac80211 our characteristics */
+       hw->flags = IEEE80211_HW_SIGNAL_DBM |
+                   IEEE80211_HW_SPECTRUM_MGMT;
+
+       hw->wiphy->interface_modes =
+               priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
+
+       hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
+                           WIPHY_FLAG_DISABLE_BEACON_HINTS |
+                           WIPHY_FLAG_IBSS_RSN;
+
+       hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
+       /* we create the 802.11 header and a zero-length SSID element */
+       hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2;
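+       /* (IWL3945_MAX_PROBE_REQUEST minus the 24-byte 802.11 management
+        * header and the 2-byte zero-length SSID element noted above) */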
+
+       /* Default value; 4 EDCA QOS priorities */
+       hw->queues = 4;
+
+       if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
+               priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+                       &priv->bands[IEEE80211_BAND_2GHZ];
+
+       if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
+               priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+                       &priv->bands[IEEE80211_BAND_5GHZ];
+
+       iwl_legacy_leds_init(priv);
+
+       ret = ieee80211_register_hw(priv->hw);
+       if (ret) {
+               IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
+               return ret;
+       }
+       priv->mac80211_registered = 1;
+
+       return 0;
+}
+
+static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       int err = 0, i;
+       struct iwl_priv *priv;
+       struct ieee80211_hw *hw;
+       struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
+       struct iwl3945_eeprom *eeprom;
+       unsigned long flags;
+
+       /***********************
+        * 1. Allocating HW data
+        * ********************/
+
+       /* mac80211 allocates memory for this device instance, including
+        *   space for this driver's private structure */
+       hw = iwl_legacy_alloc_all(cfg);
+       if (hw == NULL) {
+               pr_err("Can not allocate network device\n");
+               err = -ENOMEM;
+               goto out;
+       }
+       priv = hw->priv;
+       SET_IEEE80211_DEV(hw, &pdev->dev);
+
+       priv->cmd_queue = IWL39_CMD_QUEUE_NUM;
+
+       /* 3945 has only one valid context */
+       priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
+
+       for (i = 0; i < NUM_IWL_RXON_CTX; i++)
+               priv->contexts[i].ctxid = i;
+
+       priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
+       priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
+       priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
+       priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
+       priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
+       priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
+       priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
+               BIT(NL80211_IFTYPE_STATION) |
+               BIT(NL80211_IFTYPE_ADHOC);
+       priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
+       priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
+       priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
+
+       /*
+        * Disabling hardware scan means that mac80211 will perform scans
+        * "the hard way", rather than using the device's scan.
+        */
+       if (iwl3945_mod_params.disable_hw_scan) {
+               dev_printk(KERN_DEBUG, &(pdev->dev),
+                       "sw scan support is deprecated\n");
+               iwl3945_hw_ops.hw_scan = NULL;
+       }
+
+       IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
+       priv->cfg = cfg;
+       priv->pci_dev = pdev;
+       priv->inta_mask = CSR_INI_SET_MASK;
+
+       if (iwl_legacy_alloc_traffic_mem(priv))
+               IWL_ERR(priv, "Not enough memory to generate traffic log\n");
+
+       /***************************
+        * 2. Initializing PCI bus
+        * *************************/
+       pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+                               PCIE_LINK_STATE_CLKPM);
+
+       if (pci_enable_device(pdev)) {
+               err = -ENODEV;
+               goto out_ieee80211_free_hw;
+       }
+
+       pci_set_master(pdev);
+
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (!err)
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (err) {
+               IWL_WARN(priv, "No suitable DMA available.\n");
+               goto out_pci_disable_device;
+       }
+
+       pci_set_drvdata(pdev, priv);
+       err = pci_request_regions(pdev, DRV_NAME);
+       if (err)
+               goto out_pci_disable_device;
+
+       /***********************
+        * 3. Read REV Register
+        * ********************/
+       priv->hw_base = pci_iomap(pdev, 0, 0);
+       if (!priv->hw_base) {
+               err = -ENODEV;
+               goto out_pci_release_regions;
+       }
+
+       IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
+                       (unsigned long long) pci_resource_len(pdev, 0));
+       IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
+
+       /* We disable the RETRY_TIMEOUT register (0x41) to keep
+        * PCI Tx retries from interfering with C3 CPU state */
+       pci_write_config_byte(pdev, 0x41, 0x00);
+
+       /* these spin locks will be used in apm_ops.init and EEPROM access,
+        * so we should init them now
+        */
+       spin_lock_init(&priv->reg_lock);
+       spin_lock_init(&priv->lock);
+
+       /*
+        * stop and reset the on-board processor just in case it is in a
+        * strange state ... like being left stranded by a primary kernel
+        * and this is now the kdump kernel trying to start up
+        */
+       iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+       /***********************
+        * 4. Read EEPROM
+        * ********************/
+
+       /* Read the EEPROM */
+       err = iwl_legacy_eeprom_init(priv);
+       if (err) {
+               IWL_ERR(priv, "Unable to init EEPROM\n");
+               goto out_iounmap;
+       }
+       /* MAC Address location in EEPROM same for 3945/4965 */
+       eeprom = (struct iwl3945_eeprom *)priv->eeprom;
+       IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address);
+       SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address);
+
+       /***********************
+        * 5. Setup HW Constants
+        * ********************/
+       /* Device-specific setup */
+       if (iwl3945_hw_set_hw_params(priv)) {
+               IWL_ERR(priv, "failed to set hw settings\n");
+               goto out_eeprom_free;
+       }
+
+       /***********************
+        * 6. Setup priv
+        * ********************/
+
+       err = iwl3945_init_drv(priv);
+       if (err) {
+               IWL_ERR(priv, "initializing driver failed\n");
+               goto out_unset_hw_params;
+       }
+
+       IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
+               priv->cfg->name);
+
+       /***********************
+        * 7. Setup Services
+        * ********************/
+
+       spin_lock_irqsave(&priv->lock, flags);
+       iwl_legacy_disable_interrupts(priv);
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       pci_enable_msi(priv->pci_dev);
+
+       err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
+                         IRQF_SHARED, DRV_NAME, priv);
+       if (err) {
+               IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
+               goto out_disable_msi;
+       }
+
+       err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
+       if (err) {
+               IWL_ERR(priv, "failed to create sysfs device attributes\n");
+               goto out_release_irq;
+       }
+
+       iwl_legacy_set_rxon_channel(priv,
+                            &priv->bands[IEEE80211_BAND_2GHZ].channels[5],
+                            &priv->contexts[IWL_RXON_CTX_BSS]);
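+       /* (channel index 5 in the 2.4 GHz band is presumably channel 6; this
+        *  just seeds a sane default channel before mac80211 configures one) */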
+       iwl3945_setup_deferred_work(priv);
+       iwl3945_setup_rx_handlers(priv);
+       iwl_legacy_power_initialize(priv);
+
+       /*********************************
+        * 8. Setup and Register mac80211
+        * *******************************/
+
+       iwl_legacy_enable_interrupts(priv);
+
+       err = iwl3945_setup_mac(priv);
+       if (err)
+               goto out_remove_sysfs;
+
+       err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
+       if (err)
+               IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
+
+       /* Start monitoring the killswitch */
+       queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
+                          2 * HZ);
+
+       return 0;
+
+ out_remove_sysfs:
+       destroy_workqueue(priv->workqueue);
+       priv->workqueue = NULL;
+       sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
+ out_release_irq:
+       free_irq(priv->pci_dev->irq, priv);
+ out_disable_msi:
+       pci_disable_msi(priv->pci_dev);
+       iwl_legacy_free_geos(priv);
+       iwl_legacy_free_channel_map(priv);
+ out_unset_hw_params:
+       iwl3945_unset_hw_params(priv);
+ out_eeprom_free:
+       iwl_legacy_eeprom_free(priv);
+ out_iounmap:
+       pci_iounmap(pdev, priv->hw_base);
+ out_pci_release_regions:
+       pci_release_regions(pdev);
+ out_pci_disable_device:
+       pci_set_drvdata(pdev, NULL);
+       pci_disable_device(pdev);
+ out_ieee80211_free_hw:
+       iwl_legacy_free_traffic_mem(priv);
+       ieee80211_free_hw(priv->hw);
+ out:
+       return err;
+}
+
+static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
+{
+       struct iwl_priv *priv = pci_get_drvdata(pdev);
+       unsigned long flags;
+
+       if (!priv)
+               return;
+
+       IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
+
+       iwl_legacy_dbgfs_unregister(priv);
+
+       set_bit(STATUS_EXIT_PENDING, &priv->status);
+
+       iwl_legacy_leds_exit(priv);
+
+       if (priv->mac80211_registered) {
+               ieee80211_unregister_hw(priv->hw);
+               priv->mac80211_registered = 0;
+       } else {
+               iwl3945_down(priv);
+       }
+
+       /*
+        * Make sure device is reset to low power before unloading driver.
+        * This may be redundant with iwl_down(), but there are paths to
+        * run iwl_down() without calling apm_ops.stop(), and there are
+        * paths to avoid running iwl_down() at all before leaving the driver.
+        * This (inexpensive) call *makes sure* device is reset.
+        */
+       iwl_legacy_apm_stop(priv);
+
+       /* make sure we flush any pending irq or
+        * tasklet for the driver
+        */
+       spin_lock_irqsave(&priv->lock, flags);
+       iwl_legacy_disable_interrupts(priv);
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       iwl3945_synchronize_irq(priv);
+
+       sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
+
+       cancel_delayed_work_sync(&priv->_3945.rfkill_poll);
+
+       iwl3945_dealloc_ucode_pci(priv);
+
+       if (priv->rxq.bd)
+               iwl3945_rx_queue_free(priv, &priv->rxq);
+       iwl3945_hw_txq_ctx_free(priv);
+
+       iwl3945_unset_hw_params(priv);
+
+       /*netif_stop_queue(dev); */
+       flush_workqueue(priv->workqueue);
+
+       /* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes
+        * priv->workqueue... so we can't take down the workqueue
+        * until now... */
+       destroy_workqueue(priv->workqueue);
+       priv->workqueue = NULL;
+       iwl_legacy_free_traffic_mem(priv);
+
+       free_irq(pdev->irq, priv);
+       pci_disable_msi(pdev);
+
+       pci_iounmap(pdev, priv->hw_base);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+
+       iwl_legacy_free_channel_map(priv);
+       iwl_legacy_free_geos(priv);
+       kfree(priv->scan_cmd);
+       if (priv->beacon_skb)
+               dev_kfree_skb(priv->beacon_skb);
+
+       ieee80211_free_hw(priv->hw);
+}
+
+
+/*****************************************************************************
+ *
+ * driver and module entry point
+ *
+ *****************************************************************************/
+
+static struct pci_driver iwl3945_driver = {
+       .name = DRV_NAME,
+       .id_table = iwl3945_hw_card_ids,
+       .probe = iwl3945_pci_probe,
+       .remove = __devexit_p(iwl3945_pci_remove),
+       .driver.pm = IWL_LEGACY_PM_OPS,
+};
+
+static int __init iwl3945_init(void)
+{
+
+       int ret;
+       pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+       pr_info(DRV_COPYRIGHT "\n");
+
+       ret = iwl3945_rate_control_register();
+       if (ret) {
+               pr_err("Unable to register rate control algorithm: %d\n", ret);
+               return ret;
+       }
+
+       ret = pci_register_driver(&iwl3945_driver);
+       if (ret) {
+               pr_err("Unable to initialize PCI module\n");
+               goto error_register;
+       }
+
+       return ret;
+
+error_register:
+       iwl3945_rate_control_unregister();
+       return ret;
+}
+
+static void __exit iwl3945_exit(void)
+{
+       pci_unregister_driver(&iwl3945_driver);
+       iwl3945_rate_control_unregister();
+}
+
+MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
+
+module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
+MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
+module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
+MODULE_PARM_DESC(swcrypto,
+               "using software crypto (default 1 [software])");
+module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
+               int, S_IRUGO);
+MODULE_PARM_DESC(disable_hw_scan,
+               "disable hardware scanning (default 0) (deprecated)");
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
+
+module_exit(iwl3945_exit);
+module_init(iwl3945_init);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
new file mode 100644 (file)
index 0000000..91b3d8b
--- /dev/null
@@ -0,0 +1,3632 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#define DRV_NAME        "iwl4965"
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-sta.h"
+#include "iwl-4965-calib.h"
+#include "iwl-4965.h"
+#include "iwl-4965-led.h"
+
+
+/******************************************************************************
+ *
+ * module boiler plate
+ *
+ ******************************************************************************/
+
+/*
+ * module name, copyright, version, etc.
+ */
+#define DRV_DESCRIPTION        "Intel(R) Wireless WiFi 4965 driver for Linux"
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+#define DRV_VERSION     IWLWIFI_VERSION VD
+
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("iwl4965");
+
+void iwl4965_update_chain_flags(struct iwl_priv *priv)
+{
+       struct iwl_rxon_context *ctx;
+
+       if (priv->cfg->ops->hcmd->set_rxon_chain) {
+               for_each_context(priv, ctx) {
+                       priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+                       if (ctx->active.rx_chain != ctx->staging.rx_chain)
+                               iwl_legacy_commit_rxon(priv, ctx);
+               }
+       }
+}
+
+static void iwl4965_clear_free_frames(struct iwl_priv *priv)
+{
+       struct list_head *element;
+
+       IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
+                      priv->frames_count);
+
+       while (!list_empty(&priv->free_frames)) {
+               element = priv->free_frames.next;
+               list_del(element);
+               kfree(list_entry(element, struct iwl_frame, list));
+               priv->frames_count--;
+       }
+
+       if (priv->frames_count) {
+               IWL_WARN(priv, "%d frames still in use.  Did we lose one?\n",
+                           priv->frames_count);
+               priv->frames_count = 0;
+       }
+}
+
+static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
+{
+       struct iwl_frame *frame;
+       struct list_head *element;
+       if (list_empty(&priv->free_frames)) {
+               frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+               if (!frame) {
+                       IWL_ERR(priv, "Could not allocate frame!\n");
+                       return NULL;
+               }
+
+               priv->frames_count++;
+               return frame;
+       }
+
+       element = priv->free_frames.next;
+       list_del(element);
+       return list_entry(element, struct iwl_frame, list);
+}
+
+static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
+{
+       memset(frame, 0, sizeof(*frame));
+       list_add(&frame->list, &priv->free_frames);
+}
+
+static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv,
+                                struct ieee80211_hdr *hdr,
+                                int left)
+{
+       lockdep_assert_held(&priv->mutex);
+
+       if (!priv->beacon_skb)
+               return 0;
+
+       if (priv->beacon_skb->len > left)
+               return 0;
+
+       memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
+
+       return priv->beacon_skb->len;
+}
+
+/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
+static void iwl4965_set_beacon_tim(struct iwl_priv *priv,
+                              struct iwl_tx_beacon_cmd *tx_beacon_cmd,
+                              u8 *beacon, u32 frame_size)
+{
+       u16 tim_idx;
+       struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
+
+       /*
+        * The index is relative to frame start but we start looking at the
+        * variable-length part of the beacon.
+        */
+       tim_idx = mgmt->u.beacon.variable - beacon;
+
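+       /*
+        * Note: beacon IEs use the standard [id | length | payload] layout,
+        * so each step of the loop below advances by the element length plus
+        * the two header octets (e.g. an SSID element of length 6 spans 8 bytes).
+        */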
+       /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
+       while ((tim_idx < (frame_size - 2)) &&
+                       (beacon[tim_idx] != WLAN_EID_TIM))
+               tim_idx += beacon[tim_idx+1] + 2;
+
+       /* If TIM field was found, set variables */
+       if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
+               tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
+               tx_beacon_cmd->tim_size = beacon[tim_idx+1];
+       } else
+               IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
+}
+
+static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
+                                      struct iwl_frame *frame)
+{
+       struct iwl_tx_beacon_cmd *tx_beacon_cmd;
+       u32 frame_size;
+       u32 rate_flags;
+       u32 rate;
+       /*
+        * We have to set up the TX command, the TX Beacon command, and the
+        * beacon contents.
+        */
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (!priv->beacon_ctx) {
+               IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
+               return 0;
+       }
+
+       /* Initialize memory */
+       tx_beacon_cmd = &frame->u.beacon;
+       memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
+
+       /* Set up TX beacon contents */
+       frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame,
+                               sizeof(frame->u) - sizeof(*tx_beacon_cmd));
+       if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
+               return 0;
+       if (!frame_size)
+               return 0;
+
+       /* Set up TX command fields */
+       tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
+       tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
+       tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+       tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
+               TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
+
+       /* Set up TX beacon command fields */
+       iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
+                          frame_size);
+
+       /* Set up packet rate and flags */
+       rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx);
+       priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
+                                             priv->hw_params.valid_tx_ant);
+       rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
+       if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
+               rate_flags |= RATE_MCS_CCK_MSK;
+       tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
+                       rate_flags);
+
+       return sizeof(*tx_beacon_cmd) + frame_size;
+}
+
+int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
+{
+       struct iwl_frame *frame;
+       unsigned int frame_size;
+       int rc;
+
+       frame = iwl4965_get_free_frame(priv);
+       if (!frame) {
+               IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
+                         "command.\n");
+               return -ENOMEM;
+       }
+
+       frame_size = iwl4965_hw_get_beacon_cmd(priv, frame);
+       if (!frame_size) {
+               IWL_ERR(priv, "Error configuring the beacon command\n");
+               iwl4965_free_frame(priv, frame);
+               return -EINVAL;
+       }
+
+       rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
+                             &frame->u.cmd[0]);
+
+       iwl4965_free_frame(priv, frame);
+
+       return rc;
+}
+
+static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+{
+       struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+       dma_addr_t addr = get_unaligned_le32(&tb->lo);
+       if (sizeof(dma_addr_t) > sizeof(u32))
+               addr |=
+               ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
+
+       return addr;
+}
+
+static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+       struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+       return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
+                                 dma_addr_t addr, u16 len)
+{
+       struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+       u16 hi_n_len = len << 4;
+
+       put_unaligned_le32(addr, &tb->lo);
+       if (sizeof(dma_addr_t) > sizeof(u32))
+               hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+       tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+       tfd->num_tbs = idx + 1;
+}
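+
+/*
+ * Note on the packing used by the helpers above: tb->lo holds the low 32 bits
+ * of the DMA address, while hi_n_len packs the upper 4 address bits (35:32)
+ * into bits 0-3 and the buffer length into bits 4-15.  The ">> 16 >> 16"
+ * idiom is used instead of ">> 32" so the shift stays well defined when
+ * dma_addr_t is only 32 bits wide.
+ */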
+
+static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd)
+{
+       return tfd->num_tbs & 0x1f;
+}
+
+/**
+ * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @priv: driver private data
+ * @txq: tx queue
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+       struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
+       struct iwl_tfd *tfd;
+       struct pci_dev *dev = priv->pci_dev;
+       int index = txq->q.read_ptr;
+       int i;
+       int num_tbs;
+
+       tfd = &tfd_tmp[index];
+
+       /* Sanity check on number of chunks */
+       num_tbs = iwl4965_tfd_get_num_tbs(tfd);
+
+       if (num_tbs >= IWL_NUM_OF_TBS) {
+               IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
+               /* @todo issue fatal error, it is quite a serious situation */
+               return;
+       }
+
+       /* Unmap tx_cmd */
+       if (num_tbs)
+               pci_unmap_single(dev,
+                               dma_unmap_addr(&txq->meta[index], mapping),
+                               dma_unmap_len(&txq->meta[index], len),
+                               PCI_DMA_BIDIRECTIONAL);
+
+       /* Unmap chunks, if any. */
+       for (i = 1; i < num_tbs; i++)
+               pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i),
+                               iwl4965_tfd_tb_get_len(tfd, i),
+                               PCI_DMA_TODEVICE);
+
+       /* free SKB */
+       if (txq->txb) {
+               struct sk_buff *skb;
+
+               skb = txq->txb[txq->q.read_ptr].skb;
+
+               /* can be called from irqs-disabled context */
+               if (skb) {
+                       dev_kfree_skb_any(skb);
+                       txq->txb[txq->q.read_ptr].skb = NULL;
+               }
+       }
+}
+
+int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+                                struct iwl_tx_queue *txq,
+                                dma_addr_t addr, u16 len,
+                                u8 reset, u8 pad)
+{
+       struct iwl_queue *q;
+       struct iwl_tfd *tfd, *tfd_tmp;
+       u32 num_tbs;
+
+       q = &txq->q;
+       tfd_tmp = (struct iwl_tfd *)txq->tfds;
+       tfd = &tfd_tmp[q->write_ptr];
+
+       if (reset)
+               memset(tfd, 0, sizeof(*tfd));
+
+       num_tbs = iwl4965_tfd_get_num_tbs(tfd);
+
+       /* Each TFD can point to a maximum of 20 Tx buffers */
+       if (num_tbs >= IWL_NUM_OF_TBS) {
+               IWL_ERR(priv, "Error can not send more than %d chunks\n",
+                         IWL_NUM_OF_TBS);
+               return -EINVAL;
+       }
+
+       BUG_ON(addr & ~DMA_BIT_MASK(36));
+       if (unlikely(addr & ~IWL_TX_DMA_MASK))
+               IWL_ERR(priv, "Unaligned address = %llx\n",
+                         (unsigned long long)addr);
+
+       iwl4965_tfd_set_tb(tfd, num_tbs, addr, len);
+
+       return 0;
+}
+
+/*
+ * Tell nic where to find circular buffer of Tx Frame Descriptors for
+ * given Tx queue, and enable the DMA channel used for that queue.
+ *
+ * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
+ * channels supported in hardware.
+ */
+int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
+                        struct iwl_tx_queue *txq)
+{
+       int txq_id = txq->q.id;
+
+       /* Circular buffer (TFD queue in DRAM) physical base address */
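+       /* (the register presumably stores the base address in 256-byte units,
+        *  hence the ">> 8"; the TFD array is expected to be 256-byte aligned) */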
+       iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
+                            txq->q.dma_addr >> 8);
+
+       return 0;
+}
+
+/******************************************************************************
+ *
+ * Generic RX handler implementations
+ *
+ ******************************************************************************/
+static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
+                               struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_alive_resp *palive;
+       struct delayed_work *pwork;
+
+       palive = &pkt->u.alive_frame;
+
+       IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
+                      "0x%01X 0x%01X\n",
+                      palive->is_valid, palive->ver_type,
+                      palive->ver_subtype);
+
+       if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
+               IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
+               memcpy(&priv->card_alive_init,
+                      &pkt->u.alive_frame,
+                      sizeof(struct iwl_init_alive_resp));
+               pwork = &priv->init_alive_start;
+       } else {
+               IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
+               memcpy(&priv->card_alive, &pkt->u.alive_frame,
+                      sizeof(struct iwl_alive_resp));
+               pwork = &priv->alive_start;
+       }
+
+       /* We delay the ALIVE response by 5ms to
+        * give the HW RF Kill time to activate... */
+       if (palive->is_valid == UCODE_VALID_OK)
+               queue_delayed_work(priv->workqueue, pwork,
+                                  msecs_to_jiffies(5));
+       else
+               IWL_WARN(priv, "uCode did not respond OK.\n");
+}
+
+/**
+ * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
+ *
+ * This callback is provided in order to send a statistics request.
+ *
+ * This timer function is continually reset to execute within
+ * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
+ * was received.  We need to ensure we receive the statistics in order
+ * to update the temperature used for calibrating the TXPOWER.
+ */
+static void iwl4965_bg_statistics_periodic(unsigned long data)
+{
+       struct iwl_priv *priv = (struct iwl_priv *)data;
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       /* don't send host command if rf-kill is on */
+       if (!iwl_legacy_is_ready_rf(priv))
+               return;
+
+       iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
+}
+
+
+static void iwl4965_print_cont_event_trace(struct iwl_priv *priv, u32 base,
+                                       u32 start_idx, u32 num_events,
+                                       u32 mode)
+{
+       u32 i;
+       u32 ptr;        /* SRAM byte address of log data */
+       u32 ev, time, data; /* event log data */
+       unsigned long reg_flags;
+
+       if (mode == 0)
+               ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
+       else
+               ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
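+       /*
+        * Layout assumed here: a 4-word log header (capacity, mode, num_wraps,
+        * next_entry -- see iwl4965_continuous_event_trace() below), followed
+        * by fixed-size entries of 2 words (event id, data) without timestamps
+        * or 3 words (event id, timestamp, data) with timestamps.
+        */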
+
+       /* Make sure device is powered up for SRAM reads */
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       if (iwl_grab_nic_access(priv)) {
+               spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+               return;
+       }
+
+       /* Set starting address; reads will auto-increment */
+       _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
+       rmb();
+
+       /*
+        * "time" is actually "data" for mode 0 (no timestamp).
+        * place event id # at far right for easier visual parsing.
+        */
+       for (i = 0; i < num_events; i++) {
+               ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+               time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+               if (mode == 0) {
+                       trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
+                                                       0, time, ev);
+               } else {
+                       data = _iwl_legacy_read_direct32(priv,
+                                               HBUS_TARG_MEM_RDAT);
+                       trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
+                                               time, data, ev);
+               }
+       }
+       /* Allow device to power down */
+       iwl_release_nic_access(priv);
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static void iwl4965_continuous_event_trace(struct iwl_priv *priv)
+{
+       u32 capacity;   /* event log capacity in # entries */
+       u32 base;       /* SRAM byte address of event log header */
+       u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
+       u32 num_wraps;  /* # times uCode wrapped to top of log */
+       u32 next_entry; /* index of next entry to be written by uCode */
+
+       if (priv->ucode_type == UCODE_INIT)
+               base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
+       else
+               base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+       if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+               capacity = iwl_legacy_read_targ_mem(priv, base);
+               num_wraps = iwl_legacy_read_targ_mem(priv,
+                                               base + (2 * sizeof(u32)));
+               mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
+               next_entry = iwl_legacy_read_targ_mem(priv,
+                                               base + (3 * sizeof(u32)));
+       } else
+               return;
+
+       if (num_wraps == priv->event_log.num_wraps) {
+               iwl4965_print_cont_event_trace(priv,
+                                      base, priv->event_log.next_entry,
+                                      next_entry - priv->event_log.next_entry,
+                                      mode);
+               priv->event_log.non_wraps_count++;
+       } else {
+               if ((num_wraps - priv->event_log.num_wraps) > 1)
+                       priv->event_log.wraps_more_count++;
+               else
+                       priv->event_log.wraps_once_count++;
+               trace_iwlwifi_legacy_dev_ucode_wrap_event(priv,
+                               num_wraps - priv->event_log.num_wraps,
+                               next_entry, priv->event_log.next_entry);
+               if (next_entry < priv->event_log.next_entry) {
+                       iwl4965_print_cont_event_trace(priv, base,
+                              priv->event_log.next_entry,
+                              capacity - priv->event_log.next_entry,
+                              mode);
+
+                       iwl4965_print_cont_event_trace(priv, base, 0,
+                               next_entry, mode);
+               } else {
+                       iwl4965_print_cont_event_trace(priv, base,
+                              next_entry, capacity - next_entry,
+                              mode);
+
+                       iwl4965_print_cont_event_trace(priv, base, 0,
+                               next_entry, mode);
+               }
+       }
+       priv->event_log.num_wraps = num_wraps;
+       priv->event_log.next_entry = next_entry;
+}
+
+/**
+ * iwl4965_bg_ucode_trace - Timer callback to log ucode event
+ *
+ * The timer is continually set to execute every
+ * UCODE_TRACE_PERIOD milliseconds after the last timer expired.
+ * This function performs the continuous uCode event logging operation,
+ * if enabled.
+ */
+static void iwl4965_bg_ucode_trace(unsigned long data)
+{
+       struct iwl_priv *priv = (struct iwl_priv *)data;
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       if (priv->event_log.ucode_trace) {
+               iwl4965_continuous_event_trace(priv);
+               /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
+               mod_timer(&priv->ucode_trace,
+                        jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
+       }
+}
+
+static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
+                               struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl4965_beacon_notif *beacon =
+               (struct iwl4965_beacon_notif *)pkt->u.raw;
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+       IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
+               "tsf %d %d rate %d\n",
+               le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
+               beacon->beacon_notify_hdr.failure_frame,
+               le32_to_cpu(beacon->ibss_mgr_status),
+               le32_to_cpu(beacon->high_tsf),
+               le32_to_cpu(beacon->low_tsf), rate);
+#endif
+
+       priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+}
+
+static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv)
+{
+       unsigned long flags;
+
+       IWL_DEBUG_POWER(priv, "Stop all queues\n");
+
+       if (priv->mac80211_registered)
+               ieee80211_stop_queues(priv->hw);
+
+       iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
+                       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+       iwl_read32(priv, CSR_UCODE_DRV_GP1);
+
+       spin_lock_irqsave(&priv->reg_lock, flags);
+       if (!iwl_grab_nic_access(priv))
+               iwl_release_nic_access(priv);
+       spin_unlock_irqrestore(&priv->reg_lock, flags);
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
+                                   struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
+       unsigned long status = priv->status;
+
+       IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
+                         (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+                         (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+                         (flags & CT_CARD_DISABLED) ?
+                         "Reached" : "Not reached");
+
+       if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
+                    CT_CARD_DISABLED)) {
+
+               iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
+                           CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+               iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
+                                       HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+
+               if (!(flags & RXON_CARD_DISABLED)) {
+                       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+                                   CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+                       iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
+                                       HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+               }
+       }
+
+       if (flags & CT_CARD_DISABLED)
+               iwl4965_perform_ct_kill_task(priv);
+
+       if (flags & HW_CARD_DISABLED)
+               set_bit(STATUS_RF_KILL_HW, &priv->status);
+       else
+               clear_bit(STATUS_RF_KILL_HW, &priv->status);
+
+       if (!(flags & RXON_CARD_DISABLED))
+               iwl_legacy_scan_cancel(priv);
+
+       if ((test_bit(STATUS_RF_KILL_HW, &status) !=
+            test_bit(STATUS_RF_KILL_HW, &priv->status)))
+               wiphy_rfkill_set_hw_state(priv->hw->wiphy,
+                       test_bit(STATUS_RF_KILL_HW, &priv->status));
+       else
+               wake_up_interruptible(&priv->wait_command_queue);
+}
+
+/**
+ * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
+ *
+ * Setup the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ *
+ * This function chains into the hardware specific files for them to setup
+ * any hardware specific handlers as well.
+ */
+static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
+{
+       priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
+       priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
+       priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
+       priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
+                       iwl_legacy_rx_spectrum_measure_notif;
+       priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
+       priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
+           iwl_legacy_rx_pm_debug_statistics_notif;
+       priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
+
+       /*
+        * The same handler is used for both the REPLY to a discrete
+        * The same handler is used both for the REPLY to a discrete
+        * statistics request from the host and for the periodic
+        */
+       priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics;
+       priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics;
+
+       iwl_legacy_setup_rx_scan_handlers(priv);
+
+       /* status change handler */
+       priv->rx_handlers[CARD_STATE_NOTIFICATION] =
+                                       iwl4965_rx_card_state_notif;
+
+       priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
+           iwl4965_rx_missed_beacon_notif;
+       /* Rx handlers */
+       priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
+       priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
+       /* block ack */
+       priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
+       /* Set up hardware specific Rx handlers */
+       priv->cfg->ops->lib->rx_handler_setup(priv);
+}
+
+/**
+ * iwl4965_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the priv->rx_handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+void iwl4965_rx_handle(struct iwl_priv *priv)
+{
+       struct iwl_rx_mem_buffer *rxb;
+       struct iwl_rx_packet *pkt;
+       struct iwl_rx_queue *rxq = &priv->rxq;
+       u32 r, i;
+       int reclaim;
+       unsigned long flags;
+       u8 fill_rx = 0;
+       u32 count = 8;
+       int total_empty;
+
+       /* uCode's read index (stored in shared DRAM) indicates the last Rx
+        * buffer that the driver may process (last buffer filled by ucode). */
+       r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
+       i = rxq->read;
+
+       /* Rx interrupt, but nothing sent from uCode */
+       if (i == r)
+               IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
+
+       /* calculate total frames need to be restock after handling RX */
+       total_empty = r - rxq->write_actual;
+       if (total_empty < 0)
+               total_empty += RX_QUEUE_SIZE;
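+       /* (the indexes wrap at RX_QUEUE_SIZE, so a negative difference just
+        *  means "r" has wrapped around past write_actual) */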
+
+       if (total_empty > (RX_QUEUE_SIZE / 2))
+               fill_rx = 1;
+
+       while (i != r) {
+               int len;
+
+               rxb = rxq->queue[i];
+
+               /* If an RXB doesn't have a Rx queue slot associated with it,
+                * then a bug has been introduced in the queue refilling
+                * routines -- catch it here */
+               BUG_ON(rxb == NULL);
+
+               rxq->queue[i] = NULL;
+
+               pci_unmap_page(priv->pci_dev, rxb->page_dma,
+                              PAGE_SIZE << priv->hw_params.rx_page_order,
+                              PCI_DMA_FROMDEVICE);
+               pkt = rxb_addr(rxb);
+
+               len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+               len += sizeof(u32); /* account for status word */
+               trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
+
+               /* Reclaim a command buffer only if this packet is a response
+                *   to a (driver-originated) command.
+                * If the packet (e.g. Rx frame) originated from uCode,
+                *   there is no command buffer to reclaim.
+                * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+                *   but apparently a few don't get set; catch them here. */
+               reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+                       (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
+                       (pkt->hdr.cmd != REPLY_RX) &&
+                       (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
+                       (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
+                       (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
+                       (pkt->hdr.cmd != REPLY_TX);
+
+               /* Based on type of command response or notification,
+                *   handle those that need handling via function in
+                *   rx_handlers table.  See iwl4965_setup_rx_handlers() */
+               if (priv->rx_handlers[pkt->hdr.cmd]) {
+                       IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
+                               i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
+                               pkt->hdr.cmd);
+                       priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
+                       priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
+               } else {
+                       /* No handling needed */
+                       IWL_DEBUG_RX(priv,
+                               "r %d i %d No handler needed for %s, 0x%02x\n",
+                               r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
+                               pkt->hdr.cmd);
+               }
+
+               /*
+                * XXX: After here, we should always check rxb->page
+                * against NULL before touching it or its virtual
+                * memory (pkt). Because some rx_handler might have
+                * already taken or freed the pages.
+                */
+
+               if (reclaim) {
+                       /* Invoke any callbacks, transfer the buffer to caller,
+                        * and fire off the (possibly) blocking iwl_legacy_send_cmd()
+                        * as we reclaim the driver command queue */
+                       if (rxb->page)
+                               iwl_legacy_tx_cmd_complete(priv, rxb);
+                       else
+                               IWL_WARN(priv, "Claim null rxb?\n");
+               }
+
+               /* Reuse the page if possible. For notification packets and
+                * SKBs that fail to Rx correctly, add them back into the
+                * rx_free list for reuse later. */
+               spin_lock_irqsave(&rxq->lock, flags);
+               if (rxb->page != NULL) {
+                       rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
+                               0, PAGE_SIZE << priv->hw_params.rx_page_order,
+                               PCI_DMA_FROMDEVICE);
+                       list_add_tail(&rxb->list, &rxq->rx_free);
+                       rxq->free_count++;
+               } else
+                       list_add_tail(&rxb->list, &rxq->rx_used);
+
+               spin_unlock_irqrestore(&rxq->lock, flags);
+
+               i = (i + 1) & RX_QUEUE_MASK;
+               /* If there are a lot of unused frames,
+                * restock the Rx queue so ucode won't assert. */
+               if (fill_rx) {
+                       count++;
+                       if (count >= 8) {
+                               rxq->read = i;
+                               iwl4965_rx_replenish_now(priv);
+                               count = 0;
+                       }
+               }
+       }
+
+       /* Backtrack one entry */
+       rxq->read = i;
+       if (fill_rx)
+               iwl4965_rx_replenish_now(priv);
+       else
+               iwl4965_rx_queue_restock(priv);
+}
+
+/* call this function to flush any scheduled tasklet */
+static inline void iwl4965_synchronize_irq(struct iwl_priv *priv)
+{
+       /* wait to make sure we flush any pending tasklet */
+       synchronize_irq(priv->pci_dev->irq);
+       tasklet_kill(&priv->irq_tasklet);
+}
+
+static void iwl4965_irq_tasklet(struct iwl_priv *priv)
+{
+       u32 inta, handled = 0;
+       u32 inta_fh;
+       unsigned long flags;
+       u32 i;
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       u32 inta_mask;
+#endif
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* Ack/clear/reset pending uCode interrupts.
+        * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+        *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
+       inta = iwl_read32(priv, CSR_INT);
+       iwl_write32(priv, CSR_INT, inta);
+
+       /* Ack/clear/reset pending flow-handler (DMA) interrupts.
+        * Any new interrupts that happen after this, either while we're
+        * in this tasklet, or later, will show up in next ISR/tasklet. */
+       inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+       iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
+               /* just for debug */
+               inta_mask = iwl_read32(priv, CSR_INT_MASK);
+               IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
+                             inta, inta_mask, inta_fh);
+       }
+#endif
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
+        * atomic, make sure that inta covers all the interrupts that
+        * we've discovered, even if FH interrupt came in just after
+        * reading CSR_INT. */
+       if (inta_fh & CSR49_FH_INT_RX_MASK)
+               inta |= CSR_INT_BIT_FH_RX;
+       if (inta_fh & CSR49_FH_INT_TX_MASK)
+               inta |= CSR_INT_BIT_FH_TX;
+
+       /* Now service all interrupt bits discovered above. */
+       if (inta & CSR_INT_BIT_HW_ERR) {
+               IWL_ERR(priv, "Hardware error detected.  Restarting.\n");
+
+               /* Tell the device to stop sending interrupts */
+               iwl_legacy_disable_interrupts(priv);
+
+               priv->isr_stats.hw++;
+               iwl_legacy_irq_handle_error(priv);
+
+               handled |= CSR_INT_BIT_HW_ERR;
+
+               return;
+       }
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
+               /* NIC fires this, but we don't use it, redundant with WAKEUP */
+               if (inta & CSR_INT_BIT_SCD) {
+                       IWL_DEBUG_ISR(priv, "Scheduler finished transmitting "
+                                     "the frame(s).\n");
+                       priv->isr_stats.sch++;
+               }
+
+               /* Alive notification via Rx interrupt will do the real work */
+               if (inta & CSR_INT_BIT_ALIVE) {
+                       IWL_DEBUG_ISR(priv, "Alive interrupt\n");
+                       priv->isr_stats.alive++;
+               }
+       }
+#endif
+       /* Safely ignore these bits for debug checks below */
+       inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+       /* HW RF KILL switch toggled */
+       if (inta & CSR_INT_BIT_RF_KILL) {
+               int hw_rf_kill = 0;
+               if (!(iwl_read32(priv, CSR_GP_CNTRL) &
+                               CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+                       hw_rf_kill = 1;
+
+               IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
+                               hw_rf_kill ? "disable radio" : "enable radio");
+
+               priv->isr_stats.rfkill++;
+
+               /* The driver only loads ucode when setting the interface up,
+                * but it allows loading the ucode even if the radio
+                * is killed. Hence update the killswitch state here; the
+                * rfkill handler will take care of restarting if needed.
+                */
+               if (!test_bit(STATUS_ALIVE, &priv->status)) {
+                       if (hw_rf_kill)
+                               set_bit(STATUS_RF_KILL_HW, &priv->status);
+                       else
+                               clear_bit(STATUS_RF_KILL_HW, &priv->status);
+                       wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
+               }
+
+               handled |= CSR_INT_BIT_RF_KILL;
+       }
+
+       /* Chip got too hot and stopped itself */
+       if (inta & CSR_INT_BIT_CT_KILL) {
+               IWL_ERR(priv, "Microcode CT kill error detected.\n");
+               priv->isr_stats.ctkill++;
+               handled |= CSR_INT_BIT_CT_KILL;
+       }
+
+       /* Error detected by uCode */
+       if (inta & CSR_INT_BIT_SW_ERR) {
+               IWL_ERR(priv, "Microcode SW error detected. "
+                       " Restarting 0x%X.\n", inta);
+               priv->isr_stats.sw++;
+               iwl_legacy_irq_handle_error(priv);
+               handled |= CSR_INT_BIT_SW_ERR;
+       }
+
+       /*
+        * uCode wakes up after power-down sleep.
+        * Tell device about any new tx or host commands enqueued,
+        * and about any Rx buffers made available while asleep.
+        */
+       if (inta & CSR_INT_BIT_WAKEUP) {
+               IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
+               iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
+               for (i = 0; i < priv->hw_params.max_txq_num; i++)
+                       iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]);
+               priv->isr_stats.wakeup++;
+               handled |= CSR_INT_BIT_WAKEUP;
+       }
+
+       /* All uCode command responses, including Tx command responses,
+        * Rx "responses" (frame-received notification), and other
+        * notifications from uCode come through here */
+       if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+               iwl4965_rx_handle(priv);
+               priv->isr_stats.rx++;
+               handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+       }
+
+       /* This "Tx" DMA channel is used only for loading uCode */
+       if (inta & CSR_INT_BIT_FH_TX) {
+               IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
+               priv->isr_stats.tx++;
+               handled |= CSR_INT_BIT_FH_TX;
+               /* Wake up uCode load routine, now that load is complete */
+               priv->ucode_write_complete = 1;
+               wake_up_interruptible(&priv->wait_command_queue);
+       }
+
+       if (inta & ~handled) {
+               IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
+               priv->isr_stats.unhandled++;
+       }
+
+       if (inta & ~(priv->inta_mask)) {
+               IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
+                        inta & ~priv->inta_mask);
+               IWL_WARN(priv, "   with FH_INT = 0x%08x\n", inta_fh);
+       }
+
+       /* Re-enable all interrupts */
+       /* only re-enable if disabled by irq */
+       if (test_bit(STATUS_INT_ENABLED, &priv->status))
+               iwl_legacy_enable_interrupts(priv);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
+               inta = iwl_read32(priv, CSR_INT);
+               inta_mask = iwl_read32(priv, CSR_INT_MASK);
+               inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+               IWL_DEBUG_ISR(priv,
+                       "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
+                       "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
+       }
+#endif
+}
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+
+/*
+ * The following adds a new attribute to the sysfs representation
+ * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
+ * used for controlling the debug level.
+ *
+ * See the level definitions in iwl for details.
+ *
+ * The debug_level managed via sysfs below is a per-device debug level
+ * that is used instead of the global debug level whenever it is set.
+ */
+static ssize_t iwl4965_show_debug_level(struct device *d,
+                               struct device_attribute *attr, char *buf)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+       return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
+}
+static ssize_t iwl4965_store_debug_level(struct device *d,
+                               struct device_attribute *attr,
+                                const char *buf, size_t count)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+       unsigned long val;
+       int ret;
+
+       ret = strict_strtoul(buf, 0, &val);
+       if (ret)
+               IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
+       else {
+               priv->debug_level = val;
+               if (iwl_legacy_alloc_traffic_mem(priv))
+                       IWL_ERR(priv,
+                               "Not enough memory to generate traffic log\n");
+       }
+       return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
+                       iwl4965_show_debug_level, iwl4965_store_debug_level);
+
+
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
+
+
+static ssize_t iwl4965_show_temperature(struct device *d,
+                               struct device_attribute *attr, char *buf)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+
+       if (!iwl_legacy_is_alive(priv))
+               return -EAGAIN;
+
+       return sprintf(buf, "%d\n", priv->temperature);
+}
+
+static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL);
+
+static ssize_t iwl4965_show_tx_power(struct device *d,
+                            struct device_attribute *attr, char *buf)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+
+       if (!iwl_legacy_is_ready_rf(priv))
+               return sprintf(buf, "off\n");
+       else
+               return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
+}
+
+static ssize_t iwl4965_store_tx_power(struct device *d,
+                             struct device_attribute *attr,
+                             const char *buf, size_t count)
+{
+       struct iwl_priv *priv = dev_get_drvdata(d);
+       unsigned long val;
+       int ret;
+
+       ret = strict_strtoul(buf, 10, &val);
+       if (ret)
+               IWL_INFO(priv, "%s is not in decimal form.\n", buf);
+       else {
+               ret = iwl_legacy_set_tx_power(priv, val, false);
+               if (ret)
+                       IWL_ERR(priv, "failed setting tx power (%d).\n",
+                               ret);
+               else
+                       ret = count;
+       }
+       return ret;
+}
+
+static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
+                       iwl4965_show_tx_power, iwl4965_store_tx_power);
+
+static struct attribute *iwl_sysfs_entries[] = {
+       &dev_attr_temperature.attr,
+       &dev_attr_tx_power.attr,
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       &dev_attr_debug_level.attr,
+#endif
+       NULL
+};
+
+static struct attribute_group iwl_attribute_group = {
+       .name = NULL,           /* put in device directory */
+       .attrs = iwl_sysfs_entries,
+};
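+
+/*
+ * This group is registered with sysfs_create_group() from
+ * iwl4965_ucode_callback() once the firmware has been loaded, so the
+ * attributes above only show up after a successful uCode request.
+ */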
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
+{
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
+       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
+}
+
+static void iwl4965_nic_start(struct iwl_priv *priv)
+{
+       /* Remove all resets to allow NIC to operate */
+       iwl_write32(priv, CSR_RESET, 0);
+}
+
+static void iwl4965_ucode_callback(const struct firmware *ucode_raw,
+                                       void *context);
+static int iwl4965_mac_setup_register(struct iwl_priv *priv,
+                                               u32 max_probe_length);
+
+static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first)
+{
+       const char *name_pre = priv->cfg->fw_name_pre;
+       char tag[8];
+
+       if (first) {
+               priv->fw_index = priv->cfg->ucode_api_max;
+               sprintf(tag, "%d", priv->fw_index);
+       } else {
+               priv->fw_index--;
+               sprintf(tag, "%d", priv->fw_index);
+       }
+
+       if (priv->fw_index < priv->cfg->ucode_api_min) {
+               IWL_ERR(priv, "no suitable firmware found!\n");
+               return -ENOENT;
+       }
+
+       sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
+
+       IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
+                      priv->firmware_name);
+
+       return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
+                                      &priv->pci_dev->dev, GFP_KERNEL, priv,
+                                      iwl4965_ucode_callback);
+}
+
+struct iwl4965_firmware_pieces {
+       const void *inst, *data, *init, *init_data, *boot;
+       size_t inst_size, data_size, init_size, init_data_size, boot_size;
+};
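+
+/*
+ * Rough sketch of the uCode file layout as parsed by iwl4965_load_firmware()
+ * below (all size fields are little-endian u32s):
+ *
+ *     ver, inst_size, data_size, init_size, init_data_size, boot_size
+ *                                             (24-byte header)
+ *     inst image       (inst_size bytes)
+ *     data image       (data_size bytes)
+ *     init image       (init_size bytes)
+ *     init data image  (init_data_size bytes)
+ *     boot image       (boot_size bytes)
+ *
+ * The total file size must equal the header size plus the five image sizes,
+ * which is what the size check in iwl4965_load_firmware() enforces.
+ */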
+
+static int iwl4965_load_firmware(struct iwl_priv *priv,
+                                      const struct firmware *ucode_raw,
+                                      struct iwl4965_firmware_pieces *pieces)
+{
+       struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
+       u32 api_ver, hdr_size;
+       const u8 *src;
+
+       priv->ucode_ver = le32_to_cpu(ucode->ver);
+       api_ver = IWL_UCODE_API(priv->ucode_ver);
+
+       switch (api_ver) {
+       default:
+       case 0:
+       case 1:
+       case 2:
+               hdr_size = 24;
+               if (ucode_raw->size < hdr_size) {
+                       IWL_ERR(priv, "File size too small!\n");
+                       return -EINVAL;
+               }
+               pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
+               pieces->data_size = le32_to_cpu(ucode->v1.data_size);
+               pieces->init_size = le32_to_cpu(ucode->v1.init_size);
+               pieces->init_data_size =
+                               le32_to_cpu(ucode->v1.init_data_size);
+               pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
+               src = ucode->v1.data;
+               break;
+       }
+
+       /* Verify size of file vs. image size info in file's header */
+       if (ucode_raw->size != hdr_size + pieces->inst_size +
+                               pieces->data_size + pieces->init_size +
+                               pieces->init_data_size + pieces->boot_size) {
+
+               IWL_ERR(priv,
+                       "uCode file size %d does not match expected size\n",
+                       (int)ucode_raw->size);
+               return -EINVAL;
+       }
+
+       pieces->inst = src;
+       src += pieces->inst_size;
+       pieces->data = src;
+       src += pieces->data_size;
+       pieces->init = src;
+       src += pieces->init_size;
+       pieces->init_data = src;
+       src += pieces->init_data_size;
+       pieces->boot = src;
+       src += pieces->boot_size;
+
+       return 0;
+}
+
+/**
+ * iwl4965_ucode_callback - callback when firmware was loaded
+ *
+ * If loaded successfully, copies the firmware into buffers
+ * for the card to fetch (via DMA).
+ */
+static void
+iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context)
+{
+       struct iwl_priv *priv = context;
+       struct iwl_ucode_header *ucode;
+       int err;
+       struct iwl4965_firmware_pieces pieces;
+       const unsigned int api_max = priv->cfg->ucode_api_max;
+       const unsigned int api_min = priv->cfg->ucode_api_min;
+       u32 api_ver;
+
+       u32 max_probe_length = 200;
+       u32 standard_phy_calibration_size =
+                       IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
+
+       memset(&pieces, 0, sizeof(pieces));
+
+       if (!ucode_raw) {
+               if (priv->fw_index <= priv->cfg->ucode_api_max)
+                       IWL_ERR(priv,
+                               "request for firmware file '%s' failed.\n",
+                               priv->firmware_name);
+               goto try_again;
+       }
+
+       IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
+                      priv->firmware_name, ucode_raw->size);
+
+       /* Make sure that we got at least the API version number */
+       if (ucode_raw->size < 4) {
+               IWL_ERR(priv, "File size way too small!\n");
+               goto try_again;
+       }
+
+       /* Data from ucode file:  header followed by uCode images */
+       ucode = (struct iwl_ucode_header *)ucode_raw->data;
+
+       err = iwl4965_load_firmware(priv, ucode_raw, &pieces);
+
+       if (err)
+               goto try_again;
+
+       api_ver = IWL_UCODE_API(priv->ucode_ver);
+
+       /*
+        * api_ver should match the api version forming part of the
+        * firmware filename ... but we don't check for that and only rely
+        * on the API version read from firmware header from here on forward
+        */
+       if (api_ver < api_min || api_ver > api_max) {
+               IWL_ERR(priv,
+                       "Driver unable to support your firmware API. "
+                       "Driver supports v%u, firmware is v%u.\n",
+                       api_max, api_ver);
+               goto try_again;
+       }
+
+       if (api_ver != api_max)
+               IWL_ERR(priv,
+                       "Firmware has old API version. Expected v%u, "
+                       "got v%u. New firmware can be obtained "
+                       "from http://www.intellinuxwireless.org.\n",
+                       api_max, api_ver);
+
+       IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
+                IWL_UCODE_MAJOR(priv->ucode_ver),
+                IWL_UCODE_MINOR(priv->ucode_ver),
+                IWL_UCODE_API(priv->ucode_ver),
+                IWL_UCODE_SERIAL(priv->ucode_ver));
+
+       snprintf(priv->hw->wiphy->fw_version,
+                sizeof(priv->hw->wiphy->fw_version),
+                "%u.%u.%u.%u",
+                IWL_UCODE_MAJOR(priv->ucode_ver),
+                IWL_UCODE_MINOR(priv->ucode_ver),
+                IWL_UCODE_API(priv->ucode_ver),
+                IWL_UCODE_SERIAL(priv->ucode_ver));
+
+       /*
+        * For any of the failures below (before allocating pci memory)
+        * we will try to load a version with a smaller API -- maybe the
+        * user just got a corrupted version of the latest API.
+        */
+
+       IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
+                      priv->ucode_ver);
+       IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
+                      pieces.inst_size);
+       IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
+                      pieces.data_size);
+       IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
+                      pieces.init_size);
+       IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
+                      pieces.init_data_size);
+       IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
+                      pieces.boot_size);
+
+       /* Verify that uCode images will fit in card's SRAM */
+       if (pieces.inst_size > priv->hw_params.max_inst_size) {
+               IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
+                       pieces.inst_size);
+               goto try_again;
+       }
+
+       if (pieces.data_size > priv->hw_params.max_data_size) {
+               IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
+                       pieces.data_size);
+               goto try_again;
+       }
+
+       if (pieces.init_size > priv->hw_params.max_inst_size) {
+               IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
+                       pieces.init_size);
+               goto try_again;
+       }
+
+       if (pieces.init_data_size > priv->hw_params.max_data_size) {
+               IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
+                       pieces.init_data_size);
+               goto try_again;
+       }
+
+       if (pieces.boot_size > priv->hw_params.max_bsm_size) {
+               IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
+                       pieces.boot_size);
+               goto try_again;
+       }
+
+       /* Allocate ucode buffers for card's bus-master loading ... */
+
+       /* Runtime instructions and 2 copies of data:
+        * 1) unmodified from disk
+        * 2) backup cache for save/restore during power-downs */
+       priv->ucode_code.len = pieces.inst_size;
+       iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
+
+       priv->ucode_data.len = pieces.data_size;
+       iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
+
+       priv->ucode_data_backup.len = pieces.data_size;
+       iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
+
+       if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
+           !priv->ucode_data_backup.v_addr)
+               goto err_pci_alloc;
+
+       /* Initialization instructions and data */
+       if (pieces.init_size && pieces.init_data_size) {
+               priv->ucode_init.len = pieces.init_size;
+               iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
+
+               priv->ucode_init_data.len = pieces.init_data_size;
+               iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
+
+               if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
+                       goto err_pci_alloc;
+       }
+
+       /* Bootstrap (instructions only, no data) */
+       if (pieces.boot_size) {
+               priv->ucode_boot.len = pieces.boot_size;
+               iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
+
+               if (!priv->ucode_boot.v_addr)
+                       goto err_pci_alloc;
+       }
+
+       /* Now that we can no longer fail, copy information */
+
+       priv->sta_key_max_num = STA_KEY_MAX_NUM;
+
+       /* Copy images into buffers for card's bus-master reads ... */
+
+       /* Runtime instructions (first block of data in file) */
+       IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
+                       pieces.inst_size);
+       memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
+
+       IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
+               priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
+
+       /*
+        * Runtime data
+        * NOTE:  The copy into the backup buffer is repeated in __iwl4965_up()
+        */
+       IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
+                       pieces.data_size);
+       memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
+       memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
+
+       /* Initialization instructions */
+       if (pieces.init_size) {
+               IWL_DEBUG_INFO(priv,
+                               "Copying (but not loading) init instr len %Zd\n",
+                               pieces.init_size);
+               memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
+       }
+
+       /* Initialization data */
+       if (pieces.init_data_size) {
+               IWL_DEBUG_INFO(priv,
+                               "Copying (but not loading) init data len %Zd\n",
+                              pieces.init_data_size);
+               memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
+                      pieces.init_data_size);
+       }
+
+       /* Bootstrap instructions */
+       IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
+                       pieces.boot_size);
+       memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
+
+       /*
+        * figure out the offset of chain noise reset and gain commands
+        * figure out the offsets of the chain noise reset and gain commands
+        * based on the size of the standard phy calibration commands table
+       priv->_4965.phy_calib_chain_noise_reset_cmd =
+               standard_phy_calibration_size;
+       priv->_4965.phy_calib_chain_noise_gain_cmd =
+               standard_phy_calibration_size + 1;
+
+       /**************************************************
+        * This is still part of probe() in a sense...
+        *
+        * 9. Setup and register with mac80211 and debugfs
+        **************************************************/
+       err = iwl4965_mac_setup_register(priv, max_probe_length);
+       if (err)
+               goto out_unbind;
+
+       err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
+       if (err)
+               IWL_ERR(priv,
+               "failed to create debugfs files. Ignoring error: %d\n", err);
+
+       err = sysfs_create_group(&priv->pci_dev->dev.kobj,
+                                       &iwl_attribute_group);
+       if (err) {
+               IWL_ERR(priv, "failed to create sysfs device attributes\n");
+               goto out_unbind;
+       }
+
+       /* We have our copies now, allow the OS to release its copy */
+       release_firmware(ucode_raw);
+       complete(&priv->_4965.firmware_loading_complete);
+       return;
+
+ try_again:
+       /* try next, if any */
+       if (iwl4965_request_firmware(priv, false))
+               goto out_unbind;
+       release_firmware(ucode_raw);
+       return;
+
+ err_pci_alloc:
+       IWL_ERR(priv, "failed to allocate pci memory\n");
+       iwl4965_dealloc_ucode_pci(priv);
+ out_unbind:
+       complete(&priv->_4965.firmware_loading_complete);
+       device_release_driver(&priv->pci_dev->dev);
+       release_firmware(ucode_raw);
+}
+
+static const char * const desc_lookup_text[] = {
+       "OK",
+       "FAIL",
+       "BAD_PARAM",
+       "BAD_CHECKSUM",
+       "NMI_INTERRUPT_WDG",
+       "SYSASSERT",
+       "FATAL_ERROR",
+       "BAD_COMMAND",
+       "HW_ERROR_TUNE_LOCK",
+       "HW_ERROR_TEMPERATURE",
+       "ILLEGAL_CHAN_FREQ",
+       "VCC_NOT_STABLE",
+       "FH_ERROR",
+       "NMI_INTERRUPT_HOST",
+       "NMI_INTERRUPT_ACTION_PT",
+       "NMI_INTERRUPT_UNKNOWN",
+       "UCODE_VERSION_MISMATCH",
+       "HW_ERROR_ABS_LOCK",
+       "HW_ERROR_CAL_LOCK_FAIL",
+       "NMI_INTERRUPT_INST_ACTION_PT",
+       "NMI_INTERRUPT_DATA_ACTION_PT",
+       "NMI_TRM_HW_ER",
+       "NMI_INTERRUPT_TRM",
+       "NMI_INTERRUPT_BREAK_POINT",
+       "DEBUG_0",
+       "DEBUG_1",
+       "DEBUG_2",
+       "DEBUG_3",
+};
+
+static struct { char *name; u8 num; } advanced_lookup[] = {
+       { "NMI_INTERRUPT_WDG", 0x34 },
+       { "SYSASSERT", 0x35 },
+       { "UCODE_VERSION_MISMATCH", 0x37 },
+       { "BAD_COMMAND", 0x38 },
+       { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+       { "FATAL_ERROR", 0x3D },
+       { "NMI_TRM_HW_ERR", 0x46 },
+       { "NMI_INTERRUPT_TRM", 0x4C },
+       { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+       { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+       { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+       { "NMI_INTERRUPT_HOST", 0x66 },
+       { "NMI_INTERRUPT_ACTION_PT", 0x7C },
+       { "NMI_INTERRUPT_UNKNOWN", 0x84 },
+       { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+       { "ADVANCED_SYSASSERT", 0 },
+};
+
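+/*
+ * Map a uCode error code to a human-readable name.  Small codes index
+ * desc_lookup_text directly; larger codes are matched against
+ * advanced_lookup, falling back to its final "ADVANCED_SYSASSERT" entry.
+ */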
+static const char *iwl4965_desc_lookup(u32 num)
+{
+       int i;
+       int max = ARRAY_SIZE(desc_lookup_text);
+
+       if (num < max)
+               return desc_lookup_text[num];
+
+       max = ARRAY_SIZE(advanced_lookup) - 1;
+       for (i = 0; i < max; i++) {
+               if (advanced_lookup[i].num == num)
+                       break;
+       }
+       return advanced_lookup[i].name;
+}
+
+#define ERROR_START_OFFSET  (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
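+
+/*
+ * Layout of the uCode error table in SRAM, as read below: word 0 at 'base'
+ * holds the entry count, and the words that follow carry desc, pc,
+ * blink1/2, ilink1/2, data1/2, line, time and hcmd at the 32-bit word
+ * offsets used in iwl4965_dump_nic_error_log().
+ */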
+
+void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
+{
+       u32 data2, line;
+       u32 desc, time, count, base, data1;
+       u32 blink1, blink2, ilink1, ilink2;
+       u32 pc, hcmd;
+
+       if (priv->ucode_type == UCODE_INIT) {
+               base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
+       } else {
+               base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
+       }
+
+       if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+               IWL_ERR(priv,
+                       "Invalid error log pointer 0x%08X for %s uCode\n",
+                       base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
+               return;
+       }
+
+       count = iwl_legacy_read_targ_mem(priv, base);
+
+       if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+               IWL_ERR(priv, "Start IWL Error Log Dump:\n");
+               IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
+                       priv->status, count);
+       }
+
+       desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32));
+       priv->isr_stats.err_code = desc;
+       pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32));
+       blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32));
+       blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32));
+       ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32));
+       ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32));
+       data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32));
+       data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32));
+       line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32));
+       time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32));
+       hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32));
+
+       trace_iwlwifi_legacy_dev_ucode_error(priv, desc,
+                                       time, data1, data2, line,
+                                     blink1, blink2, ilink1, ilink2);
+
+       IWL_ERR(priv, "Desc                                  Time       "
+               "data1      data2      line\n");
+       IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
+               iwl4965_desc_lookup(desc), desc, time, data1, data2, line);
+       IWL_ERR(priv, "pc      blink1  blink2  ilink1  ilink2  hcmd\n");
+       IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
+               pc, blink1, blink2, ilink1, ilink2, hcmd);
+}
+
+#define EVENT_START_OFFSET  (4 * sizeof(u32))
+
+/**
+ * iwl4965_print_event_log - Dump error event log to syslog
+ *
+ */
+static int iwl4965_print_event_log(struct iwl_priv *priv, u32 start_idx,
+                              u32 num_events, u32 mode,
+                              int pos, char **buf, size_t bufsz)
+{
+       u32 i;
+       u32 base;       /* SRAM byte address of event log header */
+       u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
+       u32 ptr;        /* SRAM byte address of log data */
+       u32 ev, time, data; /* event log data */
+       unsigned long reg_flags;
+
+       if (num_events == 0)
+               return pos;
+
+       if (priv->ucode_type == UCODE_INIT) {
+               base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
+       } else {
+               base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+       }
+
+       if (mode == 0)
+               event_size = 2 * sizeof(u32);
+       else
+               event_size = 3 * sizeof(u32);
+
+       ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
+
+       /* Make sure device is powered up for SRAM reads */
+       spin_lock_irqsave(&priv->reg_lock, reg_flags);
+       iwl_grab_nic_access(priv);
+
+       /* Set starting address; reads will auto-increment */
+       _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
+       rmb();
+
+       /* "time" is actually "data" for mode 0 (no timestamp).
+        * place event id # at far right for easier visual parsing. */
+       for (i = 0; i < num_events; i++) {
+               ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+               time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+               if (mode == 0) {
+                       /* data, ev */
+                       if (bufsz) {
+                               pos += scnprintf(*buf + pos, bufsz - pos,
+                                               "EVT_LOG:0x%08x:%04u\n",
+                                               time, ev);
+                       } else {
+                               trace_iwlwifi_legacy_dev_ucode_event(priv, 0,
+                                       time, ev);
+                               IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
+                                       time, ev);
+                       }
+               } else {
+                       data = _iwl_legacy_read_direct32(priv,
+                                               HBUS_TARG_MEM_RDAT);
+                       if (bufsz) {
+                               pos += scnprintf(*buf + pos, bufsz - pos,
+                                               "EVT_LOGT:%010u:0x%08x:%04u\n",
+                                                time, data, ev);
+                       } else {
+                               IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
+                                       time, data, ev);
+                               trace_iwlwifi_legacy_dev_ucode_event(priv, time,
+                                       data, ev);
+                       }
+               }
+       }
+
+       /* Allow device to power down */
+       iwl_release_nic_access(priv);
+       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+       return pos;
+}
+
+/**
+ * iwl4965_print_last_event_logs - Dump the newest # of event log entries to syslog
+ */
+static int iwl4965_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
+                                   u32 num_wraps, u32 next_entry,
+                                   u32 size, u32 mode,
+                                   int pos, char **buf, size_t bufsz)
+{
+       /*
+        * display the newest 'size' entries,
+        * i.e. the entries just before the next one that uCode would fill.
+        */
+       if (num_wraps) {
+               if (next_entry < size) {
+                       pos = iwl4965_print_event_log(priv,
+                                               capacity - (size - next_entry),
+                                               size - next_entry, mode,
+                                               pos, buf, bufsz);
+                       pos = iwl4965_print_event_log(priv, 0,
+                                                 next_entry, mode,
+                                                 pos, buf, bufsz);
+               } else
+                       pos = iwl4965_print_event_log(priv, next_entry - size,
+                                                 size, mode, pos, buf, bufsz);
+       } else {
+               if (next_entry < size) {
+                       pos = iwl4965_print_event_log(priv, 0, next_entry,
+                                                 mode, pos, buf, bufsz);
+               } else {
+                       pos = iwl4965_print_event_log(priv, next_entry - size,
+                                                 size, mode, pos, buf, bufsz);
+               }
+       }
+       return pos;
+}
+
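+/*
+ * Number of newest event log entries printed by default, i.e. when neither
+ * the 'full_log' argument nor the IWL_DL_FW_ERRORS debug flag asks for the
+ * complete log.
+ */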
+#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
+
+int iwl4965_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+                           char **buf, bool display)
+{
+       u32 base;       /* SRAM byte address of event log header */
+       u32 capacity;   /* event log capacity in # entries */
+       u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
+       u32 num_wraps;  /* # times uCode wrapped to top of log */
+       u32 next_entry; /* index of next entry to be written by uCode */
+       u32 size;       /* # entries that we'll print */
+       int pos = 0;
+       size_t bufsz = 0;
+
+       if (priv->ucode_type == UCODE_INIT) {
+               base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
+       } else {
+               base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+       }
+
+       if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+               IWL_ERR(priv,
+                       "Invalid event log pointer 0x%08X for %s uCode\n",
+                       base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
+               return -EINVAL;
+       }
+
+       /* event log header */
+       capacity = iwl_legacy_read_targ_mem(priv, base);
+       mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
+       num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32)));
+       next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32)));
+
+       size = num_wraps ? capacity : next_entry;
+
+       /* bail out if nothing in log */
+       if (size == 0) {
+               IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
+               return pos;
+       }
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
+               size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
+                       ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
+#else
+       size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
+               ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
+#endif
+       IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
+               size);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+       if (display) {
+               if (full_log)
+                       bufsz = capacity * 48;
+               else
+                       bufsz = size * 48;
+               *buf = kmalloc(bufsz, GFP_KERNEL);
+               if (!*buf)
+                       return -ENOMEM;
+       }
+       if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
+               /*
+                * if uCode has wrapped back to top of log,
+                * start at the oldest entry,
+                * i.e. the next one that uCode would fill.
+                */
+               if (num_wraps)
+                       pos = iwl4965_print_event_log(priv, next_entry,
+                                               capacity - next_entry, mode,
+                                               pos, buf, bufsz);
+               /* (then/else) start at top of log */
+               pos = iwl4965_print_event_log(priv, 0,
+                                         next_entry, mode, pos, buf, bufsz);
+       } else
+               pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
+                                               next_entry, size, mode,
+                                               pos, buf, bufsz);
+#else
+       pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
+                                       next_entry, size, mode,
+                                       pos, buf, bufsz);
+#endif
+       return pos;
+}
+
+static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
+{
+       struct iwl_ct_kill_config cmd;
+       unsigned long flags;
+       int ret = 0;
+
+       spin_lock_irqsave(&priv->lock, flags);
+       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+                   CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       cmd.critical_temperature_R =
+               cpu_to_le32(priv->hw_params.ct_kill_threshold);
+
+       ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
+                              sizeof(cmd), &cmd);
+       if (ret)
+               IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
+       else
+               IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
+                               "succeeded, "
+                               "critical temperature is %d\n",
+                               priv->hw_params.ct_kill_threshold);
+}
+
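+/*
+ * Default Tx queue to FIFO mapping used by iwl4965_alive_notify(): the array
+ * index is the Tx queue number and the value is the FIFO it feeds.  The
+ * entry holding IWL49_CMD_FIFO_NUM serves the command queue, and
+ * IWL_TX_FIFO_UNUSED entries are activated but not bound to a FIFO.
+ */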
+static const s8 default_queue_to_tx_fifo[] = {
+       IWL_TX_FIFO_VO,
+       IWL_TX_FIFO_VI,
+       IWL_TX_FIFO_BE,
+       IWL_TX_FIFO_BK,
+       IWL49_CMD_FIFO_NUM,
+       IWL_TX_FIFO_UNUSED,
+       IWL_TX_FIFO_UNUSED,
+};
+
+static int iwl4965_alive_notify(struct iwl_priv *priv)
+{
+       u32 a;
+       unsigned long flags;
+       int i, chan;
+       u32 reg_val;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* Clear 4965's internal Tx Scheduler data base */
+       priv->scd_base_addr = iwl_legacy_read_prph(priv,
+                                       IWL49_SCD_SRAM_BASE_ADDR);
+       a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
+       for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
+               iwl_legacy_write_targ_mem(priv, a, 0);
+       for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
+               iwl_legacy_write_targ_mem(priv, a, 0);
+       for (; a < priv->scd_base_addr +
+              IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
+               iwl_legacy_write_targ_mem(priv, a, 0);
+
+       /* Tell 4965 where to find Tx byte count tables */
+       iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
+                       priv->scd_bc_tbls.dma >> 10);
+
+       /* Enable DMA channel */
+       for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
+               iwl_legacy_write_direct32(priv,
+                               FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+                               FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+                               FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+       /* Update FH chicken bits */
+       reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
+       iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
+                          reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+       /* Disable chain mode for all queues */
+       iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
+
+       /* Initialize each Tx queue (including the command queue) */
+       for (i = 0; i < priv->hw_params.max_txq_num; i++) {
+
+               /* TFD circular buffer read/write indexes */
+               iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
+               iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
+
+               /* Max Tx Window size for Scheduler-ACK mode */
+               iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
+                               IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
+                               (SCD_WIN_SIZE <<
+                               IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
+                               IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+               /* Frame limit */
+               iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
+                               IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
+                               sizeof(u32),
+                               (SCD_FRAME_LIMIT <<
+                               IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+                               IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+       }
+       iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
+                                (1 << priv->hw_params.max_txq_num) - 1);
+
+       /* Activate all Tx DMA/FIFO channels */
+       iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));
+
+       iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
+
+       /* make sure all queues are not stopped */
+       memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
+       for (i = 0; i < 4; i++)
+               atomic_set(&priv->queue_stop_count[i], 0);
+
+       /* reset to 0 to enable all the queues first */
+       priv->txq_ctx_active_msk = 0;
+       /* Map each Tx/cmd queue to its corresponding fifo */
+       BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
+
+       for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
+               int ac = default_queue_to_tx_fifo[i];
+
+               iwl_txq_ctx_activate(priv, i);
+
+               if (ac == IWL_TX_FIFO_UNUSED)
+                       continue;
+
+               iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
+       }
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       return 0;
+}
+
+/**
+ * iwl4965_alive_start - called after REPLY_ALIVE notification received
+ *                   from protocol/runtime uCode (initialization uCode's
+ *                   Alive gets handled by iwl_init_alive_start()).
+ */
+static void iwl4965_alive_start(struct iwl_priv *priv)
+{
+       int ret = 0;
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+       IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
+
+       if (priv->card_alive.is_valid != UCODE_VALID_OK) {
+               /* We had an error bringing up the hardware, so take it
+                * all the way back down so we can try again */
+               IWL_DEBUG_INFO(priv, "Alive failed.\n");
+               goto restart;
+       }
+
+       /* The initialize uCode has loaded the runtime uCode ... verify inst image.
+        * This is a paranoid check, because we would not have gotten the
+        * "runtime" alive if code weren't properly loaded.  */
+       if (iwl4965_verify_ucode(priv)) {
+               /* Runtime instruction load was bad;
+                * take it all the way back down so we can try again */
+               IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
+               goto restart;
+       }
+
+       ret = iwl4965_alive_notify(priv);
+       if (ret) {
+               IWL_WARN(priv,
+                       "Could not complete ALIVE transition [ntf]: %d\n", ret);
+               goto restart;
+       }
+
+
+       /* After the ALIVE response, we can send host commands to the uCode */
+       set_bit(STATUS_ALIVE, &priv->status);
+
+       /* Enable watchdog to monitor the driver tx queues */
+       iwl_legacy_setup_watchdog(priv);
+
+       if (iwl_legacy_is_rfkill(priv))
+               return;
+
+       ieee80211_wake_queues(priv->hw);
+
+       priv->active_rate = IWL_RATES_MASK;
+
+       if (iwl_legacy_is_associated_ctx(ctx)) {
+               struct iwl_legacy_rxon_cmd *active_rxon =
+                               (struct iwl_legacy_rxon_cmd *)&ctx->active;
+               /* apply any changes in staging */
+               ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+               active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       } else {
+               struct iwl_rxon_context *tmp;
+               /* Initialize our rx_config data */
+               for_each_context(priv, tmp)
+                       iwl_legacy_connection_init_rx_config(priv, tmp);
+
+               if (priv->cfg->ops->hcmd->set_rxon_chain)
+                       priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+       }
+
+       /* Configure bluetooth coexistence if enabled */
+       iwl_legacy_send_bt_config(priv);
+
+       iwl4965_reset_run_time_calib(priv);
+
+       set_bit(STATUS_READY, &priv->status);
+
+       /* Configure the adapter for unassociated operation */
+       iwl_legacy_commit_rxon(priv, ctx);
+
+       /* At this point, the NIC is initialized and operational */
+       iwl4965_rf_kill_ct_config(priv);
+
+       IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
+       wake_up_interruptible(&priv->wait_command_queue);
+
+       iwl_legacy_power_update_mode(priv, true);
+       IWL_DEBUG_INFO(priv, "Updated power mode\n");
+
+       return;
+
+ restart:
+       queue_work(priv->workqueue, &priv->restart);
+}
+
+static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);
+
+static void __iwl4965_down(struct iwl_priv *priv)
+{
+       unsigned long flags;
+       int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
+
+       IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
+
+       iwl_legacy_scan_cancel_timeout(priv, 200);
+
+       exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
+
+       /* Stop the TX queue watchdog. STATUS_EXIT_PENDING must be set
+        * to prevent the timer from being re-armed */
+       del_timer_sync(&priv->watchdog);
+
+       iwl_legacy_clear_ucode_stations(priv, NULL);
+       iwl_legacy_dealloc_bcast_stations(priv);
+       iwl_legacy_clear_driver_stations(priv);
+
+       /* Unblock any waiting calls */
+       wake_up_interruptible_all(&priv->wait_command_queue);
+
+       /* Wipe out the EXIT_PENDING status bit if we are not actually
+        * exiting the module */
+       if (!exit_pending)
+               clear_bit(STATUS_EXIT_PENDING, &priv->status);
+
+       /* stop and reset the on-board processor */
+       iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+       /* tell the device to stop sending interrupts */
+       spin_lock_irqsave(&priv->lock, flags);
+       iwl_legacy_disable_interrupts(priv);
+       spin_unlock_irqrestore(&priv->lock, flags);
+       iwl4965_synchronize_irq(priv);
+
+       if (priv->mac80211_registered)
+               ieee80211_stop_queues(priv->hw);
+
+       /* If we have not previously called iwl_init() then
+        * clear all bits but the RF Kill bit and return */
+       if (!iwl_legacy_is_init(priv)) {
+               priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
+                                       STATUS_RF_KILL_HW |
+                              test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
+                                       STATUS_GEO_CONFIGURED |
+                              test_bit(STATUS_EXIT_PENDING, &priv->status) <<
+                                       STATUS_EXIT_PENDING;
+               goto exit;
+       }
+
+       /* ...otherwise clear out all the status bits but the RF Kill
+        * bit and continue taking the NIC down. */
+       priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
+                               STATUS_RF_KILL_HW |
+                       test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
+                               STATUS_GEO_CONFIGURED |
+                       test_bit(STATUS_FW_ERROR, &priv->status) <<
+                               STATUS_FW_ERROR |
+                      test_bit(STATUS_EXIT_PENDING, &priv->status) <<
+                               STATUS_EXIT_PENDING;
+
+       iwl4965_txq_ctx_stop(priv);
+       iwl4965_rxq_stop(priv);
+
+       /* Power-down device's busmaster DMA clocks */
+       iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+       udelay(5);
+
+       /* Make sure (redundant) we've released our request to stay awake */
+       iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
+                               CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+       /* Stop the device, and put it in low power state */
+       iwl_legacy_apm_stop(priv);
+
+ exit:
+       memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
+
+       dev_kfree_skb(priv->beacon_skb);
+       priv->beacon_skb = NULL;
+
+       /* clear out any free frames */
+       iwl4965_clear_free_frames(priv);
+}
+
+static void iwl4965_down(struct iwl_priv *priv)
+{
+       mutex_lock(&priv->mutex);
+       __iwl4965_down(priv);
+       mutex_unlock(&priv->mutex);
+
+       iwl4965_cancel_deferred_work(priv);
+}
+
+#define HW_READY_TIMEOUT (50)
+
+static int iwl4965_set_hw_ready(struct iwl_priv *priv)
+{
+       int ret = 0;
+
+       iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+               CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+       /* See if we got it */
+       ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
+                               CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+                               CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+                               HW_READY_TIMEOUT);
+       if (ret != -ETIMEDOUT)
+               priv->hw_ready = true;
+       else
+               priv->hw_ready = false;
+
+       IWL_DEBUG_INFO(priv, "hardware %s\n",
+                     (priv->hw_ready == 1) ? "ready" : "not ready");
+       return ret;
+}
+
+static int iwl4965_prepare_card_hw(struct iwl_priv *priv)
+{
+       int ret = 0;
+
+       IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n");
+
+       ret = iwl4965_set_hw_ready(priv);
+       if (priv->hw_ready)
+               return ret;
+
+       /* If HW is not ready, prepare the conditions to check again */
+       iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+                       CSR_HW_IF_CONFIG_REG_PREPARE);
+
+       ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
+                       ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
+                       CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+
+       /* HW should be ready by now, check again. */
+       if (ret != -ETIMEDOUT)
+               iwl4965_set_hw_ready(priv);
+
+       return ret;
+}
+
+#define MAX_HW_RESTARTS 5
+
+static int __iwl4965_up(struct iwl_priv *priv)
+{
+       struct iwl_rxon_context *ctx;
+       int i;
+       int ret;
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+               IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
+               return -EIO;
+       }
+
+       if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
+               IWL_ERR(priv, "ucode not available for device bringup\n");
+               return -EIO;
+       }
+
+       for_each_context(priv, ctx) {
+               ret = iwl4965_alloc_bcast_station(priv, ctx);
+               if (ret) {
+                       iwl_legacy_dealloc_bcast_stations(priv);
+                       return ret;
+               }
+       }
+
+       iwl4965_prepare_card_hw(priv);
+
+       if (!priv->hw_ready) {
+               IWL_WARN(priv, "Exiting: HW not ready\n");
+               return -EIO;
+       }
+
+       /* If platform's RF_KILL switch is NOT set to KILL */
+       if (iwl_read32(priv,
+               CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+               clear_bit(STATUS_RF_KILL_HW, &priv->status);
+       else
+               set_bit(STATUS_RF_KILL_HW, &priv->status);
+
+       if (iwl_legacy_is_rfkill(priv)) {
+               wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
+
+               iwl_legacy_enable_interrupts(priv);
+               IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
+               return 0;
+       }
+
+       iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
+
+       /* must be initialized before iwl4965_hw_nic_init */
+       priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+
+       ret = iwl4965_hw_nic_init(priv);
+       if (ret) {
+               IWL_ERR(priv, "Unable to init nic\n");
+               return ret;
+       }
+
+       /* make sure rfkill handshake bits are cleared */
+       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+                   CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+       /* clear (again), then enable host interrupts */
+       iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
+       iwl_legacy_enable_interrupts(priv);
+
+       /* really make sure rfkill handshake bits are cleared */
+       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+       /* Copy original ucode data image from disk into backup cache.
+        * This will be used to initialize the on-board processor's
+        * data SRAM for a clean start when the runtime program first loads. */
+       memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
+              priv->ucode_data.len);
+
+       for (i = 0; i < MAX_HW_RESTARTS; i++) {
+
+               /* load bootstrap state machine,
+                * load bootstrap program into processor's memory,
+                * prepare to load the "initialize" uCode */
+               ret = priv->cfg->ops->lib->load_ucode(priv);
+
+               if (ret) {
+                       IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
+                               ret);
+                       continue;
+               }
+
+               /* start card; "initialize" will load runtime ucode */
+               iwl4965_nic_start(priv);
+
+               IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
+
+               return 0;
+       }
+
+       set_bit(STATUS_EXIT_PENDING, &priv->status);
+       __iwl4965_down(priv);
+       clear_bit(STATUS_EXIT_PENDING, &priv->status);
+
+       /* tried to restart and configure the device for as long as our
+        * patience could withstand */
+       IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
+       return -EIO;
+}
+
+
+/*****************************************************************************
+ *
+ * Workqueue callbacks
+ *
+ *****************************************************************************/
+
+static void iwl4965_bg_init_alive_start(struct work_struct *data)
+{
+       struct iwl_priv *priv =
+           container_of(data, struct iwl_priv, init_alive_start.work);
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       mutex_lock(&priv->mutex);
+       priv->cfg->ops->lib->init_alive_start(priv);
+       mutex_unlock(&priv->mutex);
+}
+
+static void iwl4965_bg_alive_start(struct work_struct *data)
+{
+       struct iwl_priv *priv =
+           container_of(data, struct iwl_priv, alive_start.work);
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       mutex_lock(&priv->mutex);
+       iwl4965_alive_start(priv);
+       mutex_unlock(&priv->mutex);
+}
+
+static void iwl4965_bg_run_time_calib_work(struct work_struct *work)
+{
+       struct iwl_priv *priv = container_of(work, struct iwl_priv,
+                       run_time_calib_work);
+
+       mutex_lock(&priv->mutex);
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+           test_bit(STATUS_SCANNING, &priv->status)) {
+               mutex_unlock(&priv->mutex);
+               return;
+       }
+
+       if (priv->start_calib) {
+               iwl4965_chain_noise_calibration(priv,
+                               (void *)&priv->_4965.statistics);
+               iwl4965_sensitivity_calibration(priv,
+                               (void *)&priv->_4965.statistics);
+       }
+
+       mutex_unlock(&priv->mutex);
+}
+
+static void iwl4965_bg_restart(struct work_struct *data)
+{
+       struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
+               struct iwl_rxon_context *ctx;
+
+               mutex_lock(&priv->mutex);
+               for_each_context(priv, ctx)
+                       ctx->vif = NULL;
+               priv->is_open = 0;
+
+               __iwl4965_down(priv);
+
+               mutex_unlock(&priv->mutex);
+               iwl4965_cancel_deferred_work(priv);
+               ieee80211_restart_hw(priv->hw);
+       } else {
+               iwl4965_down(priv);
+
+               if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+                       return;
+
+               mutex_lock(&priv->mutex);
+               __iwl4965_up(priv);
+               mutex_unlock(&priv->mutex);
+       }
+}
+
+static void iwl4965_bg_rx_replenish(struct work_struct *data)
+{
+       struct iwl_priv *priv =
+           container_of(data, struct iwl_priv, rx_replenish);
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       mutex_lock(&priv->mutex);
+       iwl4965_rx_replenish(priv);
+       mutex_unlock(&priv->mutex);
+}
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
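+/*
+ * How long iwl4965_mac_start() waits for the runtime uCode to set
+ * STATUS_READY after bringing the device up, before giving up with
+ * -ETIMEDOUT.
+ */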
+#define UCODE_READY_TIMEOUT    (4 * HZ)
+
+/*
+ * Not a mac80211 entry point function, but it fits in with all the
+ * other mac80211 functions grouped here.
+ */
+static int iwl4965_mac_setup_register(struct iwl_priv *priv,
+                                 u32 max_probe_length)
+{
+       int ret;
+       struct ieee80211_hw *hw = priv->hw;
+       struct iwl_rxon_context *ctx;
+
+       hw->rate_control_algorithm = "iwl-4965-rs";
+
+       /* Tell mac80211 our characteristics */
+       hw->flags = IEEE80211_HW_SIGNAL_DBM |
+                   IEEE80211_HW_AMPDU_AGGREGATION |
+                   IEEE80211_HW_NEED_DTIM_PERIOD |
+                   IEEE80211_HW_SPECTRUM_MGMT |
+                   IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
+       if (priv->cfg->sku & IWL_SKU_N)
+               hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+                            IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+
+       hw->sta_data_size = sizeof(struct iwl_station_priv);
+       hw->vif_data_size = sizeof(struct iwl_vif_priv);
+
+       for_each_context(priv, ctx) {
+               hw->wiphy->interface_modes |= ctx->interface_modes;
+               hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
+       }
+
+       hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
+                           WIPHY_FLAG_DISABLE_BEACON_HINTS;
+
+       /*
+        * For now, disable PS by default because it affects
+        * RX performance significantly.
+        */
+       hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+       hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+       /* we create the 802.11 header and a zero-length SSID element */
+       hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
+
+       /* Default value; 4 EDCA QOS priorities */
+       hw->queues = 4;
+
+       hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+
+       if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
+               priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+                       &priv->bands[IEEE80211_BAND_2GHZ];
+       if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
+               priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+                       &priv->bands[IEEE80211_BAND_5GHZ];
+
+       iwl_legacy_leds_init(priv);
+
+       ret = ieee80211_register_hw(priv->hw);
+       if (ret) {
+               IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
+               return ret;
+       }
+       priv->mac80211_registered = 1;
+
+       return 0;
+}
+
+
+int iwl4965_mac_start(struct ieee80211_hw *hw)
+{
+       struct iwl_priv *priv = hw->priv;
+       int ret;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       /* we should be verifying the device is ready to be opened */
+       mutex_lock(&priv->mutex);
+       ret = __iwl4965_up(priv);
+       mutex_unlock(&priv->mutex);
+
+       if (ret)
+               return ret;
+
+       if (iwl_legacy_is_rfkill(priv))
+               goto out;
+
+       IWL_DEBUG_INFO(priv, "Start UP work done.\n");
+
+       /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
+        * mac80211 will not be run successfully. */
+       ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+                       test_bit(STATUS_READY, &priv->status),
+                       UCODE_READY_TIMEOUT);
+       if (!ret) {
+               if (!test_bit(STATUS_READY, &priv->status)) {
+                       IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
+                               jiffies_to_msecs(UCODE_READY_TIMEOUT));
+                       return -ETIMEDOUT;
+               }
+       }
+
+       iwl4965_led_enable(priv);
+
+out:
+       priv->is_open = 1;
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+       return 0;
+}
+
+void iwl4965_mac_stop(struct ieee80211_hw *hw)
+{
+       struct iwl_priv *priv = hw->priv;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       if (!priv->is_open)
+               return;
+
+       priv->is_open = 0;
+
+       iwl4965_down(priv);
+
+       flush_workqueue(priv->workqueue);
+
+       /* enable interrupts again in order to receive rfkill changes */
+       iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
+       iwl_legacy_enable_interrupts(priv);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       struct iwl_priv *priv = hw->priv;
+
+       IWL_DEBUG_MACDUMP(priv, "enter\n");
+
+       IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+                    ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+       if (iwl4965_tx_skb(priv, skb))
+               dev_kfree_skb_any(skb);
+
+       IWL_DEBUG_MACDUMP(priv, "leave\n");
+}
+
+void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif,
+                               struct ieee80211_key_conf *keyconf,
+                               struct ieee80211_sta *sta,
+                               u32 iv32, u16 *phase1key)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
+                           iv32, phase1key);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+                      struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+                      struct ieee80211_key_conf *key)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+       struct iwl_rxon_context *ctx = vif_priv->ctx;
+       int ret;
+       u8 sta_id;
+       bool is_default_wep_key = false;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       if (priv->cfg->mod_params->sw_crypto) {
+               IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
+               return -EOPNOTSUPP;
+       }
+
+       sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
+       if (sta_id == IWL_INVALID_STATION)
+               return -EINVAL;
+
+       mutex_lock(&priv->mutex);
+       iwl_legacy_scan_cancel_timeout(priv, 100);
+
+       /*
+        * If we are getting a WEP group key and we haven't received any key
+        * mapping so far, we are in legacy WEP mode (group key only); otherwise
+        * we are in 1X mode.
+        * In legacy WEP mode, we use another host command to the uCode.
+        */
+       if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+            key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
+           !sta) {
+               if (cmd == SET_KEY)
+                       is_default_wep_key = !ctx->key_mapping_keys;
+               else
+                       is_default_wep_key =
+                                       (key->hw_key_idx == HW_KEY_DEFAULT);
+       }
+
+       switch (cmd) {
+       case SET_KEY:
+               if (is_default_wep_key)
+                       ret = iwl4965_set_default_wep_key(priv,
+                                                       vif_priv->ctx, key);
+               else
+                       ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx,
+                                                 key, sta_id);
+
+               IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
+               break;
+       case DISABLE_KEY:
+               if (is_default_wep_key)
+                       ret = iwl4965_remove_default_wep_key(priv, ctx, key);
+               else
+                       ret = iwl4965_remove_dynamic_key(priv, ctx,
+                                                       key, sta_id);
+
+               IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
+               break;
+       default:
+               ret = -EINVAL;
+       }
+
+       mutex_unlock(&priv->mutex);
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+
+       return ret;
+}
+
+int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
+                           struct ieee80211_vif *vif,
+                           enum ieee80211_ampdu_mlme_action action,
+                           struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                           u8 buf_size)
+{
+       struct iwl_priv *priv = hw->priv;
+       int ret = -EINVAL;
+
+       IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
+                    sta->addr, tid);
+
+       if (!(priv->cfg->sku & IWL_SKU_N))
+               return -EACCES;
+
+       mutex_lock(&priv->mutex);
+
+       switch (action) {
+       case IEEE80211_AMPDU_RX_START:
+               IWL_DEBUG_HT(priv, "start Rx\n");
+               ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn);
+               break;
+       case IEEE80211_AMPDU_RX_STOP:
+               IWL_DEBUG_HT(priv, "stop Rx\n");
+               ret = iwl4965_sta_rx_agg_stop(priv, sta, tid);
+               if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+                       ret = 0;
+               break;
+       case IEEE80211_AMPDU_TX_START:
+               IWL_DEBUG_HT(priv, "start Tx\n");
+               ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn);
+               if (ret == 0) {
+                       priv->_4965.agg_tids_count++;
+                       IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
+                                    priv->_4965.agg_tids_count);
+               }
+               break;
+       case IEEE80211_AMPDU_TX_STOP:
+               IWL_DEBUG_HT(priv, "stop Tx\n");
+               ret = iwl4965_tx_agg_stop(priv, vif, sta, tid);
+               if ((ret == 0) && (priv->_4965.agg_tids_count > 0)) {
+                       priv->_4965.agg_tids_count--;
+                       IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
+                                    priv->_4965.agg_tids_count);
+               }
+               if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+                       ret = 0;
+               break;
+       case IEEE80211_AMPDU_TX_OPERATIONAL:
+               ret = 0;
+               break;
+       }
+       mutex_unlock(&priv->mutex);
+
+       return ret;
+}
+
+int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
+                      struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
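+       /* When our vif is a station, the peer being added is the AP */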
+       bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+       int ret;
+       u8 sta_id;
+
+       IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
+                       sta->addr);
+       mutex_lock(&priv->mutex);
+       IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
+                       sta->addr);
+       sta_priv->common.sta_id = IWL_INVALID_STATION;
+
+       atomic_set(&sta_priv->pending_frames, 0);
+
+       ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr,
+                                    is_ap, sta, &sta_id);
+       if (ret) {
+               IWL_ERR(priv, "Unable to add station %pM (%d)\n",
+                       sta->addr, ret);
+               /* Should we return success if return code is EEXIST ? */
+               mutex_unlock(&priv->mutex);
+               return ret;
+       }
+
+       sta_priv->common.sta_id = sta_id;
+
+       /* Initialize rate scaling */
+       IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
+                      sta->addr);
+       iwl4965_rs_rate_init(priv, sta, sta_id);
+       mutex_unlock(&priv->mutex);
+
+       return 0;
+}
+
+void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
+                              struct ieee80211_channel_switch *ch_switch)
+{
+       struct iwl_priv *priv = hw->priv;
+       const struct iwl_channel_info *ch_info;
+       struct ieee80211_conf *conf = &hw->conf;
+       struct ieee80211_channel *channel = ch_switch->channel;
+       struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       u16 ch;
+       unsigned long flags = 0;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       if (iwl_legacy_is_rfkill(priv))
+               goto out_exit;
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+           test_bit(STATUS_SCANNING, &priv->status))
+               goto out_exit;
+
+       if (!iwl_legacy_is_associated_ctx(ctx))
+               goto out_exit;
+
+       /* channel switch in progress */
+       if (priv->switch_rxon.switch_in_progress)
+               goto out_exit;
+
+       mutex_lock(&priv->mutex);
+       if (priv->cfg->ops->lib->set_channel_switch) {
+
+               ch = channel->hw_value;
+               if (le16_to_cpu(ctx->active.channel) != ch) {
+                       ch_info = iwl_legacy_get_channel_info(priv,
+                                                      channel->band,
+                                                      ch);
+                       if (!iwl_legacy_is_channel_valid(ch_info)) {
+                               IWL_DEBUG_MAC80211(priv, "invalid channel\n");
+                               goto out;
+                       }
+                       spin_lock_irqsave(&priv->lock, flags);
+
+                       priv->current_ht_config.smps = conf->smps_mode;
+
+                       /* Configure HT40 channels */
+                       ctx->ht.enabled = conf_is_ht(conf);
+                       if (ctx->ht.enabled) {
+                               if (conf_is_ht40_minus(conf)) {
+                                       ctx->ht.extension_chan_offset =
+                                       IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+                                       ctx->ht.is_40mhz = true;
+                               } else if (conf_is_ht40_plus(conf)) {
+                                       ctx->ht.extension_chan_offset =
+                                       IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+                                       ctx->ht.is_40mhz = true;
+                               } else {
+                                       ctx->ht.extension_chan_offset =
+                                               IEEE80211_HT_PARAM_CHA_SEC_NONE;
+                                       ctx->ht.is_40mhz = false;
+                               }
+                       } else {
+                               ctx->ht.is_40mhz = false;
+                       }
+
+                       if ((le16_to_cpu(ctx->staging.channel) != ch))
+                               ctx->staging.flags = 0;
+
+                       iwl_legacy_set_rxon_channel(priv, channel, ctx);
+                       iwl_legacy_set_rxon_ht(priv, ht_conf);
+                       iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
+                                              ctx->vif);
+                       spin_unlock_irqrestore(&priv->lock, flags);
+
+                       iwl_legacy_set_rate(priv);
+                       /*
+                        * at this point, staging_rxon has the
+                        * configuration for channel switch
+                        */
+                       if (priv->cfg->ops->lib->set_channel_switch(priv,
+                                                                   ch_switch))
+                               priv->switch_rxon.switch_in_progress = false;
+               }
+       }
+out:
+       mutex_unlock(&priv->mutex);
+out_exit:
+       if (!priv->switch_rxon.switch_in_progress)
+               ieee80211_chswitch_done(ctx->vif, false);
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+void iwl4965_configure_filter(struct ieee80211_hw *hw,
+                            unsigned int changed_flags,
+                            unsigned int *total_flags,
+                            u64 multicast)
+{
+       struct iwl_priv *priv = hw->priv;
+       __le32 filter_or = 0, filter_nand = 0;
+       struct iwl_rxon_context *ctx;
+
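+/*
+ * CHK() builds an OR mask of RXON filter flags to set and a NAND mask
+ * of flags to clear, based on the requested FIF_* flags.
+ */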
+#define CHK(test, flag)        do { \
+       if (*total_flags & (test))              \
+               filter_or |= (flag);            \
+       else                                    \
+               filter_nand |= (flag);          \
+       } while (0)
+
+       IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
+                       changed_flags, *total_flags);
+
+       CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+       /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
+       CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
+       CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+       mutex_lock(&priv->mutex);
+
+       for_each_context(priv, ctx) {
+               ctx->staging.filter_flags &= ~filter_nand;
+               ctx->staging.filter_flags |= filter_or;
+
+               /*
+                * Don't commit the RXON here, since the hardware may be
+                * performing a scan; the filter flags change will be
+                * committed eventually anyway.
+                */
+       }
+
+       mutex_unlock(&priv->mutex);
+
+       /*
+        * Receiving all multicast frames is always enabled by the
+        * default flags setup in iwl_legacy_connection_init_rx_config()
+        * since we currently do not support programming multicast
+        * filters into the device.
+        */
+       *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+                       FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+/*****************************************************************************
+ *
+ * driver setup and teardown
+ *
+ *****************************************************************************/
+
+static void iwl4965_bg_txpower_work(struct work_struct *work)
+{
+       struct iwl_priv *priv = container_of(work, struct iwl_priv,
+                       txpower_work);
+
+       /* If a scan happened to start before we got here
+        * then just return; the statistics notification will
+        * kick off another scheduled work to compensate for
+        * any temperature delta we missed here. */
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+           test_bit(STATUS_SCANNING, &priv->status))
+               return;
+
+       mutex_lock(&priv->mutex);
+
+       /* Regardless of whether we are associated, we must reconfigure the
+        * TX power, since frames can be sent on non-radar channels while
+        * not associated. */
+       priv->cfg->ops->lib->send_tx_power(priv);
+
+       /* Update last_temperature to keep is_calib_needed from running
+        * when it isn't needed... */
+       priv->last_temperature = priv->temperature;
+
+       mutex_unlock(&priv->mutex);
+}
+
+static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
+{
+       priv->workqueue = create_singlethread_workqueue(DRV_NAME);
+
+       init_waitqueue_head(&priv->wait_command_queue);
+
+       INIT_WORK(&priv->restart, iwl4965_bg_restart);
+       INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
+       INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work);
+       INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
+       INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
+
+       iwl_legacy_setup_scan_deferred_work(priv);
+
+       INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
+
+       init_timer(&priv->statistics_periodic);
+       priv->statistics_periodic.data = (unsigned long)priv;
+       priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
+
+       init_timer(&priv->ucode_trace);
+       priv->ucode_trace.data = (unsigned long)priv;
+       priv->ucode_trace.function = iwl4965_bg_ucode_trace;
+
+       init_timer(&priv->watchdog);
+       priv->watchdog.data = (unsigned long)priv;
+       priv->watchdog.function = iwl_legacy_bg_watchdog;
+
+       tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+               iwl4965_irq_tasklet, (unsigned long)priv);
+}
+
+static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
+{
+       cancel_work_sync(&priv->txpower_work);
+       cancel_delayed_work_sync(&priv->init_alive_start);
+       cancel_delayed_work(&priv->alive_start);
+       cancel_work_sync(&priv->run_time_calib_work);
+
+       iwl_legacy_cancel_scan_deferred_work(priv);
+
+       del_timer_sync(&priv->statistics_periodic);
+       del_timer_sync(&priv->ucode_trace);
+}
+
+static void iwl4965_init_hw_rates(struct iwl_priv *priv,
+                             struct ieee80211_rate *rates)
+{
+       int i;
+
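+       /*
+        * Fill the mac80211 legacy rate table: .bitrate is in 100 kb/s
+        * units, while the iwlegacy rate table stores the IEEE rate value
+        * in 500 kb/s units, hence the multiplication by 5.
+        */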
+       for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
+               rates[i].bitrate = iwlegacy_rates[i].ieee * 5;
+               rates[i].hw_value = i; /* Rate scaling will work on indexes */
+               rates[i].hw_value_short = i;
+               rates[i].flags = 0;
+               if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
+                       /*
+                        * If CCK != 1M then set short preamble rate flag.
+                        */
+                       rates[i].flags |=
+                               (iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ?
+                                       0 : IEEE80211_RATE_SHORT_PREAMBLE;
+               }
+       }
+}
+
+/*
+ * Acquire priv->lock before calling this function!
+ */
+void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
+{
+       iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
+                            (index & 0xff) | (txq_id << 8));
+       iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
+}
+
+void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
+                                       struct iwl_tx_queue *txq,
+                                       int tx_fifo_id, int scd_retry)
+{
+       int txq_id = txq->q.id;
+
+       /* Find out whether to activate Tx queue */
+       int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
+
+       /* Set up and activate */
+       iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
+                        (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+                        (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
+                        (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
+                        (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
+                        IWL49_SCD_QUEUE_STTS_REG_MSK);
+
+       txq->sched_retry = scd_retry;
+
+       IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
+                      active ? "Activate" : "Deactivate",
+                      scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
+}
+
+
+static int iwl4965_init_drv(struct iwl_priv *priv)
+{
+       int ret;
+
+       spin_lock_init(&priv->sta_lock);
+       spin_lock_init(&priv->hcmd_lock);
+
+       INIT_LIST_HEAD(&priv->free_frames);
+
+       mutex_init(&priv->mutex);
+       mutex_init(&priv->sync_cmd_mutex);
+
+       priv->ieee_channels = NULL;
+       priv->ieee_rates = NULL;
+       priv->band = IEEE80211_BAND_2GHZ;
+
+       priv->iw_mode = NL80211_IFTYPE_STATION;
+       priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
+       priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
+       priv->_4965.agg_tids_count = 0;
+
+       /* initialize force reset */
+       priv->force_reset[IWL_RF_RESET].reset_duration =
+               IWL_DELAY_NEXT_FORCE_RF_RESET;
+       priv->force_reset[IWL_FW_RESET].reset_duration =
+               IWL_DELAY_NEXT_FORCE_FW_RELOAD;
+
+       /* Choose which receivers/antennas to use */
+       if (priv->cfg->ops->hcmd->set_rxon_chain)
+               priv->cfg->ops->hcmd->set_rxon_chain(priv,
+                                       &priv->contexts[IWL_RXON_CTX_BSS]);
+
+       iwl_legacy_init_scan_params(priv);
+
+       /* Set tx_power_user_lmt to the lowest power level; this value
+        * will be overwritten by the channel max power average from
+        * the EEPROM. */
+       priv->tx_power_user_lmt = IWL4965_TX_POWER_TARGET_POWER_MIN;
+       priv->tx_power_next = IWL4965_TX_POWER_TARGET_POWER_MIN;
+
+       ret = iwl_legacy_init_channel_map(priv);
+       if (ret) {
+               IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
+               goto err;
+       }
+
+       ret = iwl_legacy_init_geos(priv);
+       if (ret) {
+               IWL_ERR(priv, "initializing geos failed: %d\n", ret);
+               goto err_free_channel_map;
+       }
+       iwl4965_init_hw_rates(priv, priv->ieee_rates);
+
+       return 0;
+
+err_free_channel_map:
+       iwl_legacy_free_channel_map(priv);
+err:
+       return ret;
+}
+
+static void iwl4965_uninit_drv(struct iwl_priv *priv)
+{
+       iwl4965_calib_free_results(priv);
+       iwl_legacy_free_geos(priv);
+       iwl_legacy_free_channel_map(priv);
+       kfree(priv->scan_cmd);
+}
+
+static void iwl4965_hw_detect(struct iwl_priv *priv)
+{
+       priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
+       priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
+       pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
+       IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
+}
+
+static int iwl4965_set_hw_params(struct iwl_priv *priv)
+{
+       priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
+       priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
+       if (priv->cfg->mod_params->amsdu_size_8K)
+               priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
+       else
+               priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
+
+       priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
+
+       if (priv->cfg->mod_params->disable_11n)
+               priv->cfg->sku &= ~IWL_SKU_N;
+
+       /* Device-specific setup */
+       return priv->cfg->ops->lib->set_hw_params(priv);
+}
+
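+/*
+ * Map mac80211 access categories (VO, VI, BE, BK) to the device Tx FIFOs
+ * and Tx queues used by the BSS context.
+ */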
+static const u8 iwl4965_bss_ac_to_fifo[] = {
+       IWL_TX_FIFO_VO,
+       IWL_TX_FIFO_VI,
+       IWL_TX_FIFO_BE,
+       IWL_TX_FIFO_BK,
+};
+
+static const u8 iwl4965_bss_ac_to_queue[] = {
+       0, 1, 2, 3,
+};
+
+static int
+iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       int err = 0, i;
+       struct iwl_priv *priv;
+       struct ieee80211_hw *hw;
+       struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
+       unsigned long flags;
+       u16 pci_cmd;
+
+       /************************
+        * 1. Allocating HW data
+        ************************/
+
+       hw = iwl_legacy_alloc_all(cfg);
+       if (!hw) {
+               err = -ENOMEM;
+               goto out;
+       }
+       priv = hw->priv;
+       /* At this point both hw and priv are allocated. */
+
+       /*
+        * The default context is always valid,
+        * more may be discovered when firmware
+        * is loaded.
+        */
+       priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
+
+       for (i = 0; i < NUM_IWL_RXON_CTX; i++)
+               priv->contexts[i].ctxid = i;
+
+       priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
+       priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
+       priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
+       priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
+       priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
+       priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
+       priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
+       priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
+       priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo;
+       priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue;
+       priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
+               BIT(NL80211_IFTYPE_ADHOC);
+       priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
+               BIT(NL80211_IFTYPE_STATION);
+       priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
+       priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
+       priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
+       priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
+
+       BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1);
+
+       SET_IEEE80211_DEV(hw, &pdev->dev);
+
+       IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
+       priv->cfg = cfg;
+       priv->pci_dev = pdev;
+       priv->inta_mask = CSR_INI_SET_MASK;
+
+       if (iwl_legacy_alloc_traffic_mem(priv))
+               IWL_ERR(priv, "Not enough memory to generate traffic log\n");
+
+       /**************************
+        * 2. Initializing PCI bus
+        **************************/
+       pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+                               PCIE_LINK_STATE_CLKPM);
+
+       if (pci_enable_device(pdev)) {
+               err = -ENODEV;
+               goto out_ieee80211_free_hw;
+       }
+
+       pci_set_master(pdev);
+
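+       /* Try 36-bit DMA addressing first, then fall back to 32-bit */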
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+       if (!err)
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+       if (err) {
+               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (!err)
+                       err = pci_set_consistent_dma_mask(pdev,
+                                                       DMA_BIT_MASK(32));
+               /* both attempts failed: */
+               if (err) {
+                       IWL_WARN(priv, "No suitable DMA available.\n");
+                       goto out_pci_disable_device;
+               }
+       }
+
+       err = pci_request_regions(pdev, DRV_NAME);
+       if (err)
+               goto out_pci_disable_device;
+
+       pci_set_drvdata(pdev, priv);
+
+
+       /***********************
+        * 3. Read REV register
+        ***********************/
+       priv->hw_base = pci_iomap(pdev, 0, 0);
+       if (!priv->hw_base) {
+               err = -ENODEV;
+               goto out_pci_release_regions;
+       }
+
+       IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
+               (unsigned long long) pci_resource_len(pdev, 0));
+       IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
+
+       /* These spin locks are used in apm_ops.init and for EEPROM access,
+        * so initialize them now.
+        */
+       spin_lock_init(&priv->reg_lock);
+       spin_lock_init(&priv->lock);
+
+       /*
+        * stop and reset the on-board processor just in case it is in a
+        * strange state ... like being left stranded by a primary kernel
+        * and this is now the kdump kernel trying to start up
+        */
+       iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+       iwl4965_hw_detect(priv);
+       IWL_INFO(priv, "Detected %s, REV=0x%X\n",
+               priv->cfg->name, priv->hw_rev);
+
+       /* We disable the RETRY_TIMEOUT register (0x41) to keep
+        * PCI Tx retries from interfering with C3 CPU state */
+       pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+       iwl4965_prepare_card_hw(priv);
+       if (!priv->hw_ready) {
+               IWL_WARN(priv, "Failed, HW not ready\n");
+               goto out_iounmap;
+       }
+
+       /*****************
+        * 4. Read EEPROM
+        *****************/
+       /* Read the EEPROM */
+       err = iwl_legacy_eeprom_init(priv);
+       if (err) {
+               IWL_ERR(priv, "Unable to init EEPROM\n");
+               goto out_iounmap;
+       }
+       err = iwl4965_eeprom_check_version(priv);
+       if (err)
+               goto out_free_eeprom;
+
+       /* extract MAC Address */
+       iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr);
+       IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
+       priv->hw->wiphy->addresses = priv->addresses;
+       priv->hw->wiphy->n_addresses = 1;
+
+       /************************
+        * 5. Setup HW constants
+        ************************/
+       if (iwl4965_set_hw_params(priv)) {
+               IWL_ERR(priv, "failed to set hw parameters\n");
+               goto out_free_eeprom;
+       }
+
+       /*******************
+        * 6. Setup priv
+        *******************/
+
+       err = iwl4965_init_drv(priv);
+       if (err)
+               goto out_free_eeprom;
+       /* At this point both hw and priv are initialized. */
+
+       /********************
+        * 7. Setup services
+        ********************/
+       spin_lock_irqsave(&priv->lock, flags);
+       iwl_legacy_disable_interrupts(priv);
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       pci_enable_msi(priv->pci_dev);
+
+       err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
+                         IRQF_SHARED, DRV_NAME, priv);
+       if (err) {
+               IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
+               goto out_disable_msi;
+       }
+
+       iwl4965_setup_deferred_work(priv);
+       iwl4965_setup_rx_handlers(priv);
+
+       /*********************************************
+        * 8. Enable interrupts and read RFKILL state
+        *********************************************/
+
+       /* enable interrupts if needed: hw bug w/a */
+       pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
+       if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+               pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+               pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
+       }
+
+       iwl_legacy_enable_interrupts(priv);
+
+       /* If platform's RF_KILL switch is NOT set to KILL */
+       if (iwl_read32(priv, CSR_GP_CNTRL) &
+               CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+               clear_bit(STATUS_RF_KILL_HW, &priv->status);
+       else
+               set_bit(STATUS_RF_KILL_HW, &priv->status);
+
+       wiphy_rfkill_set_hw_state(priv->hw->wiphy,
+               test_bit(STATUS_RF_KILL_HW, &priv->status));
+
+       iwl_legacy_power_initialize(priv);
+
+       init_completion(&priv->_4965.firmware_loading_complete);
+
+       err = iwl4965_request_firmware(priv, true);
+       if (err)
+               goto out_destroy_workqueue;
+
+       return 0;
+
+ out_destroy_workqueue:
+       destroy_workqueue(priv->workqueue);
+       priv->workqueue = NULL;
+       free_irq(priv->pci_dev->irq, priv);
+ out_disable_msi:
+       pci_disable_msi(priv->pci_dev);
+       iwl4965_uninit_drv(priv);
+ out_free_eeprom:
+       iwl_legacy_eeprom_free(priv);
+ out_iounmap:
+       pci_iounmap(pdev, priv->hw_base);
+ out_pci_release_regions:
+       pci_set_drvdata(pdev, NULL);
+       pci_release_regions(pdev);
+ out_pci_disable_device:
+       pci_disable_device(pdev);
+ out_ieee80211_free_hw:
+       iwl_legacy_free_traffic_mem(priv);
+       ieee80211_free_hw(priv->hw);
+ out:
+       return err;
+}
+
+static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
+{
+       struct iwl_priv *priv = pci_get_drvdata(pdev);
+       unsigned long flags;
+
+       if (!priv)
+               return;
+
+       wait_for_completion(&priv->_4965.firmware_loading_complete);
+
+       IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
+
+       iwl_legacy_dbgfs_unregister(priv);
+       sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
+
+       /* The ieee80211_unregister_hw() call will cause iwl_mac_stop(), and
+        * hence iwl4965_down(), to be called. Since we are removing the
+        * device, we need to set the STATUS_EXIT_PENDING bit first.
+        */
+       set_bit(STATUS_EXIT_PENDING, &priv->status);
+
+       iwl_legacy_leds_exit(priv);
+
+       if (priv->mac80211_registered) {
+               ieee80211_unregister_hw(priv->hw);
+               priv->mac80211_registered = 0;
+       } else {
+               iwl4965_down(priv);
+       }
+
+       /*
+        * Make sure device is reset to low power before unloading driver.
+        * This may be redundant with iwl4965_down(), but there are paths to
+        * run iwl4965_down() without calling apm_ops.stop(), and there are
+        * paths to avoid running iwl4965_down() at all before leaving driver.
+        * This (inexpensive) call *makes sure* device is reset.
+        */
+       iwl_legacy_apm_stop(priv);
+
+       /* make sure we flush any pending irq or
+        * tasklet for the driver
+        */
+       spin_lock_irqsave(&priv->lock, flags);
+       iwl_legacy_disable_interrupts(priv);
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       iwl4965_synchronize_irq(priv);
+
+       iwl4965_dealloc_ucode_pci(priv);
+
+       if (priv->rxq.bd)
+               iwl4965_rx_queue_free(priv, &priv->rxq);
+       iwl4965_hw_txq_ctx_free(priv);
+
+       iwl_legacy_eeprom_free(priv);
+
+
+       /*netif_stop_queue(dev); */
+       flush_workqueue(priv->workqueue);
+
+       /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
+        * priv->workqueue... so we can't take down the workqueue
+        * until now... */
+       destroy_workqueue(priv->workqueue);
+       priv->workqueue = NULL;
+       iwl_legacy_free_traffic_mem(priv);
+
+       free_irq(priv->pci_dev->irq, priv);
+       pci_disable_msi(priv->pci_dev);
+       pci_iounmap(pdev, priv->hw_base);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+
+       iwl4965_uninit_drv(priv);
+
+       dev_kfree_skb(priv->beacon_skb);
+
+       ieee80211_free_hw(priv->hw);
+}
+
+/*
+ * Activate/deactivate Tx DMA/FIFO channels according to the Tx FIFO mask.
+ * Must be called under priv->lock and with MAC access held.
+ */
+void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
+{
+       iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask);
+}
+
+/*****************************************************************************
+ *
+ * driver and module entry point
+ *
+ *****************************************************************************/
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = {
+#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965)
+       {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},
+       {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)},
+#endif /* CONFIG_IWL4965 */
+
+       {0}
+};
+MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
+
+static struct pci_driver iwl4965_driver = {
+       .name = DRV_NAME,
+       .id_table = iwl4965_hw_card_ids,
+       .probe = iwl4965_pci_probe,
+       .remove = __devexit_p(iwl4965_pci_remove),
+       .driver.pm = IWL_LEGACY_PM_OPS,
+};
+
+static int __init iwl4965_init(void)
+{
+       int ret;
+       pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+       pr_info(DRV_COPYRIGHT "\n");
+
+       ret = iwl4965_rate_control_register();
+       if (ret) {
+               pr_err("Unable to register rate control algorithm: %d\n", ret);
+               return ret;
+       }
+
+       ret = pci_register_driver(&iwl4965_driver);
+       if (ret) {
+               pr_err("Unable to initialize PCI module\n");
+               goto error_register;
+       }
+
+       return ret;
+
+error_register:
+       iwl4965_rate_control_unregister();
+       return ret;
+}
+
+static void __exit iwl4965_exit(void)
+{
+       pci_unregister_driver(&iwl4965_driver);
+       iwl4965_rate_control_unregister();
+}
+
+module_exit(iwl4965_exit);
+module_init(iwl4965_init);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+
+module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
+MODULE_PARM_DESC(swcrypto, "use software crypto (default 0 [hardware])");
+module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
+MODULE_PARM_DESC(queues_num, "number of hw queues.");
+module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
+MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
+module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
+                  int, S_IRUGO);
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
+module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
index ed424574160ebd834f005203f49815ace833695b..17d555f2215af3ee004081fab73b682ddc1cea93 100644 (file)
@@ -1,14 +1,52 @@
-config IWLWIFI
-       tristate "Intel Wireless Wifi"
+config IWLAGN
+       tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlagn) "
        depends on PCI && MAC80211
        select FW_LOADER
+       select NEW_LEDS
+       select LEDS_CLASS
+       select LEDS_TRIGGERS
+       select MAC80211_LEDS
+       ---help---
+         Select to build the driver supporting the:
+
+         Intel Wireless WiFi Link Next-Gen AGN
+
+         This option enables support for use with the following hardware:
+               Intel Wireless WiFi Link 6250AGN Adapter
+               Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN)
+               Intel WiFi Link 1000BGN
+               Intel Wireless WiFi 5150AGN
+               Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
+               Intel 6005 Series Wi-Fi Adapters
+               Intel 6030 Series Wi-Fi Adapters
+               Intel Wireless WiFi Link 6150BGN 2 Adapter
+               Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
+               Intel 2000 Series Wi-Fi Adapters
+
+         This driver uses the kernel's mac80211 subsystem.
+
+         In order to use this driver, you will need a microcode (uCode)
+         image for it. You can obtain the microcode from:
+
+                 <http://intellinuxwireless.org/>.
+
+         The microcode is typically installed in /lib/firmware. You can
+         look in the hotplug script /etc/hotplug/firmware.agent to
+         determine which directory FIRMWARE_DIR is set to when the script
+         runs.
+
+         If you want to compile the driver as a module ( = code which can be
+         inserted in and removed from the running kernel whenever you want),
+         say M here and read <file:Documentation/kbuild/modules.txt>.  The
+         module will be called iwlagn.
 
 menu "Debugging Options"
-       depends on IWLWIFI
+       depends on IWLAGN
 
 config IWLWIFI_DEBUG
-       bool "Enable full debugging output in iwlagn and iwl3945 drivers"
-       depends on IWLWIFI
+       bool "Enable full debugging output in the iwlagn driver"
+       depends on IWLAGN
        ---help---
          This option will enable debug tracing output for the iwlwifi drivers
 
@@ -33,7 +71,7 @@ config IWLWIFI_DEBUG
 
 config IWLWIFI_DEBUGFS
         bool "iwlagn debugfs support"
-        depends on IWLWIFI && MAC80211_DEBUGFS
+        depends on IWLAGN && MAC80211_DEBUGFS
         ---help---
          Enable creation of debugfs files for the iwlwifi drivers. This
          is a low-impact option that allows getting insight into the
@@ -41,13 +79,13 @@ config IWLWIFI_DEBUGFS
 
 config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
         bool "Experimental uCode support"
-        depends on IWLWIFI && IWLWIFI_DEBUG
+        depends on IWLAGN && IWLWIFI_DEBUG
         ---help---
          Enable use of experimental ucode for testing and debugging.
 
 config IWLWIFI_DEVICE_TRACING
        bool "iwlwifi device access tracing"
-       depends on IWLWIFI
+       depends on IWLAGN
        depends on EVENT_TRACING
        help
          Say Y here to trace all commands, including TX frames and IO
@@ -64,73 +102,19 @@ config IWLWIFI_DEVICE_TRACING
          occur.
 endmenu
 
-config IWLAGN
-       tristate "Intel Wireless WiFi Next Gen AGN (iwlagn)"
-       depends on IWLWIFI
-       ---help---
-         Select to build the driver supporting the:
-
-         Intel Wireless WiFi Link Next-Gen AGN
-
-         This driver uses the kernel's mac80211 subsystem.
-
-         In order to use this driver, you will need a microcode (uCode)
-         image for it. You can obtain the microcode from:
-
-                 <http://intellinuxwireless.org/>.
-
-         The microcode is typically installed in /lib/firmware. You can
-         look in the hotplug script /etc/hotplug/firmware.agent to
-         determine which directory FIRMWARE_DIR is set to when the script
-         runs.
-
-         If you want to compile the driver as a module ( = code which can be
-         inserted in and removed from the running kernel whenever you want),
-         say M here and read <file:Documentation/kbuild/modules.txt>.  The
-         module will be called iwlagn.
-
-
-config IWL4965
-       bool "Intel Wireless WiFi 4965AGN"
-       depends on IWLAGN
-       ---help---
-         This option enables support for Intel Wireless WiFi Link 4965AGN
-
-config IWL5000
-       bool "Intel Wireless-N/Advanced-N/Ultimate-N WiFi Link"
+config IWL_P2P
+       bool "iwlwifi experimental P2P support"
        depends on IWLAGN
-       ---help---
-         This option enables support for use with the following hardware:
-               Intel Wireless WiFi Link 6250AGN Adapter
-               Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN)
-               Intel WiFi Link 1000BGN
-               Intel Wireless WiFi 5150AGN
-               Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
-               Intel 6000 Gen 2 Series Wi-Fi Adapters (6000G2A and 6000G2B)
-               Intel WIreless WiFi Link 6050BGN Gen 2 Adapter
-               Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
-
-config IWL3945
-       tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
-       depends on IWLWIFI
-       ---help---
-         Select to build the driver supporting the:
-
-         Intel PRO/Wireless 3945ABG/BG Network Connection
-
-         This driver uses the kernel's mac80211 subsystem.
-
-         In order to use this driver, you will need a microcode (uCode)
-         image for it. You can obtain the microcode from:
+       help
+         This option enables experimental P2P support for some devices,
+         based on microcode support. Since P2P support is still under
+         development, this option may enable it now for devices that
+         later turn out not to support it due to microcode restrictions.
 
-                 <http://intellinuxwireless.org/>.
+         To determine if your microcode supports the experimental P2P
+         offered by this option, check if the driver advertises AP
+         support when it is loaded.
 
-         The microcode is typically installed in /lib/firmware. You can
-         look in the hotplug script /etc/hotplug/firmware.agent to
-         determine which directory FIRMWARE_DIR is set to when the script
-         runs.
+         Say Y only if you want to experiment with P2P.
 
-         If you want to compile the driver as a module ( = code which can be
-         inserted in and removed from the running kernel whenever you want),
-         say M here and read <file:Documentation/kbuild/modules.txt>.  The
-         module will be called iwl3945.
index 93380f97835f3a2f9bf82fca6720c90d4a05484f..9d6ee836426c55061e1c0ab91fec6fd2b2f04cd2 100644 (file)
@@ -1,35 +1,23 @@
-obj-$(CONFIG_IWLWIFI)  += iwlcore.o
-iwlcore-objs           := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
-iwlcore-objs           += iwl-rx.o iwl-tx.o iwl-sta.o
-iwlcore-objs           += iwl-scan.o iwl-led.o
-iwlcore-$(CONFIG_IWL3945) += iwl-legacy.o
-iwlcore-$(CONFIG_IWL4965) += iwl-legacy.o
-iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
-iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
-
-# If 3945 is selected only, iwl-legacy.o will be added
-# to iwlcore-m above, but it needs to be built in.
-iwlcore-objs += $(iwlcore-m)
-
-CFLAGS_iwl-devtrace.o := -I$(src)
-
 # AGN
 obj-$(CONFIG_IWLAGN)   += iwlagn.o
 iwlagn-objs            := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o
 iwlagn-objs            += iwl-agn-ucode.o iwl-agn-tx.o
-iwlagn-objs            += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o
+iwlagn-objs            += iwl-agn-lib.o iwl-agn-calib.o
 iwlagn-objs            += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o
-iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
 
-iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
-iwlagn-$(CONFIG_IWL5000) += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o
-iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
-iwlagn-$(CONFIG_IWL5000) += iwl-6000.o
-iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
+iwlagn-objs            += iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
+iwlagn-objs            += iwl-rx.o iwl-tx.o iwl-sta.o
+iwlagn-objs            += iwl-scan.o iwl-led.o
+iwlagn-objs             += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o
+iwlagn-objs             += iwl-5000.o
+iwlagn-objs             += iwl-6000.o
+iwlagn-objs             += iwl-1000.o
+iwlagn-objs             += iwl-2000.o
+
+iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
+iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
+iwlagn-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
 
-# 3945
-obj-$(CONFIG_IWL3945)  += iwl3945.o
-iwl3945-objs           := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
-iwl3945-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-3945-debugfs.o
+CFLAGS_iwl-devtrace.o := -I$(src)
 
 ccflags-y += -D__CHECK_ENDIAN__
index ba78bc8a259fe21078cb76343ac37a3df0770f49..e8e1c2dc8659b9842126ca1c9ae338cd46e32053 100644 (file)
@@ -232,8 +232,6 @@ static struct iwl_lib_ops iwl1000_lib = {
                .bt_stats_read = iwl_ucode_bt_stats_read,
                .reply_tx_error = iwl_reply_tx_error_read,
        },
-       .check_plcp_health = iwl_good_plcp_health,
-       .check_ack_health = iwl_good_ack_health,
        .txfifo_flush = iwlagn_txfifo_flush,
        .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
        .tt_ops = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
new file mode 100644 (file)
index 0000000..d7b6126
--- /dev/null
@@ -0,0 +1,560 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-sta.h"
+#include "iwl-agn.h"
+#include "iwl-helpers.h"
+#include "iwl-agn-hw.h"
+#include "iwl-6000-hw.h"
+#include "iwl-agn-led.h"
+#include "iwl-agn-debugfs.h"
+
+/* Highest firmware API version supported */
+#define IWL2030_UCODE_API_MAX 5
+#define IWL2000_UCODE_API_MAX 5
+#define IWL200_UCODE_API_MAX 5
+
+/* Lowest firmware API version supported */
+#define IWL2030_UCODE_API_MIN 5
+#define IWL2000_UCODE_API_MIN 5
+#define IWL200_UCODE_API_MIN 5
+
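+/*
+ * Firmware file names are built as <prefix><api>.ucode,
+ * e.g. "iwlwifi-2030-5.ucode".
+ */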
+#define IWL2030_FW_PRE "iwlwifi-2030-"
+#define _IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE #api ".ucode"
+#define IWL2030_MODULE_FIRMWARE(api) _IWL2030_MODULE_FIRMWARE(api)
+
+#define IWL2000_FW_PRE "iwlwifi-2000-"
+#define _IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE #api ".ucode"
+#define IWL2000_MODULE_FIRMWARE(api) _IWL2000_MODULE_FIRMWARE(api)
+
+#define IWL200_FW_PRE "iwlwifi-200-"
+#define _IWL200_MODULE_FIRMWARE(api) IWL200_FW_PRE #api ".ucode"
+#define IWL200_MODULE_FIRMWARE(api) _IWL200_MODULE_FIRMWARE(api)
+
+static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
+{
+       /* want Celsius */
+       priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
+       priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
+}
+
+/* NIC configuration for 2000 series */
+static void iwl2000_nic_config(struct iwl_priv *priv)
+{
+       u16 radio_cfg;
+
+       radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+
+       /* write radio config values to register */
+       if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX)
+               iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+                           EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
+                           EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
+                           EEPROM_RF_CFG_DASH_MSK(radio_cfg));
+
+       /* set CSR_HW_CONFIG_REG for uCode use */
+       iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+                   CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+                   CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+
+       if (priv->cfg->iq_invert)
+               iwl_set_bit(priv, CSR_GP_DRIVER_REG,
+                           CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
+
+}
+
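+/* Default sensitivity/auto-correlation thresholds for the 2000 series */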
+static struct iwl_sensitivity_ranges iwl2000_sensitivity = {
+       .min_nrg_cck = 97,
+       .max_nrg_cck = 0, /* not used, set to 0 */
+       .auto_corr_min_ofdm = 80,
+       .auto_corr_min_ofdm_mrc = 128,
+       .auto_corr_min_ofdm_x1 = 105,
+       .auto_corr_min_ofdm_mrc_x1 = 192,
+
+       .auto_corr_max_ofdm = 145,
+       .auto_corr_max_ofdm_mrc = 232,
+       .auto_corr_max_ofdm_x1 = 110,
+       .auto_corr_max_ofdm_mrc_x1 = 232,
+
+       .auto_corr_min_cck = 125,
+       .auto_corr_max_cck = 175,
+       .auto_corr_min_cck_mrc = 160,
+       .auto_corr_max_cck_mrc = 310,
+       .nrg_th_cck = 97,
+       .nrg_th_ofdm = 100,
+
+       .barker_corr_th_min = 190,
+       .barker_corr_th_min_mrc = 390,
+       .nrg_th_cca = 62,
+};
+
+static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
+{
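+       /* Honor the queues_num module parameter when it is in the valid range */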
+       if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+           priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
+               priv->cfg->base_params->num_of_queues =
+                       priv->cfg->mod_params->num_of_queues;
+
+       priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
+       priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
+       priv->hw_params.scd_bc_tbls_size =
+               priv->cfg->base_params->num_of_queues *
+               sizeof(struct iwlagn_scd_bc_tbl);
+       priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
+       priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
+       priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
+
+       priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
+       priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
+
+       priv->hw_params.max_bsm_size = 0;
+       priv->hw_params.ht40_channel =  BIT(IEEE80211_BAND_2GHZ) |
+                                       BIT(IEEE80211_BAND_5GHZ);
+       priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
+
+       priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+       if (priv->cfg->rx_with_siso_diversity)
+               priv->hw_params.rx_chains_num = 1;
+       else
+               priv->hw_params.rx_chains_num =
+                       num_of_ant(priv->cfg->valid_rx_ant);
+       priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
+       priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+
+       iwl2000_set_ct_threshold(priv);
+
+       /* Set initial sensitivity parameters */
+       /* Set initial calibration set */
+       priv->hw_params.sens = &iwl2000_sensitivity;
+       priv->hw_params.calib_init_cfg =
+               BIT(IWL_CALIB_XTAL)             |
+               BIT(IWL_CALIB_LO)               |
+               BIT(IWL_CALIB_TX_IQ)            |
+               BIT(IWL_CALIB_BASE_BAND);
+       if (priv->cfg->need_dc_calib)
+               priv->hw_params.calib_rt_cfg |= BIT(IWL_CALIB_CFG_DC_IDX);
+       if (priv->cfg->need_temp_offset_calib)
+               priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
+
+       priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+
+       return 0;
+}
+
+static int iwl2030_hw_channel_switch(struct iwl_priv *priv,
+                                    struct ieee80211_channel_switch *ch_switch)
+{
+       /*
+        * MULTI-FIXME
+        * See iwl_mac_channel_switch.
+        */
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       struct iwl6000_channel_switch_cmd cmd;
+       const struct iwl_channel_info *ch_info;
+       u32 switch_time_in_usec, ucode_switch_time;
+       u16 ch;
+       u32 tsf_low;
+       u8 switch_count;
+       u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
+       struct ieee80211_vif *vif = ctx->vif;
+       struct iwl_host_cmd hcmd = {
+               .id = REPLY_CHANNEL_SWITCH,
+               .len = sizeof(cmd),
+               .flags = CMD_SYNC,
+               .data = &cmd,
+       };
+
+       cmd.band = priv->band == IEEE80211_BAND_2GHZ;
+       ch = ch_switch->channel->hw_value;
+       IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
+               ctx->active.channel, ch);
+       cmd.channel = cpu_to_le16(ch);
+       cmd.rxon_flags = ctx->staging.flags;
+       cmd.rxon_filter_flags = ctx->staging.filter_flags;
+       switch_count = ch_switch->count;
+       tsf_low = ch_switch->timestamp & 0x0ffffffff;
+       /*
+        * calculate the ucode channel switch time
+        * adding TSF as one of the factor for when to switch
+        */
+       if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
+               if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
+                   beacon_interval)) {
+                       switch_count -= (priv->ucode_beacon_time -
+                               tsf_low) / beacon_interval;
+               } else {
+                       switch_count = 0;
+               }
+       }
+       if (switch_count <= 1) {
+               cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+       } else {
+               switch_time_in_usec =
+                       vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+               ucode_switch_time = iwl_usecs_to_beacons(priv,
+                                               switch_time_in_usec,
+                                               beacon_interval);
+               cmd.switch_time = iwl_add_beacon_time(priv,
+                                               priv->ucode_beacon_time,
+                                               ucode_switch_time,
+                                               beacon_interval);
+       }
+       IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
+                     cmd.switch_time);
+       ch_info = iwl_get_channel_info(priv, priv->band, ch);
+       if (ch_info) {
+               cmd.expect_beacon = is_channel_radar(ch_info);
+       } else {
+               IWL_ERR(priv, "invalid channel switch from %u to %u\n",
+                       ctx->active.channel, ch);
+               return -EFAULT;
+       }
+       priv->switch_rxon.channel = cmd.channel;
+       priv->switch_rxon.switch_in_progress = true;
+
+       return iwl_send_cmd_sync(priv, &hcmd);
+}
+
+static struct iwl_lib_ops iwl2000_lib = {
+       .set_hw_params = iwl2000_hw_set_hw_params,
+       .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
+       .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
+       .txq_set_sched = iwlagn_txq_set_sched,
+       .txq_agg_enable = iwlagn_txq_agg_enable,
+       .txq_agg_disable = iwlagn_txq_agg_disable,
+       .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
+       .txq_free_tfd = iwl_hw_txq_free_tfd,
+       .txq_init = iwl_hw_tx_queue_init,
+       .rx_handler_setup = iwlagn_rx_handler_setup,
+       .setup_deferred_work = iwlagn_bt_setup_deferred_work,
+       .cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
+       .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
+       .load_ucode = iwlagn_load_ucode,
+       .dump_nic_event_log = iwl_dump_nic_event_log,
+       .dump_nic_error_log = iwl_dump_nic_error_log,
+       .dump_csr = iwl_dump_csr,
+       .dump_fh = iwl_dump_fh,
+       .init_alive_start = iwlagn_init_alive_start,
+       .alive_notify = iwlagn_alive_notify,
+       .send_tx_power = iwlagn_send_tx_power,
+       .update_chain_flags = iwl_update_chain_flags,
+       .set_channel_switch = iwl2030_hw_channel_switch,
+       .apm_ops = {
+               .init = iwl_apm_init,
+               .config = iwl2000_nic_config,
+       },
+       .eeprom_ops = {
+               .regulatory_bands = {
+                       EEPROM_REG_BAND_1_CHANNELS,
+                       EEPROM_REG_BAND_2_CHANNELS,
+                       EEPROM_REG_BAND_3_CHANNELS,
+                       EEPROM_REG_BAND_4_CHANNELS,
+                       EEPROM_REG_BAND_5_CHANNELS,
+                       EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
+                       EEPROM_REG_BAND_52_HT40_CHANNELS
+               },
+               .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
+               .release_semaphore = iwlcore_eeprom_release_semaphore,
+               .calib_version  = iwlagn_eeprom_calib_version,
+               .query_addr = iwlagn_eeprom_query_addr,
+               .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
+       },
+       .isr_ops = {
+               .isr = iwl_isr_ict,
+               .free = iwl_free_isr_ict,
+               .alloc = iwl_alloc_isr_ict,
+               .reset = iwl_reset_ict,
+               .disable = iwl_disable_ict,
+       },
+       .temp_ops = {
+               .temperature = iwlagn_temperature,
+       },
+       .debugfs_ops = {
+               .rx_stats_read = iwl_ucode_rx_stats_read,
+               .tx_stats_read = iwl_ucode_tx_stats_read,
+               .general_stats_read = iwl_ucode_general_stats_read,
+               .bt_stats_read = iwl_ucode_bt_stats_read,
+               .reply_tx_error = iwl_reply_tx_error_read,
+       },
+       .txfifo_flush = iwlagn_txfifo_flush,
+       .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
+       .tt_ops = {
+               .lower_power_detection = iwl_tt_is_low_power_state,
+               .tt_power_mode = iwl_tt_current_power_mode,
+               .ct_kill_check = iwl_check_for_ct_kill,
+       }
+};
+
+static const struct iwl_ops iwl2000_ops = {
+       .lib = &iwl2000_lib,
+       .hcmd = &iwlagn_hcmd,
+       .utils = &iwlagn_hcmd_utils,
+       .led = &iwlagn_led_ops,
+       .ieee80211_ops = &iwlagn_hw_ops,
+};
+
+static const struct iwl_ops iwl2030_ops = {
+       .lib = &iwl2000_lib,
+       .hcmd = &iwlagn_bt_hcmd,
+       .utils = &iwlagn_hcmd_utils,
+       .led = &iwlagn_led_ops,
+       .ieee80211_ops = &iwlagn_hw_ops,
+};
+
+static const struct iwl_ops iwl200_ops = {
+       .lib = &iwl2000_lib,
+       .hcmd = &iwlagn_hcmd,
+       .utils = &iwlagn_hcmd_utils,
+       .led = &iwlagn_led_ops,
+       .ieee80211_ops = &iwlagn_hw_ops,
+};
+
+static const struct iwl_ops iwl230_ops = {
+       .lib = &iwl2000_lib,
+       .hcmd = &iwlagn_bt_hcmd,
+       .utils = &iwlagn_hcmd_utils,
+       .led = &iwlagn_led_ops,
+       .ieee80211_ops = &iwlagn_hw_ops,
+};
+
+static struct iwl_base_params iwl2000_base_params = {
+       .eeprom_size = OTP_LOW_IMAGE_SIZE,
+       .num_of_queues = IWLAGN_NUM_QUEUES,
+       .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+       .pll_cfg_val = 0,
+       .set_l0s = true,
+       .use_bsm = false,
+       .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
+       .shadow_ram_support = true,
+       .led_compensation = 51,
+       .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+       .adv_thermal_throttle = true,
+       .support_ct_kill_exit = true,
+       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+       .chain_noise_scale = 1000,
+       .wd_timeout = IWL_DEF_WD_TIMEOUT,
+       .max_event_log_size = 512,
+       .ucode_tracing = true,
+       .sensitivity_calib_by_driver = true,
+       .chain_noise_calib_by_driver = true,
+       .shadow_reg_enable = true,
+};
+
+
+static struct iwl_base_params iwl2030_base_params = {
+       .eeprom_size = OTP_LOW_IMAGE_SIZE,
+       .num_of_queues = IWLAGN_NUM_QUEUES,
+       .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+       .pll_cfg_val = 0,
+       .set_l0s = true,
+       .use_bsm = false,
+       .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
+       .shadow_ram_support = true,
+       .led_compensation = 57,
+       .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+       .adv_thermal_throttle = true,
+       .support_ct_kill_exit = true,
+       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+       .chain_noise_scale = 1000,
+       .wd_timeout = IWL_LONG_WD_TIMEOUT,
+       .max_event_log_size = 512,
+       .ucode_tracing = true,
+       .sensitivity_calib_by_driver = true,
+       .chain_noise_calib_by_driver = true,
+       .shadow_reg_enable = true,
+};
+
+static struct iwl_ht_params iwl2000_ht_params = {
+       .ht_greenfield_support = true,
+       .use_rts_for_aggregation = true, /* use rts/cts protection */
+};
+
+static struct iwl_bt_params iwl2030_bt_params = {
+       .bt_statistics = true,
+       /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
+       .advanced_bt_coexist = true,
+       .agg_time_limit = BT_AGG_THRESHOLD_DEF,
+       .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
+       .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
+       .bt_sco_disable = true,
+       .bt_session_2 = true,
+};
+
+#define IWL_DEVICE_2000                                                \
+       .fw_name_pre = IWL2000_FW_PRE,                          \
+       .ucode_api_max = IWL2000_UCODE_API_MAX,                 \
+       .ucode_api_min = IWL2000_UCODE_API_MIN,                 \
+       .eeprom_ver = EEPROM_2000_EEPROM_VERSION,               \
+       .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,       \
+       .ops = &iwl2000_ops,                                    \
+       .mod_params = &iwlagn_mod_params,                       \
+       .base_params = &iwl2000_base_params,                    \
+       .need_dc_calib = true,                                  \
+       .need_temp_offset_calib = true,                         \
+       .led_mode = IWL_LED_RF_STATE,                           \
+       .iq_invert = true                                       \
+
+struct iwl_cfg iwl2000_2bgn_cfg = {
+       .name = "2000 Series 2x2 BGN",
+       IWL_DEVICE_2000,
+       .ht_params = &iwl2000_ht_params,
+};
+
+struct iwl_cfg iwl2000_2bg_cfg = {
+       .name = "2000 Series 2x2 BG",
+       IWL_DEVICE_2000,
+};
+
+#define IWL_DEVICE_2030                                                \
+       .fw_name_pre = IWL2030_FW_PRE,                          \
+       .ucode_api_max = IWL2030_UCODE_API_MAX,                 \
+       .ucode_api_min = IWL2030_UCODE_API_MIN,                 \
+       .eeprom_ver = EEPROM_2000_EEPROM_VERSION,               \
+       .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,       \
+       .ops = &iwl2030_ops,                                    \
+       .mod_params = &iwlagn_mod_params,                       \
+       .base_params = &iwl2030_base_params,                    \
+       .bt_params = &iwl2030_bt_params,                        \
+       .need_dc_calib = true,                                  \
+       .need_temp_offset_calib = true,                         \
+       .led_mode = IWL_LED_RF_STATE,                           \
+       .adv_pm = true,                                         \
+       .iq_invert = true                                       \
+
+struct iwl_cfg iwl2030_2bgn_cfg = {
+       .name = "2000 Series 2x2 BGN/BT",
+       IWL_DEVICE_2030,
+       .ht_params = &iwl2000_ht_params,
+};
+
+struct iwl_cfg iwl2030_2bg_cfg = {
+       .name = "2000 Series 2x2 BG/BT",
+       IWL_DEVICE_2030,
+};
+
+#define IWL_DEVICE_6035                                                \
+       .fw_name_pre = IWL2030_FW_PRE,                          \
+       .ucode_api_max = IWL2030_UCODE_API_MAX,                 \
+       .ucode_api_min = IWL2030_UCODE_API_MIN,                 \
+       .eeprom_ver = EEPROM_6035_EEPROM_VERSION,               \
+       .eeprom_calib_ver = EEPROM_6035_TX_POWER_VERSION,       \
+       .ops = &iwl2030_ops,                                    \
+       .mod_params = &iwlagn_mod_params,                       \
+       .base_params = &iwl2030_base_params,                    \
+       .bt_params = &iwl2030_bt_params,                        \
+       .need_dc_calib = true,                                  \
+       .need_temp_offset_calib = true,                         \
+       .led_mode = IWL_LED_RF_STATE,                           \
+       .adv_pm = true                                          \
+
+struct iwl_cfg iwl6035_2agn_cfg = {
+       .name = "2000 Series 2x2 AGN/BT",
+       IWL_DEVICE_6035,
+       .ht_params = &iwl2000_ht_params,
+};
+
+struct iwl_cfg iwl6035_2abg_cfg = {
+       .name = "2000 Series 2x2 ABG/BT",
+       IWL_DEVICE_6035,
+};
+
+struct iwl_cfg iwl6035_2bg_cfg = {
+       .name = "2000 Series 2x2 BG/BT",
+       IWL_DEVICE_6035,
+};
+
+#define IWL_DEVICE_200                                         \
+       .fw_name_pre = IWL200_FW_PRE,                           \
+       .ucode_api_max = IWL200_UCODE_API_MAX,                  \
+       .ucode_api_min = IWL200_UCODE_API_MIN,                  \
+       .eeprom_ver = EEPROM_2000_EEPROM_VERSION,               \
+       .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,       \
+       .ops = &iwl200_ops,                                     \
+       .mod_params = &iwlagn_mod_params,                       \
+       .base_params = &iwl2000_base_params,                    \
+       .need_dc_calib = true,                                  \
+       .need_temp_offset_calib = true,                         \
+       .led_mode = IWL_LED_RF_STATE,                           \
+       .adv_pm = true,                                         \
+       .rx_with_siso_diversity = true                          \
+
+struct iwl_cfg iwl200_bg_cfg = {
+       .name = "200 Series 1x1 BG",
+       IWL_DEVICE_200,
+};
+
+struct iwl_cfg iwl200_bgn_cfg = {
+       .name = "200 Series 1x1 BGN",
+       IWL_DEVICE_200,
+       .ht_params = &iwl2000_ht_params,
+};
+
+#define IWL_DEVICE_230                                         \
+       .fw_name_pre = IWL200_FW_PRE,                           \
+       .ucode_api_max = IWL200_UCODE_API_MAX,                  \
+       .ucode_api_min = IWL200_UCODE_API_MIN,                  \
+       .eeprom_ver = EEPROM_2000_EEPROM_VERSION,               \
+       .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,       \
+       .ops = &iwl230_ops,                                     \
+       .mod_params = &iwlagn_mod_params,                       \
+       .base_params = &iwl2030_base_params,                    \
+       .bt_params = &iwl2030_bt_params,                        \
+       .need_dc_calib = true,                                  \
+       .need_temp_offset_calib = true,                         \
+       .led_mode = IWL_LED_RF_STATE,                           \
+       .adv_pm = true,                                         \
+       .rx_with_siso_diversity = true                          \
+
+struct iwl_cfg iwl230_bg_cfg = {
+       .name = "200 Series 1x1 BG/BT",
+       IWL_DEVICE_230,
+};
+
+struct iwl_cfg iwl230_bgn_cfg = {
+       .name = "200 Series 1x1 BGN/BT",
+       IWL_DEVICE_230,
+       .ht_params = &iwl2000_ht_params,
+};
+
+MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL200_MODULE_FIRMWARE(IWL200_UCODE_API_MAX));
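
The new 2000-series configuration code above leans on a single macro of designated initializers (IWL_DEVICE_2000 and friends) so that each struct iwl_cfg only spells out what differs per SKU. The standalone C sketch below shows the same pattern with a simplified stand-in struct; the struct layout, field names, and values are illustrative only, not the kernel's real struct iwl_cfg.

/* Standalone sketch (not the kernel's struct iwl_cfg) of how a shared
 * macro of designated initializers keeps per-device configs small. */
#include <stdbool.h>
#include <stdio.h>

struct demo_cfg {
	const char *name;
	const char *fw_name_pre;
	int ucode_api_max;
	bool need_dc_calib;
	bool has_ht;		/* stand-in for .ht_params being set */
};

/* Common fields for one hardware family, expanded into each config below. */
#define DEMO_DEVICE_2000			\
	.fw_name_pre = "iwlwifi-2000-",		\
	.ucode_api_max = 5,			\
	.need_dc_calib = true

static const struct demo_cfg demo_2bgn_cfg = {
	.name = "2000 Series 2x2 BGN (demo)",
	DEMO_DEVICE_2000,
	.has_ht = true,
};

static const struct demo_cfg demo_2bg_cfg = {
	.name = "2000 Series 2x2 BG (demo)",
	DEMO_DEVICE_2000,
};

int main(void)
{
	printf("%s: fw prefix %s, HT %s\n", demo_2bgn_cfg.name,
	       demo_2bgn_cfg.fw_name_pre, demo_2bgn_cfg.has_ht ? "yes" : "no");
	printf("%s: fw prefix %s, HT %s\n", demo_2bg_cfg.name,
	       demo_2bg_cfg.fw_name_pre, demo_2bg_cfg.has_ht ? "yes" : "no");
	return 0;
}
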
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
deleted file mode 100644
index ef0835b..0000000
+++ /dev/null
@@ -1,522 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include "iwl-3945-debugfs.h"
-
-
-static int iwl3945_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
-{
-       int p = 0;
-
-       p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
-                      le32_to_cpu(priv->_3945.statistics.flag));
-       if (le32_to_cpu(priv->_3945.statistics.flag) &
-                       UCODE_STATISTICS_CLEAR_MSK)
-               p += scnprintf(buf + p, bufsz - p,
-                              "\tStatistics have been cleared\n");
-       p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
-                      (le32_to_cpu(priv->_3945.statistics.flag) &
-                       UCODE_STATISTICS_FREQUENCY_MSK)
-                       ? "2.4 GHz" : "5.2 GHz");
-       p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
-                      (le32_to_cpu(priv->_3945.statistics.flag) &
-                       UCODE_STATISTICS_NARROW_BAND_MSK)
-                       ? "enabled" : "disabled");
-       return p;
-}
-
-ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
-                                   char __user *user_buf,
-                                   size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       int pos = 0;
-       char *buf;
-       int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
-                   sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
-       ssize_t ret;
-       struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
-       struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
-       struct iwl39_statistics_rx_non_phy *general, *accum_general;
-       struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
-
-       if (!iwl_is_alive(priv))
-               return -EAGAIN;
-
-       buf = kzalloc(bufsz, GFP_KERNEL);
-       if (!buf) {
-               IWL_ERR(priv, "Can not allocate Buffer\n");
-               return -ENOMEM;
-       }
-
-       /*
-        * The statistic information display here is based on
-        * the last statistics notification from uCode
-        * might not reflect the current uCode activity
-        */
-       ofdm = &priv->_3945.statistics.rx.ofdm;
-       cck = &priv->_3945.statistics.rx.cck;
-       general = &priv->_3945.statistics.rx.general;
-       accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm;
-       accum_cck = &priv->_3945.accum_statistics.rx.cck;
-       accum_general = &priv->_3945.accum_statistics.rx.general;
-       delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm;
-       delta_cck = &priv->_3945.delta_statistics.rx.cck;
-       delta_general = &priv->_3945.delta_statistics.rx.general;
-       max_ofdm = &priv->_3945.max_delta.rx.ofdm;
-       max_cck = &priv->_3945.max_delta.rx.cck;
-       max_general = &priv->_3945.max_delta.rx.general;
-
-       pos += iwl3945_statistics_flag(priv, buf, bufsz);
-       pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
-                        "acumulative       delta         max\n",
-                        "Statistics_Rx - OFDM:");
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
-                        accum_ofdm->ina_cnt,
-                        delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "fina_cnt:",
-                        le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
-                        delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n", "plcp_err:",
-                        le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
-                        delta_ofdm->plcp_err, max_ofdm->plcp_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",  "crc32_err:",
-                        le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
-                        delta_ofdm->crc32_err, max_ofdm->crc32_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n", "overrun_err:",
-                        le32_to_cpu(ofdm->overrun_err),
-                        accum_ofdm->overrun_err, delta_ofdm->overrun_err,
-                        max_ofdm->overrun_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "early_overrun_err:",
-                        le32_to_cpu(ofdm->early_overrun_err),
-                        accum_ofdm->early_overrun_err,
-                        delta_ofdm->early_overrun_err,
-                        max_ofdm->early_overrun_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "crc32_good:", le32_to_cpu(ofdm->crc32_good),
-                        accum_ofdm->crc32_good, delta_ofdm->crc32_good,
-                        max_ofdm->crc32_good);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n", "false_alarm_cnt:",
-                        le32_to_cpu(ofdm->false_alarm_cnt),
-                        accum_ofdm->false_alarm_cnt,
-                        delta_ofdm->false_alarm_cnt,
-                        max_ofdm->false_alarm_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "fina_sync_err_cnt:",
-                        le32_to_cpu(ofdm->fina_sync_err_cnt),
-                        accum_ofdm->fina_sync_err_cnt,
-                        delta_ofdm->fina_sync_err_cnt,
-                        max_ofdm->fina_sync_err_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "sfd_timeout:",
-                        le32_to_cpu(ofdm->sfd_timeout),
-                        accum_ofdm->sfd_timeout,
-                        delta_ofdm->sfd_timeout,
-                        max_ofdm->sfd_timeout);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "fina_timeout:",
-                        le32_to_cpu(ofdm->fina_timeout),
-                        accum_ofdm->fina_timeout,
-                        delta_ofdm->fina_timeout,
-                        max_ofdm->fina_timeout);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "unresponded_rts:",
-                        le32_to_cpu(ofdm->unresponded_rts),
-                        accum_ofdm->unresponded_rts,
-                        delta_ofdm->unresponded_rts,
-                        max_ofdm->unresponded_rts);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "rxe_frame_lmt_ovrun:",
-                        le32_to_cpu(ofdm->rxe_frame_limit_overrun),
-                        accum_ofdm->rxe_frame_limit_overrun,
-                        delta_ofdm->rxe_frame_limit_overrun,
-                        max_ofdm->rxe_frame_limit_overrun);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "sent_ack_cnt:",
-                        le32_to_cpu(ofdm->sent_ack_cnt),
-                        accum_ofdm->sent_ack_cnt,
-                        delta_ofdm->sent_ack_cnt,
-                        max_ofdm->sent_ack_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "sent_cts_cnt:",
-                        le32_to_cpu(ofdm->sent_cts_cnt),
-                        accum_ofdm->sent_cts_cnt,
-                        delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
-
-       pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
-                        "acumulative       delta         max\n",
-                        "Statistics_Rx - CCK:");
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "ina_cnt:",
-                        le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
-                        delta_cck->ina_cnt, max_cck->ina_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "fina_cnt:",
-                        le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
-                        delta_cck->fina_cnt, max_cck->fina_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "plcp_err:",
-                        le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
-                        delta_cck->plcp_err, max_cck->plcp_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "crc32_err:",
-                        le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
-                        delta_cck->crc32_err, max_cck->crc32_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "overrun_err:",
-                        le32_to_cpu(cck->overrun_err),
-                        accum_cck->overrun_err,
-                        delta_cck->overrun_err, max_cck->overrun_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "early_overrun_err:",
-                        le32_to_cpu(cck->early_overrun_err),
-                        accum_cck->early_overrun_err,
-                        delta_cck->early_overrun_err,
-                        max_cck->early_overrun_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "crc32_good:",
-                        le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
-                        delta_cck->crc32_good,
-                        max_cck->crc32_good);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "false_alarm_cnt:",
-                        le32_to_cpu(cck->false_alarm_cnt),
-                        accum_cck->false_alarm_cnt,
-                        delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "fina_sync_err_cnt:",
-                        le32_to_cpu(cck->fina_sync_err_cnt),
-                        accum_cck->fina_sync_err_cnt,
-                        delta_cck->fina_sync_err_cnt,
-                        max_cck->fina_sync_err_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "sfd_timeout:",
-                        le32_to_cpu(cck->sfd_timeout),
-                        accum_cck->sfd_timeout,
-                        delta_cck->sfd_timeout, max_cck->sfd_timeout);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "fina_timeout:",
-                        le32_to_cpu(cck->fina_timeout),
-                        accum_cck->fina_timeout,
-                        delta_cck->fina_timeout, max_cck->fina_timeout);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "unresponded_rts:",
-                        le32_to_cpu(cck->unresponded_rts),
-                        accum_cck->unresponded_rts,
-                        delta_cck->unresponded_rts,
-                        max_cck->unresponded_rts);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "rxe_frame_lmt_ovrun:",
-                        le32_to_cpu(cck->rxe_frame_limit_overrun),
-                        accum_cck->rxe_frame_limit_overrun,
-                        delta_cck->rxe_frame_limit_overrun,
-                        max_cck->rxe_frame_limit_overrun);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "sent_ack_cnt:",
-                        le32_to_cpu(cck->sent_ack_cnt),
-                        accum_cck->sent_ack_cnt,
-                        delta_cck->sent_ack_cnt,
-                        max_cck->sent_ack_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "sent_cts_cnt:",
-                        le32_to_cpu(cck->sent_cts_cnt),
-                        accum_cck->sent_cts_cnt,
-                        delta_cck->sent_cts_cnt,
-                        max_cck->sent_cts_cnt);
-
-       pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
-                        "acumulative       delta         max\n",
-                        "Statistics_Rx - GENERAL:");
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "bogus_cts:",
-                        le32_to_cpu(general->bogus_cts),
-                        accum_general->bogus_cts,
-                        delta_general->bogus_cts, max_general->bogus_cts);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "bogus_ack:",
-                        le32_to_cpu(general->bogus_ack),
-                        accum_general->bogus_ack,
-                        delta_general->bogus_ack, max_general->bogus_ack);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "non_bssid_frames:",
-                        le32_to_cpu(general->non_bssid_frames),
-                        accum_general->non_bssid_frames,
-                        delta_general->non_bssid_frames,
-                        max_general->non_bssid_frames);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "filtered_frames:",
-                        le32_to_cpu(general->filtered_frames),
-                        accum_general->filtered_frames,
-                        delta_general->filtered_frames,
-                        max_general->filtered_frames);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "non_channel_beacons:",
-                        le32_to_cpu(general->non_channel_beacons),
-                        accum_general->non_channel_beacons,
-                        delta_general->non_channel_beacons,
-                        max_general->non_channel_beacons);
-
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
-
-ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
-                                   char __user *user_buf,
-                                   size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       int pos = 0;
-       char *buf;
-       int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250;
-       ssize_t ret;
-       struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
-
-       if (!iwl_is_alive(priv))
-               return -EAGAIN;
-
-       buf = kzalloc(bufsz, GFP_KERNEL);
-       if (!buf) {
-               IWL_ERR(priv, "Can not allocate Buffer\n");
-               return -ENOMEM;
-       }
-
-       /*
-        * The statistic information display here is based on
-        * the last statistics notification from uCode
-        * might not reflect the current uCode activity
-        */
-       tx = &priv->_3945.statistics.tx;
-       accum_tx = &priv->_3945.accum_statistics.tx;
-       delta_tx = &priv->_3945.delta_statistics.tx;
-       max_tx = &priv->_3945.max_delta.tx;
-       pos += iwl3945_statistics_flag(priv, buf, bufsz);
-       pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
-                        "acumulative       delta         max\n",
-                        "Statistics_Tx:");
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "preamble:",
-                        le32_to_cpu(tx->preamble_cnt),
-                        accum_tx->preamble_cnt,
-                        delta_tx->preamble_cnt, max_tx->preamble_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "rx_detected_cnt:",
-                        le32_to_cpu(tx->rx_detected_cnt),
-                        accum_tx->rx_detected_cnt,
-                        delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "bt_prio_defer_cnt:",
-                        le32_to_cpu(tx->bt_prio_defer_cnt),
-                        accum_tx->bt_prio_defer_cnt,
-                        delta_tx->bt_prio_defer_cnt,
-                        max_tx->bt_prio_defer_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "bt_prio_kill_cnt:",
-                        le32_to_cpu(tx->bt_prio_kill_cnt),
-                        accum_tx->bt_prio_kill_cnt,
-                        delta_tx->bt_prio_kill_cnt,
-                        max_tx->bt_prio_kill_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "few_bytes_cnt:",
-                        le32_to_cpu(tx->few_bytes_cnt),
-                        accum_tx->few_bytes_cnt,
-                        delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "cts_timeout:",
-                        le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
-                        delta_tx->cts_timeout, max_tx->cts_timeout);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "ack_timeout:",
-                        le32_to_cpu(tx->ack_timeout),
-                        accum_tx->ack_timeout,
-                        delta_tx->ack_timeout, max_tx->ack_timeout);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "expected_ack_cnt:",
-                        le32_to_cpu(tx->expected_ack_cnt),
-                        accum_tx->expected_ack_cnt,
-                        delta_tx->expected_ack_cnt,
-                        max_tx->expected_ack_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "actual_ack_cnt:",
-                        le32_to_cpu(tx->actual_ack_cnt),
-                        accum_tx->actual_ack_cnt,
-                        delta_tx->actual_ack_cnt,
-                        max_tx->actual_ack_cnt);
-
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
-
-ssize_t iwl3945_ucode_general_stats_read(struct file *file,
-                                        char __user *user_buf,
-                                        size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       int pos = 0;
-       char *buf;
-       int bufsz = sizeof(struct iwl39_statistics_general) * 10 + 300;
-       ssize_t ret;
-       struct iwl39_statistics_general *general, *accum_general;
-       struct iwl39_statistics_general *delta_general, *max_general;
-       struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
-       struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
-
-       if (!iwl_is_alive(priv))
-               return -EAGAIN;
-
-       buf = kzalloc(bufsz, GFP_KERNEL);
-       if (!buf) {
-               IWL_ERR(priv, "Can not allocate Buffer\n");
-               return -ENOMEM;
-       }
-
-       /*
-        * The statistic information display here is based on
-        * the last statistics notification from uCode
-        * might not reflect the current uCode activity
-        */
-       general = &priv->_3945.statistics.general;
-       dbg = &priv->_3945.statistics.general.dbg;
-       div = &priv->_3945.statistics.general.div;
-       accum_general = &priv->_3945.accum_statistics.general;
-       delta_general = &priv->_3945.delta_statistics.general;
-       max_general = &priv->_3945.max_delta.general;
-       accum_dbg = &priv->_3945.accum_statistics.general.dbg;
-       delta_dbg = &priv->_3945.delta_statistics.general.dbg;
-       max_dbg = &priv->_3945.max_delta.general.dbg;
-       accum_div = &priv->_3945.accum_statistics.general.div;
-       delta_div = &priv->_3945.delta_statistics.general.div;
-       max_div = &priv->_3945.max_delta.general.div;
-       pos += iwl3945_statistics_flag(priv, buf, bufsz);
-       pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
-                        "acumulative       delta         max\n",
-                        "Statistics_General:");
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "burst_check:",
-                        le32_to_cpu(dbg->burst_check),
-                        accum_dbg->burst_check,
-                        delta_dbg->burst_check, max_dbg->burst_check);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "burst_count:",
-                        le32_to_cpu(dbg->burst_count),
-                        accum_dbg->burst_count,
-                        delta_dbg->burst_count, max_dbg->burst_count);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "sleep_time:",
-                        le32_to_cpu(general->sleep_time),
-                        accum_general->sleep_time,
-                        delta_general->sleep_time, max_general->sleep_time);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "slots_out:",
-                        le32_to_cpu(general->slots_out),
-                        accum_general->slots_out,
-                        delta_general->slots_out, max_general->slots_out);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "slots_idle:",
-                        le32_to_cpu(general->slots_idle),
-                        accum_general->slots_idle,
-                        delta_general->slots_idle, max_general->slots_idle);
-       pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
-                        le32_to_cpu(general->ttl_timestamp));
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "tx_on_a:",
-                        le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
-                        delta_div->tx_on_a, max_div->tx_on_a);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "tx_on_b:",
-                        le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
-                        delta_div->tx_on_b, max_div->tx_on_b);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "exec_time:",
-                        le32_to_cpu(div->exec_time), accum_div->exec_time,
-                        delta_div->exec_time, max_div->exec_time);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "probe_time:",
-                        le32_to_cpu(div->probe_time), accum_div->probe_time,
-                        delta_div->probe_time, max_div->probe_time);
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
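
The deleted 3945 debugfs code above is built around one pattern: allocate a buffer sized generously from the statistics structs, append one formatted row per counter (current / accumulated / delta / max), and copy the filled portion out to userspace. The userspace sketch below reproduces that buffer-building loop with made-up counters; snprintf stands in for the kernel's scnprintf, which additionally guarantees the running position never passes the buffer size.

/* Userspace sketch of the buffer-building pattern in the removed debugfs
 * handlers. The counter values and struct are invented; with a 4 KiB
 * buffer the snprintf return values stay well inside the buffer. */
#include <stdio.h>
#include <stdlib.h>

struct demo_rx_stats {
	unsigned int ina_cnt;
	unsigned int plcp_err;
	unsigned int crc32_err;
};

static int format_stats(char *buf, int bufsz,
			const struct demo_rx_stats *cur,
			const struct demo_rx_stats *accum,
			const struct demo_rx_stats *delta,
			const struct demo_rx_stats *max)
{
	int pos = 0;

	pos += snprintf(buf + pos, bufsz - pos,
			"%-32s current accumulative delta max\n",
			"Statistics_Rx - OFDM:");
	pos += snprintf(buf + pos, bufsz - pos,
			"  %-20s %10u %10u %10u %10u\n", "ina_cnt:",
			cur->ina_cnt, accum->ina_cnt, delta->ina_cnt,
			max->ina_cnt);
	pos += snprintf(buf + pos, bufsz - pos,
			"  %-20s %10u %10u %10u %10u\n", "plcp_err:",
			cur->plcp_err, accum->plcp_err, delta->plcp_err,
			max->plcp_err);
	return pos;
}

int main(void)
{
	struct demo_rx_stats cur = { 10, 2, 1 }, accum = { 100, 7, 3 };
	struct demo_rx_stats delta = { 10, 2, 1 }, max = { 40, 4, 2 };
	int bufsz = 4096;
	char *buf = calloc(1, bufsz);

	if (!buf)
		return 1;
	fwrite(buf, 1, format_stats(buf, bufsz, &cur, &accum, &delta, &max),
	       stdout);
	free(buf);
	return 0;
}
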
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h
deleted file mode 100644
index 70809c5..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
-                                   size_t count, loff_t *ppos);
-ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
-                                   size_t count, loff_t *ppos);
-ssize_t iwl3945_ucode_general_stats_read(struct file *file,
-                                        char __user *user_buf, size_t count,
-                                        loff_t *ppos);
-#else
-static ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
-                                          char __user *user_buf, size_t count,
-                                          loff_t *ppos)
-{
-       return 0;
-}
-static ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
-                                          char __user *user_buf, size_t count,
-                                          loff_t *ppos)
-{
-       return 0;
-}
-static ssize_t iwl3945_ucode_general_stats_read(struct file *file,
-                                               char __user *user_buf,
-                                               size_t count, loff_t *ppos)
-{
-       return 0;
-}
-#endif
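
The deleted header above keeps callers oblivious to whether CONFIG_IWLWIFI_DEBUGFS is set by supplying static inline stubs that return 0 when the option is off. Below is a minimal standalone sketch of that compile-out pattern, with a hypothetical DEMO_DEBUGFS define standing in for the real Kconfig symbol.

/* Sketch of the compile-out pattern: when the feature define is absent,
 * callers still link against a stub that reports "nothing to read". */
#include <stddef.h>
#include <stdio.h>

#ifdef DEMO_DEBUGFS
static long demo_rx_stats_read(char *buf, size_t count)
{
	return snprintf(buf, count, "rx stats would go here\n");
}
#else
static long demo_rx_stats_read(char *buf, size_t count)
{
	(void)buf;
	(void)count;
	return 0;	/* feature compiled out: read returns no data */
}
#endif

int main(void)
{
	char buf[64];
	long n = demo_rx_stats_read(buf, sizeof(buf));

	printf("read %ld bytes\n", n);
	if (n > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	return 0;
}
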
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
deleted file mode 100644
index 2c9ed2b..0000000
+++ /dev/null
@@ -1,188 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __iwl_3945_fh_h__
-#define __iwl_3945_fh_h__
-
-/************************************/
-/* iwl3945 Flow Handler Definitions */
-/************************************/
-
-/**
- * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
- * Addresses are offsets from device's PCI hardware base address.
- */
-#define FH39_MEM_LOWER_BOUND                   (0x0800)
-#define FH39_MEM_UPPER_BOUND                   (0x1000)
-
-#define FH39_CBCC_TABLE                (FH39_MEM_LOWER_BOUND + 0x140)
-#define FH39_TFDB_TABLE                (FH39_MEM_LOWER_BOUND + 0x180)
-#define FH39_RCSR_TABLE                (FH39_MEM_LOWER_BOUND + 0x400)
-#define FH39_RSSR_TABLE                (FH39_MEM_LOWER_BOUND + 0x4c0)
-#define FH39_TCSR_TABLE                (FH39_MEM_LOWER_BOUND + 0x500)
-#define FH39_TSSR_TABLE                (FH39_MEM_LOWER_BOUND + 0x680)
-
-/* TFDB (Transmit Frame Buffer Descriptor) */
-#define FH39_TFDB(_ch, buf)                    (FH39_TFDB_TABLE + \
-                                                ((_ch) * 2 + (buf)) * 0x28)
-#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch)       (FH39_TFDB_TABLE + 0x50 * (_ch))
-
-/* CBCC channel is [0,2] */
-#define FH39_CBCC(_ch)         (FH39_CBCC_TABLE + (_ch) * 0x8)
-#define FH39_CBCC_CTRL(_ch)    (FH39_CBCC(_ch) + 0x00)
-#define FH39_CBCC_BASE(_ch)    (FH39_CBCC(_ch) + 0x04)
-
-/* RCSR channel is [0,2] */
-#define FH39_RCSR(_ch)                 (FH39_RCSR_TABLE + (_ch) * 0x40)
-#define FH39_RCSR_CONFIG(_ch)          (FH39_RCSR(_ch) + 0x00)
-#define FH39_RCSR_RBD_BASE(_ch)                (FH39_RCSR(_ch) + 0x04)
-#define FH39_RCSR_WPTR(_ch)            (FH39_RCSR(_ch) + 0x20)
-#define FH39_RCSR_RPTR_ADDR(_ch)       (FH39_RCSR(_ch) + 0x24)
-
-#define FH39_RSCSR_CHNL0_WPTR          (FH39_RCSR_WPTR(0))
-
-/* RSSR */
-#define FH39_RSSR_CTRL                 (FH39_RSSR_TABLE + 0x000)
-#define FH39_RSSR_STATUS               (FH39_RSSR_TABLE + 0x004)
-
-/* TCSR */
-#define FH39_TCSR(_ch)                 (FH39_TCSR_TABLE + (_ch) * 0x20)
-#define FH39_TCSR_CONFIG(_ch)          (FH39_TCSR(_ch) + 0x00)
-#define FH39_TCSR_CREDIT(_ch)          (FH39_TCSR(_ch) + 0x04)
-#define FH39_TCSR_BUFF_STTS(_ch)       (FH39_TCSR(_ch) + 0x08)
-
-/* TSSR */
-#define FH39_TSSR_CBB_BASE        (FH39_TSSR_TABLE + 0x000)
-#define FH39_TSSR_MSG_CONFIG      (FH39_TSSR_TABLE + 0x008)
-#define FH39_TSSR_TX_STATUS       (FH39_TSSR_TABLE + 0x010)
-
-
-/* DBM */
-
-#define FH39_SRVC_CHNL                            (6)
-
-#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE     (20)
-#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH      (4)
-
-#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN    (0x08000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE        (0x80000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE           (0x20000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128          (0x01000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST          (0x00001000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH                        (0x00000000)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF               (0x00000000)
-#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER            (0x00000001)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL     (0x00000000)
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL      (0x00000008)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD            (0x00200000)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT             (0x00000000)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE             (0x00000000)
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE            (0x80000000)
-
-#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID           (0x00004000)
-
-#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR            (0x00000001)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON       (0xFF000000)
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON       (0x00FF0000)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B     (0x00000400)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON                (0x00000100)
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON                (0x00000080)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH      (0x00000020)
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH            (0x00000005)
-
-#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch)    (BIT(_ch) << 24)
-#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch)   (BIT(_ch) << 16)
-
-#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
-       (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
-        FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
-
-#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE                    (0x01000000)
-
-struct iwl3945_tfd_tb {
-       __le32 addr;
-       __le32 len;
-} __packed;
-
-struct iwl3945_tfd {
-       __le32 control_flags;
-       struct iwl3945_tfd_tb tbs[4];
-       u8 __pad[28];
-} __packed;
-
-
-#endif /* __iwl_3945_fh_h__ */
-
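
The deleted flow-handler header above describes the hardware as a base address plus per-channel register blocks: a table base, a fixed stride per channel, and named offsets inside each block (FH39_RCSR(_ch) and friends). The sketch below computes offsets the same way with purely illustrative base and stride values; it is not the real 3945 register map.

/* Standalone sketch of the base-plus-stride register map style: each
 * channel owns a fixed-size block, and macros turn a channel number
 * into absolute register offsets. Values are illustrative only. */
#include <stdio.h>

#define DEMO_MEM_LOWER_BOUND	0x0800
#define DEMO_RCSR_TABLE		(DEMO_MEM_LOWER_BOUND + 0x400)

/* Each receive channel gets a 0x40-byte block of control registers. */
#define DEMO_RCSR(_ch)		(DEMO_RCSR_TABLE + (_ch) * 0x40)
#define DEMO_RCSR_CONFIG(_ch)	(DEMO_RCSR(_ch) + 0x00)
#define DEMO_RCSR_WPTR(_ch)	(DEMO_RCSR(_ch) + 0x20)

int main(void)
{
	int ch;

	for (ch = 0; ch < 3; ch++)
		printf("channel %d: CONFIG at 0x%04x, WPTR at 0x%04x\n",
		       ch, DEMO_RCSR_CONFIG(ch), DEMO_RCSR_WPTR(ch));
	return 0;
}
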
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
deleted file mode 100644
index 65b5834..0000000
+++ /dev/null
@@ -1,294 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-3945-hw.h) only for hardware-related definitions.
- * Please use iwl-commands.h for uCode API definitions.
- * Please use iwl-3945.h for driver implementation definitions.
- */
-
-#ifndef __iwl_3945_hw__
-#define __iwl_3945_hw__
-
-#include "iwl-eeprom.h"
-
-/* RSSI to dBm */
-#define IWL39_RSSI_OFFSET      95
-
-#define IWL_DEFAULT_TX_POWER   0x0F
-
-/*
- * EEPROM related constants, enums, and structures.
- */
-#define EEPROM_SKU_CAP_OP_MODE_MRC                      (1 << 7)
-
-/*
- * Mapping of a Tx power level, at factory calibration temperature,
- *   to a radio/DSP gain table index.
- * One for each of 5 "sample" power levels in each band.
- * v_det is measured at the factory, using the 3945's built-in power amplifier
- *   (PA) output voltage detector.  This same detector is used during Tx of
- *   long packets in normal operation to provide feedback as to proper output
- *   level.
- * Data copied from EEPROM.
- * DO NOT ALTER THIS STRUCTURE!!!
- */
-struct iwl3945_eeprom_txpower_sample {
-       u8 gain_index;          /* index into power (gain) setup table ... */
-       s8 power;               /* ... for this pwr level for this chnl group */
-       u16 v_det;              /* PA output voltage */
-} __packed;
-
-/*
- * Mappings of Tx power levels -> nominal radio/DSP gain table indexes.
- * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
- * Tx power setup code interpolates between the 5 "sample" power levels
- *    to determine the nominal setup for a requested power level.
- * Data copied from EEPROM.
- * DO NOT ALTER THIS STRUCTURE!!!
- */
-struct iwl3945_eeprom_txpower_group {
-       struct iwl3945_eeprom_txpower_sample samples[5];  /* 5 power levels */
-       s32 a, b, c, d, e;      /* coefficients for voltage->power
-                                * formula (signed) */
-       s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
-                                * frequency (signed) */
-       s8 saturation_power;    /* highest power possible by h/w in this
-                                * band */
-       u8 group_channel;       /* "representative" channel # in this band */
-       s16 temperature;        /* h/w temperature at factory calib this band
-                                * (signed) */
-} __packed;
-
-/*
- * Temperature-based Tx-power compensation data, not band-specific.
- * These coefficients are use to modify a/b/c/d/e coeffs based on
- *   difference between current temperature and factory calib temperature.
- * Data copied from EEPROM.
- */
-struct iwl3945_eeprom_temperature_corr {
-       u32 Ta;
-       u32 Tb;
-       u32 Tc;
-       u32 Td;
-       u32 Te;
-} __packed;
-
-/*
- * EEPROM map
- */
-struct iwl3945_eeprom {
-       u8 reserved0[16];
-       u16 device_id;  /* abs.ofs: 16 */
-       u8 reserved1[2];
-       u16 pmc;                /* abs.ofs: 20 */
-       u8 reserved2[20];
-       u8 mac_address[6];      /* abs.ofs: 42 */
-       u8 reserved3[58];
-       u16 board_revision;     /* abs.ofs: 106 */
-       u8 reserved4[11];
-       u8 board_pba_number[9]; /* abs.ofs: 119 */
-       u8 reserved5[8];
-       u16 version;            /* abs.ofs: 136 */
-       u8 sku_cap;             /* abs.ofs: 138 */
-       u8 leds_mode;           /* abs.ofs: 139 */
-       u16 oem_mode;
-       u16 wowlan_mode;        /* abs.ofs: 142 */
-       u16 leds_time_interval; /* abs.ofs: 144 */
-       u8 leds_off_time;       /* abs.ofs: 146 */
-       u8 leds_on_time;        /* abs.ofs: 147 */
-       u8 almgor_m_version;    /* abs.ofs: 148 */
-       u8 antenna_switch_type; /* abs.ofs: 149 */
-       u8 reserved6[42];
-       u8 sku_id[4];           /* abs.ofs: 192 */
-
-/*
- * Per-channel regulatory data.
- *
- * Each channel that *might* be supported by 3945 or 4965 has a fixed location
- * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
- * txpower (MSB).
- *
- * Entries immediately below are for 20 MHz channel width.  HT40 (40 MHz)
- * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
- *
- * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
- */
-       u16 band_1_count;       /* abs.ofs: 196 */
-       struct iwl_eeprom_channel band_1_channels[14];  /* abs.ofs: 198 */
-
-/*
- * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
- * 5.0 GHz channels 7, 8, 11, 12, 16
- * (4915-5080MHz) (none of these is ever supported)
- */
-       u16 band_2_count;       /* abs.ofs: 226 */
-       struct iwl_eeprom_channel band_2_channels[13];  /* abs.ofs: 228 */
-
-/*
- * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
- * (5170-5320MHz)
- */
-       u16 band_3_count;       /* abs.ofs: 254 */
-       struct iwl_eeprom_channel band_3_channels[12];  /* abs.ofs: 256 */
-
-/*
- * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
- * (5500-5700MHz)
- */
-       u16 band_4_count;       /* abs.ofs: 280 */
-       struct iwl_eeprom_channel band_4_channels[11];  /* abs.ofs: 282 */
-
-/*
- * 5.7 GHz channels 145, 149, 153, 157, 161, 165
- * (5725-5825MHz)
- */
-       u16 band_5_count;       /* abs.ofs: 304 */
-       struct iwl_eeprom_channel band_5_channels[6];  /* abs.ofs: 306 */
-
-       u8 reserved9[194];
-
-/*
- * 3945 Txpower calibration data.
- */
-#define IWL_NUM_TX_CALIB_GROUPS 5
-       struct iwl3945_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS];
-/* abs.ofs: 512 */
-       struct iwl3945_eeprom_temperature_corr corrections;  /* abs.ofs: 832 */
-       u8 reserved16[172];     /* fill out to full 1024 byte block */
-} __packed;
-
-#define IWL3945_EEPROM_IMG_SIZE 1024
-
-/* End of EEPROM */
-
-#define PCI_CFG_REV_ID_BIT_BASIC_SKU                (0x40)     /* bit 6    */
-#define PCI_CFG_REV_ID_BIT_RTP                      (0x80)     /* bit 7    */
-
-/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
-#define IWL39_NUM_QUEUES        5
-#define IWL39_CMD_QUEUE_NUM    4
-
-#define IWL_DEFAULT_TX_RETRY  15
-
-/*********************************************/
-
-#define RFD_SIZE                              4
-#define NUM_TFD_CHUNKS                        4
-
-#define RX_QUEUE_SIZE                         256
-#define RX_QUEUE_MASK                         255
-#define RX_QUEUE_SIZE_LOG                     8
-
-#define U32_PAD(n)             ((4-(n))&0x3)
-
-#define TFD_CTL_COUNT_SET(n)       (n << 24)
-#define TFD_CTL_COUNT_GET(ctl)     ((ctl >> 24) & 7)
-#define TFD_CTL_PAD_SET(n)         (n << 28)
-#define TFD_CTL_PAD_GET(ctl)       (ctl >> 28)
-
-/* Sizes and addresses for instruction and data memory (SRAM) in
- * 3945's embedded processor.  Driver access is via HBUS_TARG_MEM_* regs. */
-#define IWL39_RTC_INST_LOWER_BOUND             (0x000000)
-#define IWL39_RTC_INST_UPPER_BOUND             (0x014000)
-
-#define IWL39_RTC_DATA_LOWER_BOUND             (0x800000)
-#define IWL39_RTC_DATA_UPPER_BOUND             (0x808000)
-
-#define IWL39_RTC_INST_SIZE (IWL39_RTC_INST_UPPER_BOUND - \
-                               IWL39_RTC_INST_LOWER_BOUND)
-#define IWL39_RTC_DATA_SIZE (IWL39_RTC_DATA_UPPER_BOUND - \
-                               IWL39_RTC_DATA_LOWER_BOUND)
-
-#define IWL39_MAX_INST_SIZE IWL39_RTC_INST_SIZE
-#define IWL39_MAX_DATA_SIZE IWL39_RTC_DATA_SIZE
-
-/* Size of uCode instruction memory in bootstrap state machine */
-#define IWL39_MAX_BSM_SIZE IWL39_RTC_INST_SIZE
-
-static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr)
-{
-       return (addr >= IWL39_RTC_DATA_LOWER_BOUND) &&
-              (addr < IWL39_RTC_DATA_UPPER_BOUND);
-}
-
-/* Base physical address of iwl3945_shared is provided to FH_TSSR_CBB_BASE
- * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */
-struct iwl3945_shared {
-       __le32 tx_base_ptr[8];
-} __packed;
-
-static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags)
-{
-       return le16_to_cpu(rate_n_flags) & 0xFF;
-}
-
-static inline u16 iwl3945_hw_get_rate_n_flags(__le16 rate_n_flags)
-{
-       return le16_to_cpu(rate_n_flags);
-}
-
-static inline __le16 iwl3945_hw_set_rate_n_flags(u8 rate, u16 flags)
-{
-       return cpu_to_le16((u16)rate|flags);
-}
-#endif
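
The rate helpers at the end of the header deleted above (iwl3945_hw_get_rate() and iwl3945_hw_set_rate_n_flags()) amount to simple bit packing: the PLCP rate sits in the low byte of a little-endian 16-bit word and the flags occupy the bits above it. A minimal standalone sketch of the same idea follows; it is not kernel code, host byte order is assumed, and the names are invented for illustration.

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a rate index into the low byte; flags are assumed to live above it. */
    static inline uint16_t set_rate_n_flags(uint8_t rate, uint16_t flags)
    {
            return (uint16_t)rate | flags;
    }

    /* Recover the rate from the low byte, mirroring iwl3945_hw_get_rate(). */
    static inline uint8_t get_rate(uint16_t rate_n_flags)
    {
            return rate_n_flags & 0xFF;
    }

    int main(void)
    {
            uint16_t packed = set_rate_n_flags(0x0d, 0x0800);
            printf("rate=0x%02x flags=0x%04x\n", get_rate(packed), packed & 0xff00u);
            return 0;
    }
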
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
deleted file mode 100644 (file)
index abe2b73..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/wireless.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-commands.h"
-#include "iwl-3945.h"
-#include "iwl-core.h"
-#include "iwl-dev.h"
-#include "iwl-3945-led.h"
-
-
-/* Send led command */
-static int iwl3945_send_led_cmd(struct iwl_priv *priv,
-                               struct iwl_led_cmd *led_cmd)
-{
-       struct iwl_host_cmd cmd = {
-               .id = REPLY_LEDS_CMD,
-               .len = sizeof(struct iwl_led_cmd),
-               .data = led_cmd,
-               .flags = CMD_ASYNC,
-               .callback = NULL,
-       };
-
-       return iwl_send_cmd(priv, &cmd);
-}
-
-/* Set led on command */
-static int iwl3945_led_on(struct iwl_priv *priv)
-{
-       struct iwl_led_cmd led_cmd = {
-               .id = IWL_LED_LINK,
-               .on = IWL_LED_SOLID,
-               .off = 0,
-               .interval = IWL_DEF_LED_INTRVL
-       };
-       return iwl3945_send_led_cmd(priv, &led_cmd);
-}
-
-/* Set led off command */
-static int iwl3945_led_off(struct iwl_priv *priv)
-{
-       struct iwl_led_cmd led_cmd = {
-               .id = IWL_LED_LINK,
-               .on = 0,
-               .off = 0,
-               .interval = IWL_DEF_LED_INTRVL
-       };
-       IWL_DEBUG_LED(priv, "led off\n");
-       return iwl3945_send_led_cmd(priv, &led_cmd);
-}
-
-const struct iwl_led_ops iwl3945_led_ops = {
-       .cmd = iwl3945_send_led_cmd,
-       .on = iwl3945_led_on,
-       .off = iwl3945_led_off,
-};
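
The file removed above ends by exporting iwl3945_led_ops, a table of function pointers that the shared LED code invokes without knowing which hardware sits behind it. A tiny self-contained sketch of that ops-table pattern follows; the struct and function names here are invented for illustration and are not part of the driver.

    #include <stdio.h>

    /* Hypothetical, simplified ops table: common code calls through the
     * function pointers and never needs hardware-specific knowledge. */
    struct led_ops {
            int (*on)(void *ctx);
            int (*off)(void *ctx);
    };

    static int demo_on(void *ctx)  { printf("led on (%s)\n",  (char *)ctx); return 0; }
    static int demo_off(void *ctx) { printf("led off (%s)\n", (char *)ctx); return 0; }

    static const struct led_ops demo_led_ops = {
            .on  = demo_on,
            .off = demo_off,
    };

    static int blink_once(const struct led_ops *ops, void *ctx)
    {
            int ret = ops->on(ctx);
            return ret ? ret : ops->off(ctx);
    }

    int main(void)
    {
            char name[] = "demo";
            return blink_once(&demo_led_ops, name);
    }
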
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
deleted file mode 100644 (file)
index ce990ad..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_3945_led_h__
-#define __iwl_3945_led_h__
-
-extern const struct iwl_led_ops iwl3945_led_ops;
-
-#endif /* __iwl_3945_led_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
deleted file mode 100644 (file)
index 1f3e7e3..0000000
+++ /dev/null
@@ -1,995 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <linux/wireless.h>
-#include <net/mac80211.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-
-#include <linux/workqueue.h>
-
-#include "iwl-commands.h"
-#include "iwl-3945.h"
-#include "iwl-sta.h"
-
-#define RS_NAME "iwl-3945-rs"
-
-static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = {
-       7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
-};
-
-static s32 iwl3945_expected_tpt_g_prot[IWL_RATE_COUNT_3945] = {
-       7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
-};
-
-static s32 iwl3945_expected_tpt_a[IWL_RATE_COUNT_3945] = {
-       0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
-};
-
-static s32 iwl3945_expected_tpt_b[IWL_RATE_COUNT_3945] = {
-       7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
-};
-
-struct iwl3945_tpt_entry {
-       s8 min_rssi;
-       u8 index;
-};
-
-static struct iwl3945_tpt_entry iwl3945_tpt_table_a[] = {
-       {-60, IWL_RATE_54M_INDEX},
-       {-64, IWL_RATE_48M_INDEX},
-       {-72, IWL_RATE_36M_INDEX},
-       {-80, IWL_RATE_24M_INDEX},
-       {-84, IWL_RATE_18M_INDEX},
-       {-85, IWL_RATE_12M_INDEX},
-       {-87, IWL_RATE_9M_INDEX},
-       {-89, IWL_RATE_6M_INDEX}
-};
-
-static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
-       {-60, IWL_RATE_54M_INDEX},
-       {-64, IWL_RATE_48M_INDEX},
-       {-68, IWL_RATE_36M_INDEX},
-       {-80, IWL_RATE_24M_INDEX},
-       {-84, IWL_RATE_18M_INDEX},
-       {-85, IWL_RATE_12M_INDEX},
-       {-86, IWL_RATE_11M_INDEX},
-       {-88, IWL_RATE_5M_INDEX},
-       {-90, IWL_RATE_2M_INDEX},
-       {-92, IWL_RATE_1M_INDEX}
-};
-
-#define IWL_RATE_MAX_WINDOW          62
-#define IWL_RATE_FLUSH          (3*HZ)
-#define IWL_RATE_WIN_FLUSH       (HZ/2)
-#define IWL39_RATE_HIGH_TH          11520
-#define IWL_SUCCESS_UP_TH         8960
-#define IWL_SUCCESS_DOWN_TH      10880
-#define IWL_RATE_MIN_FAILURE_TH       6
-#define IWL_RATE_MIN_SUCCESS_TH       8
-#define IWL_RATE_DECREASE_TH       1920
-#define IWL_RATE_RETRY_TH           15
-
-static u8 iwl3945_get_rate_index_by_rssi(s32 rssi, enum ieee80211_band band)
-{
-       u32 index = 0;
-       u32 table_size = 0;
-       struct iwl3945_tpt_entry *tpt_table = NULL;
-
-       if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL))
-               rssi = IWL_MIN_RSSI_VAL;
-
-       switch (band) {
-       case IEEE80211_BAND_2GHZ:
-               tpt_table = iwl3945_tpt_table_g;
-               table_size = ARRAY_SIZE(iwl3945_tpt_table_g);
-               break;
-
-       case IEEE80211_BAND_5GHZ:
-               tpt_table = iwl3945_tpt_table_a;
-               table_size = ARRAY_SIZE(iwl3945_tpt_table_a);
-               break;
-
-       default:
-               BUG();
-               break;
-       }
-
-       while ((index < table_size) && (rssi < tpt_table[index].min_rssi))
-               index++;
-
-       index = min(index, (table_size - 1));
-
-       return tpt_table[index].index;
-}
-
-static void iwl3945_clear_window(struct iwl3945_rate_scale_data *window)
-{
-       window->data = 0;
-       window->success_counter = 0;
-       window->success_ratio = -1;
-       window->counter = 0;
-       window->average_tpt = IWL_INVALID_VALUE;
-       window->stamp = 0;
-}
-
-/**
- * iwl3945_rate_scale_flush_windows - flush out the rate scale windows
- *
- * Returns the number of windows that have gathered data but were
- * not flushed.  If there were any that were not flushed, then
- * reschedule the rate flushing routine.
- */
-static int iwl3945_rate_scale_flush_windows(struct iwl3945_rs_sta *rs_sta)
-{
-       int unflushed = 0;
-       int i;
-       unsigned long flags;
-       struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-
-       /*
-        * For each rate, if we have collected data on that rate
-        * and it has been more than IWL_RATE_WIN_FLUSH
-        * since we flushed, clear out the gathered statistics
-        */
-       for (i = 0; i < IWL_RATE_COUNT_3945; i++) {
-               if (!rs_sta->win[i].counter)
-                       continue;
-
-               spin_lock_irqsave(&rs_sta->lock, flags);
-               if (time_after(jiffies, rs_sta->win[i].stamp +
-                              IWL_RATE_WIN_FLUSH)) {
-                       IWL_DEBUG_RATE(priv, "flushing %d samples of rate "
-                                      "index %d\n",
-                                      rs_sta->win[i].counter, i);
-                       iwl3945_clear_window(&rs_sta->win[i]);
-               } else
-                       unflushed++;
-               spin_unlock_irqrestore(&rs_sta->lock, flags);
-       }
-
-       return unflushed;
-}
-
-#define IWL_RATE_FLUSH_MAX              5000   /* msec */
-#define IWL_RATE_FLUSH_MIN              50     /* msec */
-#define IWL_AVERAGE_PACKETS             1500
-
-static void iwl3945_bg_rate_scale_flush(unsigned long data)
-{
-       struct iwl3945_rs_sta *rs_sta = (void *)data;
-       struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-       int unflushed = 0;
-       unsigned long flags;
-       u32 packet_count, duration, pps;
-
-       IWL_DEBUG_RATE(priv, "enter\n");
-
-       unflushed = iwl3945_rate_scale_flush_windows(rs_sta);
-
-       spin_lock_irqsave(&rs_sta->lock, flags);
-
-       /* Number of packets Tx'd since the last time this timer ran */
-       packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
-
-       rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
-
-       if (unflushed) {
-               duration =
-                   jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
-
-               IWL_DEBUG_RATE(priv, "Tx'd %d packets in %dms\n",
-                              packet_count, duration);
-
-               /* Determine packets per second */
-               if (duration)
-                       pps = (packet_count * 1000) / duration;
-               else
-                       pps = 0;
-
-               if (pps) {
-                       duration = (IWL_AVERAGE_PACKETS * 1000) / pps;
-                       if (duration < IWL_RATE_FLUSH_MIN)
-                               duration = IWL_RATE_FLUSH_MIN;
-                       else if (duration > IWL_RATE_FLUSH_MAX)
-                               duration = IWL_RATE_FLUSH_MAX;
-               } else
-                       duration = IWL_RATE_FLUSH_MAX;
-
-               rs_sta->flush_time = msecs_to_jiffies(duration);
-
-               IWL_DEBUG_RATE(priv, "new flush period: %d msec ave %d\n",
-                              duration, packet_count);
-
-               mod_timer(&rs_sta->rate_scale_flush, jiffies +
-                         rs_sta->flush_time);
-
-               rs_sta->last_partial_flush = jiffies;
-       } else {
-               rs_sta->flush_time = IWL_RATE_FLUSH;
-               rs_sta->flush_pending = 0;
-       }
-       /* If there weren't any unflushed entries, we don't schedule the timer
-        * to run again */
-
-       rs_sta->last_flush = jiffies;
-
-       spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-       IWL_DEBUG_RATE(priv, "leave\n");
-}
-
-/**
- * iwl3945_collect_tx_data - Update the success/failure sliding window
- *
- * We keep a sliding window of the last 62 packets transmitted
- * at this rate.  window->data contains the bitmask of successful
- * packets.
- */
-static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
-                               struct iwl3945_rate_scale_data *window,
-                               int success, int retries, int index)
-{
-       unsigned long flags;
-       s32 fail_count;
-       struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-
-       if (!retries) {
-               IWL_DEBUG_RATE(priv, "leave: retries == 0 -- should be at least 1\n");
-               return;
-       }
-
-       spin_lock_irqsave(&rs_sta->lock, flags);
-
-       /*
-        * Keep track of only the latest 62 tx frame attempts in this rate's
-        * history window; anything older isn't really relevant any more.
-        * If we have filled up the sliding window, drop the oldest attempt;
-        * if the oldest attempt (highest bit in bitmap) shows "success",
-        * subtract "1" from the success counter (this is the main reason
-        * we keep these bitmaps!).
-        * */
-       while (retries > 0) {
-               if (window->counter >= IWL_RATE_MAX_WINDOW) {
-
-                       /* remove earliest */
-                       window->counter = IWL_RATE_MAX_WINDOW - 1;
-
-                       if (window->data & (1ULL << (IWL_RATE_MAX_WINDOW - 1))) {
-                               window->data &= ~(1ULL << (IWL_RATE_MAX_WINDOW - 1));
-                               window->success_counter--;
-                       }
-               }
-
-               /* Increment frames-attempted counter */
-               window->counter++;
-
-               /* Shift bitmap by one frame (throw away oldest history),
-                * OR in "1", and increment "success" if this
-                * frame was successful. */
-               window->data <<= 1;
-               if (success > 0) {
-                       window->success_counter++;
-                       window->data |= 0x1;
-                       success--;
-               }
-
-               retries--;
-       }
-
-       /* Calculate current success ratio, avoid divide-by-0! */
-       if (window->counter > 0)
-               window->success_ratio = 128 * (100 * window->success_counter)
-                                       / window->counter;
-       else
-               window->success_ratio = IWL_INVALID_VALUE;
-
-       fail_count = window->counter - window->success_counter;
-
-       /* Calculate average throughput, if we have enough history. */
-       if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
-           (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
-               window->average_tpt = ((window->success_ratio *
-                               rs_sta->expected_tpt[index] + 64) / 128);
-       else
-               window->average_tpt = IWL_INVALID_VALUE;
-
-       /* Tag this window as having been updated */
-       window->stamp = jiffies;
-
-       spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-}
-
-/*
- * Called after adding a new station to initialize rate scaling
- */
-void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
-{
-       struct ieee80211_hw *hw = priv->hw;
-       struct ieee80211_conf *conf = &priv->hw->conf;
-       struct iwl3945_sta_priv *psta;
-       struct iwl3945_rs_sta *rs_sta;
-       struct ieee80211_supported_band *sband;
-       int i;
-
-       IWL_DEBUG_INFO(priv, "enter\n");
-       if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
-               goto out;
-
-       psta = (struct iwl3945_sta_priv *) sta->drv_priv;
-       rs_sta = &psta->rs_sta;
-       sband = hw->wiphy->bands[conf->channel->band];
-
-       rs_sta->priv = priv;
-
-       rs_sta->start_rate = IWL_RATE_INVALID;
-
-       /* default to just 802.11b */
-       rs_sta->expected_tpt = iwl3945_expected_tpt_b;
-
-       rs_sta->last_partial_flush = jiffies;
-       rs_sta->last_flush = jiffies;
-       rs_sta->flush_time = IWL_RATE_FLUSH;
-       rs_sta->last_tx_packets = 0;
-
-       rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
-       rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;
-
-       for (i = 0; i < IWL_RATE_COUNT_3945; i++)
-               iwl3945_clear_window(&rs_sta->win[i]);
-
-       /* TODO: what is a good starting rate for STA? About middle? Maybe not
-        * the lowest or the highest rate.. Could consider using RSSI from
-        * previous packets? Need to have IEEE 802.1X auth succeed immediately
-        * after assoc.. */
-
-       for (i = sband->n_bitrates - 1; i >= 0; i--) {
-               if (sta->supp_rates[sband->band] & (1 << i)) {
-                       rs_sta->last_txrate_idx = i;
-                       break;
-               }
-       }
-
-       priv->_3945.sta_supp_rates = sta->supp_rates[sband->band];
-       /* For 5 GHz band it starts at IWL_FIRST_OFDM_RATE */
-       if (sband->band == IEEE80211_BAND_5GHZ) {
-               rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
-               priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates <<
-                                               IWL_FIRST_OFDM_RATE;
-       }
-
-out:
-       priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
-
-       IWL_DEBUG_INFO(priv, "leave\n");
-}
-
-static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
-{
-       return hw->priv;
-}
-
-/* rate scale requires free function to be implemented */
-static void rs_free(void *priv)
-{
-       return;
-}
-
-static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
-{
-       struct iwl3945_rs_sta *rs_sta;
-       struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
-       struct iwl_priv *priv __maybe_unused = iwl_priv;
-
-       IWL_DEBUG_RATE(priv, "enter\n");
-
-       rs_sta = &psta->rs_sta;
-
-       spin_lock_init(&rs_sta->lock);
-       init_timer(&rs_sta->rate_scale_flush);
-
-       IWL_DEBUG_RATE(priv, "leave\n");
-
-       return rs_sta;
-}
-
-static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
-                       void *priv_sta)
-{
-       struct iwl3945_rs_sta *rs_sta = priv_sta;
-
-       /*
-        * Be careful not to use any members of iwl3945_rs_sta (like trying
-        * to use iwl_priv to print out debugging) since it may not be fully
-        * initialized at this point.
-        */
-       del_timer_sync(&rs_sta->rate_scale_flush);
-}
-
-
-/**
- * rs_tx_status - Update rate control values based on Tx results
- *
- * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
- * the hardware for each rate.
- */
-static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
-                        struct ieee80211_sta *sta, void *priv_sta,
-                        struct sk_buff *skb)
-{
-       s8 retries = 0, current_count;
-       int scale_rate_index, first_index, last_index;
-       unsigned long flags;
-       struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
-       struct iwl3945_rs_sta *rs_sta = priv_sta;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
-       IWL_DEBUG_RATE(priv, "enter\n");
-
-       retries = info->status.rates[0].count;
-       /* Sanity Check for retries */
-       if (retries > IWL_RATE_RETRY_TH)
-               retries = IWL_RATE_RETRY_TH;
-
-       first_index = sband->bitrates[info->status.rates[0].idx].hw_value;
-       if ((first_index < 0) || (first_index >= IWL_RATE_COUNT_3945)) {
-               IWL_DEBUG_RATE(priv, "leave: Rate out of bounds: %d\n", first_index);
-               return;
-       }
-
-       if (!priv_sta) {
-               IWL_DEBUG_RATE(priv, "leave: No STA priv data to update!\n");
-               return;
-       }
-
-       /* Treat uninitialized rate scaling data the same as non-existent. */
-       if (!rs_sta->priv) {
-               IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n");
-               return;
-       }
-
-
-       rs_sta->tx_packets++;
-
-       scale_rate_index = first_index;
-       last_index = first_index;
-
-       /*
-        * Update the window for each rate.  We determine which rates
-        * were Tx'd based on the total number of retries vs. the number
-        * of retries configured for each rate -- currently set to the
-        * priv value 'retry_rate' vs. rate specific
-        *
-        * On exit from this while loop last_index indicates the rate
-        * at which the frame was finally transmitted (or failed if no
-        * ACK)
-        */
-       while (retries > 1) {
-               if ((retries - 1) < priv->retry_rate) {
-                       current_count = (retries - 1);
-                       last_index = scale_rate_index;
-               } else {
-                       current_count = priv->retry_rate;
-                       last_index = iwl3945_rs_next_rate(priv,
-                                                        scale_rate_index);
-               }
-
-               /* Update this rate accounting for as many retries
-                * as was used for it (per current_count) */
-               iwl3945_collect_tx_data(rs_sta,
-                                   &rs_sta->win[scale_rate_index],
-                                   0, current_count, scale_rate_index);
-               IWL_DEBUG_RATE(priv, "Update rate %d for %d retries.\n",
-                              scale_rate_index, current_count);
-
-               retries -= current_count;
-
-               scale_rate_index = last_index;
-       }
-
-
-       /* Update the last index window with success/failure based on ACK */
-       IWL_DEBUG_RATE(priv, "Update rate %d with %s.\n",
-                      last_index,
-                      (info->flags & IEEE80211_TX_STAT_ACK) ?
-                      "success" : "failure");
-       iwl3945_collect_tx_data(rs_sta,
-                           &rs_sta->win[last_index],
-                           info->flags & IEEE80211_TX_STAT_ACK, 1, last_index);
-
-       /* We updated the rate scale window -- if it's been more than
-        * flush_time since the last run, schedule the flush
-        * again */
-       spin_lock_irqsave(&rs_sta->lock, flags);
-
-       if (!rs_sta->flush_pending &&
-           time_after(jiffies, rs_sta->last_flush +
-                      rs_sta->flush_time)) {
-
-               rs_sta->last_partial_flush = jiffies;
-               rs_sta->flush_pending = 1;
-               mod_timer(&rs_sta->rate_scale_flush,
-                         jiffies + rs_sta->flush_time);
-       }
-
-       spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-       IWL_DEBUG_RATE(priv, "leave\n");
-}
-
-static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
-                                u8 index, u16 rate_mask, enum ieee80211_band band)
-{
-       u8 high = IWL_RATE_INVALID;
-       u8 low = IWL_RATE_INVALID;
-       struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-
-       /* 802.11A walks to the next literal adjacent rate in
-        * the rate table */
-       if (unlikely(band == IEEE80211_BAND_5GHZ)) {
-               int i;
-               u32 mask;
-
-               /* Find the previous rate that is in the rate mask */
-               i = index - 1;
-               for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
-                       if (rate_mask & mask) {
-                               low = i;
-                               break;
-                       }
-               }
-
-               /* Find the next rate that is in the rate mask */
-               i = index + 1;
-               for (mask = (1 << i); i < IWL_RATE_COUNT_3945;
-                    i++, mask <<= 1) {
-                       if (rate_mask & mask) {
-                               high = i;
-                               break;
-                       }
-               }
-
-               return (high << 8) | low;
-       }
-
-       low = index;
-       while (low != IWL_RATE_INVALID) {
-               if (rs_sta->tgg)
-                       low = iwl3945_rates[low].prev_rs_tgg;
-               else
-                       low = iwl3945_rates[low].prev_rs;
-               if (low == IWL_RATE_INVALID)
-                       break;
-               if (rate_mask & (1 << low))
-                       break;
-               IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
-       }
-
-       high = index;
-       while (high != IWL_RATE_INVALID) {
-               if (rs_sta->tgg)
-                       high = iwl3945_rates[high].next_rs_tgg;
-               else
-                       high = iwl3945_rates[high].next_rs;
-               if (high == IWL_RATE_INVALID)
-                       break;
-               if (rate_mask & (1 << high))
-                       break;
-               IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
-       }
-
-       return (high << 8) | low;
-}
-
-/**
- * rs_get_rate - find the rate for the requested packet
- *
- * Stores the selected rate index in the skb's tx control info (the function
- * itself returns void).
- *
- * The rate control algorithm has no internal mapping between hw_mode's
- * rate ordering and the rate ordering used by the rate control algorithm.
- *
- * The rate control algorithm uses a single table of rates that goes across
- * the entire A/B/G spectrum vs. being limited to just one particular
- * hw_mode.
- *
- * As such, we can't convert the index obtained below into the hw_mode's
- * rate table and must reference the driver allocated rate table
- *
- */
-static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
-                       void *priv_sta, struct ieee80211_tx_rate_control *txrc)
-{
-       struct ieee80211_supported_band *sband = txrc->sband;
-       struct sk_buff *skb = txrc->skb;
-       u8 low = IWL_RATE_INVALID;
-       u8 high = IWL_RATE_INVALID;
-       u16 high_low;
-       int index;
-       struct iwl3945_rs_sta *rs_sta = priv_sta;
-       struct iwl3945_rate_scale_data *window = NULL;
-       int current_tpt = IWL_INVALID_VALUE;
-       int low_tpt = IWL_INVALID_VALUE;
-       int high_tpt = IWL_INVALID_VALUE;
-       u32 fail_count;
-       s8 scale_action = 0;
-       unsigned long flags;
-       u16 rate_mask = sta ? sta->supp_rates[sband->band] : 0;
-       s8 max_rate_idx = -1;
-       struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
-       IWL_DEBUG_RATE(priv, "enter\n");
-
-       /* Treat uninitialized rate scaling data the same as non-existent. */
-       if (rs_sta && !rs_sta->priv) {
-               IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n");
-               priv_sta = NULL;
-       }
-
-       if (rate_control_send_low(sta, priv_sta, txrc))
-               return;
-
-       rate_mask = sta->supp_rates[sband->band];
-
-       /* get user max rate if set */
-       max_rate_idx = txrc->max_rate_idx;
-       if ((sband->band == IEEE80211_BAND_5GHZ) && (max_rate_idx != -1))
-               max_rate_idx += IWL_FIRST_OFDM_RATE;
-       if ((max_rate_idx < 0) || (max_rate_idx >= IWL_RATE_COUNT))
-               max_rate_idx = -1;
-
-       index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT_3945 - 1);
-
-       if (sband->band == IEEE80211_BAND_5GHZ)
-               rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
-
-       spin_lock_irqsave(&rs_sta->lock, flags);
-
-       /* for a recent assoc, choose the best rate based
-        * on the rssi value
-        */
-       if (rs_sta->start_rate != IWL_RATE_INVALID) {
-               if (rs_sta->start_rate < index &&
-                  (rate_mask & (1 << rs_sta->start_rate)))
-                       index = rs_sta->start_rate;
-               rs_sta->start_rate = IWL_RATE_INVALID;
-       }
-
-       /* force user max rate if set by user */
-       if ((max_rate_idx != -1) && (max_rate_idx < index)) {
-               if (rate_mask & (1 << max_rate_idx))
-                       index = max_rate_idx;
-       }
-
-       window = &(rs_sta->win[index]);
-
-       fail_count = window->counter - window->success_counter;
-
-       if (((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
-            (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) {
-               spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-               IWL_DEBUG_RATE(priv, "Invalid average_tpt on rate %d: "
-                              "counter: %d, success_counter: %d, "
-                              "expected_tpt is %sNULL\n",
-                              index,
-                              window->counter,
-                              window->success_counter,
-                              rs_sta->expected_tpt ? "not " : "");
-
-          /* Can't calculate this yet; not enough history */
-               window->average_tpt = IWL_INVALID_VALUE;
-               goto out;
-
-       }
-
-       current_tpt = window->average_tpt;
-
-       high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask,
-                                            sband->band);
-       low = high_low & 0xff;
-       high = (high_low >> 8) & 0xff;
-
-       /* If user set max rate, don't allow a rate higher than the user constraint */
-       if ((max_rate_idx != -1) && (max_rate_idx < high))
-               high = IWL_RATE_INVALID;
-
-       /* Collect Measured throughputs of adjacent rates */
-       if (low != IWL_RATE_INVALID)
-               low_tpt = rs_sta->win[low].average_tpt;
-
-       if (high != IWL_RATE_INVALID)
-               high_tpt = rs_sta->win[high].average_tpt;
-
-       spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-       scale_action = 0;
-
-       /* Low success ratio, need to drop the rate */
-       if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) {
-               IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
-               scale_action = -1;
-       /* No throughput measured yet for adjacent rates,
-        * try to increase */
-       } else if ((low_tpt == IWL_INVALID_VALUE) &&
-                  (high_tpt == IWL_INVALID_VALUE)) {
-
-               if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH)
-                       scale_action = 1;
-               else if (low != IWL_RATE_INVALID)
-                       scale_action = 0;
-
-       /* Both adjacent throughputs are measured, but neither one has
-        * better throughput; we're using the best rate, don't change
-        * it! */
-       } else if ((low_tpt != IWL_INVALID_VALUE) &&
-                (high_tpt != IWL_INVALID_VALUE) &&
-                (low_tpt < current_tpt) && (high_tpt < current_tpt)) {
-
-               IWL_DEBUG_RATE(priv, "No action -- low [%d] & high [%d] < "
-                              "current_tpt [%d]\n",
-                              low_tpt, high_tpt, current_tpt);
-               scale_action = 0;
-
-       /* At least one of the rates has better throughput */
-       } else {
-               if (high_tpt != IWL_INVALID_VALUE) {
-
-                       /* High rate has better throughput, Increase
-                        * rate */
-                       if (high_tpt > current_tpt &&
-                               window->success_ratio >= IWL_RATE_INCREASE_TH)
-                               scale_action = 1;
-                       else {
-                               IWL_DEBUG_RATE(priv,
-                                   "decrease rate because of high tpt\n");
-                               scale_action = 0;
-                       }
-               } else if (low_tpt != IWL_INVALID_VALUE) {
-                       if (low_tpt > current_tpt) {
-                               IWL_DEBUG_RATE(priv,
-                                   "decrease rate because of low tpt\n");
-                               scale_action = -1;
-                       } else if (window->success_ratio >= IWL_RATE_INCREASE_TH) {
-                               /* Lower rate is not better and the
-                                * success ratio is good, increase rate */
-                               scale_action = 1;
-                       }
-               }
-       }
-
-       /* Sanity check; asked for decrease, but success rate or throughput
-        * has been good at old rate.  Don't change it. */
-       if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
-                   ((window->success_ratio > IWL_RATE_HIGH_TH) ||
-                    (current_tpt > (100 * rs_sta->expected_tpt[low]))))
-               scale_action = 0;
-
-       switch (scale_action) {
-       case -1:
-
-               /* Decrease rate */
-               if (low != IWL_RATE_INVALID)
-                       index = low;
-               break;
-
-       case 1:
-               /* Increase rate */
-               if (high != IWL_RATE_INVALID)
-                       index = high;
-
-               break;
-
-       case 0:
-       default:
-               /* No change */
-               break;
-       }
-
-       IWL_DEBUG_RATE(priv, "Selected %d (action %d) - low %d high %d\n",
-                      index, scale_action, low, high);
-
- out:
-
-       rs_sta->last_txrate_idx = index;
-       if (sband->band == IEEE80211_BAND_5GHZ)
-               info->control.rates[0].idx = rs_sta->last_txrate_idx -
-                               IWL_FIRST_OFDM_RATE;
-       else
-               info->control.rates[0].idx = rs_sta->last_txrate_idx;
-
-       IWL_DEBUG_RATE(priv, "leave: %d\n", index);
-}
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static int iwl3945_open_file_generic(struct inode *inode, struct file *file)
-{
-       file->private_data = inode->i_private;
-       return 0;
-}
-
-static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file,
-                                                 char __user *user_buf,
-                                                 size_t count, loff_t *ppos)
-{
-       char *buff;
-       int desc = 0;
-       int j;
-       ssize_t ret;
-       struct iwl3945_rs_sta *lq_sta = file->private_data;
-
-       buff = kmalloc(1024, GFP_KERNEL);
-       if (!buff)
-               return -ENOMEM;
-
-       desc += sprintf(buff + desc, "tx packets=%d last rate index=%d\n"
-                       "rate=0x%X flush time %d\n",
-                       lq_sta->tx_packets,
-                       lq_sta->last_txrate_idx,
-                       lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time));
-       for (j = 0; j < IWL_RATE_COUNT_3945; j++) {
-               desc += sprintf(buff+desc,
-                               "counter=%d success=%d %%=%d\n",
-                               lq_sta->win[j].counter,
-                               lq_sta->win[j].success_counter,
-                               lq_sta->win[j].success_ratio);
-       }
-       ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
-       kfree(buff);
-       return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
-       .read = iwl3945_sta_dbgfs_stats_table_read,
-       .open = iwl3945_open_file_generic,
-       .llseek = default_llseek,
-};
-
-static void iwl3945_add_debugfs(void *priv, void *priv_sta,
-                               struct dentry *dir)
-{
-       struct iwl3945_rs_sta *lq_sta = priv_sta;
-
-       lq_sta->rs_sta_dbgfs_stats_table_file =
-               debugfs_create_file("rate_stats_table", 0600, dir,
-               lq_sta, &rs_sta_dbgfs_stats_table_ops);
-
-}
-
-static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
-{
-       struct iwl3945_rs_sta *lq_sta = priv_sta;
-       debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
-}
-#endif
-
-/*
- * Initialization of rate scaling information is done by driver after
- * the station is added. Since mac80211 calls this function before a
- * station is added we ignore it.
- */
-static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
-                             struct ieee80211_sta *sta, void *priv_sta)
-{
-}
-
-static struct rate_control_ops rs_ops = {
-       .module = NULL,
-       .name = RS_NAME,
-       .tx_status = rs_tx_status,
-       .get_rate = rs_get_rate,
-       .rate_init = rs_rate_init_stub,
-       .alloc = rs_alloc,
-       .free = rs_free,
-       .alloc_sta = rs_alloc_sta,
-       .free_sta = rs_free_sta,
-#ifdef CONFIG_MAC80211_DEBUGFS
-       .add_sta_debugfs = iwl3945_add_debugfs,
-       .remove_sta_debugfs = iwl3945_remove_debugfs,
-#endif
-
-};
-void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
-{
-       struct iwl_priv *priv = hw->priv;
-       s32 rssi = 0;
-       unsigned long flags;
-       struct iwl3945_rs_sta *rs_sta;
-       struct ieee80211_sta *sta;
-       struct iwl3945_sta_priv *psta;
-
-       IWL_DEBUG_RATE(priv, "enter\n");
-
-       rcu_read_lock();
-
-       sta = ieee80211_find_sta(priv->contexts[IWL_RXON_CTX_BSS].vif,
-                                priv->stations[sta_id].sta.sta.addr);
-       if (!sta) {
-               IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
-               rcu_read_unlock();
-               return;
-       }
-
-       psta = (void *) sta->drv_priv;
-       rs_sta = &psta->rs_sta;
-
-       spin_lock_irqsave(&rs_sta->lock, flags);
-
-       rs_sta->tgg = 0;
-       switch (priv->band) {
-       case IEEE80211_BAND_2GHZ:
-               /* TODO: this always does G, not a regression */
-               if (priv->contexts[IWL_RXON_CTX_BSS].active.flags &
-                                               RXON_FLG_TGG_PROTECT_MSK) {
-                       rs_sta->tgg = 1;
-                       rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot;
-               } else
-                       rs_sta->expected_tpt = iwl3945_expected_tpt_g;
-               break;
-
-       case IEEE80211_BAND_5GHZ:
-               rs_sta->expected_tpt = iwl3945_expected_tpt_a;
-               break;
-       case IEEE80211_NUM_BANDS:
-               BUG();
-               break;
-       }
-
-       spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-       rssi = priv->_3945.last_rx_rssi;
-       if (rssi == 0)
-               rssi = IWL_MIN_RSSI_VAL;
-
-       IWL_DEBUG_RATE(priv, "Network RSSI: %d\n", rssi);
-
-       rs_sta->start_rate = iwl3945_get_rate_index_by_rssi(rssi, priv->band);
-
-       IWL_DEBUG_RATE(priv, "leave: rssi %d assign rate index: "
-                      "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate,
-                      iwl3945_rates[rs_sta->start_rate].plcp);
-       rcu_read_unlock();
-}
-
-int iwl3945_rate_control_register(void)
-{
-       return ieee80211_rate_control_register(&rs_ops);
-}
-
-void iwl3945_rate_control_unregister(void)
-{
-       ieee80211_rate_control_unregister(&rs_ops);
-}
-
-
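
The core bookkeeping in the rate-scaling file deleted above is the per-rate success window maintained by iwl3945_collect_tx_data(): a 62-entry bitmap of recent transmit attempts from which a success ratio (scaled by 128) is derived. A standalone sketch of that window follows, stripped of locking and driver types; the names are illustrative only and this is not kernel code.

    #include <stdint.h>
    #include <stdio.h>

    #define WINDOW_SIZE 62

    struct rate_window {
            uint64_t data;            /* bitmap of recent attempts, bit 0 = newest */
            int counter;              /* attempts recorded (<= WINDOW_SIZE) */
            int success_counter;      /* successes currently inside the window */
    };

    static void record_attempt(struct rate_window *w, int success)
    {
            if (w->counter >= WINDOW_SIZE) {
                    /* Window full: drop the oldest attempt before adding a new one. */
                    w->counter = WINDOW_SIZE - 1;
                    if (w->data & (1ULL << (WINDOW_SIZE - 1))) {
                            w->data &= ~(1ULL << (WINDOW_SIZE - 1));
                            w->success_counter--;
                    }
            }
            w->counter++;
            w->data <<= 1;            /* shift history; newest attempt lands in bit 0 */
            if (success) {
                    w->data |= 1;
                    w->success_counter++;
            }
    }

    int main(void)
    {
            struct rate_window w = { 0 };
            for (int i = 0; i < 100; i++)
                    record_attempt(&w, i % 3 != 0);   /* roughly 66% success */
            /* The driver scales the ratio by 128: 128 * 100 * successes / attempts. */
            printf("scaled success ratio = %d\n",
                   128 * (100 * w.success_counter) / w.counter);
            return 0;
    }
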
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
deleted file mode 100644 (file)
index 39b6f16..0000000
+++ /dev/null
@@ -1,2753 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/wireless.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-#include <net/mac80211.h>
-
-#include "iwl-fh.h"
-#include "iwl-3945-fh.h"
-#include "iwl-commands.h"
-#include "iwl-sta.h"
-#include "iwl-3945.h"
-#include "iwl-eeprom.h"
-#include "iwl-core.h"
-#include "iwl-helpers.h"
-#include "iwl-led.h"
-#include "iwl-3945-led.h"
-#include "iwl-3945-debugfs.h"
-#include "iwl-legacy.h"
-
-#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np)    \
-       [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,   \
-                                   IWL_RATE_##r##M_IEEE,   \
-                                   IWL_RATE_##ip##M_INDEX, \
-                                   IWL_RATE_##in##M_INDEX, \
-                                   IWL_RATE_##rp##M_INDEX, \
-                                   IWL_RATE_##rn##M_INDEX, \
-                                   IWL_RATE_##pp##M_INDEX, \
-                                   IWL_RATE_##np##M_INDEX, \
-                                   IWL_RATE_##r##M_INDEX_TABLE, \
-                                   IWL_RATE_##ip##M_INDEX_TABLE }
-
-/*
- * Parameter order:
- *   rate, prev rate, next rate, prev tgg rate, next tgg rate
- *
- * If there isn't a valid next or previous rate then INV is used which
- * maps to IWL_RATE_INVALID
- *
- */
-const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = {
-       IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2),    /*  1mbps */
-       IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5),          /*  2mbps */
-       IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
-       IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18),      /* 11mbps */
-       IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11),        /*  6mbps */
-       IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11),       /*  9mbps */
-       IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
-       IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
-       IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
-       IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
-       IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
-       IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */
-};
-
-static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
-{
-       u8 rate = iwl3945_rates[rate_index].prev_ieee;
-
-       if (rate == IWL_RATE_INVALID)
-               rate = rate_index;
-       return rate;
-}
-
-/* 1 = enable the iwl3945_disable_events() function */
-#define IWL_EVT_DISABLE (0)
-#define IWL_EVT_DISABLE_SIZE (1532/32)
-
-/**
- * iwl3945_disable_events - Disable selected events in uCode event log
- *
- * Disable an event by writing "1"s into "disable"
- *   bitmap in SRAM.  Bit position corresponds to Event # (id/type).
- *   Default values of 0 enable uCode events to be logged.
- * Use only for special debugging.  This function is just a placeholder as-is,
- *   you'll need to provide the special bits! ...
- *   ... and set IWL_EVT_DISABLE to 1. */
-void iwl3945_disable_events(struct iwl_priv *priv)
-{
-       int i;
-       u32 base;               /* SRAM address of event log header */
-       u32 disable_ptr;        /* SRAM address of event-disable bitmap array */
-       u32 array_size;         /* # of u32 entries in array */
-       static const u32 evt_disable[IWL_EVT_DISABLE_SIZE] = {
-               0x00000000,     /*   31 -    0  Event id numbers */
-               0x00000000,     /*   63 -   32 */
-               0x00000000,     /*   95 -   64 */
-               0x00000000,     /*  127 -   96 */
-               0x00000000,     /*  159 -  128 */
-               0x00000000,     /*  191 -  160 */
-               0x00000000,     /*  223 -  192 */
-               0x00000000,     /*  255 -  224 */
-               0x00000000,     /*  287 -  256 */
-               0x00000000,     /*  319 -  288 */
-               0x00000000,     /*  351 -  320 */
-               0x00000000,     /*  383 -  352 */
-               0x00000000,     /*  415 -  384 */
-               0x00000000,     /*  447 -  416 */
-               0x00000000,     /*  479 -  448 */
-               0x00000000,     /*  511 -  480 */
-               0x00000000,     /*  543 -  512 */
-               0x00000000,     /*  575 -  544 */
-               0x00000000,     /*  607 -  576 */
-               0x00000000,     /*  639 -  608 */
-               0x00000000,     /*  671 -  640 */
-               0x00000000,     /*  703 -  672 */
-               0x00000000,     /*  735 -  704 */
-               0x00000000,     /*  767 -  736 */
-               0x00000000,     /*  799 -  768 */
-               0x00000000,     /*  831 -  800 */
-               0x00000000,     /*  863 -  832 */
-               0x00000000,     /*  895 -  864 */
-               0x00000000,     /*  927 -  896 */
-               0x00000000,     /*  959 -  928 */
-               0x00000000,     /*  991 -  960 */
-               0x00000000,     /* 1023 -  992 */
-               0x00000000,     /* 1055 - 1024 */
-               0x00000000,     /* 1087 - 1056 */
-               0x00000000,     /* 1119 - 1088 */
-               0x00000000,     /* 1151 - 1120 */
-               0x00000000,     /* 1183 - 1152 */
-               0x00000000,     /* 1215 - 1184 */
-               0x00000000,     /* 1247 - 1216 */
-               0x00000000,     /* 1279 - 1248 */
-               0x00000000,     /* 1311 - 1280 */
-               0x00000000,     /* 1343 - 1312 */
-               0x00000000,     /* 1375 - 1344 */
-               0x00000000,     /* 1407 - 1376 */
-               0x00000000,     /* 1439 - 1408 */
-               0x00000000,     /* 1471 - 1440 */
-               0x00000000,     /* 1503 - 1472 */
-       };
-
-       base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
-       if (!iwl3945_hw_valid_rtc_data_addr(base)) {
-               IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
-               return;
-       }
-
-       disable_ptr = iwl_read_targ_mem(priv, base + (4 * sizeof(u32)));
-       array_size = iwl_read_targ_mem(priv, base + (5 * sizeof(u32)));
-
-       if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
-               IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
-                              disable_ptr);
-               for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
-                       iwl_write_targ_mem(priv,
-                                          disable_ptr + (i * sizeof(u32)),
-                                          evt_disable[i]);
-
-       } else {
-               IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n");
-               IWL_DEBUG_INFO(priv, "  by writing \"1\"s into disable bitmap\n");
-               IWL_DEBUG_INFO(priv, "  in SRAM at 0x%x, size %d u32s\n",
-                              disable_ptr, array_size);
-       }
-
-}
-
-static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
-{
-       int idx;
-
-       for (idx = 0; idx < IWL_RATE_COUNT_3945; idx++)
-               if (iwl3945_rates[idx].plcp == plcp)
-                       return idx;
-       return -1;
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
-
-static const char *iwl3945_get_tx_fail_reason(u32 status)
-{
-       switch (status & TX_STATUS_MSK) {
-       case TX_3945_STATUS_SUCCESS:
-               return "SUCCESS";
-               TX_STATUS_ENTRY(SHORT_LIMIT);
-               TX_STATUS_ENTRY(LONG_LIMIT);
-               TX_STATUS_ENTRY(FIFO_UNDERRUN);
-               TX_STATUS_ENTRY(MGMNT_ABORT);
-               TX_STATUS_ENTRY(NEXT_FRAG);
-               TX_STATUS_ENTRY(LIFE_EXPIRE);
-               TX_STATUS_ENTRY(DEST_PS);
-               TX_STATUS_ENTRY(ABORTED);
-               TX_STATUS_ENTRY(BT_RETRY);
-               TX_STATUS_ENTRY(STA_INVALID);
-               TX_STATUS_ENTRY(FRAG_DROPPED);
-               TX_STATUS_ENTRY(TID_DISABLE);
-               TX_STATUS_ENTRY(FRAME_FLUSHED);
-               TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
-               TX_STATUS_ENTRY(TX_LOCKED);
-               TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
-       }
-
-       return "UNKNOWN";
-}
-#else
-static inline const char *iwl3945_get_tx_fail_reason(u32 status)
-{
-       return "";
-}
-#endif
-
-/*
- * Get the previous ieee rate from the rate scale table.
- * For A and B modes we need to override the prev
- * value.
- */
-int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
-{
-       int next_rate = iwl3945_get_prev_ieee_rate(rate);
-
-       switch (priv->band) {
-       case IEEE80211_BAND_5GHZ:
-               if (rate == IWL_RATE_12M_INDEX)
-                       next_rate = IWL_RATE_9M_INDEX;
-               else if (rate == IWL_RATE_6M_INDEX)
-                       next_rate = IWL_RATE_6M_INDEX;
-               break;
-       case IEEE80211_BAND_2GHZ:
-               if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
-                   iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
-                       if (rate == IWL_RATE_11M_INDEX)
-                               next_rate = IWL_RATE_5M_INDEX;
-               }
-               break;
-
-       default:
-               break;
-       }
-
-       return next_rate;
-}
-
-
-/**
- * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
- *
- * When FW advances 'R' index, all entries between old and new 'R' index
- * need to be reclaimed. As a result, some free space forms. If there is
- * enough free space (> low mark), wake the stack that feeds us.
- */
-static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
-                                    int txq_id, int index)
-{
-       struct iwl_tx_queue *txq = &priv->txq[txq_id];
-       struct iwl_queue *q = &txq->q;
-       struct iwl_tx_info *tx_info;
-
-       BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);
-
-       for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
-               q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
-               tx_info = &txq->txb[txq->q.read_ptr];
-               ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
-               tx_info->skb = NULL;
-               priv->cfg->ops->lib->txq_free_tfd(priv, txq);
-       }
-
-       if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) &&
-                       (txq_id != IWL39_CMD_QUEUE_NUM) &&
-                       priv->mac80211_registered)
-               iwl_wake_queue(priv, txq);
-}
-
-/**
- * iwl3945_rx_reply_tx - Handle Tx response
- */
-static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       u16 sequence = le16_to_cpu(pkt->hdr.sequence);
-       int txq_id = SEQ_TO_QUEUE(sequence);
-       int index = SEQ_TO_INDEX(sequence);
-       struct iwl_tx_queue *txq = &priv->txq[txq_id];
-       struct ieee80211_tx_info *info;
-       struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
-       u32  status = le32_to_cpu(tx_resp->status);
-       int rate_idx;
-       int fail;
-
-       if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
-               IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
-                         "is out of range [0-%d] %d %d\n", txq_id,
-                         index, txq->q.n_bd, txq->q.write_ptr,
-                         txq->q.read_ptr);
-               return;
-       }
-
-       txq->time_stamp = jiffies;
-       info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
-       ieee80211_tx_info_clear_status(info);
-
-       /* Fill the MRR chain with some info about on-chip retransmissions */
-       rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate);
-       if (info->band == IEEE80211_BAND_5GHZ)
-               rate_idx -= IWL_FIRST_OFDM_RATE;
-
-       fail = tx_resp->failure_frame;
-
-       info->status.rates[0].idx = rate_idx;
-       info->status.rates[0].count = fail + 1; /* add final attempt */
-
-       /* tx_status->rts_retry_count = tx_resp->failure_rts; */
-       info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
-                               IEEE80211_TX_STAT_ACK : 0;
-
-       IWL_DEBUG_TX(priv, "Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
-                       txq_id, iwl3945_get_tx_fail_reason(status), status,
-                       tx_resp->rate, tx_resp->failure_frame);
-
-       IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index);
-       iwl3945_tx_queue_reclaim(priv, txq_id, index);
-
-       if (status & TX_ABORT_REQUIRED_MSK)
-               IWL_ERR(priv, "TODO:  Implement Tx ABORT REQUIRED!!!\n");
-}
-
-
-
-/*****************************************************************************
- *
- * Intel PRO/Wireless 3945ABG/BG Network Connection
- *
- *  RX handler implementations
- *
- *****************************************************************************/
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-/*
- *  Based on the assumption that all statistics counters are in DWORDs.
- *  FIXME: this function is for debugging and does not handle
- *  counter roll-over.
- */
-static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
-                                           __le32 *stats)
-{
-       int i;
-       __le32 *prev_stats;
-       u32 *accum_stats;
-       u32 *delta, *max_delta;
-
-       prev_stats = (__le32 *)&priv->_3945.statistics;
-       accum_stats = (u32 *)&priv->_3945.accum_statistics;
-       delta = (u32 *)&priv->_3945.delta_statistics;
-       max_delta = (u32 *)&priv->_3945.max_delta;
-
-       for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics);
-            i += sizeof(__le32), stats++, prev_stats++, delta++,
-            max_delta++, accum_stats++) {
-               if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
-                       *delta = (le32_to_cpu(*stats) -
-                               le32_to_cpu(*prev_stats));
-                       *accum_stats += *delta;
-                       if (*delta > *max_delta)
-                               *max_delta = *delta;
-               }
-       }
-
-       /* reset accumulative statistics for "no-counter" type statistics */
-       priv->_3945.accum_statistics.general.temperature =
-               priv->_3945.statistics.general.temperature;
-       priv->_3945.accum_statistics.general.ttl_timestamp =
-               priv->_3945.statistics.general.ttl_timestamp;
-}
-#endif
-
-void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
-               struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
-       IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
-                    (int)sizeof(struct iwl3945_notif_statistics),
-                    le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-       iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
-#endif
-       iwl_recover_from_statistics(priv, pkt);
-
-       memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
-}
-
-void iwl3945_reply_statistics(struct iwl_priv *priv,
-                             struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       __le32 *flag = (__le32 *)&pkt->u.raw;
-
-       if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-               memset(&priv->_3945.accum_statistics, 0,
-                       sizeof(struct iwl3945_notif_statistics));
-               memset(&priv->_3945.delta_statistics, 0,
-                       sizeof(struct iwl3945_notif_statistics));
-               memset(&priv->_3945.max_delta, 0,
-                       sizeof(struct iwl3945_notif_statistics));
-#endif
-               IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
-       }
-       iwl3945_hw_rx_statistics(priv, rxb);
-}
-
-/******************************************************************************
- *
- * Misc. internal state and helper functions
- *
- ******************************************************************************/
-
-/* This is necessary only for a number of statistics, see the caller. */
-static int iwl3945_is_network_packet(struct iwl_priv *priv,
-               struct ieee80211_hdr *header)
-{
-       /* Filter incoming packets to determine if they are targeted toward
-        * this network, discarding packets coming from ourselves */
-       switch (priv->iw_mode) {
-       case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source    | BSSID */
-               /* packets to our IBSS update information */
-               return !compare_ether_addr(header->addr3, priv->bssid);
-       case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
-               /* packets from our AP (matching BSSID) update information */
-               return !compare_ether_addr(header->addr2, priv->bssid);
-       default:
-               return 1;
-       }
-}
-
-static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
-                                  struct iwl_rx_mem_buffer *rxb,
-                                  struct ieee80211_rx_status *stats)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
-       struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
-       struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
-       u16 len = le16_to_cpu(rx_hdr->len);
-       struct sk_buff *skb;
-       __le16 fc = hdr->frame_control;
-
-       /* Sanity check: drop frames too large for the allocated RX buffer */
-       if (unlikely(len + IWL39_RX_FRAME_SIZE >
-                    PAGE_SIZE << priv->hw_params.rx_page_order)) {
-               IWL_DEBUG_DROP(priv, "Corruption detected!\n");
-               return;
-       }
-
-       /* We only process data packets if the interface is open */
-       if (unlikely(!priv->is_open)) {
-               IWL_DEBUG_DROP_LIMIT(priv,
-                       "Dropping packet while interface is not open.\n");
-               return;
-       }
-
-       skb = dev_alloc_skb(128);
-       if (!skb) {
-               IWL_ERR(priv, "dev_alloc_skb failed\n");
-               return;
-       }
-
-       if (!iwl3945_mod_params.sw_crypto)
-               iwl_set_decrypted_flag(priv,
-                                      (struct ieee80211_hdr *)rxb_addr(rxb),
-                                      le32_to_cpu(rx_end->status), stats);
-
-       skb_add_rx_frag(skb, 0, rxb->page,
-                       (void *)rx_hdr->payload - (void *)pkt, len);
-
-       iwl_update_stats(priv, false, fc, len);
-       memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-
-       ieee80211_rx(priv->hw, skb);
-       priv->alloc_rxb_page--;
-       rxb->page = NULL;
-}
-
-#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
-
-static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb)
-{
-       struct ieee80211_hdr *header;
-       struct ieee80211_rx_status rx_status;
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
-       struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
-       struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
-       u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
-       u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff);
-       u8 network_packet;
-
-       rx_status.flag = 0;
-       rx_status.mactime = le64_to_cpu(rx_end->timestamp);
-       rx_status.freq =
-               ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel));
-       rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
-                               IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
-
-       rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
-       if (rx_status.band == IEEE80211_BAND_5GHZ)
-               rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
-
-       rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) &
-                                       RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
-
-       /* set the preamble flag if appropriate */
-       if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
-               rx_status.flag |= RX_FLAG_SHORTPRE;
-
-       if ((unlikely(rx_stats->phy_count > 20))) {
-               IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
-                               rx_stats->phy_count);
-               return;
-       }
-
-       if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR)
-           || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
-               IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
-               return;
-       }
-
-       /* Convert 3945's rssi indicator to dBm */
-       rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;
-
-       IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n",
-                       rx_status.signal, rx_stats_sig_avg,
-                       rx_stats_noise_diff);
-
-       header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
-
-       network_packet = iwl3945_is_network_packet(priv, header);
-
-       IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
-                             network_packet ? '*' : ' ',
-                             le16_to_cpu(rx_hdr->channel),
-                             rx_status.signal, rx_status.signal,
-                             rx_status.rate_idx);
-
-       iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header);
-
-       if (network_packet) {
-               priv->_3945.last_beacon_time =
-                       le32_to_cpu(rx_end->beacon_timestamp);
-               priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
-               priv->_3945.last_rx_rssi = rx_status.signal;
-       }
-
-       iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
-}
-
-int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
-                                    struct iwl_tx_queue *txq,
-                                    dma_addr_t addr, u16 len, u8 reset, u8 pad)
-{
-       int count;
-       struct iwl_queue *q;
-       struct iwl3945_tfd *tfd, *tfd_tmp;
-
-       q = &txq->q;
-       tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
-       tfd = &tfd_tmp[q->write_ptr];
-
-       if (reset)
-               memset(tfd, 0, sizeof(*tfd));
-
-       count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
-
-       if ((count >= NUM_TFD_CHUNKS) || (count < 0)) {
-               IWL_ERR(priv, "Error can not send more than %d chunks\n",
-                         NUM_TFD_CHUNKS);
-               return -EINVAL;
-       }
-
-       tfd->tbs[count].addr = cpu_to_le32(addr);
-       tfd->tbs[count].len = cpu_to_le32(len);
-
-       count++;
-
-       tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) |
-                                        TFD_CTL_PAD_SET(pad));
-
-       return 0;
-}
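A rough usage sketch of the attach helper above, assuming the usual two-chunk Tx layout (command header plus payload); the buffer names and lengths (cmd_phys, firstlen, skb_phys, secondlen) are hypothetical:

    /* first chunk: reset the TFD, chunk count goes 0 -> 1 */
    iwl3945_hw_txq_attach_buf_to_tfd(priv, txq, cmd_phys, firstlen, 1, pad);
    /* second chunk: append, chunk count goes 1 -> 2, pad bits re-written */
    iwl3945_hw_txq_attach_buf_to_tfd(priv, txq, skb_phys, secondlen, 0, pad);
    /* control_flags now reads back as TFD_CTL_COUNT_SET(2) | TFD_CTL_PAD_SET(pad) */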
-
-/**
- * iwl3945_hw_txq_free_tfd - Free one TFD, the one at index [txq->q.read_ptr]
- *
- * Does NOT advance any indexes
- */
-void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
-       struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
-       int index = txq->q.read_ptr;
-       struct iwl3945_tfd *tfd = &tfd_tmp[index];
-       struct pci_dev *dev = priv->pci_dev;
-       int i;
-       int counter;
-
-       /* sanity check */
-       counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
-       if (counter > NUM_TFD_CHUNKS) {
-               IWL_ERR(priv, "Too many chunks: %i\n", counter);
-               /* @todo issue fatal error, it is quite serious situation */
-               return;
-       }
-
-       /* Unmap tx_cmd */
-       if (counter)
-               pci_unmap_single(dev,
-                               dma_unmap_addr(&txq->meta[index], mapping),
-                               dma_unmap_len(&txq->meta[index], len),
-                               PCI_DMA_TODEVICE);
-
-       /* unmap chunks if any */
-
-       for (i = 1; i < counter; i++)
-               pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
-                        le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE);
-
-       /* free SKB */
-       if (txq->txb) {
-               struct sk_buff *skb;
-
-               skb = txq->txb[txq->q.read_ptr].skb;
-
-               /* can be called from irqs-disabled context */
-               if (skb) {
-                       dev_kfree_skb_any(skb);
-                       txq->txb[txq->q.read_ptr].skb = NULL;
-               }
-       }
-}
-
-/**
- * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD
- */
-void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
-                                 struct iwl_device_cmd *cmd,
-                                 struct ieee80211_tx_info *info,
-                                 struct ieee80211_hdr *hdr,
-                                 int sta_id, int tx_id)
-{
-       u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value;
-       u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT_3945 - 1);
-       u16 rate_mask;
-       int rate;
-       u8 rts_retry_limit;
-       u8 data_retry_limit;
-       __le32 tx_flags;
-       __le16 fc = hdr->frame_control;
-       struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
-
-       rate = iwl3945_rates[rate_index].plcp;
-       tx_flags = tx_cmd->tx_flags;
-
-       /* We need to figure out how to get the sta->supp_rates while
-        * in this running context */
-       rate_mask = IWL_RATES_MASK;
-
-       /* Set retry limit on DATA packets and Probe Responses */
-       if (ieee80211_is_probe_resp(fc))
-               data_retry_limit = 3;
-       else
-               data_retry_limit = IWL_DEFAULT_TX_RETRY;
-       tx_cmd->data_retry_limit = data_retry_limit;
-
-       if (tx_id >= IWL39_CMD_QUEUE_NUM)
-               rts_retry_limit = 3;
-       else
-               rts_retry_limit = 7;
-
-       if (data_retry_limit < rts_retry_limit)
-               rts_retry_limit = data_retry_limit;
-       tx_cmd->rts_retry_limit = rts_retry_limit;
-
-       tx_cmd->rate = rate;
-       tx_cmd->tx_flags = tx_flags;
-
-       /* OFDM */
-       tx_cmd->supp_rates[0] =
-          ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;
-
-       /* CCK */
-       tx_cmd->supp_rates[1] = (rate_mask & 0xF);
-
-       IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
-                      "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
-                      tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags),
-                      tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
-}
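As a worked illustration of the supp_rates packing above (assuming the 3945 rate mask keeps the four CCK rates in bits 0-3 and the eight OFDM rates in bits 4-11, which is what the shift by IWL_FIRST_OFDM_RATE implies; the 0xFFF value is hypothetical):

    /*
     * rate_mask = 0xFFF  (all twelve rates allowed)
     * supp_rates[0] = (0xFFF & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE = 0xFF
     * supp_rates[1] =  0xFFF & 0xF                                         = 0x0F
     */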
-
-static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
-{
-       unsigned long flags_spin;
-       struct iwl_station_entry *station;
-
-       if (sta_id == IWL_INVALID_STATION)
-               return IWL_INVALID_STATION;
-
-       spin_lock_irqsave(&priv->sta_lock, flags_spin);
-       station = &priv->stations[sta_id];
-
-       station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
-       station->sta.rate_n_flags = cpu_to_le16(tx_rate);
-       station->sta.mode = STA_CONTROL_MODIFY_MSK;
-       iwl_send_add_sta(priv, &station->sta, CMD_ASYNC);
-       spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
-       IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
-                       sta_id, tx_rate);
-       return sta_id;
-}
-
-static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
-{
-/*
- * (for documentation purposes)
- * to set power to V_AUX, do
-
-               if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
-                       iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
-                                       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
-                                       ~APMG_PS_CTRL_MSK_PWR_SRC);
-
-                       iwl_poll_bit(priv, CSR_GPIO_IN,
-                                    CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
-                                    CSR_GPIO_IN_BIT_AUX_POWER, 5000);
-               }
- */
-
-       iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
-                       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
-                       ~APMG_PS_CTRL_MSK_PWR_SRC);
-
-       iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
-                    CSR_GPIO_IN_BIT_AUX_POWER, 5000);  /* uS */
-}
-
-static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
-       iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
-       iwl_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
-       iwl_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
-       iwl_write_direct32(priv, FH39_RCSR_CONFIG(0),
-               FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
-               FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
-               FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
-               FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
-               (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
-               FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
-               (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
-               FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
-
-       /* fake read to flush all prev I/O */
-       iwl_read_direct32(priv, FH39_RSSR_CTRL);
-
-       return 0;
-}
-
-static int iwl3945_tx_reset(struct iwl_priv *priv)
-{
-
-       /* bypass mode */
-       iwl_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
-
-       /* RA 0 is active */
-       iwl_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);
-
-       /* all 6 FIFOs are active */
-       iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);
-
-       iwl_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
-       iwl_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
-       iwl_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
-       iwl_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
-
-       iwl_write_direct32(priv, FH39_TSSR_CBB_BASE,
-                            priv->_3945.shared_phys);
-
-       iwl_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
-               FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
-               FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
-               FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
-               FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
-               FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
-               FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
-               FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
-
-       return 0;
-}
-
-/**
- * iwl3945_txq_ctx_reset - Reset TX queue context
- *
- * Destroys all DMA structures and initializes them again
- */
-static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
-{
-       int rc;
-       int txq_id, slots_num;
-
-       iwl3945_hw_txq_ctx_free(priv);
-
-       /* allocate tx queue structure */
-       rc = iwl_alloc_txq_mem(priv);
-       if (rc)
-               return rc;
-
-       /* Tx CMD queue */
-       rc = iwl3945_tx_reset(priv);
-       if (rc)
-               goto error;
-
-       /* Tx queue(s) */
-       for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
-               slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
-                               TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
-               rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
-                                      txq_id);
-               if (rc) {
-                       IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
-                       goto error;
-               }
-       }
-
-       return rc;
-
- error:
-       iwl3945_hw_txq_ctx_free(priv);
-       return rc;
-}
-
-/*
- * Start up 3945's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_apm_stop())
- * NOTE:  This does not load uCode nor start the embedded processor
- */
-static int iwl3945_apm_init(struct iwl_priv *priv)
-{
-       int ret = iwl_apm_init(priv);
-
-       /* Clear APMG (NIC's internal power management) interrupts */
-       iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
-       iwl_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
-
-       /* Reset radio chip */
-       iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
-       udelay(5);
-       iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
-
-       return ret;
-}
-
-static void iwl3945_nic_config(struct iwl_priv *priv)
-{
-       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-       unsigned long flags;
-       u8 rev_id = 0;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* Determine HW type */
-       pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
-
-       IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
-
-       if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
-               IWL_DEBUG_INFO(priv, "RTP type\n");
-       else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
-               IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
-               iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-                           CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
-       } else {
-               IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n");
-               iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-                           CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
-       }
-
-       if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
-               IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n");
-               iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-                           CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
-       } else
-               IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n");
-
-       if ((eeprom->board_revision & 0xF0) == 0xD0) {
-               IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
-                              eeprom->board_revision);
-               iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-                           CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
-       } else {
-               IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
-                              eeprom->board_revision);
-               iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
-                             CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
-       }
-
-       if (eeprom->almgor_m_version <= 1) {
-               iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-                           CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
-               IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n",
-                              eeprom->almgor_m_version);
-       } else {
-               IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n",
-                              eeprom->almgor_m_version);
-               iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-                           CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
-       }
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
-               IWL_DEBUG_RF_KILL(priv, "SW RF KILL supported in EEPROM.\n");
-
-       if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
-               IWL_DEBUG_RF_KILL(priv, "HW RF KILL supported in EEPROM.\n");
-}
-
-int iwl3945_hw_nic_init(struct iwl_priv *priv)
-{
-       int rc;
-       unsigned long flags;
-       struct iwl_rx_queue *rxq = &priv->rxq;
-
-       spin_lock_irqsave(&priv->lock, flags);
-       priv->cfg->ops->lib->apm_ops.init(priv);
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       iwl3945_set_pwr_vmain(priv);
-
-       priv->cfg->ops->lib->apm_ops.config(priv);
-
-       /* Allocate the RX queue, or reset if it is already allocated */
-       if (!rxq->bd) {
-               rc = iwl_rx_queue_alloc(priv);
-               if (rc) {
-                       IWL_ERR(priv, "Unable to initialize Rx queue\n");
-                       return -ENOMEM;
-               }
-       } else
-               iwl3945_rx_queue_reset(priv, rxq);
-
-       iwl3945_rx_replenish(priv);
-
-       iwl3945_rx_init(priv, rxq);
-
-       /* Look at using this instead:
-       rxq->need_update = 1;
-       iwl_rx_queue_update_write_ptr(priv, rxq);
-       */
-
-       iwl_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
-
-       rc = iwl3945_txq_ctx_reset(priv);
-       if (rc)
-               return rc;
-
-       set_bit(STATUS_INIT, &priv->status);
-
-       return 0;
-}
-
-/**
- * iwl3945_hw_txq_ctx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
-{
-       int txq_id;
-
-       /* Tx queues */
-       if (priv->txq)
-               for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
-                    txq_id++)
-                       if (txq_id == IWL39_CMD_QUEUE_NUM)
-                               iwl_cmd_queue_free(priv);
-                       else
-                               iwl_tx_queue_free(priv, txq_id);
-
-       /* free tx queue structure */
-       iwl_free_txq_mem(priv);
-}
-
-void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
-{
-       int txq_id;
-
-       /* stop SCD */
-       iwl_write_prph(priv, ALM_SCD_MODE_REG, 0);
-       iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
-
-       /* reset TFD queues */
-       for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
-               iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
-               iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
-                               FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
-                               1000);
-       }
-
-       iwl3945_hw_txq_ctx_free(priv);
-}
-
-/**
- * iwl3945_hw_reg_adjust_power_by_temp
- * return index delta into power gain settings table
- */
-static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
-{
-       return (new_reading - old_reading) * (-11) / 100;
-}
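A worked example of the formula above, using hypothetical raw readings; each returned step corresponds to one entry (1/2 dB) in the gain table further down:

    /*
     * new_reading = -34, old_reading (factory reference) = -84
     * delta = (-34 - (-84)) * (-11) / 100 = (50 * -11) / 100 = -5
     * i.e. a chip running 50 raw units warmer than at calibration moves
     * 5 entries toward the top (higher-power end) of the gain table,
     * roughly 2.5 dB more gain.
     */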
-
-/**
- * iwl3945_hw_reg_temp_out_of_range - Keep temperature in sane range
- */
-static inline int iwl3945_hw_reg_temp_out_of_range(int temperature)
-{
-       return ((temperature < -260) || (temperature > 25)) ? 1 : 0;
-}
-
-int iwl3945_hw_get_temperature(struct iwl_priv *priv)
-{
-       return iwl_read32(priv, CSR_UCODE_DRV_GP2);
-}
-
-/**
- * iwl3945_hw_reg_txpower_get_temperature
- * get the current temperature by reading from NIC
- */
-static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
-{
-       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-       int temperature;
-
-       temperature = iwl3945_hw_get_temperature(priv);
-
-       /* The driver's valid raw range is -260 to +25;
-        *   the human-readable value (raw + IWL_TEMP_CONVERT) is 0 to +285 */
-       IWL_DEBUG_INFO(priv, "Temperature: %d\n", temperature + IWL_TEMP_CONVERT);
-
-       /* handle insane temp reading */
-       if (iwl3945_hw_reg_temp_out_of_range(temperature)) {
-               IWL_ERR(priv, "Error bad temperature value  %d\n", temperature);
-
-               /* if the last reading was extremely hot,
-                *   substitute the 3rd band/group's temp measured at factory */
-               if (priv->last_temperature > 100)
-                       temperature = eeprom->groups[2].temperature;
-               else /* else use most recent "sane" value from driver */
-                       temperature = priv->last_temperature;
-       }
-
-       return temperature;     /* raw, not "human readable" */
-}
-
-/* Adjust Txpower only if temperature variance is greater than threshold.
- *
- * This threshold is lower than the 9 degrees used by older versions. */
-#define IWL_TEMPERATURE_LIMIT_TIMER   6
-
-/**
- * is_temp_calib_needed - determines if new calibration is needed
- *
- * Records the new temperature in priv->temperature.
- * Replaces priv->last_temperature *only* if calibration is needed
- *    (assumes the caller will actually do the calibration!). */
-static int is_temp_calib_needed(struct iwl_priv *priv)
-{
-       int temp_diff;
-
-       priv->temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
-       temp_diff = priv->temperature - priv->last_temperature;
-
-       /* get absolute value */
-       if (temp_diff < 0) {
-               IWL_DEBUG_POWER(priv, "Getting cooler, delta %d,\n", temp_diff);
-               temp_diff = -temp_diff;
-       } else if (temp_diff == 0)
-               IWL_DEBUG_POWER(priv, "Same temp,\n");
-       else
-               IWL_DEBUG_POWER(priv, "Getting warmer, delta %d,\n", temp_diff);
-
-       /* if we don't need calibration, *don't* update last_temperature */
-       if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) {
-               IWL_DEBUG_POWER(priv, "Timed thermal calib not needed\n");
-               return 0;
-       }
-
-       IWL_DEBUG_POWER(priv, "Timed thermal calib needed\n");
-
-       /* assume that caller will actually do calib ...
-        *   update the "last temperature" value */
-       priv->last_temperature = priv->temperature;
-       return 1;
-}
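A short worked example of the check above, with hypothetical raw temperatures and the threshold of 6 defined just before it:

    /*
     * last_temperature = -40, new reading = -33  ->  |diff| = 7 >= 6
     *   => calibration needed, last_temperature updated to -33
     * last_temperature = -40, new reading = -36  ->  |diff| = 4 < 6
     *   => no calibration, last_temperature left at -40
     */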
-
-#define IWL_MAX_GAIN_ENTRIES 78
-#define IWL_CCK_FROM_OFDM_POWER_DIFF  -5
-#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10)
-
-/* radio and DSP power table, each step is 1/2 dB.
- * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */
-static struct iwl3945_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = {
-       {
-        {251, 127},            /* 2.4 GHz, highest power */
-        {251, 127},
-        {251, 127},
-        {251, 127},
-        {251, 125},
-        {251, 110},
-        {251, 105},
-        {251, 98},
-        {187, 125},
-        {187, 115},
-        {187, 108},
-        {187, 99},
-        {243, 119},
-        {243, 111},
-        {243, 105},
-        {243, 97},
-        {243, 92},
-        {211, 106},
-        {211, 100},
-        {179, 120},
-        {179, 113},
-        {179, 107},
-        {147, 125},
-        {147, 119},
-        {147, 112},
-        {147, 106},
-        {147, 101},
-        {147, 97},
-        {147, 91},
-        {115, 107},
-        {235, 121},
-        {235, 115},
-        {235, 109},
-        {203, 127},
-        {203, 121},
-        {203, 115},
-        {203, 108},
-        {203, 102},
-        {203, 96},
-        {203, 92},
-        {171, 110},
-        {171, 104},
-        {171, 98},
-        {139, 116},
-        {227, 125},
-        {227, 119},
-        {227, 113},
-        {227, 107},
-        {227, 101},
-        {227, 96},
-        {195, 113},
-        {195, 106},
-        {195, 102},
-        {195, 95},
-        {163, 113},
-        {163, 106},
-        {163, 102},
-        {163, 95},
-        {131, 113},
-        {131, 106},
-        {131, 102},
-        {131, 95},
-        {99, 113},
-        {99, 106},
-        {99, 102},
-        {99, 95},
-        {67, 113},
-        {67, 106},
-        {67, 102},
-        {67, 95},
-        {35, 113},
-        {35, 106},
-        {35, 102},
-        {35, 95},
-        {3, 113},
-        {3, 106},
-        {3, 102},
-        {3, 95} },             /* 2.4 GHz, lowest power */
-       {
-        {251, 127},            /* 5.x GHz, highest power */
-        {251, 120},
-        {251, 114},
-        {219, 119},
-        {219, 101},
-        {187, 113},
-        {187, 102},
-        {155, 114},
-        {155, 103},
-        {123, 117},
-        {123, 107},
-        {123, 99},
-        {123, 92},
-        {91, 108},
-        {59, 125},
-        {59, 118},
-        {59, 109},
-        {59, 102},
-        {59, 96},
-        {59, 90},
-        {27, 104},
-        {27, 98},
-        {27, 92},
-        {115, 118},
-        {115, 111},
-        {115, 104},
-        {83, 126},
-        {83, 121},
-        {83, 113},
-        {83, 105},
-        {83, 99},
-        {51, 118},
-        {51, 111},
-        {51, 104},
-        {51, 98},
-        {19, 116},
-        {19, 109},
-        {19, 102},
-        {19, 98},
-        {19, 93},
-        {171, 113},
-        {171, 107},
-        {171, 99},
-        {139, 120},
-        {139, 113},
-        {139, 107},
-        {139, 99},
-        {107, 120},
-        {107, 113},
-        {107, 107},
-        {107, 99},
-        {75, 120},
-        {75, 113},
-        {75, 107},
-        {75, 99},
-        {43, 120},
-        {43, 113},
-        {43, 107},
-        {43, 99},
-        {11, 120},
-        {11, 113},
-        {11, 107},
-        {11, 99},
-        {131, 107},
-        {131, 99},
-        {99, 120},
-        {99, 113},
-        {99, 107},
-        {99, 99},
-        {67, 120},
-        {67, 113},
-        {67, 107},
-        {67, 99},
-        {35, 120},
-        {35, 113},
-        {35, 107},
-        {35, 99},
-        {3, 120} }             /* 5.x GHz, lowest power */
-};
-
-static inline u8 iwl3945_hw_reg_fix_power_index(int index)
-{
-       if (index < 0)
-               return 0;
-       if (index >= IWL_MAX_GAIN_ENTRIES)
-               return IWL_MAX_GAIN_ENTRIES - 1;
-       return (u8) index;
-}
-
-/* Kick off thermal recalibration check every 60 seconds */
-#define REG_RECALIB_PERIOD (60)
-
-/**
- * iwl3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
- *
- * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
- * or 6 Mbit (OFDM) rates.
- */
-static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index,
-                              s32 rate_index, const s8 *clip_pwrs,
-                              struct iwl_channel_info *ch_info,
-                              int band_index)
-{
-       struct iwl3945_scan_power_info *scan_power_info;
-       s8 power;
-       u8 power_index;
-
-       scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index];
-
-       /* use this channel group's 6Mbit clipping/saturation pwr,
-        *   but cap at regulatory scan power restriction (set during init
-        *   based on eeprom channel data) for this channel.  */
-       power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);
-
-       /* further limit to user's max power preference.
-        * FIXME:  Other spectrum management power limitations do not
-        *   seem to apply?? */
-       power = min(power, priv->tx_power_user_lmt);
-       scan_power_info->requested_power = power;
-
-       /* find difference between new scan *power* and current "normal"
-        *   Tx *power* for 6Mb.  Use this difference (x2) to adjust the
-        *   current "normal" temperature-compensated Tx power *index* for
-        *   this rate (1Mb or 6Mb) to yield new temp-compensated scan power
-        *   *index*. */
-       power_index = ch_info->power_info[rate_index].power_table_index
-           - (power - ch_info->power_info
-              [IWL_RATE_6M_INDEX_TABLE].requested_power) * 2;
-
-       /* store reference index that we use when adjusting *all* scan
-        *   powers.  So we can accommodate user (all channel) or spectrum
-        *   management (single channel) power changes "between" temperature
-        *   feedback compensation procedures.
-        * don't force fit this reference index into gain table; it may be a
-        *   negative number.  This will help avoid errors when we're at
-        *   the lower bounds (highest gains, for warmest temperatures)
-        *   of the table. */
-
-       /* don't exceed table bounds for "real" setting */
-       power_index = iwl3945_hw_reg_fix_power_index(power_index);
-
-       scan_power_info->power_table_index = power_index;
-       scan_power_info->tpc.tx_gain =
-           power_gain_table[band_index][power_index].tx_gain;
-       scan_power_info->tpc.dsp_atten =
-           power_gain_table[band_index][power_index].dsp_atten;
-}
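A worked example of the scan-power index math above (all numbers hypothetical): each dB of difference moves the index by two entries, since the gain table steps in 1/2 dB.

    /*
     * 6M normal requested_power = 14 dBm, its power_table_index = 20
     * capped scan power          = 12 dBm
     * power_index = 20 - (12 - 14) * 2 = 20 + 4 = 24
     * i.e. 4 entries deeper into the table, ~2 dB less output for scans.
     */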
-
-/**
- * iwl3945_send_tx_power - fill in Tx Power command with gain settings
- *
- * Configures power settings for all rates for the current channel,
- * using values from the channel info struct, and sends them to the NIC
- */
-static int iwl3945_send_tx_power(struct iwl_priv *priv)
-{
-       int rate_idx, i;
-       const struct iwl_channel_info *ch_info = NULL;
-       struct iwl3945_txpowertable_cmd txpower = {
-               .channel = priv->contexts[IWL_RXON_CTX_BSS].active.channel,
-       };
-       u16 chan;
-
-       if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
-                     "TX Power requested while scanning!\n"))
-               return -EAGAIN;
-
-       chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
-
-       txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
-       ch_info = iwl_get_channel_info(priv, priv->band, chan);
-       if (!ch_info) {
-               IWL_ERR(priv,
-                       "Failed to get channel info for channel %d [%d]\n",
-                       chan, priv->band);
-               return -EINVAL;
-       }
-
-       if (!is_channel_valid(ch_info)) {
-               IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on "
-                               "non-Tx channel.\n");
-               return 0;
-       }
-
-       /* fill cmd with power settings for all rates for current channel */
-       /* Fill OFDM rate */
-       for (rate_idx = IWL_FIRST_OFDM_RATE, i = 0;
-            rate_idx <= IWL39_LAST_OFDM_RATE; rate_idx++, i++) {
-
-               txpower.power[i].tpc = ch_info->power_info[i].tpc;
-               txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
-
-               IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
-                               le16_to_cpu(txpower.channel),
-                               txpower.band,
-                               txpower.power[i].tpc.tx_gain,
-                               txpower.power[i].tpc.dsp_atten,
-                               txpower.power[i].rate);
-       }
-       /* Fill CCK rates */
-       for (rate_idx = IWL_FIRST_CCK_RATE;
-            rate_idx <= IWL_LAST_CCK_RATE; rate_idx++, i++) {
-               txpower.power[i].tpc = ch_info->power_info[i].tpc;
-               txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
-
-               IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
-                               le16_to_cpu(txpower.channel),
-                               txpower.band,
-                               txpower.power[i].tpc.tx_gain,
-                               txpower.power[i].tpc.dsp_atten,
-                               txpower.power[i].rate);
-       }
-
-       return iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
-                               sizeof(struct iwl3945_txpowertable_cmd),
-                               &txpower);
-
-}
-
-/**
- * iwl3945_hw_reg_set_new_power - Configures power tables at new levels
- * @ch_info: Channel to update.  Uses power_info.requested_power.
- *
- * Replace requested_power and base_power_index ch_info fields for
- * one channel.
- *
- * Called if user or spectrum management changes power preferences.
- * Takes into account h/w and modulation limitations (clip power).
- *
- * This does *not* send anything to NIC, just sets up ch_info for one channel.
- *
- * NOTE: iwl3945_hw_reg_comp_txpower_temp() *must* be run after this to
- *      properly fill out the scan powers, and actual h/w gain settings,
- *      and send changes to NIC
- */
-static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv,
-                            struct iwl_channel_info *ch_info)
-{
-       struct iwl3945_channel_power_info *power_info;
-       int power_changed = 0;
-       int i;
-       const s8 *clip_pwrs;
-       int power;
-
-       /* Get this chnlgrp's rate-to-max/clip-powers table */
-       clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
-
-       /* Get this channel's rate-to-current-power settings table */
-       power_info = ch_info->power_info;
-
-       /* update OFDM Txpower settings */
-       for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE;
-            i++, ++power_info) {
-               int delta_idx;
-
-               /* limit new power to be no more than h/w capability */
-               power = min(ch_info->curr_txpow, clip_pwrs[i]);
-               if (power == power_info->requested_power)
-                       continue;
-
-               /* find difference between old and new requested powers,
-                *    update base (non-temp-compensated) power index */
-               delta_idx = (power - power_info->requested_power) * 2;
-               power_info->base_power_index -= delta_idx;
-
-               /* save new requested power value */
-               power_info->requested_power = power;
-
-               power_changed = 1;
-       }
-
-       /* update CCK Txpower settings, based on OFDM 12M setting ...
-        *    ... all CCK power settings for a given channel are the *same*. */
-       if (power_changed) {
-               power =
-                   ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
-                   requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF;
-
-               /* do all CCK rates' iwl3945_channel_power_info structures */
-               for (i = IWL_RATE_1M_INDEX_TABLE; i <= IWL_RATE_11M_INDEX_TABLE; i++) {
-                       power_info->requested_power = power;
-                       power_info->base_power_index =
-                           ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
-                           base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF;
-                       ++power_info;
-               }
-       }
-
-       return 0;
-}
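A worked example of the CCK derivation above, with hypothetical 12M values; IWL_CCK_FROM_OFDM_POWER_DIFF (-5 dB) pairs with IWL_CCK_FROM_OFDM_INDEX_DIFF (+10), i.e. ten half-dB steps:

    /*
     * 12M OFDM: requested_power = 15 dBm, base_power_index = 30
     * every CCK rate then gets:
     *   requested_power  = 15 + (-5) = 10 dBm
     *   base_power_index = 30 + 10   = 40   (5 dB lower = 10 half-dB steps)
     */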
-
-/**
- * iwl3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
- *
- * NOTE: Returned power limit may be less (but not more) than requested,
- *      based strictly on regulatory (eeprom and spectrum mgt) limitations
- *      (no consideration for h/w clipping limitations).
- */
-static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info)
-{
-       s8 max_power;
-
-#if 0
-       /* if we're using TGd limits, use lower of TGd or EEPROM */
-       if (ch_info->tgd_data.max_power != 0)
-               max_power = min(ch_info->tgd_data.max_power,
-                               ch_info->eeprom.max_power_avg);
-
-       /* else just use EEPROM limits */
-       else
-#endif
-               max_power = ch_info->eeprom.max_power_avg;
-
-       return min(max_power, ch_info->max_power_avg);
-}
-
-/**
- * iwl3945_hw_reg_comp_txpower_temp - Compensate for temperature
- *
- * Compensate txpower settings of *all* channels for temperature.
- * This only accounts for the difference between current temperature
- *   and the factory calibration temperatures, and bases the new settings
- *   on the channel's base_power_index.
- *
- * If RxOn is "associated", this sends the new Txpower to NIC!
- */
-static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
-{
-       struct iwl_channel_info *ch_info = NULL;
-       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-       int delta_index;
-       const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
-       u8 a_band;
-       u8 rate_index;
-       u8 scan_tbl_index;
-       u8 i;
-       int ref_temp;
-       int temperature = priv->temperature;
-
-       if (priv->disable_tx_power_cal ||
-           test_bit(STATUS_SCANNING, &priv->status)) {
-               /* do not perform tx power calibration */
-               return 0;
-       }
-       /* set up new Tx power info for each and every channel, 2.4 and 5.x */
-       for (i = 0; i < priv->channel_count; i++) {
-               ch_info = &priv->channel_info[i];
-               a_band = is_channel_a_band(ch_info);
-
-               /* Get this chnlgrp's factory calibration temperature */
-               ref_temp = (s16)eeprom->groups[ch_info->group_index].
-                   temperature;
-
-               /* get power index adjustment based on current and factory
-                * temps */
-               delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
-                                                             ref_temp);
-
-               /* set tx power value for all rates, OFDM and CCK */
-               for (rate_index = 0; rate_index < IWL_RATE_COUNT;
-                    rate_index++) {
-                       int power_idx =
-                           ch_info->power_info[rate_index].base_power_index;
-
-                       /* temperature compensate */
-                       power_idx += delta_index;
-
-                       /* stay within table range */
-                       power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
-                       ch_info->power_info[rate_index].
-                           power_table_index = (u8) power_idx;
-                       ch_info->power_info[rate_index].tpc =
-                           power_gain_table[a_band][power_idx];
-               }
-
-               /* Get this chnlgrp's rate-to-max/clip-powers table */
-               clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
-
-               /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
-               for (scan_tbl_index = 0;
-                    scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
-                       s32 actual_index = (scan_tbl_index == 0) ?
-                           IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
-                       iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
-                                          actual_index, clip_pwrs,
-                                          ch_info, a_band);
-               }
-       }
-
-       /* send Txpower command for current channel to ucode */
-       return priv->cfg->ops->lib->send_tx_power(priv);
-}
-
-int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
-{
-       struct iwl_channel_info *ch_info;
-       s8 max_power;
-       u8 a_band;
-       u8 i;
-
-       if (priv->tx_power_user_lmt == power) {
-               IWL_DEBUG_POWER(priv, "Requested Tx power same as current "
-                               "limit: %ddBm.\n", power);
-               return 0;
-       }
-
-       IWL_DEBUG_POWER(priv, "Setting upper limit clamp to %ddBm.\n", power);
-       priv->tx_power_user_lmt = power;
-
-       /* set up new Tx powers for each and every channel, 2.4 and 5.x */
-
-       for (i = 0; i < priv->channel_count; i++) {
-               ch_info = &priv->channel_info[i];
-               a_band = is_channel_a_band(ch_info);
-
-               /* find minimum power of all user and regulatory constraints
-                *    (does not consider h/w clipping limitations) */
-               max_power = iwl3945_hw_reg_get_ch_txpower_limit(ch_info);
-               max_power = min(power, max_power);
-               if (max_power != ch_info->curr_txpow) {
-                       ch_info->curr_txpow = max_power;
-
-                       /* this considers the h/w clipping limitations */
-                       iwl3945_hw_reg_set_new_power(priv, ch_info);
-               }
-       }
-
-       /* update txpower settings for all channels,
-        *   send to NIC if associated. */
-       is_temp_calib_needed(priv);
-       iwl3945_hw_reg_comp_txpower_temp(priv);
-
-       return 0;
-}
-
-static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
-                                  struct iwl_rxon_context *ctx)
-{
-       int rc = 0;
-       struct iwl_rx_packet *pkt;
-       struct iwl3945_rxon_assoc_cmd rxon_assoc;
-       struct iwl_host_cmd cmd = {
-               .id = REPLY_RXON_ASSOC,
-               .len = sizeof(rxon_assoc),
-               .flags = CMD_WANT_SKB,
-               .data = &rxon_assoc,
-       };
-       const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
-       const struct iwl_rxon_cmd *rxon2 = &ctx->active;
-
-       if ((rxon1->flags == rxon2->flags) &&
-           (rxon1->filter_flags == rxon2->filter_flags) &&
-           (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
-           (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
-               IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC.  Not resending.\n");
-               return 0;
-       }
-
-       rxon_assoc.flags = ctx->staging.flags;
-       rxon_assoc.filter_flags = ctx->staging.filter_flags;
-       rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
-       rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
-       rxon_assoc.reserved = 0;
-
-       rc = iwl_send_cmd_sync(priv, &cmd);
-       if (rc)
-               return rc;
-
-       pkt = (struct iwl_rx_packet *)cmd.reply_page;
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
-               rc = -EIO;
-       }
-
-       iwl_free_pages(priv, cmd.reply_page);
-
-       return rc;
-}
-
-/**
- * iwl3945_commit_rxon - commit staging_rxon to hardware
- *
- * The RXON command in staging_rxon is committed to the hardware and
- * the active_rxon structure is updated with the new data.  This
- * function correctly transitions out of the RXON_ASSOC_MSK state if
- * a HW tune is required based on the RXON structure changes.
- */
-int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-       /* cast away the const for active_rxon in this function */
-       struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active;
-       struct iwl3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
-       int rc = 0;
-       bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return -EINVAL;
-
-       if (!iwl_is_alive(priv))
-               return -1;
-
-       /* always get timestamp with Rx frame */
-       staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
-
-       /* select antenna */
-       staging_rxon->flags &=
-           ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
-       staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
-
-       rc = iwl_check_rxon_cmd(priv, ctx);
-       if (rc) {
-               IWL_ERR(priv, "Invalid RXON configuration.  Not committing.\n");
-               return -EINVAL;
-       }
-
-       /* If we don't need to send a full RXON, we can use
-        * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
-        * and other flags for the current radio configuration. */
-       if (!iwl_full_rxon_required(priv, &priv->contexts[IWL_RXON_CTX_BSS])) {
-               rc = iwl_send_rxon_assoc(priv,
-                                        &priv->contexts[IWL_RXON_CTX_BSS]);
-               if (rc) {
-                       IWL_ERR(priv, "Error setting RXON_ASSOC "
-                                 "configuration (%d).\n", rc);
-                       return rc;
-               }
-
-               memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
-
-               return 0;
-       }
-
-       /* If we are currently associated and the new config also wants
-        * the associated mask enabled, we must first clear the association
-        * bit from the active configuration before we apply the new config */
-       if (iwl_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
-               IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
-               active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-
-               /*
-                * reserved4 and 5 could have been filled by the iwlcore code.
-                * Let's clear them before pushing to the 3945.
-                */
-               active_rxon->reserved4 = 0;
-               active_rxon->reserved5 = 0;
-               rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
-                                     sizeof(struct iwl3945_rxon_cmd),
-                                     &priv->contexts[IWL_RXON_CTX_BSS].active);
-
-               /* If the mask clearing failed then we set
-                * active_rxon back to what it was previously */
-               if (rc) {
-                       active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
-                       IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
-                                 "configuration (%d).\n", rc);
-                       return rc;
-               }
-               iwl_clear_ucode_stations(priv,
-                                        &priv->contexts[IWL_RXON_CTX_BSS]);
-               iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
-       }
-
-       IWL_DEBUG_INFO(priv, "Sending RXON\n"
-                      "* with%s RXON_FILTER_ASSOC_MSK\n"
-                      "* channel = %d\n"
-                      "* bssid = %pM\n",
-                      (new_assoc ? "" : "out"),
-                      le16_to_cpu(staging_rxon->channel),
-                      staging_rxon->bssid_addr);
-
-       /*
-        * reserved4 and 5 could have been filled by the iwlcore code.
-        * Let's clear them before pushing to the 3945.
-        */
-       staging_rxon->reserved4 = 0;
-       staging_rxon->reserved5 = 0;
-
-       iwl_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
-
-       /* Apply the new configuration */
-       rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
-                             sizeof(struct iwl3945_rxon_cmd),
-                             staging_rxon);
-       if (rc) {
-               IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
-               return rc;
-       }
-
-       memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
-
-       if (!new_assoc) {
-               iwl_clear_ucode_stations(priv,
-                                        &priv->contexts[IWL_RXON_CTX_BSS]);
-               iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
-       }
-
-       /* If we issue a new RXON command which required a tune then we must
-        * send a new TXPOWER command or we won't be able to Tx any frames */
-       rc = priv->cfg->ops->lib->send_tx_power(priv);
-       if (rc) {
-               IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
-               return rc;
-       }
-
-       /* Init the hardware's rate fallback order based on the band */
-       rc = iwl3945_init_hw_rate_table(priv);
-       if (rc) {
-               IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
-               return -EIO;
-       }
-
-       return 0;
-}
-
-/**
- * iwl3945_reg_txpower_periodic - called when it is time to check our temperature.
- *
- * -- reset periodic timer
- * -- see if temp has changed enough to warrant re-calibration ... if so:
- *     -- correct coeffs for temp (can reset temp timer)
- *     -- save this temp as "last",
- *     -- send new set of gain settings to NIC
- * NOTE:  This should continue working, even when we're not associated,
- *   so we can keep our internal table of scan powers current. */
-void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
-{
-       /* This will kick in the "brute force"
-        * iwl3945_hw_reg_comp_txpower_temp() below */
-       if (!is_temp_calib_needed(priv))
-               goto reschedule;
-
-       /* Set up a new set of temp-adjusted TxPowers, send to NIC.
-        * This is based *only* on current temperature,
-        * ignoring any previous power measurements */
-       iwl3945_hw_reg_comp_txpower_temp(priv);
-
- reschedule:
-       queue_delayed_work(priv->workqueue,
-                          &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ);
-}
-
-static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
-{
-       struct iwl_priv *priv = container_of(work, struct iwl_priv,
-                                            _3945.thermal_periodic.work);
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       mutex_lock(&priv->mutex);
-       iwl3945_reg_txpower_periodic(priv);
-       mutex_unlock(&priv->mutex);
-}
-
-/**
- * iwl3945_hw_reg_get_ch_grp_index - find the channel-group index (0-4)
- *                                for the channel.
- *
- * This function is used when initializing channel-info structs.
- *
- * NOTE: These channel groups do *NOT* match the bands above!
- *      These channel groups are based on factory-tested channels;
- *      on A-band, EEPROM's "group frequency" entries represent the top
- *      channel in each group 1-4.  All B/G channels are in group 0.
- */
-static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
-                                      const struct iwl_channel_info *ch_info)
-{
-       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-       struct iwl3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
-       u8 group;
-       u16 group_index = 0;    /* based on factory calib frequencies */
-       u8 grp_channel;
-
-       /* Find the group index for the channel ... don't use index 1(?) */
-       if (is_channel_a_band(ch_info)) {
-               for (group = 1; group < 5; group++) {
-                       grp_channel = ch_grp[group].group_channel;
-                       if (ch_info->channel <= grp_channel) {
-                               group_index = group;
-                               break;
-                       }
-               }
-               /* group 4 has a few channels *above* its factory cal freq */
-               if (group == 5)
-                       group_index = 4;
-       } else
-               group_index = 0;        /* 2.4 GHz, group 0 */
-
-       IWL_DEBUG_POWER(priv, "Chnl %d mapped to grp %d\n", ch_info->channel,
-                       group_index);
-       return group_index;
-}
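A sketch of the grouping above with made-up EEPROM "top channel" entries (the real values come from the factory calibration data):

    /*
     * groups[1..4].group_channel = 43, 70, 124, 193   (hypothetical)
     * A-band channel  64: 64 > 43, 64 <= 70           -> group 2
     * A-band channel 200: above every entry, the loop
     *                     exits with group == 5       -> forced to group 4
     * any 2.4 GHz channel                             -> group 0
     */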
-
-/**
- * iwl3945_hw_reg_get_matched_power_index - Interpolate to get nominal index
- *
- * Interpolate to get nominal (i.e. at factory calibration temperature) index
- *   into radio/DSP gain settings table for requested power.
- */
-static int iwl3945_hw_reg_get_matched_power_index(struct iwl_priv *priv,
-                                      s8 requested_power,
-                                      s32 setting_index, s32 *new_index)
-{
-       const struct iwl3945_eeprom_txpower_group *chnl_grp = NULL;
-       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-       s32 index0, index1;
-       s32 power = 2 * requested_power;
-       s32 i;
-       const struct iwl3945_eeprom_txpower_sample *samples;
-       s32 gains0, gains1;
-       s32 res;
-       s32 denominator;
-
-       chnl_grp = &eeprom->groups[setting_index];
-       samples = chnl_grp->samples;
-       for (i = 0; i < 5; i++) {
-               if (power == samples[i].power) {
-                       *new_index = samples[i].gain_index;
-                       return 0;
-               }
-       }
-
-       if (power > samples[1].power) {
-               index0 = 0;
-               index1 = 1;
-       } else if (power > samples[2].power) {
-               index0 = 1;
-               index1 = 2;
-       } else if (power > samples[3].power) {
-               index0 = 2;
-               index1 = 3;
-       } else {
-               index0 = 3;
-               index1 = 4;
-       }
-
-       denominator = (s32) samples[index1].power - (s32) samples[index0].power;
-       if (denominator == 0)
-               return -EINVAL;
-       gains0 = (s32) samples[index0].gain_index * (1 << 19);
-       gains1 = (s32) samples[index1].gain_index * (1 << 19);
-       res = gains0 + (gains1 - gains0) *
-           ((s32) power - (s32) samples[index0].power) / denominator +
-           (1 << 18);
-       *new_index = res >> 19;
-       return 0;
-}
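
For reference, the interpolation above works in Q19 fixed point: gain indexes are scaled up by 1 << 19, the (1 << 18) term rounds to nearest, and the result is shifted back down.  A minimal standalone sketch of the same arithmetic, using made-up sample points rather than real EEPROM calibration data, might look like this:

#include <stdio.h>

/* Hypothetical calibration samples (half-dBm power, gain index); these are
 * invented numbers, not values from a real 3945 EEPROM. */
struct sample {
        int power;
        int gain_index;
};

/* Q19 fixed-point linear interpolation between two samples, rounded to
 * nearest, mirroring the arithmetic in the function above. */
static int interp_gain_index(struct sample lo, struct sample hi, int power)
{
        long long g0 = (long long)lo.gain_index << 19;
        long long g1 = (long long)hi.gain_index << 19;
        int denom = hi.power - lo.power;

        if (denom == 0)
                return -1;      /* the driver returns -EINVAL in this case */
        return (int)((g0 + (g1 - g0) * (power - lo.power) / denom +
                      (1 << 18)) >> 19);
}

int main(void)
{
        struct sample lo = { .power = 24, .gain_index = 40 };
        struct sample hi = { .power = 30, .gain_index = 32 };

        /* 27 half-dBm sits midway between the samples, so this prints 36. */
        printf("interpolated gain index: %d\n", interp_gain_index(lo, hi, 27));
        return 0;
}
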
-
-static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
-{
-       u32 i;
-       s32 rate_index;
-       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-       const struct iwl3945_eeprom_txpower_group *group;
-
-       IWL_DEBUG_POWER(priv, "Initializing factory calib info from EEPROM\n");
-
-       for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) {
-               s8 *clip_pwrs;  /* table of power levels for each rate */
-               s8 satur_pwr;   /* saturation power for each chnl group */
-               group = &eeprom->groups[i];
-
-               /* sanity check on factory saturation power value */
-               if (group->saturation_power < 40) {
-                       IWL_WARN(priv, "Error: saturation power is %d, "
-                                   "less than minimum expected 40\n",
-                                   group->saturation_power);
-                       return;
-               }
-
-               /*
-                * Derive requested power levels for each rate, based on
-                *   hardware capabilities (saturation power for band).
-                * Basic value is 3dB down from saturation, with further
-                *   power reductions for highest 3 data rates.  These
-                *   backoffs provide headroom for high rate modulation
-                *   power peaks, without too much distortion (clipping).
-                */
-               /* we'll fill in this array with h/w max power levels */
-               clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers;
-
-               /* divide factory saturation power by 2 to find -3dB level */
-               satur_pwr = (s8) (group->saturation_power >> 1);
-
-               /* fill in channel group's nominal powers for each rate */
-               for (rate_index = 0;
-                    rate_index < IWL_RATE_COUNT_3945; rate_index++, clip_pwrs++) {
-                       switch (rate_index) {
-                       case IWL_RATE_36M_INDEX_TABLE:
-                               if (i == 0)     /* B/G */
-                                       *clip_pwrs = satur_pwr;
-                               else    /* A */
-                                       *clip_pwrs = satur_pwr - 5;
-                               break;
-                       case IWL_RATE_48M_INDEX_TABLE:
-                               if (i == 0)
-                                       *clip_pwrs = satur_pwr - 7;
-                               else
-                                       *clip_pwrs = satur_pwr - 10;
-                               break;
-                       case IWL_RATE_54M_INDEX_TABLE:
-                               if (i == 0)
-                                       *clip_pwrs = satur_pwr - 9;
-                               else
-                                       *clip_pwrs = satur_pwr - 12;
-                               break;
-                       default:
-                               *clip_pwrs = satur_pwr;
-                               break;
-                       }
-               }
-       }
-}
-
-/**
- * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
- *
- * Second pass (during init) to set up priv->channel_info
- *
- * Set up Tx-power settings in our channel info database for each VALID
- * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
- * and current temperature.
- *
- * Since this is based on current temperature (at init time), these values may
- * not be valid for very long, but it gives us a starting/default point,
- * and allows us to perform active (i.e. using Tx) scans.
- *
- * This does *not* write values to NIC, just sets up our internal table.
- */
-int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
-{
-       struct iwl_channel_info *ch_info = NULL;
-       struct iwl3945_channel_power_info *pwr_info;
-       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-       int delta_index;
-       u8 rate_index;
-       u8 scan_tbl_index;
-       const s8 *clip_pwrs;    /* array of power levels for each rate */
-       u8 gain, dsp_atten;
-       s8 power;
-       u8 pwr_index, base_pwr_index, a_band;
-       u8 i;
-       int temperature;
-
-       /* save temperature reference,
-        *   so we can determine next time to calibrate */
-       temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
-       priv->last_temperature = temperature;
-
-       iwl3945_hw_reg_init_channel_groups(priv);
-
-       /* initialize Tx power info for each and every channel, 2.4 and 5.x */
-       for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
-            i++, ch_info++) {
-               a_band = is_channel_a_band(ch_info);
-               if (!is_channel_valid(ch_info))
-                       continue;
-
-               /* find this channel's channel group (*not* "band") index */
-               ch_info->group_index =
-                       iwl3945_hw_reg_get_ch_grp_index(priv, ch_info);
-
-               /* Get this chnlgrp's rate->max/clip-powers table */
-               clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
-
-               /* calculate power index *adjustment* value according to
-                *  diff between current temperature and factory temperature */
-               delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
-                               eeprom->groups[ch_info->group_index].
-                               temperature);
-
-               IWL_DEBUG_POWER(priv, "Delta index for channel %d: %d [%d]\n",
-                               ch_info->channel, delta_index, temperature +
-                               IWL_TEMP_CONVERT);
-
-               /* set tx power value for all OFDM rates */
-               for (rate_index = 0; rate_index < IWL_OFDM_RATES;
-                    rate_index++) {
-                       s32 uninitialized_var(power_idx);
-                       int rc;
-
-                       /* use channel group's clip-power table,
-                        *   but don't exceed channel's max power */
-                       s8 pwr = min(ch_info->max_power_avg,
-                                    clip_pwrs[rate_index]);
-
-                       pwr_info = &ch_info->power_info[rate_index];
-
-                       /* get base (i.e. at factory-measured temperature)
-                        *    power table index for this rate's power */
-                       rc = iwl3945_hw_reg_get_matched_power_index(priv, pwr,
-                                                        ch_info->group_index,
-                                                        &power_idx);
-                       if (rc) {
-                               IWL_ERR(priv, "Invalid power index\n");
-                               return rc;
-                       }
-                       pwr_info->base_power_index = (u8) power_idx;
-
-                       /* temperature compensate */
-                       power_idx += delta_index;
-
-                       /* stay within range of gain table */
-                       power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
-
-                       /* fill 1 OFDM rate's iwl3945_channel_power_info struct */
-                       pwr_info->requested_power = pwr;
-                       pwr_info->power_table_index = (u8) power_idx;
-                       pwr_info->tpc.tx_gain =
-                           power_gain_table[a_band][power_idx].tx_gain;
-                       pwr_info->tpc.dsp_atten =
-                           power_gain_table[a_band][power_idx].dsp_atten;
-               }
-
-               /* set tx power for CCK rates, based on OFDM 12 Mbit settings*/
-               pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX_TABLE];
-               power = pwr_info->requested_power +
-                       IWL_CCK_FROM_OFDM_POWER_DIFF;
-               pwr_index = pwr_info->power_table_index +
-                       IWL_CCK_FROM_OFDM_INDEX_DIFF;
-               base_pwr_index = pwr_info->base_power_index +
-                       IWL_CCK_FROM_OFDM_INDEX_DIFF;
-
-               /* stay within table range */
-               pwr_index = iwl3945_hw_reg_fix_power_index(pwr_index);
-               gain = power_gain_table[a_band][pwr_index].tx_gain;
-               dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten;
-
-               /* fill each CCK rate's iwl3945_channel_power_info structure
-                * NOTE:  All CCK-rate Txpwrs are the same for a given chnl!
-                * NOTE:  CCK rates start at end of OFDM rates! */
-               for (rate_index = 0;
-                    rate_index < IWL_CCK_RATES; rate_index++) {
-                       pwr_info = &ch_info->power_info[rate_index+IWL_OFDM_RATES];
-                       pwr_info->requested_power = power;
-                       pwr_info->power_table_index = pwr_index;
-                       pwr_info->base_power_index = base_pwr_index;
-                       pwr_info->tpc.tx_gain = gain;
-                       pwr_info->tpc.dsp_atten = dsp_atten;
-               }
-
-               /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
-               for (scan_tbl_index = 0;
-                    scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
-                       s32 actual_index = (scan_tbl_index == 0) ?
-                               IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
-                       iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
-                               actual_index, clip_pwrs, ch_info, a_band);
-               }
-       }
-
-       return 0;
-}
-
-int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
-{
-       int rc;
-
-       iwl_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
-       rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
-                       FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
-       if (rc < 0)
-               IWL_ERR(priv, "Can't stop Rx DMA.\n");
-
-       return 0;
-}
-
-int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
-       int txq_id = txq->q.id;
-
-       struct iwl3945_shared *shared_data = priv->_3945.shared_virt;
-
-       shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
-
-       iwl_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
-       iwl_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
-
-       iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
-               FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
-               FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
-               FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
-               FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
-               FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
-
-       /* fake read to flush all prev. writes */
-       iwl_read32(priv, FH39_TSSR_CBB_BASE);
-
-       return 0;
-}
-
-/*
- * HCMD utils
- */
-static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
-{
-       switch (cmd_id) {
-       case REPLY_RXON:
-               return sizeof(struct iwl3945_rxon_cmd);
-       case POWER_TABLE_CMD:
-               return sizeof(struct iwl3945_powertable_cmd);
-       default:
-               return len;
-       }
-}
-
-
-static u16 iwl3945_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
-{
-       struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
-       addsta->mode = cmd->mode;
-       memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
-       memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
-       addsta->station_flags = cmd->station_flags;
-       addsta->station_flags_msk = cmd->station_flags_msk;
-       addsta->tid_disable_tx = cpu_to_le16(0);
-       addsta->rate_n_flags = cmd->rate_n_flags;
-       addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
-       addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
-       addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
-
-       return (u16)sizeof(struct iwl3945_addsta_cmd);
-}
-
-static int iwl3945_add_bssid_station(struct iwl_priv *priv,
-                                    const u8 *addr, u8 *sta_id_r)
-{
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       int ret;
-       u8 sta_id;
-       unsigned long flags;
-
-       if (sta_id_r)
-               *sta_id_r = IWL_INVALID_STATION;
-
-       ret = iwl_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
-       if (ret) {
-               IWL_ERR(priv, "Unable to add station %pM\n", addr);
-               return ret;
-       }
-
-       if (sta_id_r)
-               *sta_id_r = sta_id;
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       priv->stations[sta_id].used |= IWL_STA_LOCAL;
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-       return 0;
-}
-static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
-                                      struct ieee80211_vif *vif, bool add)
-{
-       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-       int ret;
-
-       if (add) {
-               ret = iwl3945_add_bssid_station(priv, vif->bss_conf.bssid,
-                                               &vif_priv->ibss_bssid_sta_id);
-               if (ret)
-                       return ret;
-
-               iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
-                                (priv->band == IEEE80211_BAND_5GHZ) ?
-                                IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP);
-               iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);
-
-               return 0;
-       }
-
-       return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
-                                 vif->bss_conf.bssid);
-}
-
-/**
- * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
- */
-int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
-{
-       int rc, i, index, prev_index;
-       struct iwl3945_rate_scaling_cmd rate_cmd = {
-               .reserved = {0, 0, 0},
-       };
-       struct iwl3945_rate_scaling_info *table = rate_cmd.table;
-
-       for (i = 0; i < ARRAY_SIZE(iwl3945_rates); i++) {
-               index = iwl3945_rates[i].table_rs_index;
-
-               table[index].rate_n_flags =
-                       iwl3945_hw_set_rate_n_flags(iwl3945_rates[i].plcp, 0);
-               table[index].try_cnt = priv->retry_rate;
-               prev_index = iwl3945_get_prev_ieee_rate(i);
-               table[index].next_rate_index =
-                               iwl3945_rates[prev_index].table_rs_index;
-       }
-
-       switch (priv->band) {
-       case IEEE80211_BAND_5GHZ:
-               IWL_DEBUG_RATE(priv, "Select A mode rate scale\n");
-               /* If one of the following CCK rates is used,
-                * have it fall back to the 6M OFDM rate */
-               for (i = IWL_RATE_1M_INDEX_TABLE;
-                       i <= IWL_RATE_11M_INDEX_TABLE; i++)
-                       table[i].next_rate_index =
-                         iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
-
-               /* Don't fall back to CCK rates */
-               table[IWL_RATE_12M_INDEX_TABLE].next_rate_index =
-                                               IWL_RATE_9M_INDEX_TABLE;
-
-               /* Don't drop out of OFDM rates */
-               table[IWL_RATE_6M_INDEX_TABLE].next_rate_index =
-                   iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
-               break;
-
-       case IEEE80211_BAND_2GHZ:
-               IWL_DEBUG_RATE(priv, "Select B/G mode rate scale\n");
-               /* If an OFDM rate is used, have it fall back to the
-                * 1M CCK rates */
-
-               if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
-                   iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
-
-                       index = IWL_FIRST_CCK_RATE;
-                       for (i = IWL_RATE_6M_INDEX_TABLE;
-                            i <= IWL_RATE_54M_INDEX_TABLE; i++)
-                               table[i].next_rate_index =
-                                       iwl3945_rates[index].table_rs_index;
-
-                       index = IWL_RATE_11M_INDEX_TABLE;
-                       /* CCK shouldn't fall back to OFDM... */
-                       table[index].next_rate_index = IWL_RATE_5M_INDEX_TABLE;
-               }
-               break;
-
-       default:
-               WARN_ON(1);
-               break;
-       }
-
-       /* Update the rate scaling for control frame Tx */
-       rate_cmd.table_id = 0;
-       rc = iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
-                             &rate_cmd);
-       if (rc)
-               return rc;
-
-       /* Update the rate scaling for data frame Tx */
-       rate_cmd.table_id = 1;
-       return iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
-                               &rate_cmd);
-}
-
-/* Called when initializing driver */
-int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
-{
-       memset((void *)&priv->hw_params, 0,
-              sizeof(struct iwl_hw_params));
-
-       priv->_3945.shared_virt =
-               dma_alloc_coherent(&priv->pci_dev->dev,
-                                  sizeof(struct iwl3945_shared),
-                                  &priv->_3945.shared_phys, GFP_KERNEL);
-       if (!priv->_3945.shared_virt) {
-               IWL_ERR(priv, "failed to allocate pci memory\n");
-               return -ENOMEM;
-       }
-
-       /* Assign number of Usable TX queues */
-       priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
-
-       priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
-       priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K);
-       priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
-       priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
-       priv->hw_params.max_stations = IWL3945_STATION_COUNT;
-       priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL3945_BROADCAST_ID;
-
-       priv->sta_key_max_num = STA_KEY_MAX_NUM;
-
-       priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
-       priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL;
-       priv->hw_params.beacon_time_tsf_bits = IWL3945_EXT_BEACON_TIME_POS;
-
-       return 0;
-}
-
-unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
-                         struct iwl3945_frame *frame, u8 rate)
-{
-       struct iwl3945_tx_beacon_cmd *tx_beacon_cmd;
-       unsigned int frame_size;
-
-       tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u;
-       memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
-
-       tx_beacon_cmd->tx.sta_id =
-               priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
-       tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
-       frame_size = iwl3945_fill_beacon_frame(priv,
-                               tx_beacon_cmd->frame,
-                               sizeof(frame->u) - sizeof(*tx_beacon_cmd));
-
-       BUG_ON(frame_size > MAX_MPDU_SIZE);
-       tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
-
-       tx_beacon_cmd->tx.rate = rate;
-       tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
-                                     TX_CMD_FLG_TSF_MSK);
-
-       /* supp_rates[0] == OFDM start at IWL_FIRST_OFDM_RATE*/
-       tx_beacon_cmd->tx.supp_rates[0] =
-               (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
-
-       tx_beacon_cmd->tx.supp_rates[1] =
-               (IWL_CCK_BASIC_RATES_MASK & 0xF);
-
-       return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size;
-}
-
-void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv)
-{
-       priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx;
-       priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx;
-}
-
-void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv)
-{
-       INIT_DELAYED_WORK(&priv->_3945.thermal_periodic,
-                         iwl3945_bg_reg_txpower_periodic);
-}
-
-void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv)
-{
-       cancel_delayed_work(&priv->_3945.thermal_periodic);
-}
-
-/* check contents of special bootstrap uCode SRAM */
-static int iwl3945_verify_bsm(struct iwl_priv *priv)
-{
-       __le32 *image = priv->ucode_boot.v_addr;
-       u32 len = priv->ucode_boot.len;
-       u32 reg;
-       u32 val;
-
-       IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
-
-       /* verify BSM SRAM contents */
-       val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
-       for (reg = BSM_SRAM_LOWER_BOUND;
-            reg < BSM_SRAM_LOWER_BOUND + len;
-            reg += sizeof(u32), image++) {
-               val = iwl_read_prph(priv, reg);
-               if (val != le32_to_cpu(*image)) {
-                       IWL_ERR(priv, "BSM uCode verification failed at "
-                                 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
-                                 BSM_SRAM_LOWER_BOUND,
-                                 reg - BSM_SRAM_LOWER_BOUND, len,
-                                 val, le32_to_cpu(*image));
-                       return -EIO;
-               }
-       }
-
-       IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
-
-       return 0;
-}
-
-
-/******************************************************************************
- *
- * EEPROM related functions
- *
- ******************************************************************************/
-
-/*
- * Clear the OWNER_MSK, to establish driver (instead of uCode running on
- * embedded controller) as EEPROM reader; each read is a series of pulses
- * to/from the EEPROM chip, not a single event, so even reads could conflict
- * if they weren't arbitrated by some ownership mechanism.  Here, the driver
- * simply claims ownership, which should be safe when this function is called
- * (i.e. before loading uCode!).
- */
-static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
-{
-       _iwl_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
-       return 0;
-}
-
-
-static void iwl3945_eeprom_release_semaphore(struct iwl_priv *priv)
-{
-       return;
-}
-
- /**
-  * iwl3945_load_bsm - Load bootstrap instructions
-  *
-  * BSM operation:
-  *
-  * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
-  * in special SRAM that does not power down during RFKILL.  When powering back
-  * up after power-saving sleeps (or during initial uCode load), the BSM loads
-  * the bootstrap program into the on-board processor, and starts it.
-  *
-  * The bootstrap program loads (via DMA) instructions and data for a new
-  * program from host DRAM locations indicated by the host driver in the
-  * BSM_DRAM_* registers.  Once the new program is loaded, it starts
-  * automatically.
-  *
-  * When initializing the NIC, the host driver points the BSM to the
-  * "initialize" uCode image.  This uCode sets up some internal data, then
-  * notifies host via "initialize alive" that it is complete.
-  *
-  * The host then replaces the BSM_DRAM_* pointer values to point to the
-  * normal runtime uCode instructions and a backup uCode data cache buffer
-  * (filled initially with starting data values for the on-board processor),
-  * then triggers the "initialize" uCode to load and launch the runtime uCode,
-  * which begins normal operation.
-  *
-  * When doing a power-save shutdown, runtime uCode saves data SRAM into
-  * the backup data cache in DRAM before SRAM is powered down.
-  *
-  * When powering back up, the BSM loads the bootstrap program.  This reloads
-  * the runtime uCode instructions and the backup data cache into SRAM,
-  * and re-launches the runtime uCode from where it left off.
-  */
-static int iwl3945_load_bsm(struct iwl_priv *priv)
-{
-       __le32 *image = priv->ucode_boot.v_addr;
-       u32 len = priv->ucode_boot.len;
-       dma_addr_t pinst;
-       dma_addr_t pdata;
-       u32 inst_len;
-       u32 data_len;
-       int rc;
-       int i;
-       u32 done;
-       u32 reg_offset;
-
-       IWL_DEBUG_INFO(priv, "Begin load bsm\n");
-
-       /* make sure bootstrap program is no larger than BSM's SRAM size */
-       if (len > IWL39_MAX_BSM_SIZE)
-               return -EINVAL;
-
-       /* Tell bootstrap uCode where to find the "Initialize" uCode
-       *   in host DRAM ... host DRAM physical address bits 31:0 for 3945.
-       * NOTE:  iwl3945_initialize_alive_start() will replace these values,
-       *        after the "initialize" uCode has run, to point to
-       *        runtime/protocol instructions and backup data cache. */
-       pinst = priv->ucode_init.p_addr;
-       pdata = priv->ucode_init_data.p_addr;
-       inst_len = priv->ucode_init.len;
-       data_len = priv->ucode_init_data.len;
-
-       iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
-       iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
-       iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
-       iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
-
-       /* Fill BSM memory with bootstrap instructions */
-       for (reg_offset = BSM_SRAM_LOWER_BOUND;
-            reg_offset < BSM_SRAM_LOWER_BOUND + len;
-            reg_offset += sizeof(u32), image++)
-               _iwl_write_prph(priv, reg_offset,
-                                         le32_to_cpu(*image));
-
-       rc = iwl3945_verify_bsm(priv);
-       if (rc)
-               return rc;
-
-       /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
-       iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
-       iwl_write_prph(priv, BSM_WR_MEM_DST_REG,
-                                IWL39_RTC_INST_LOWER_BOUND);
-       iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
-
-       /* Load bootstrap code into instruction SRAM now,
-        *   to prepare to load "initialize" uCode */
-       iwl_write_prph(priv, BSM_WR_CTRL_REG,
-               BSM_WR_CTRL_REG_BIT_START);
-
-       /* Wait for load of bootstrap uCode to finish */
-       for (i = 0; i < 100; i++) {
-               done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
-               if (!(done & BSM_WR_CTRL_REG_BIT_START))
-                       break;
-               udelay(10);
-       }
-       if (i < 100)
-               IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
-       else {
-               IWL_ERR(priv, "BSM write did not complete!\n");
-               return -EIO;
-       }
-
-       /* Enable future boot loads whenever power management unit triggers it
-        *   (e.g. when powering back up after power-save shutdown) */
-       iwl_write_prph(priv, BSM_WR_CTRL_REG,
-               BSM_WR_CTRL_REG_BIT_START_EN);
-
-       return 0;
-}
-
-static struct iwl_hcmd_ops iwl3945_hcmd = {
-       .rxon_assoc = iwl3945_send_rxon_assoc,
-       .commit_rxon = iwl3945_commit_rxon,
-       .send_bt_config = iwl_send_bt_config,
-};
-
-static struct iwl_lib_ops iwl3945_lib = {
-       .txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd,
-       .txq_free_tfd = iwl3945_hw_txq_free_tfd,
-       .txq_init = iwl3945_hw_tx_queue_init,
-       .load_ucode = iwl3945_load_bsm,
-       .dump_nic_event_log = iwl3945_dump_nic_event_log,
-       .dump_nic_error_log = iwl3945_dump_nic_error_log,
-       .apm_ops = {
-               .init = iwl3945_apm_init,
-               .config = iwl3945_nic_config,
-       },
-       .eeprom_ops = {
-               .regulatory_bands = {
-                       EEPROM_REGULATORY_BAND_1_CHANNELS,
-                       EEPROM_REGULATORY_BAND_2_CHANNELS,
-                       EEPROM_REGULATORY_BAND_3_CHANNELS,
-                       EEPROM_REGULATORY_BAND_4_CHANNELS,
-                       EEPROM_REGULATORY_BAND_5_CHANNELS,
-                       EEPROM_REGULATORY_BAND_NO_HT40,
-                       EEPROM_REGULATORY_BAND_NO_HT40,
-               },
-               .acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
-               .release_semaphore = iwl3945_eeprom_release_semaphore,
-               .query_addr = iwlcore_eeprom_query_addr,
-       },
-       .send_tx_power  = iwl3945_send_tx_power,
-       .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,
-       .isr_ops = {
-               .isr = iwl_isr_legacy,
-       },
-
-       .debugfs_ops = {
-               .rx_stats_read = iwl3945_ucode_rx_stats_read,
-               .tx_stats_read = iwl3945_ucode_tx_stats_read,
-               .general_stats_read = iwl3945_ucode_general_stats_read,
-       },
-};
-
-static const struct iwl_legacy_ops iwl3945_legacy_ops = {
-       .post_associate = iwl3945_post_associate,
-       .config_ap = iwl3945_config_ap,
-       .manage_ibss_station = iwl3945_manage_ibss_station,
-};
-
-static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
-       .get_hcmd_size = iwl3945_get_hcmd_size,
-       .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
-       .tx_cmd_protection = iwl_legacy_tx_cmd_protection,
-       .request_scan = iwl3945_request_scan,
-       .post_scan = iwl3945_post_scan,
-};
-
-static const struct iwl_ops iwl3945_ops = {
-       .lib = &iwl3945_lib,
-       .hcmd = &iwl3945_hcmd,
-       .utils = &iwl3945_hcmd_utils,
-       .led = &iwl3945_led_ops,
-       .legacy = &iwl3945_legacy_ops,
-       .ieee80211_ops = &iwl3945_hw_ops,
-};
-
-static struct iwl_base_params iwl3945_base_params = {
-       .eeprom_size = IWL3945_EEPROM_IMG_SIZE,
-       .num_of_queues = IWL39_NUM_QUEUES,
-       .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
-       .set_l0s = false,
-       .use_bsm = true,
-       .use_isr_legacy = true,
-       .led_compensation = 64,
-       .broken_powersave = true,
-       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
-       .wd_timeout = IWL_DEF_WD_TIMEOUT,
-       .max_event_log_size = 512,
-       .tx_power_by_driver = true,
-};
-
-static struct iwl_cfg iwl3945_bg_cfg = {
-       .name = "3945BG",
-       .fw_name_pre = IWL3945_FW_PRE,
-       .ucode_api_max = IWL3945_UCODE_API_MAX,
-       .ucode_api_min = IWL3945_UCODE_API_MIN,
-       .sku = IWL_SKU_G,
-       .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
-       .ops = &iwl3945_ops,
-       .mod_params = &iwl3945_mod_params,
-       .base_params = &iwl3945_base_params,
-       .led_mode = IWL_LED_BLINK,
-};
-
-static struct iwl_cfg iwl3945_abg_cfg = {
-       .name = "3945ABG",
-       .fw_name_pre = IWL3945_FW_PRE,
-       .ucode_api_max = IWL3945_UCODE_API_MAX,
-       .ucode_api_min = IWL3945_UCODE_API_MIN,
-       .sku = IWL_SKU_A|IWL_SKU_G,
-       .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
-       .ops = &iwl3945_ops,
-       .mod_params = &iwl3945_mod_params,
-       .base_params = &iwl3945_base_params,
-       .led_mode = IWL_LED_BLINK,
-};
-
-DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
-       {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
-       {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
-       {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
-       {IWL_PCI_DEVICE(0x4227, 0x1014, iwl3945_bg_cfg)},
-       {IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_abg_cfg)},
-       {IWL_PCI_DEVICE(0x4227, PCI_ANY_ID, iwl3945_abg_cfg)},
-       {0}
-};
-
-MODULE_DEVICE_TABLE(pci, iwl3945_hw_card_ids);
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
deleted file mode 100644 (file)
index 3eef1eb..0000000
+++ /dev/null
@@ -1,308 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-3945.h) for driver implementation definitions.
- * Please use iwl-3945-commands.h for uCode API definitions.
- * Please use iwl-3945-hw.h for hardware-related definitions.
- */
-
-#ifndef __iwl_3945_h__
-#define __iwl_3945_h__
-
-#include <linux/pci.h> /* for struct pci_device_id */
-#include <linux/kernel.h>
-#include <net/ieee80211_radiotap.h>
-
-/* Hardware specific file defines the PCI IDs table for that hardware module */
-extern const struct pci_device_id iwl3945_hw_card_ids[];
-
-#include "iwl-csr.h"
-#include "iwl-prph.h"
-#include "iwl-fh.h"
-#include "iwl-3945-hw.h"
-#include "iwl-debug.h"
-#include "iwl-power.h"
-#include "iwl-dev.h"
-#include "iwl-led.h"
-
-/* Highest firmware API version supported */
-#define IWL3945_UCODE_API_MAX 2
-
-/* Lowest firmware API version supported */
-#define IWL3945_UCODE_API_MIN 1
-
-#define IWL3945_FW_PRE "iwlwifi-3945-"
-#define _IWL3945_MODULE_FIRMWARE(api) IWL3945_FW_PRE #api ".ucode"
-#define IWL3945_MODULE_FIRMWARE(api) _IWL3945_MODULE_FIRMWARE(api)
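
Because of the two-level macro above, the argument is macro-expanded before the #api stringification, so IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX) becomes "iwlwifi-3945-" "2" ".ucode", i.e. the single firmware file name "iwlwifi-3945-2.ucode", given the IWL3945_UCODE_API_MAX value defined above.
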
-
-/* Default noise level to report when noise measurement is not available.
- *   This may be because we're:
- *   1)  Not associated (4965, no beacon statistics being sent to driver)
- *   2)  Scanning (noise measurement does not apply to associated channel)
- *   3)  Receiving CCK (3945 delivers noise info only for OFDM frames)
- * Use default noise value of -127 ... this is below the range of measurable
- *   Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
- *   Also, -127 works better than 0 when averaging frames with/without
- *   noise info (e.g. averaging might be done in app); measured dBm values are
- *   always negative ... using a negative value as the default keeps all
- *   averages within an s8's (used in some apps) range of negative values. */
-#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
-
-/* Module parameters accessible from iwl-*.c */
-extern struct iwl_mod_params iwl3945_mod_params;
-
-struct iwl3945_rate_scale_data {
-       u64 data;
-       s32 success_counter;
-       s32 success_ratio;
-       s32 counter;
-       s32 average_tpt;
-       unsigned long stamp;
-};
-
-struct iwl3945_rs_sta {
-       spinlock_t lock;
-       struct iwl_priv *priv;
-       s32 *expected_tpt;
-       unsigned long last_partial_flush;
-       unsigned long last_flush;
-       u32 flush_time;
-       u32 last_tx_packets;
-       u32 tx_packets;
-       u8 tgg;
-       u8 flush_pending;
-       u8 start_rate;
-       struct timer_list rate_scale_flush;
-       struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
-#ifdef CONFIG_MAC80211_DEBUGFS
-       struct dentry *rs_sta_dbgfs_stats_table_file;
-#endif
-
-       /* used to be in sta_info */
-       int last_txrate_idx;
-};
-
-
-/*
- * The common struct MUST be first because it is shared between
- * 3945 and agn!
- */
-struct iwl3945_sta_priv {
-       struct iwl_station_priv_common common;
-       struct iwl3945_rs_sta rs_sta;
-};
-
-enum iwl3945_antenna {
-       IWL_ANTENNA_DIVERSITY,
-       IWL_ANTENNA_MAIN,
-       IWL_ANTENNA_AUX
-};
-
-/*
- * RTS threshold here is total size [2347] minus 4 FCS bytes
- * Per spec:
- *   a value of 0 means RTS on all data/management packets
- *   a value > max MSDU size means no RTS
- * else RTS for data/management frames where MPDU is larger
- *   than RTS value.
- */
-#define DEFAULT_RTS_THRESHOLD     2347U
-#define MIN_RTS_THRESHOLD         0U
-#define MAX_RTS_THRESHOLD         2347U
-#define MAX_MSDU_SIZE            2304U
-#define MAX_MPDU_SIZE            2346U
-#define DEFAULT_BEACON_INTERVAL   100U
-#define        DEFAULT_SHORT_RETRY_LIMIT 7U
-#define        DEFAULT_LONG_RETRY_LIMIT  4U
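
The three cases spelled out in the comment above reduce to a simple predicate; the sketch below illustrates that rule only (a hypothetical helper, not code taken from the driver):

#include <stdbool.h>
#include <stdio.h>

#define MAX_MSDU_SIZE   2304U

/* Illustration of the RTS threshold rule above: 0 means RTS for every
 * data/management frame, anything above the max MSDU size effectively
 * disables RTS, otherwise RTS is used for frames larger than the threshold. */
static bool needs_rts(unsigned int rts_threshold, unsigned int mpdu_len)
{
        if (rts_threshold == 0)
                return true;
        if (rts_threshold > MAX_MSDU_SIZE)
                return false;
        return mpdu_len > rts_threshold;
}

int main(void)
{
        /* Prints "1 0 1": always-on, effectively off, and per-frame cases. */
        printf("%d %d %d\n", needs_rts(0, 100), needs_rts(2347, 2000),
               needs_rts(500, 1500));
        return 0;
}
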
-
-#define IWL_TX_FIFO_AC0        0
-#define IWL_TX_FIFO_AC1        1
-#define IWL_TX_FIFO_AC2        2
-#define IWL_TX_FIFO_AC3        3
-#define IWL_TX_FIFO_HCCA_1     5
-#define IWL_TX_FIFO_HCCA_2     6
-#define IWL_TX_FIFO_NONE       7
-
-#define IEEE80211_DATA_LEN              2304
-#define IEEE80211_4ADDR_LEN             30
-#define IEEE80211_HLEN                  (IEEE80211_4ADDR_LEN)
-#define IEEE80211_FRAME_LEN             (IEEE80211_DATA_LEN + IEEE80211_HLEN)
-
-struct iwl3945_frame {
-       union {
-               struct ieee80211_hdr frame;
-               struct iwl3945_tx_beacon_cmd beacon;
-               u8 raw[IEEE80211_FRAME_LEN];
-               u8 cmd[360];
-       } u;
-       struct list_head list;
-};
-
-#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
-#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
-
-#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
-#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
-#define SUP_RATE_11G_MAX_NUM_CHANNELS  12
-
-#define IWL_SUPPORTED_RATES_IE_LEN         8
-
-#define SCAN_INTERVAL 100
-
-#define MAX_TID_COUNT        9
-
-#define IWL_INVALID_RATE     0xFF
-#define IWL_INVALID_VALUE    -1
-
-#define STA_PS_STATUS_WAKE             0
-#define STA_PS_STATUS_SLEEP            1
-
-struct iwl3945_ibss_seq {
-       u8 mac[ETH_ALEN];
-       u16 seq_num;
-       u16 frag_num;
-       unsigned long packet_time;
-       struct list_head list;
-};
-
-#define IWL_RX_HDR(x) ((struct iwl3945_rx_frame_hdr *)(\
-                      x->u.rx_frame.stats.payload + \
-                      x->u.rx_frame.stats.phy_count))
-#define IWL_RX_END(x) ((struct iwl3945_rx_frame_end *)(\
-                      IWL_RX_HDR(x)->payload + \
-                      le16_to_cpu(IWL_RX_HDR(x)->len)))
-#define IWL_RX_STATS(x) (&x->u.rx_frame.stats)
-#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload)
-
-
-/******************************************************************************
- *
- * Functions implemented in iwl-base.c which are forward declared here
- * for use by iwl-*.c
- *
- *****************************************************************************/
-extern int iwl3945_calc_db_from_ratio(int sig_ratio);
-extern void iwl3945_rx_replenish(void *data);
-extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
-                                       struct ieee80211_hdr *hdr,int left);
-extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
-                                      char **buf, bool display);
-extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
-
-/******************************************************************************
- *
- * Functions implemented in iwl-[34]*.c which are forward declared here
- * for use by iwl-base.c
- *
- * NOTE:  The implementation of these functions are hardware specific
- * which is why they are in the hardware specific files (vs. iwl-base.c)
- *
- * Naming convention --
- * iwl3945_         <-- It's part of iwlwifi
- * iwl3945_hw_      <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
- * iwlXXXX_     <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
- * iwl3945_bg_      <-- Called from work queue context
- * iwl3945_mac_     <-- mac80211 callback
- *
- ****************************************************************************/
-extern void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv);
-extern void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv);
-extern void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv);
-extern int iwl3945_hw_rxq_stop(struct iwl_priv *priv);
-extern int iwl3945_hw_set_hw_params(struct iwl_priv *priv);
-extern int iwl3945_hw_nic_init(struct iwl_priv *priv);
-extern int iwl3945_hw_nic_stop_master(struct iwl_priv *priv);
-extern void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv);
-extern void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv);
-extern int iwl3945_hw_nic_reset(struct iwl_priv *priv);
-extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
-                                           struct iwl_tx_queue *txq,
-                                           dma_addr_t addr, u16 len,
-                                           u8 reset, u8 pad);
-extern void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv,
-                                   struct iwl_tx_queue *txq);
-extern int iwl3945_hw_get_temperature(struct iwl_priv *priv);
-extern int iwl3945_hw_tx_queue_init(struct iwl_priv *priv,
-                               struct iwl_tx_queue *txq);
-extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
-                                struct iwl3945_frame *frame, u8 rate);
-void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
-                                 struct iwl_device_cmd *cmd,
-                                 struct ieee80211_tx_info *info,
-                                 struct ieee80211_hdr *hdr,
-                                 int sta_id, int tx_id);
-extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv);
-extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
-extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
-                                struct iwl_rx_mem_buffer *rxb);
-void iwl3945_reply_statistics(struct iwl_priv *priv,
-                             struct iwl_rx_mem_buffer *rxb);
-extern void iwl3945_disable_events(struct iwl_priv *priv);
-extern int iwl4965_get_temperature(const struct iwl_priv *priv);
-extern void iwl3945_post_associate(struct iwl_priv *priv);
-extern void iwl3945_config_ap(struct iwl_priv *priv);
-
-extern int iwl3945_commit_rxon(struct iwl_priv *priv,
-                              struct iwl_rxon_context *ctx);
-
-/**
- * iwl3945_hw_find_station - Find station id for a given BSSID
- * @bssid: MAC address of station ID to find
- *
- * NOTE:  This should not be hardware specific but the code has
- * not yet been merged into a single common layer for managing the
- * station tables.
- */
-extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
-
-extern struct ieee80211_ops iwl3945_hw_ops;
-
-/*
- * Forward declare iwl-3945.c functions for iwl-base.c
- */
-extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
-extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
-extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv);
-extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv);
-
-extern const struct iwl_channel_info *iwl3945_get_channel_info(
-       const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
-
-extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
-
-/* scanning */
-int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
-void iwl3945_post_scan(struct iwl_priv *priv);
-
-/* rates */
-extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945];
-
-/* Requires full declaration of iwl_priv before including */
-#include "iwl-io.h"
-
-#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
deleted file mode 100644 (file)
index 9166794..0000000
+++ /dev/null
@@ -1,792 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-4965-hw.h) only for hardware-related definitions.
- * Use iwl-commands.h for uCode API definitions.
- * Use iwl-dev.h for driver implementation definitions.
- */
-
-#ifndef __iwl_4965_hw_h__
-#define __iwl_4965_hw_h__
-
-#include "iwl-fh.h"
-
-/* EEPROM */
-#define IWL4965_EEPROM_IMG_SIZE                        1024
-
-/*
- * uCode queue management definitions ...
- * The first queue used for block-ack aggregation is #7 (4965 only).
- * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
- */
-#define IWL49_FIRST_AMPDU_QUEUE        7
-
-/* Sizes and addresses for instruction and data memory (SRAM) in
- * 4965's embedded processor.  Driver access is via HBUS_TARG_MEM_* regs. */
-#define IWL49_RTC_INST_LOWER_BOUND             (0x000000)
-#define IWL49_RTC_INST_UPPER_BOUND             (0x018000)
-
-#define IWL49_RTC_DATA_LOWER_BOUND             (0x800000)
-#define IWL49_RTC_DATA_UPPER_BOUND             (0x80A000)
-
-#define IWL49_RTC_INST_SIZE  (IWL49_RTC_INST_UPPER_BOUND - \
-                               IWL49_RTC_INST_LOWER_BOUND)
-#define IWL49_RTC_DATA_SIZE  (IWL49_RTC_DATA_UPPER_BOUND - \
-                               IWL49_RTC_DATA_LOWER_BOUND)
-
-#define IWL49_MAX_INST_SIZE IWL49_RTC_INST_SIZE
-#define IWL49_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE
-
-/* Size of uCode instruction memory in bootstrap state machine */
-#define IWL49_MAX_BSM_SIZE BSM_SRAM_SIZE
-
-static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
-{
-       return (addr >= IWL49_RTC_DATA_LOWER_BOUND) &&
-              (addr < IWL49_RTC_DATA_UPPER_BOUND);
-}
-
-/********************* START TEMPERATURE *************************************/
-
-/**
- * 4965 temperature calculation.
- *
- * The driver must calculate the device temperature before calculating
- * a txpower setting (amplifier gain is temperature dependent).  The
- * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
- * values used for the life of the driver, and one of which (R4) is the
- * real-time temperature indicator.
- *
- * uCode provides all 4 values to the driver via the "initialize alive"
- * notification (see struct iwl4965_init_alive_resp).  After the runtime uCode
- * image loads, uCode updates the R4 value via statistics notifications
- * (see STATISTICS_NOTIFICATION), which occur after each received beacon
- * when associated, or can be requested via REPLY_STATISTICS_CMD.
- *
- * NOTE:  uCode provides the R4 value as a 23-bit signed value.  Driver
- *        must sign-extend to 32 bits before applying formula below.
- *
- * Formula:
- *
- * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
- *
- * NOTE:  The basic formula is 259 * (R4-R2) / (R3-R1).  The 97/100 is
- * an additional correction, which should be centered around 0 degrees
- * Celsius (273 degrees Kelvin).  The 8 (3 percent of 273) compensates for
- * centering the 97/100 correction around 0 degrees K.
- *
- * Subtract 273 from the Kelvin value to find degrees Celsius, for comparing
- * current temperature with factory-measured temperatures when calculating
- * txpower settings.
- */
-#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
-#define TEMPERATURE_CALIB_A_VAL 259
-
-/* Limit range of calculated temperature to be between these Kelvin values */
-#define IWL_TX_POWER_TEMPERATURE_MIN  (263)
-#define IWL_TX_POWER_TEMPERATURE_MAX  (410)
-
-#define IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
-       (((t) < IWL_TX_POWER_TEMPERATURE_MIN) || \
-        ((t) > IWL_TX_POWER_TEMPERATURE_MAX))
-
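
To make the temperature formula and limits above concrete, here is a minimal standalone sketch; the R1-R4 inputs are made-up placeholders rather than real calibration data, and the sign-extension step follows the 23-bit NOTE in the comment:

#include <stdio.h>

#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
#define TEMPERATURE_CALIB_A_VAL         259
#define IWL_TX_POWER_TEMPERATURE_MIN    263
#define IWL_TX_POWER_TEMPERATURE_MAX    410

/* Sign-extend the 23-bit R4 statistics reading to 32 bits, as required by
 * the NOTE above (assumes the usual arithmetic right shift of signed ints). */
static int sign_extend_r4(unsigned int raw)
{
        return (int)(raw << 9) >> 9;
}

/* degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8 */
static int calc_temp_kelvin(int r1, int r2, int r3, int r4)
{
        if (r3 == r1)   /* guard against a divide by zero on bad calibration */
                return 0;
        return (97 * TEMPERATURE_CALIB_A_VAL * (r4 - r2) / (r3 - r1)) / 100 +
               TEMPERATURE_CALIB_KELVIN_OFFSET;
}

/* Same test as the IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE() macro above. */
static int temp_out_of_range(int t)
{
        return t < IWL_TX_POWER_TEMPERATURE_MIN ||
               t > IWL_TX_POWER_TEMPERATURE_MAX;
}

int main(void)
{
        /* Placeholder calibration values chosen only to give a plausible
         * result; with these inputs the formula yields 300 K (27 C). */
        int r1 = 87, r2 = -5, r3 = 295;
        int r4 = sign_extend_r4(237);
        int kelvin = calc_temp_kelvin(r1, r2, r3, r4);

        printf("temperature: %d K (%d C)%s\n", kelvin, kelvin - 273,
               temp_out_of_range(kelvin) ? " -- out of range" : "");
        return 0;
}
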
-/********************* END TEMPERATURE ***************************************/
-
-/********************* START TXPOWER *****************************************/
-
-/**
- * 4965 txpower calculations rely on information from three sources:
- *
- *     1) EEPROM
- *     2) "initialize" alive notification
- *     3) statistics notifications
- *
- * EEPROM data consists of:
- *
- * 1)  Regulatory information (max txpower and channel usage flags) is provided
- *     separately for each channel that can possibly be supported by 4965.
- *     40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
- *     (legacy) channels.
- *
- *     See struct iwl4965_eeprom_channel for format, and struct iwl4965_eeprom
- *     for locations in EEPROM.
- *
- * 2)  Factory txpower calibration information is provided separately for
- *     sub-bands of contiguous channels.  2.4GHz has just one sub-band,
- *     but 5 GHz has several sub-bands.
- *
- *     In addition, per-band (2.4 and 5 GHz) saturation txpowers are provided.
- *
- *     See struct iwl4965_eeprom_calib_info (and the tree of structures
- *     contained within it) for format, and struct iwl4965_eeprom for
- *     locations in EEPROM.
- *
- * "Initialization alive" notification (see struct iwl4965_init_alive_resp)
- * consists of:
- *
- * 1)  Temperature calculation parameters.
- *
- * 2)  Power supply voltage measurement.
- *
- * 3)  Tx gain compensation to balance 2 transmitters for MIMO use.
- *
- * Statistics notifications deliver:
- *
- * 1)  Current values for temperature param R4.
- */
-
-/**
- * To calculate a txpower setting for a given desired target txpower, channel,
- * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
- * support MIMO and transmit diversity), driver must do the following:
- *
- * 1)  Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
- *     Do not exceed regulatory limit; reduce target txpower if necessary.
- *
- *     If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
- *     2 transmitters will be used simultaneously; driver must reduce the
- *     regulatory limit by 3 dB (half-power) for each transmitter, so the
- *     combined total output of the 2 transmitters is within regulatory limits.
- *
- *
- * 2)  Compare target txpower vs. (EEPROM) saturation txpower *reduced by
- *     backoff for this bit rate*.  Do not exceed (saturation - backoff[rate]);
- *     reduce target txpower if necessary.
- *
- *     Backoff values below are in 1/2 dB units (equivalent to steps in
- *     txpower gain tables):
- *
- *     OFDM 6 - 36 MBit:  10 steps (5 dB)
- *     OFDM 48 MBit:      15 steps (7.5 dB)
- *     OFDM 54 MBit:      17 steps (8.5 dB)
- *     OFDM 60 MBit:      20 steps (10 dB)
- *     CCK all rates:     10 steps (5 dB)
- *
- *     Backoff values apply to saturation txpower on a per-transmitter basis;
- *     when using MIMO (2 transmitters), each transmitter uses the same
- *     saturation level provided in EEPROM, and the same backoff values;
- *     no reduction (such as with regulatory txpower limits) is required.
- *
- *     Saturation and Backoff values apply equally to 20 MHz (legacy) channel
- *     widths and 40 MHz (.11n HT40) channel widths; there is no separate
- *     factory measurement for ht40 channels.
- *
- *     The result of this step is the final target txpower.  The rest of
- *     the steps figure out the proper settings for the device to achieve
- *     that target txpower.
- *
- *
- * 3)  Determine (EEPROM) calibration sub band for the target channel, by
- *     comparing against first and last channels in each sub band
- *     (see struct iwl4965_eeprom_calib_subband_info).
- *
- *
- * 4)  Linearly interpolate (EEPROM) factory calibration measurement sets,
- *     referencing the 2 factory-measured (sample) channels within the sub band.
- *
- *     Interpolation is based on difference between target channel's frequency
- *     and the sample channels' frequencies.  Since channel numbers are based
- *     on frequency (5 MHz between each channel number), this is equivalent
- *     to interpolating based on channel number differences.
- *
- *     Note that the sample channels may or may not be the channels at the
- *     edges of the sub band.  The target channel may be "outside" of the
- *     span of the sampled channels.
- *
- *     Driver may choose the pair (for 2 Tx chains) of measurements (see
- *     struct iwl4965_eeprom_calib_ch_info) for which the actual measured
- *     txpower comes closest to the desired txpower.  Usually, though,
- *     the middle set of measurements is closest to the regulatory limits,
- *     and is therefore a good choice for all txpower calculations (this
- *     assumes that high accuracy is needed for maximizing legal txpower,
- *     while lower txpower configurations do not need as much accuracy).
- *
- *     Driver should interpolate both members of the chosen measurement pair,
- *     i.e. for both Tx chains (radio transmitters), unless the driver knows
- *     that only one of the chains will be used (e.g. only one tx antenna
- *     connected, but this should be unusual).  The rate scaling algorithm
- *     switches antennas to find best performance, so both Tx chains will
- *     be used (although only one at a time) even for non-MIMO transmissions.
- *
- *     Driver should interpolate factory values for temperature, gain table
- *     index, and actual power.  The power amplifier detector values are
- *     not used by the driver.
- *
- *     Sanity check:  If the target channel happens to be one of the sample
- *     channels, the results should agree with the sample channel's
- *     measurements!
- *
- *
- * 5)  Find difference between desired txpower and (interpolated)
- *     factory-measured txpower.  Using (interpolated) factory gain table index
- *     (shown elsewhere) as a starting point, adjust this index lower to
- *     increase txpower, or higher to decrease txpower, until the target
- *     txpower is reached.  Each step in the gain table is 1/2 dB.
- *
- *     For example, if factory measured txpower is 16 dBm, and target txpower
- *     is 13 dBm, add 6 steps to the factory gain index to reduce txpower
- *     by 3 dB.
- *
- *
- * 6)  Find difference between current device temperature and (interpolated)
- *     factory-measured temperature for sub-band.  Factory values are in
- *     degrees Celsius.  To calculate current temperature, see comments for
- *     "4965 temperature calculation".
- *
- *     If current temperature is higher than factory temperature, driver must
- *     increase gain (lower gain table index), and vice versa.
- *
- *     Temperature affects gain differently for different channels:
- *
- *     2.4 GHz all channels:  3.5 degrees per half-dB step
- *     5 GHz channels 34-43:  4.5 degrees per half-dB step
- *     5 GHz channels >= 44:  4.0 degrees per half-dB step
- *
- *     NOTE:  Temperature can increase rapidly when transmitting, especially
- *            with heavy traffic at high txpowers.  Driver should update
- *            temperature calculations often under these conditions to
- *            maintain strong txpower in the face of rising temperature.
- *
- *
- * 7)  Find difference between current power supply voltage indicator
- *     (from "initialize alive") and factory-measured power supply voltage
- *     indicator (EEPROM).
- *
- *     If the current voltage is higher (indicator is lower) than factory
- *     voltage, gain should be reduced (gain table index increased) by:
- *
- *     (eeprom - current) / 7
- *
- *     If the current voltage is lower (indicator is higher) than factory
- *     voltage, gain should be increased (gain table index decreased) by:
- *
- *     2 * (current - eeprom) / 7
- *
- *     If number of index steps in either direction turns out to be > 2,
- *     something is wrong ... just use 0.
- *
- *     NOTE:  Voltage compensation is independent of band/channel.
- *
- *     NOTE:  "Initialize" uCode measures current voltage, which is assumed
- *            to be constant after this initial measurement.  Voltage
- *            compensation for txpower (number of steps in gain table)
- *            may be calculated once and used until the next uCode bootload.
- *
- *
- * 8)  If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
- *     adjust txpower for each transmitter chain, so txpower is balanced
- *     between the two chains.  There are 5 pairs of tx_atten[group][chain]
- *     values in "initialize alive", one pair for each of 5 channel ranges:
- *
- *     Group 0:  5 GHz channel 34-43
- *     Group 1:  5 GHz channel 44-70
- *     Group 2:  5 GHz channel 71-124
- *     Group 3:  5 GHz channel 125-200
- *     Group 4:  2.4 GHz all channels
- *
- *     Add the tx_atten[group][chain] value to the index for the target chain.
- *     The values are signed, but come in pairs of 0 and a non-negative number,
- *     so as to reduce gain (if necessary) of the "hotter" Tx chain.  This
- *     avoids any need to double-check for regulatory compliance after
- *     this step.
- *
- *
- * 9)  If setting up for a CCK rate, lower the gain by adding a CCK compensation
- *     value to the index:
- *
- *     Hardware rev B:  9 steps (4.5 dB)
- *     Hardware rev C:  5 steps (2.5 dB)
- *
- *     Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
- *     bits [3:2], 1 = B, 2 = C.
- *
- *     NOTE:  This compensation is in addition to any saturation backoff that
- *            might have been applied in an earlier step.
- *
- *
- * 10) Select the gain table, based on band (2.4 vs 5 GHz).
- *
- *     Limit the adjusted index to stay within the table!
- *
- *
- * 11) Read gain table entries for DSP and radio gain, place into appropriate
- *     location(s) in command (struct iwl4965_txpowertable_cmd).
- */
-
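-/*
- * Illustrative sketch, not part of the original driver: the index
- * arithmetic of step 5 in the comment block above.  Power values are in
- * half-dBm and each gain table step is 1/2 dB; the helper and parameter
- * names are hypothetical.
- */
-static int sketch_step5_gain_index(int factory_index,
-                                  int factory_pwr_half_dbm,
-                                  int target_pwr_half_dbm)
-{
-       /* Raise the index (less gain) by one step per half-dB that the
-        * target lies below the factory measurement; e.g. factory 16 dBm,
-        * target 13 dBm => +6 steps. */
-       return factory_index + (factory_pwr_half_dbm - target_pwr_half_dbm);
-}
-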
-/**
- * When MIMO is used (2 transmitters operating simultaneously), driver should
- * limit each transmitter to deliver a max of 3 dB below the regulatory limit
- * for the device.  That is, use half power for each transmitter, so total
- * txpower is within regulatory limits.
- *
- * The value "6" represents number of steps in gain table to reduce power 3 dB.
- * Each step is 1/2 dB.
- */
-#define IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
-
-/**
- * CCK gain compensation.
- *
- * When calculating txpowers for CCK, after making sure that the target power
- * is within regulatory and saturation limits, driver must additionally
- * back off gain by adding these values to the gain table index.
- *
- * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
- * bits [3:2], 1 = B, 2 = C.
- */
-#define IWL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
-#define IWL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
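-
-/*
- * Illustrative sketch, not from the original driver: selecting the CCK
- * backoff from the hardware revision field described above (bits [3:2]
- * of CSR_HW_REV_WA_REG, 1 = rev B, 2 = rev C).  The helper name and the
- * fallback for unknown revision codes are assumptions.
- */
-static u8 sketch_cck_compensation(u32 hw_rev_wa_reg)
-{
-       switch ((hw_rev_wa_reg >> 2) & 0x3) {
-       case 1:
-               return IWL_TX_POWER_CCK_COMPENSATION_B_STEP;    /* 4.5 dB */
-       case 2:
-               return IWL_TX_POWER_CCK_COMPENSATION_C_STEP;    /* 2.5 dB */
-       default:
-               return IWL_TX_POWER_CCK_COMPENSATION_B_STEP;    /* assumed */
-       }
-}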
-
-/*
- * 4965 power supply voltage compensation for txpower
- */
-#define TX_POWER_IWL_VOLTAGE_CODES_PER_03V   (7)
-
-/**
- * Gain tables.
- *
- * The following tables contain pairs of values for setting txpower, i.e.
- * gain settings for the output of the device's digital signal processor (DSP),
- * and for the analog gain structure of the transmitter.
- *
- * Each entry in the gain tables represents a step of 1/2 dB.  Note that these
- * are *relative* steps, not indications of absolute output power.  Output
- * power varies with temperature, voltage, and channel frequency, and also
- * requires consideration of average power (to satisfy regulatory constraints),
- * and peak power (to avoid distortion of the output signal).
- *
- * Each entry contains two values:
- * 1)  DSP gain (sometimes called DSP attenuation).  This is a fine-grained
- *     linear value that multiplies the output of the digital signal processor,
- *     before being sent to the analog radio.
- * 2)  Radio gain.  This sets the analog gain of the radio Tx path.
- *     It is a coarser setting, and behaves in a logarithmic (dB) fashion.
- *
- * EEPROM contains factory calibration data for txpower.  This maps actual
- * measured txpower levels to gain settings in the "well known" tables
- * below ("well-known" means here that both factory calibration *and* the
- * driver work with the same table).
- *
- * There are separate tables for 2.4 GHz and 5 GHz bands.  The 5 GHz table
- * has an extension (into negative indexes), in case the driver needs to
- * boost power setting for high device temperatures (higher than would be
- * present during factory calibration).  A 5 GHz EEPROM index of "40"
- * corresponds to the 49th entry in the table used by the driver.
- */
-#define MIN_TX_GAIN_INDEX              (0)  /* highest gain, lowest idx, 2.4 */
-#define MIN_TX_GAIN_INDEX_52GHZ_EXT    (-9) /* highest gain, lowest idx, 5 */
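-
-/*
- * Illustrative sketch (hypothetical helper): mapping a 5 GHz factory gain
- * index from EEPROM onto the driver's extended table, whose first entry
- * corresponds to MIN_TX_GAIN_INDEX_52GHZ_EXT (-9).  An EEPROM index of 40
- * thus lands 9 entries further in, at array offset 40 + 9 = 49.
- */
-static int sketch_5ghz_array_offset(int eeprom_gain_index)
-{
-       return eeprom_gain_index - MIN_TX_GAIN_INDEX_52GHZ_EXT;
-}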
-
-/**
- * 2.4 GHz gain table
- *
- * Index    Dsp gain   Radio gain
- *   0        110         0x3f      (highest gain)
- *   1        104         0x3f
- *   2         98         0x3f
- *   3        110         0x3e
- *   4        104         0x3e
- *   5         98         0x3e
- *   6        110         0x3d
- *   7        104         0x3d
- *   8         98         0x3d
- *   9        110         0x3c
- *  10        104         0x3c
- *  11         98         0x3c
- *  12        110         0x3b
- *  13        104         0x3b
- *  14         98         0x3b
- *  15        110         0x3a
- *  16        104         0x3a
- *  17         98         0x3a
- *  18        110         0x39
- *  19        104         0x39
- *  20         98         0x39
- *  21        110         0x38
- *  22        104         0x38
- *  23         98         0x38
- *  24        110         0x37
- *  25        104         0x37
- *  26         98         0x37
- *  27        110         0x36
- *  28        104         0x36
- *  29         98         0x36
- *  30        110         0x35
- *  31        104         0x35
- *  32         98         0x35
- *  33        110         0x34
- *  34        104         0x34
- *  35         98         0x34
- *  36        110         0x33
- *  37        104         0x33
- *  38         98         0x33
- *  39        110         0x32
- *  40        104         0x32
- *  41         98         0x32
- *  42        110         0x31
- *  43        104         0x31
- *  44         98         0x31
- *  45        110         0x30
- *  46        104         0x30
- *  47         98         0x30
- *  48        110          0x6
- *  49        104          0x6
- *  50         98          0x6
- *  51        110          0x5
- *  52        104          0x5
- *  53         98          0x5
- *  54        110          0x4
- *  55        104          0x4
- *  56         98          0x4
- *  57        110          0x3
- *  58        104          0x3
- *  59         98          0x3
- *  60        110          0x2
- *  61        104          0x2
- *  62         98          0x2
- *  63        110          0x1
- *  64        104          0x1
- *  65         98          0x1
- *  66        110          0x0
- *  67        104          0x0
- *  68         98          0x0
- *  69         97            0
- *  70         96            0
- *  71         95            0
- *  72         94            0
- *  73         93            0
- *  74         92            0
- *  75         91            0
- *  76         90            0
- *  77         89            0
- *  78         88            0
- *  79         87            0
- *  80         86            0
- *  81         85            0
- *  82         84            0
- *  83         83            0
- *  84         82            0
- *  85         81            0
- *  86         80            0
- *  87         79            0
- *  88         78            0
- *  89         77            0
- *  90         76            0
- *  91         75            0
- *  92         74            0
- *  93         73            0
- *  94         72            0
- *  95         71            0
- *  96         70            0
- *  97         69            0
- *  98         68            0
- */
-
-/**
- * 5 GHz gain table
- *
- * Index    Dsp gain   Radio gain
- *  -9        123         0x3F      (highest gain)
- *  -8        117         0x3F
- *  -7        110         0x3F
- *  -6        104         0x3F
- *  -5         98         0x3F
- *  -4        110         0x3E
- *  -3        104         0x3E
- *  -2         98         0x3E
- *  -1        110         0x3D
- *   0        104         0x3D
- *   1         98         0x3D
- *   2        110         0x3C
- *   3        104         0x3C
- *   4         98         0x3C
- *   5        110         0x3B
- *   6        104         0x3B
- *   7         98         0x3B
- *   8        110         0x3A
- *   9        104         0x3A
- *  10         98         0x3A
- *  11        110         0x39
- *  12        104         0x39
- *  13         98         0x39
- *  14        110         0x38
- *  15        104         0x38
- *  16         98         0x38
- *  17        110         0x37
- *  18        104         0x37
- *  19         98         0x37
- *  20        110         0x36
- *  21        104         0x36
- *  22         98         0x36
- *  23        110         0x35
- *  24        104         0x35
- *  25         98         0x35
- *  26        110         0x34
- *  27        104         0x34
- *  28         98         0x34
- *  29        110         0x33
- *  30        104         0x33
- *  31         98         0x33
- *  32        110         0x32
- *  33        104         0x32
- *  34         98         0x32
- *  35        110         0x31
- *  36        104         0x31
- *  37         98         0x31
- *  38        110         0x30
- *  39        104         0x30
- *  40         98         0x30
- *  41        110         0x25
- *  42        104         0x25
- *  43         98         0x25
- *  44        110         0x24
- *  45        104         0x24
- *  46         98         0x24
- *  47        110         0x23
- *  48        104         0x23
- *  49         98         0x23
- *  50        110         0x22
- *  51        104         0x18
- *  52         98         0x18
- *  53        110         0x17
- *  54        104         0x17
- *  55         98         0x17
- *  56        110         0x16
- *  57        104         0x16
- *  58         98         0x16
- *  59        110         0x15
- *  60        104         0x15
- *  61         98         0x15
- *  62        110         0x14
- *  63        104         0x14
- *  64         98         0x14
- *  65        110         0x13
- *  66        104         0x13
- *  67         98         0x13
- *  68        110         0x12
- *  69        104         0x08
- *  70         98         0x08
- *  71        110         0x07
- *  72        104         0x07
- *  73         98         0x07
- *  74        110         0x06
- *  75        104         0x06
- *  76         98         0x06
- *  77        110         0x05
- *  78        104         0x05
- *  79         98         0x05
- *  80        110         0x04
- *  81        104         0x04
- *  82         98         0x04
- *  83        110         0x03
- *  84        104         0x03
- *  85         98         0x03
- *  86        110         0x02
- *  87        104         0x02
- *  88         98         0x02
- *  89        110         0x01
- *  90        104         0x01
- *  91         98         0x01
- *  92        110         0x00
- *  93        104         0x00
- *  94         98         0x00
- *  95         93         0x00
- *  96         88         0x00
- *  97         83         0x00
- *  98         78         0x00
- */
-
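-/*
- * Illustrative sketch, not part of the original header: reading one
- * (DSP gain, radio gain) pair from a table laid out as documented above,
- * with the index clamped to the table bounds as required by step 10.
- * The structure and helper names are hypothetical.
- */
-struct sketch_gain_entry {
-       u8 dsp;
-       u8 radio;
-};
-
-static struct sketch_gain_entry
-sketch_lookup_gain(const struct sketch_gain_entry *table, int table_len,
-                  int index)
-{
-       if (index < 0)
-               index = 0;
-       if (index > table_len - 1)
-               index = table_len - 1;
-       return table[index];
-}
-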
-
-/**
- * Sanity checks and default values for EEPROM regulatory levels.
- * If EEPROM values fall outside MIN/MAX range, use default values.
- *
- * Regulatory limits refer to the maximum average txpower allowed by
- * regulatory agencies in the geographies in which the device is meant
- * to be operated.  These limits are SKU-specific (i.e. geography-specific),
- * and channel-specific; each channel has an individual regulatory limit
- * listed in the EEPROM.
- *
- * Units are in half-dBm (i.e. "34" means 17 dBm).
- */
-#define IWL_TX_POWER_DEFAULT_REGULATORY_24   (34)
-#define IWL_TX_POWER_DEFAULT_REGULATORY_52   (34)
-#define IWL_TX_POWER_REGULATORY_MIN          (0)
-#define IWL_TX_POWER_REGULATORY_MAX          (34)
-
-/**
- * Sanity checks and default values for EEPROM saturation levels.
- * If EEPROM values fall outside MIN/MAX range, use default values.
- *
- * Saturation is the highest level that the output power amplifier can produce
- * without significant clipping distortion.  This is a "peak" power level.
- * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
- * require differing amounts of backoff, relative to their average power output,
- * in order to avoid clipping distortion.
- *
- * Driver must make sure that it is violating neither the saturation limit,
- * nor the regulatory limit, when calculating Tx power settings for various
- * rates.
- *
- * Units are in half-dBm (i.e. "38" means 19 dBm).
- */
-#define IWL_TX_POWER_DEFAULT_SATURATION_24   (38)
-#define IWL_TX_POWER_DEFAULT_SATURATION_52   (38)
-#define IWL_TX_POWER_SATURATION_MIN          (20)
-#define IWL_TX_POWER_SATURATION_MAX          (50)
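-
-/*
- * Illustrative sketch (hypothetical helpers): applying the sanity checks
- * described above -- if an EEPROM value falls outside its MIN/MAX range,
- * the corresponding default is used instead.  Units are half-dBm; the
- * 2.4 GHz defaults are shown, the _52 variants apply for 5 GHz.
- */
-static s32 sketch_sanitize_regulatory(s32 eeprom_val)
-{
-       if (eeprom_val < IWL_TX_POWER_REGULATORY_MIN ||
-           eeprom_val > IWL_TX_POWER_REGULATORY_MAX)
-               return IWL_TX_POWER_DEFAULT_REGULATORY_24;
-       return eeprom_val;
-}
-
-static s32 sketch_sanitize_saturation(s32 eeprom_val)
-{
-       if (eeprom_val < IWL_TX_POWER_SATURATION_MIN ||
-           eeprom_val > IWL_TX_POWER_SATURATION_MAX)
-               return IWL_TX_POWER_DEFAULT_SATURATION_24;
-       return eeprom_val;
-}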
-
-/**
- * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
- * and thermal Txpower calibration.
- *
- * When calculating txpower, driver must compensate for current device
- * temperature; higher temperature requires higher gain.  Driver must calculate
- * current temperature (see "4965 temperature calculation"), then compare vs.
- * factory calibration temperature in EEPROM; if current temperature is higher
- * than factory temperature, driver must *increase* gain by proportions shown
- * in table below.  If current temperature is lower than factory, driver must
- * *decrease* gain.
- *
- * Different frequency ranges require different compensation, as shown below.
- */
-/* Group 0, 5.2 GHz ch 34-43:  4.5 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR1_FCH 34
-#define CALIB_IWL_TX_ATTEN_GR1_LCH 43
-
-/* Group 1, 5.3 GHz ch 44-70:  4.0 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR2_FCH 44
-#define CALIB_IWL_TX_ATTEN_GR2_LCH 70
-
-/* Group 2, 5.5 GHz ch 71-124:  4.0 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR3_FCH 71
-#define CALIB_IWL_TX_ATTEN_GR3_LCH 124
-
-/* Group 3, 5.7 GHz ch 125-200:  4.0 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR4_FCH 125
-#define CALIB_IWL_TX_ATTEN_GR4_LCH 200
-
-/* Group 4, 2.4 GHz all channels:  3.5 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR5_FCH 1
-#define CALIB_IWL_TX_ATTEN_GR5_LCH 20
-
-enum {
-       CALIB_CH_GROUP_1 = 0,
-       CALIB_CH_GROUP_2 = 1,
-       CALIB_CH_GROUP_3 = 2,
-       CALIB_CH_GROUP_4 = 3,
-       CALIB_CH_GROUP_5 = 4,
-       CALIB_CH_GROUP_MAX
-};
-
-/********************* END TXPOWER *****************************************/
-
-
-/**
- * Tx/Rx Queues
- *
- * Most communication between driver and 4965 is via queues of data buffers.
- * For example, all commands that the driver issues to device's embedded
- * controller (uCode) are via the command queue (one of the Tx queues).  All
- * uCode command responses/replies/notifications, including Rx frames, are
- * conveyed from uCode to driver via the Rx queue.
- *
- * Most support for these queues, including handshake support, resides in
- * structures in host DRAM, shared between the driver and the device.  When
- * allocating this memory, the driver must make sure that data written by
- * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
- * cache memory), so DRAM and cache are consistent, and the device can
- * immediately see changes made by the driver.
- *
- * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
- * up to 7 DMA channels (FIFOs).  Each Tx queue is supported by a circular array
- * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
- */
-#define IWL49_NUM_FIFOS        7
-#define IWL49_CMD_FIFO_NUM     4
-#define IWL49_NUM_QUEUES       16
-#define IWL49_NUM_AMPDU_QUEUES 8
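-
-/*
- * Illustrative sketch, not from the original driver: the "DRAM the device
- * sees immediately" requirement described above is what coherent DMA
- * memory provides -- CPU writes become visible to the device without
- * explicit cache flushes.  A driver might allocate shared queue support
- * structures roughly like this; the helper name is hypothetical and
- * error handling is omitted.
- */
-static void *sketch_alloc_shared_dram(struct device *dev, size_t size,
-                                     dma_addr_t *dma)
-{
-       return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
-}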
-
-
-/**
- * struct iwl4965_schedq_bc_tbl
- *
- * Byte Count table
- *
- * Each Tx queue uses a byte-count table containing 320 entries:
- * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
- * duplicate the first 64 entries (to avoid wrap-around within a Tx window;
- * max Tx window is 64 TFDs).
- *
- * When driver sets up a new TFD, it must also enter the total byte count
- * of the frame to be transmitted into the corresponding entry in the byte
- * count table for the chosen Tx queue.  If the TFD index is 0-63, the driver
- * must duplicate the byte count entry at the corresponding index 256-319.
- *
- * Padding puts each byte count table on a 1024-byte boundary;
- * 4965 assumes tables are separated by 1024 bytes.
- */
-struct iwl4965_scd_bc_tbl {
-       __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
-       u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
-} __packed;
-
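-/*
- * Illustrative sketch (hypothetical helper): updating the byte count
- * table as described above -- write the frame's byte count at the TFD
- * index, and duplicate entries 0-63 at 256-319 so the scheduler never
- * sees a stale count when the 64-TFD Tx window wraps.
- */
-static void sketch_set_byte_count(struct iwl4965_scd_bc_tbl *bc_tbl,
-                                 int tfd_idx, u16 byte_cnt)
-{
-       bc_tbl->tfd_offset[tfd_idx] = cpu_to_le16(byte_cnt);
-       if (tfd_idx < 64)
-               bc_tbl->tfd_offset[tfd_idx + 256] = cpu_to_le16(byte_cnt);
-}
-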
-#endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
deleted file mode 100644 (file)
index 91a9f52..0000000
+++ /dev/null
@@ -1,2645 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/wireless.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-agn-calib.h"
-#include "iwl-sta.h"
-#include "iwl-agn-led.h"
-#include "iwl-agn.h"
-#include "iwl-agn-debugfs.h"
-#include "iwl-legacy.h"
-
-static int iwl4965_send_tx_power(struct iwl_priv *priv);
-static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
-
-/* Highest firmware API version supported */
-#define IWL4965_UCODE_API_MAX 2
-
-/* Lowest firmware API version supported */
-#define IWL4965_UCODE_API_MIN 2
-
-#define IWL4965_FW_PRE "iwlwifi-4965-"
-#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
-#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)
-
-/* check contents of special bootstrap uCode SRAM */
-static int iwl4965_verify_bsm(struct iwl_priv *priv)
-{
-       __le32 *image = priv->ucode_boot.v_addr;
-       u32 len = priv->ucode_boot.len;
-       u32 reg;
-       u32 val;
-
-       IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
-
-       /* verify BSM SRAM contents */
-       val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
-       for (reg = BSM_SRAM_LOWER_BOUND;
-            reg < BSM_SRAM_LOWER_BOUND + len;
-            reg += sizeof(u32), image++) {
-               val = iwl_read_prph(priv, reg);
-               if (val != le32_to_cpu(*image)) {
-                       IWL_ERR(priv, "BSM uCode verification failed at "
-                                 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
-                                 BSM_SRAM_LOWER_BOUND,
-                                 reg - BSM_SRAM_LOWER_BOUND, len,
-                                 val, le32_to_cpu(*image));
-                       return -EIO;
-               }
-       }
-
-       IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
-
-       return 0;
-}
-
-/**
- * iwl4965_load_bsm - Load bootstrap instructions
- *
- * BSM operation:
- *
- * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
- * in special SRAM that does not power down during RFKILL.  When powering back
- * up after power-saving sleeps (or during initial uCode load), the BSM loads
- * the bootstrap program into the on-board processor, and starts it.
- *
- * The bootstrap program loads (via DMA) instructions and data for a new
- * program from host DRAM locations indicated by the host driver in the
- * BSM_DRAM_* registers.  Once the new program is loaded, it starts
- * automatically.
- *
- * When initializing the NIC, the host driver points the BSM to the
- * "initialize" uCode image.  This uCode sets up some internal data, then
- * notifies host via "initialize alive" that it is complete.
- *
- * The host then replaces the BSM_DRAM_* pointer values to point to the
- * normal runtime uCode instructions and a backup uCode data cache buffer
- * (filled initially with starting data values for the on-board processor),
- * then triggers the "initialize" uCode to load and launch the runtime uCode,
- * which begins normal operation.
- *
- * When doing a power-save shutdown, runtime uCode saves data SRAM into
- * the backup data cache in DRAM before SRAM is powered down.
- *
- * When powering back up, the BSM loads the bootstrap program.  This reloads
- * the runtime uCode instructions and the backup data cache into SRAM,
- * and re-launches the runtime uCode from where it left off.
- */
-static int iwl4965_load_bsm(struct iwl_priv *priv)
-{
-       __le32 *image = priv->ucode_boot.v_addr;
-       u32 len = priv->ucode_boot.len;
-       dma_addr_t pinst;
-       dma_addr_t pdata;
-       u32 inst_len;
-       u32 data_len;
-       int i;
-       u32 done;
-       u32 reg_offset;
-       int ret;
-
-       IWL_DEBUG_INFO(priv, "Begin load bsm\n");
-
-       priv->ucode_type = UCODE_RT;
-
-       /* make sure bootstrap program is no larger than BSM's SRAM size */
-       if (len > IWL49_MAX_BSM_SIZE)
-               return -EINVAL;
-
-       /* Tell bootstrap uCode where to find the "Initialize" uCode
-        *   in host DRAM ... host DRAM physical address bits 35:4 for 4965.
-        * NOTE:  iwl_init_alive_start() will replace these values,
-        *        after the "initialize" uCode has run, to point to
-        *        runtime/protocol instructions and backup data cache.
-        */
-       pinst = priv->ucode_init.p_addr >> 4;
-       pdata = priv->ucode_init_data.p_addr >> 4;
-       inst_len = priv->ucode_init.len;
-       data_len = priv->ucode_init_data.len;
-
-       iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
-       iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
-       iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
-       iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
-
-       /* Fill BSM memory with bootstrap instructions */
-       for (reg_offset = BSM_SRAM_LOWER_BOUND;
-            reg_offset < BSM_SRAM_LOWER_BOUND + len;
-            reg_offset += sizeof(u32), image++)
-               _iwl_write_prph(priv, reg_offset, le32_to_cpu(*image));
-
-       ret = iwl4965_verify_bsm(priv);
-       if (ret)
-               return ret;
-
-       /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
-       iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
-       iwl_write_prph(priv, BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
-       iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
-
-       /* Load bootstrap code into instruction SRAM now,
-        *   to prepare to load "initialize" uCode */
-       iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
-
-       /* Wait for load of bootstrap uCode to finish */
-       for (i = 0; i < 100; i++) {
-               done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
-               if (!(done & BSM_WR_CTRL_REG_BIT_START))
-                       break;
-               udelay(10);
-       }
-       if (i < 100)
-               IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
-       else {
-               IWL_ERR(priv, "BSM write did not complete!\n");
-               return -EIO;
-       }
-
-       /* Enable future boot loads whenever power management unit triggers it
-        *   (e.g. when powering back up after power-save shutdown) */
-       iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
-
-
-       return 0;
-}
-
-/**
- * iwl4965_set_ucode_ptrs - Set uCode address location
- *
- * Tell initialization uCode where to find runtime uCode.
- *
- * BSM registers initially contain pointers to initialization uCode.
- * We need to replace them to load runtime uCode inst and data,
- * and to save runtime data when powering down.
- */
-static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
-{
-       dma_addr_t pinst;
-       dma_addr_t pdata;
-       int ret = 0;
-
-       /* bits 35:4 for 4965 */
-       pinst = priv->ucode_code.p_addr >> 4;
-       pdata = priv->ucode_data_backup.p_addr >> 4;
-
-       /* Tell bootstrap uCode where to find image to load */
-       iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
-       iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
-       iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
-                                priv->ucode_data.len);
-
-       /* Inst byte count must be last to set up, bit 31 signals uCode
-        *   that all new ptr/size info is in place */
-       iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
-                                priv->ucode_code.len | BSM_DRAM_INST_LOAD);
-       IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
-
-       return ret;
-}
-
-/**
- * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
- *
- * Called after REPLY_ALIVE notification received from "initialize" uCode.
- *
- * The 4965 "initialize" ALIVE reply contains calibration data for:
- *   Voltage, temperature, and MIMO tx gain correction, now stored in priv
- *   (3945 does not contain this data).
- *
- * Tell "initialize" uCode to go ahead and load the runtime uCode.
- */
-static void iwl4965_init_alive_start(struct iwl_priv *priv)
-{
-       /* Check alive response for "valid" sign from uCode */
-       if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
-               /* We had an error bringing up the hardware, so take it
-                * all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
-               goto restart;
-       }
-
-       /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
-        * This is a paranoid check, because we would not have gotten the
-        * "initialize" alive if code weren't properly loaded.  */
-       if (iwl_verify_ucode(priv)) {
-               /* Runtime instruction load was bad;
-                * take it all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
-               goto restart;
-       }
-
-       /* Calculate temperature */
-       priv->temperature = iwl4965_hw_get_temperature(priv);
-
-       /* Send pointers to protocol/runtime uCode image ... init code will
-        * load and launch runtime uCode, which will send us another "Alive"
-        * notification. */
-       IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
-       if (iwl4965_set_ucode_ptrs(priv)) {
-               /* Runtime instruction load won't happen;
-                * take it all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
-               goto restart;
-       }
-       return;
-
-restart:
-       queue_work(priv->workqueue, &priv->restart);
-}
-
-static bool is_ht40_channel(__le32 rxon_flags)
-{
-       int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
-                                   >> RXON_FLG_CHANNEL_MODE_POS;
-       return ((chan_mod == CHANNEL_MODE_PURE_40) ||
-                 (chan_mod == CHANNEL_MODE_MIXED));
-}
-
-/*
- * EEPROM handlers
- */
-static u16 iwl4965_eeprom_calib_version(struct iwl_priv *priv)
-{
-       return iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);
-}
-
-/*
- * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask;
- * must be called under priv->lock and with mac access held
- */
-static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
-{
-       iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
-}
-
-static void iwl4965_nic_config(struct iwl_priv *priv)
-{
-       unsigned long flags;
-       u16 radio_cfg;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
-
-       /* write radio config values to register */
-       if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
-               iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-                           EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
-                           EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
-                           EEPROM_RF_CFG_DASH_MSK(radio_cfg));
-
-       /* set CSR_HW_CONFIG_REG for uCode use */
-       iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-                   CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
-                   CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
-
-       priv->calib_info = (struct iwl_eeprom_calib_info *)
-               iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET);
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
- * Called after every association, but this runs only once!
- *  ... once chain noise is calibrated the first time, it's good forever.  */
-static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
-{
-       struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
-
-       if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
-           iwl_is_any_associated(priv)) {
-               struct iwl_calib_diff_gain_cmd cmd;
-
-               /* clear data for chain noise calibration algorithm */
-               data->chain_noise_a = 0;
-               data->chain_noise_b = 0;
-               data->chain_noise_c = 0;
-               data->chain_signal_a = 0;
-               data->chain_signal_b = 0;
-               data->chain_signal_c = 0;
-               data->beacon_count = 0;
-
-               memset(&cmd, 0, sizeof(cmd));
-               cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
-               cmd.diff_gain_a = 0;
-               cmd.diff_gain_b = 0;
-               cmd.diff_gain_c = 0;
-               if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
-                                sizeof(cmd), &cmd))
-                       IWL_ERR(priv,
-                               "Could not send REPLY_PHY_CALIBRATION_CMD\n");
-               data->state = IWL_CHAIN_NOISE_ACCUMULATE;
-               IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
-       }
-}
-
-static void iwl4965_gain_computation(struct iwl_priv *priv,
-               u32 *average_noise,
-               u16 min_average_noise_antenna_i,
-               u32 min_average_noise,
-               u8 default_chain)
-{
-       int i, ret;
-       struct iwl_chain_noise_data *data = &priv->chain_noise_data;
-
-       data->delta_gain_code[min_average_noise_antenna_i] = 0;
-
-       for (i = default_chain; i < NUM_RX_CHAINS; i++) {
-               s32 delta_g = 0;
-
-               if (!(data->disconn_array[i]) &&
-                   (data->delta_gain_code[i] ==
-                            CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
-                       delta_g = average_noise[i] - min_average_noise;
-                       data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
-                       data->delta_gain_code[i] =
-                               min(data->delta_gain_code[i],
-                               (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
-
-                       data->delta_gain_code[i] =
-                               (data->delta_gain_code[i] | (1 << 2));
-               } else {
-                       data->delta_gain_code[i] = 0;
-               }
-       }
-       IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
-                    data->delta_gain_code[0],
-                    data->delta_gain_code[1],
-                    data->delta_gain_code[2]);
-
-       /* Differential gain gets sent to uCode only once */
-       if (!data->radio_write) {
-               struct iwl_calib_diff_gain_cmd cmd;
-               data->radio_write = 1;
-
-               memset(&cmd, 0, sizeof(cmd));
-               cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
-               cmd.diff_gain_a = data->delta_gain_code[0];
-               cmd.diff_gain_b = data->delta_gain_code[1];
-               cmd.diff_gain_c = data->delta_gain_code[2];
-               ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
-                                     sizeof(cmd), &cmd);
-               if (ret)
-                       IWL_DEBUG_CALIB(priv, "fail sending cmd "
-                                    "REPLY_PHY_CALIBRATION_CMD\n");
-
-               /* TODO we might want recalculate
-                * rx_chain in rxon cmd */
-
-               /* Mark so we run this algo only once! */
-               data->state = IWL_CHAIN_NOISE_CALIBRATED;
-       }
-}
-
-static void iwl4965_bg_txpower_work(struct work_struct *work)
-{
-       struct iwl_priv *priv = container_of(work, struct iwl_priv,
-                       txpower_work);
-
-       /* If a scan happened to start before we got here
-        * then just return; the statistics notification will
-        * kick off another scheduled work to compensate for
-        * any temperature delta we missed here. */
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
-           test_bit(STATUS_SCANNING, &priv->status))
-               return;
-
-       mutex_lock(&priv->mutex);
-
-       /* Regardless of whether we are associated, we must reconfigure the
-        * TX power since frames can be sent on non-radar channels while
-        * not associated */
-       iwl4965_send_tx_power(priv);
-
-       /* Update last_temperature to keep is_calib_needed from running
-        * when it isn't needed... */
-       priv->last_temperature = priv->temperature;
-
-       mutex_unlock(&priv->mutex);
-}
-
-/*
- * Acquire priv->lock before calling this function !
- */
-static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
-{
-       iwl_write_direct32(priv, HBUS_TARG_WRPTR,
-                            (index & 0xff) | (txq_id << 8));
-       iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
-}
-
-/**
- * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
- * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
- * @scd_retry: (1) Indicates queue will be used in aggregation mode
- *
- * NOTE:  Acquire priv->lock before calling this function !
- */
-static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
-                                       struct iwl_tx_queue *txq,
-                                       int tx_fifo_id, int scd_retry)
-{
-       int txq_id = txq->q.id;
-
-       /* Find out whether to activate Tx queue */
-       int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
-
-       /* Set up and activate */
-       iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
-                        (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
-                        (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
-                        (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
-                        (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
-                        IWL49_SCD_QUEUE_STTS_REG_MSK);
-
-       txq->sched_retry = scd_retry;
-
-       IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
-                      active ? "Activate" : "Deactivate",
-                      scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
-}
-
-static const s8 default_queue_to_tx_fifo[] = {
-       IWL_TX_FIFO_VO,
-       IWL_TX_FIFO_VI,
-       IWL_TX_FIFO_BE,
-       IWL_TX_FIFO_BK,
-       IWL49_CMD_FIFO_NUM,
-       IWL_TX_FIFO_UNUSED,
-       IWL_TX_FIFO_UNUSED,
-};
-
-static int iwl4965_alive_notify(struct iwl_priv *priv)
-{
-       u32 a;
-       unsigned long flags;
-       int i, chan;
-       u32 reg_val;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* Clear 4965's internal Tx Scheduler data base */
-       priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
-       a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
-       for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
-               iwl_write_targ_mem(priv, a, 0);
-       for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
-               iwl_write_targ_mem(priv, a, 0);
-       for (; a < priv->scd_base_addr +
-              IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
-               iwl_write_targ_mem(priv, a, 0);
-
-       /* Tell 4965 where to find Tx byte count tables */
-       iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
-                       priv->scd_bc_tbls.dma >> 10);
-
-       /* Enable DMA channel */
-       for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
-               iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
-                               FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
-                               FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
-
-       /* Update FH chicken bits */
-       reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
-       iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
-                          reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
-
-       /* Disable chain mode for all queues */
-       iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
-
-       /* Initialize each Tx queue (including the command queue) */
-       for (i = 0; i < priv->hw_params.max_txq_num; i++) {
-
-               /* TFD circular buffer read/write indexes */
-               iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
-               iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
-
-               /* Max Tx Window size for Scheduler-ACK mode */
-               iwl_write_targ_mem(priv, priv->scd_base_addr +
-                               IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
-                               (SCD_WIN_SIZE <<
-                               IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
-                               IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
-
-               /* Frame limit */
-               iwl_write_targ_mem(priv, priv->scd_base_addr +
-                               IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
-                               sizeof(u32),
-                               (SCD_FRAME_LIMIT <<
-                               IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
-                               IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
-
-       }
-       iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
-                                (1 << priv->hw_params.max_txq_num) - 1);
-
-       /* Activate all Tx DMA/FIFO channels */
-       priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 6));
-
-       iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
-
-       /* make sure all queues are not stopped */
-       memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
-       for (i = 0; i < 4; i++)
-               atomic_set(&priv->queue_stop_count[i], 0);
-
-       /* reset to 0 to enable all the queues first */
-       priv->txq_ctx_active_msk = 0;
-       /* Map each Tx/cmd queue to its corresponding fifo */
-       BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
-
-       for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
-               int ac = default_queue_to_tx_fifo[i];
-
-               iwl_txq_ctx_activate(priv, i);
-
-               if (ac == IWL_TX_FIFO_UNUSED)
-                       continue;
-
-               iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
-       }
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       return 0;
-}
-
-static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
-       .min_nrg_cck = 97,
-       .max_nrg_cck = 0, /* not used, set to 0 */
-
-       .auto_corr_min_ofdm = 85,
-       .auto_corr_min_ofdm_mrc = 170,
-       .auto_corr_min_ofdm_x1 = 105,
-       .auto_corr_min_ofdm_mrc_x1 = 220,
-
-       .auto_corr_max_ofdm = 120,
-       .auto_corr_max_ofdm_mrc = 210,
-       .auto_corr_max_ofdm_x1 = 140,
-       .auto_corr_max_ofdm_mrc_x1 = 270,
-
-       .auto_corr_min_cck = 125,
-       .auto_corr_max_cck = 200,
-       .auto_corr_min_cck_mrc = 200,
-       .auto_corr_max_cck_mrc = 400,
-
-       .nrg_th_cck = 100,
-       .nrg_th_ofdm = 100,
-
-       .barker_corr_th_min = 190,
-       .barker_corr_th_min_mrc = 390,
-       .nrg_th_cca = 62,
-};
-
-static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
-{
-       /* want Kelvin */
-       priv->hw_params.ct_kill_threshold =
-               CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
-}
-
-/**
- * iwl4965_hw_set_hw_params
- *
- * Called when initializing driver
- */
-static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
-{
-       if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
-           priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
-               priv->cfg->base_params->num_of_queues =
-                       priv->cfg->mod_params->num_of_queues;
-
-       priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
-       priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
-       priv->hw_params.scd_bc_tbls_size =
-                       priv->cfg->base_params->num_of_queues *
-                       sizeof(struct iwl4965_scd_bc_tbl);
-       priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
-       priv->hw_params.max_stations = IWL4965_STATION_COUNT;
-       priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL4965_BROADCAST_ID;
-       priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
-       priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
-       priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
-       priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
-
-       priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
-
-       priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
-       priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
-       priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
-       priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
-
-       iwl4965_set_ct_threshold(priv);
-
-       priv->hw_params.sens = &iwl4965_sensitivity;
-       priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
-
-       return 0;
-}
-
-static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
-{
-       s32 sign = 1;
-
-       if (num < 0) {
-               sign = -sign;
-               num = -num;
-       }
-       if (denom < 0) {
-               sign = -sign;
-               denom = -denom;
-       }
-       *res = 1;
-       *res = ((num * 2 + denom) / (denom * 2)) * sign;
-
-       return 1;
-}
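-
-/*
- * Usage sketch (illustrative only, not from the original source):
- * iwl4965_math_div_round() above is a signed divide rounded to the
- * nearest integer; its return value is always 1 and only *res matters.
- */
-static inline s32 sketch_div_round_examples(void)
-{
-       s32 a, b;
-
-       iwl4965_math_div_round(7, 2, &a);       /* a == 4  */
-       iwl4965_math_div_round(-7, 2, &b);      /* b == -4 */
-       return a + b;                           /* 0 */
-}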
-
-/**
- * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
- *
- * Determines power supply voltage compensation for txpower calculations.
- * Returns number of 1/2-dB steps to subtract from gain table index,
- * to compensate for difference between power supply voltage during
- * factory measurements, vs. current power supply voltage.
- *
- * Voltage indication is higher for lower voltage.
- * Lower voltage requires more gain (lower gain table index).
- */
-static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
-                                           s32 current_voltage)
-{
-       s32 comp = 0;
-
-       if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
-           (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
-               return 0;
-
-       iwl4965_math_div_round(current_voltage - eeprom_voltage,
-                              TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
-
-       if (current_voltage > eeprom_voltage)
-               comp *= 2;
-       if ((comp < -2) || (comp > 2))
-               comp = 0;
-
-       return comp;
-}
-
-static s32 iwl4965_get_tx_atten_grp(u16 channel)
-{
-       if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
-           channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
-               return CALIB_CH_GROUP_5;
-
-       if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
-           channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
-               return CALIB_CH_GROUP_1;
-
-       if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
-           channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
-               return CALIB_CH_GROUP_2;
-
-       if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
-           channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
-               return CALIB_CH_GROUP_3;
-
-       if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
-           channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
-               return CALIB_CH_GROUP_4;
-
-       return -1;
-}
-
-static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
-{
-       s32 b = -1;
-
-       for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
-               if (priv->calib_info->band_info[b].ch_from == 0)
-                       continue;
-
-               if ((channel >= priv->calib_info->band_info[b].ch_from)
-                   && (channel <= priv->calib_info->band_info[b].ch_to))
-                       break;
-       }
-
-       return b;
-}
-
-static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
-{
-       s32 val;
-
-       if (x2 == x1)
-               return y1;
-       else {
-               iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
-               return val + y2;
-       }
-}
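-
-/*
- * Usage sketch (illustrative only): interpolating a factory value for
- * channel 40 from two hypothetical sample channels, 36 and 48.
- */
-static inline s32 sketch_interpolate_example(void)
-{
-       /* y falls linearly from 100 at ch 36 to 88 at ch 48,
-        * so channel 40 interpolates to 96. */
-       return iwl4965_interpolate_value(40, 36, 100, 48, 88);
-}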
-
-/**
- * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
- *
- * Interpolates factory measurements from the two sample channels within a
- * sub-band, to apply to channel of interest.  Interpolation is proportional to
- * differences in channel frequencies, which is proportional to differences
- * in channel number.
- */
-static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
-                                   struct iwl_eeprom_calib_ch_info *chan_info)
-{
-       s32 s = -1;
-       u32 c;
-       u32 m;
-       const struct iwl_eeprom_calib_measure *m1;
-       const struct iwl_eeprom_calib_measure *m2;
-       struct iwl_eeprom_calib_measure *omeas;
-       u32 ch_i1;
-       u32 ch_i2;
-
-       s = iwl4965_get_sub_band(priv, channel);
-       if (s >= EEPROM_TX_POWER_BANDS) {
-               IWL_ERR(priv, "Tx Power can not find channel %d\n", channel);
-               return -1;
-       }
-
-       ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
-       ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
-       chan_info->ch_num = (u8) channel;
-
-       IWL_DEBUG_TXPOWER(priv, "channel %d subband %d factory cal ch %d & %d\n",
-                         channel, s, ch_i1, ch_i2);
-
-       for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
-               for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
-                       m1 = &(priv->calib_info->band_info[s].ch1.
-                              measurements[c][m]);
-                       m2 = &(priv->calib_info->band_info[s].ch2.
-                              measurements[c][m]);
-                       omeas = &(chan_info->measurements[c][m]);
-
-                       omeas->actual_pow =
-                           (u8) iwl4965_interpolate_value(channel, ch_i1,
-                                                          m1->actual_pow,
-                                                          ch_i2,
-                                                          m2->actual_pow);
-                       omeas->gain_idx =
-                           (u8) iwl4965_interpolate_value(channel, ch_i1,
-                                                          m1->gain_idx, ch_i2,
-                                                          m2->gain_idx);
-                       omeas->temperature =
-                           (u8) iwl4965_interpolate_value(channel, ch_i1,
-                                                          m1->temperature,
-                                                          ch_i2,
-                                                          m2->temperature);
-                       omeas->pa_det =
-                           (s8) iwl4965_interpolate_value(channel, ch_i1,
-                                                          m1->pa_det, ch_i2,
-                                                          m2->pa_det);
-
-                       IWL_DEBUG_TXPOWER(priv,
-                               "chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
-                               m1->actual_pow, m2->actual_pow, omeas->actual_pow);
-                       IWL_DEBUG_TXPOWER(priv,
-                               "chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
-                               m1->gain_idx, m2->gain_idx, omeas->gain_idx);
-                       IWL_DEBUG_TXPOWER(priv,
-                               "chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
-                               m1->pa_det, m2->pa_det, omeas->pa_det);
-                       IWL_DEBUG_TXPOWER(priv,
-                               "chain %d meas %d  T1=%d  T2=%d  T=%d\n", c, m,
-                               m1->temperature, m2->temperature,
-                               omeas->temperature);
-               }
-       }
-
-       return 0;
-}
-
-/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
- * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
-static s32 back_off_table[] = {
-       10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
-       10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
-       10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
-       10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
-       10                      /* CCK */
-};
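-
-/*
- * Illustrative sketch, not part of the original driver: combining the
- * user limit, the per-channel regulatory limit, and the saturation level
- * minus the per-rate backoff above into one target txpower, as described
- * in steps 1-2 of the txpower comment block in the 4965 header.  All
- * power values are in half-dBm; the helper name is hypothetical and
- * rate_index follows the layout of back_off_table above.
- */
-static s32 sketch_target_power(s32 user_lmt, s32 regulatory_lmt,
-                              s32 saturation, int rate_index)
-{
-       s32 target = user_lmt;
-       s32 backed_off = saturation - back_off_table[rate_index];
-
-       if (target > regulatory_lmt)
-               target = regulatory_lmt;
-       if (target > backed_off)
-               target = backed_off;
-       return target;
-}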
-
-/* Thermal compensation values for txpower for various frequency ranges ...
- *   ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
-static struct iwl4965_txpower_comp_entry {
-       s32 degrees_per_05db_a;
-       s32 degrees_per_05db_a_denom;
-} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
-       {9, 2},                 /* group 0 5.2, ch  34-43 */
-       {4, 1},                 /* group 1 5.2, ch  44-70 */
-       {4, 1},                 /* group 2 5.2, ch  71-124 */
-       {4, 1},                 /* group 3 5.2, ch 125-200 */
-       {3, 1}                  /* group 4 2.4, ch   all */
-};
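-
-/*
- * Illustrative sketch (hypothetical helper): converting a temperature
- * delta into half-dB gain steps using the per-group ratios above.  Per
- * the header comments, a current temperature above the factory
- * measurement calls for more gain; a positive result here would mean
- * lowering the gain table index by that many steps.
- */
-static s32 sketch_temperature_steps(s32 current_temp, s32 factory_temp,
-                                   int group)
-{
-       s32 steps;
-
-       iwl4965_math_div_round((current_temp - factory_temp) *
-                               tx_power_cmp_tble[group].degrees_per_05db_a_denom,
-                              tx_power_cmp_tble[group].degrees_per_05db_a,
-                              &steps);
-       return steps;
-}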
-
-static s32 get_min_power_index(s32 rate_power_index, u32 band)
-{
-       if (!band) {
-               if ((rate_power_index & 7) <= 4)
-                       return MIN_TX_GAIN_INDEX_52GHZ_EXT;
-       }
-       return MIN_TX_GAIN_INDEX;
-}
-
-struct gain_entry {
-       u8 dsp;
-       u8 radio;
-};
-
-static const struct gain_entry gain_table[2][108] = {
-       /* 5.2GHz power gain index table */
-       {
-        {123, 0x3F},           /* highest txpower */
-        {117, 0x3F},
-        {110, 0x3F},
-        {104, 0x3F},
-        {98, 0x3F},
-        {110, 0x3E},
-        {104, 0x3E},
-        {98, 0x3E},
-        {110, 0x3D},
-        {104, 0x3D},
-        {98, 0x3D},
-        {110, 0x3C},
-        {104, 0x3C},
-        {98, 0x3C},
-        {110, 0x3B},
-        {104, 0x3B},
-        {98, 0x3B},
-        {110, 0x3A},
-        {104, 0x3A},
-        {98, 0x3A},
-        {110, 0x39},
-        {104, 0x39},
-        {98, 0x39},
-        {110, 0x38},
-        {104, 0x38},
-        {98, 0x38},
-        {110, 0x37},
-        {104, 0x37},
-        {98, 0x37},
-        {110, 0x36},
-        {104, 0x36},
-        {98, 0x36},
-        {110, 0x35},
-        {104, 0x35},
-        {98, 0x35},
-        {110, 0x34},
-        {104, 0x34},
-        {98, 0x34},
-        {110, 0x33},
-        {104, 0x33},
-        {98, 0x33},
-        {110, 0x32},
-        {104, 0x32},
-        {98, 0x32},
-        {110, 0x31},
-        {104, 0x31},
-        {98, 0x31},
-        {110, 0x30},
-        {104, 0x30},
-        {98, 0x30},
-        {110, 0x25},
-        {104, 0x25},
-        {98, 0x25},
-        {110, 0x24},
-        {104, 0x24},
-        {98, 0x24},
-        {110, 0x23},
-        {104, 0x23},
-        {98, 0x23},
-        {110, 0x22},
-        {104, 0x18},
-        {98, 0x18},
-        {110, 0x17},
-        {104, 0x17},
-        {98, 0x17},
-        {110, 0x16},
-        {104, 0x16},
-        {98, 0x16},
-        {110, 0x15},
-        {104, 0x15},
-        {98, 0x15},
-        {110, 0x14},
-        {104, 0x14},
-        {98, 0x14},
-        {110, 0x13},
-        {104, 0x13},
-        {98, 0x13},
-        {110, 0x12},
-        {104, 0x08},
-        {98, 0x08},
-        {110, 0x07},
-        {104, 0x07},
-        {98, 0x07},
-        {110, 0x06},
-        {104, 0x06},
-        {98, 0x06},
-        {110, 0x05},
-        {104, 0x05},
-        {98, 0x05},
-        {110, 0x04},
-        {104, 0x04},
-        {98, 0x04},
-        {110, 0x03},
-        {104, 0x03},
-        {98, 0x03},
-        {110, 0x02},
-        {104, 0x02},
-        {98, 0x02},
-        {110, 0x01},
-        {104, 0x01},
-        {98, 0x01},
-        {110, 0x00},
-        {104, 0x00},
-        {98, 0x00},
-        {93, 0x00},
-        {88, 0x00},
-        {83, 0x00},
-        {78, 0x00},
-        },
-       /* 2.4GHz power gain index table */
-       {
-        {110, 0x3f},           /* highest txpower */
-        {104, 0x3f},
-        {98, 0x3f},
-        {110, 0x3e},
-        {104, 0x3e},
-        {98, 0x3e},
-        {110, 0x3d},
-        {104, 0x3d},
-        {98, 0x3d},
-        {110, 0x3c},
-        {104, 0x3c},
-        {98, 0x3c},
-        {110, 0x3b},
-        {104, 0x3b},
-        {98, 0x3b},
-        {110, 0x3a},
-        {104, 0x3a},
-        {98, 0x3a},
-        {110, 0x39},
-        {104, 0x39},
-        {98, 0x39},
-        {110, 0x38},
-        {104, 0x38},
-        {98, 0x38},
-        {110, 0x37},
-        {104, 0x37},
-        {98, 0x37},
-        {110, 0x36},
-        {104, 0x36},
-        {98, 0x36},
-        {110, 0x35},
-        {104, 0x35},
-        {98, 0x35},
-        {110, 0x34},
-        {104, 0x34},
-        {98, 0x34},
-        {110, 0x33},
-        {104, 0x33},
-        {98, 0x33},
-        {110, 0x32},
-        {104, 0x32},
-        {98, 0x32},
-        {110, 0x31},
-        {104, 0x31},
-        {98, 0x31},
-        {110, 0x30},
-        {104, 0x30},
-        {98, 0x30},
-        {110, 0x6},
-        {104, 0x6},
-        {98, 0x6},
-        {110, 0x5},
-        {104, 0x5},
-        {98, 0x5},
-        {110, 0x4},
-        {104, 0x4},
-        {98, 0x4},
-        {110, 0x3},
-        {104, 0x3},
-        {98, 0x3},
-        {110, 0x2},
-        {104, 0x2},
-        {98, 0x2},
-        {110, 0x1},
-        {104, 0x1},
-        {98, 0x1},
-        {110, 0x0},
-        {104, 0x0},
-        {98, 0x0},
-        {97, 0},
-        {96, 0},
-        {95, 0},
-        {94, 0},
-        {93, 0},
-        {92, 0},
-        {91, 0},
-        {90, 0},
-        {89, 0},
-        {88, 0},
-        {87, 0},
-        {86, 0},
-        {85, 0},
-        {84, 0},
-        {83, 0},
-        {82, 0},
-        {81, 0},
-        {80, 0},
-        {79, 0},
-        {78, 0},
-        {77, 0},
-        {76, 0},
-        {75, 0},
-        {74, 0},
-        {73, 0},
-        {72, 0},
-        {71, 0},
-        {70, 0},
-        {69, 0},
-        {68, 0},
-        {67, 0},
-        {66, 0},
-        {65, 0},
-        {64, 0},
-        {63, 0},
-        {62, 0},
-        {61, 0},
-        {60, 0},
-        {59, 0},
-        }
-};
-
-static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
-                                   u8 is_ht40, u8 ctrl_chan_high,
-                                   struct iwl4965_tx_power_db *tx_power_tbl)
-{
-       u8 saturation_power;
-       s32 target_power;
-       s32 user_target_power;
-       s32 power_limit;
-       s32 current_temp;
-       s32 reg_limit;
-       s32 current_regulatory;
-       s32 txatten_grp = CALIB_CH_GROUP_MAX;
-       int i;
-       int c;
-       const struct iwl_channel_info *ch_info = NULL;
-       struct iwl_eeprom_calib_ch_info ch_eeprom_info;
-       const struct iwl_eeprom_calib_measure *measurement;
-       s16 voltage;
-       s32 init_voltage;
-       s32 voltage_compensation;
-       s32 degrees_per_05db_num;
-       s32 degrees_per_05db_denom;
-       s32 factory_temp;
-       s32 temperature_comp[2];
-       s32 factory_gain_index[2];
-       s32 factory_actual_pwr[2];
-       s32 power_index;
-
-       /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
-        *   are used for indexing into txpower table) */
-       user_target_power = 2 * priv->tx_power_user_lmt;
-
-       /* Get current (RXON) channel, band, width */
-       IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
-                         is_ht40);
-
-       ch_info = iwl_get_channel_info(priv, priv->band, channel);
-
-       if (!is_channel_valid(ch_info))
-               return -EINVAL;
-
-       /* get txatten group, used to select 1) thermal txpower adjustment
-        *   and 2) mimo txpower balance between Tx chains. */
-       txatten_grp = iwl4965_get_tx_atten_grp(channel);
-       if (txatten_grp < 0) {
-               IWL_ERR(priv, "Can't find txatten group for channel %d.\n",
-                         channel);
-               return -EINVAL;
-       }
-
-       IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n",
-                         channel, txatten_grp);
-
-       if (is_ht40) {
-               if (ctrl_chan_high)
-                       channel -= 2;
-               else
-                       channel += 2;
-       }
-
-       /* hardware txpower limits ...
-        * saturation (clipping distortion) txpowers are in half-dBm */
-       if (band)
-               saturation_power = priv->calib_info->saturation_power24;
-       else
-               saturation_power = priv->calib_info->saturation_power52;
-
-       if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
-           saturation_power > IWL_TX_POWER_SATURATION_MAX) {
-               if (band)
-                       saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
-               else
-                       saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
-       }
-
-       /* regulatory txpower limits ... reg_limit values are in half-dBm,
-        *   max_power_avg values are in dBm, convert * 2 */
-       if (is_ht40)
-               reg_limit = ch_info->ht40_max_power_avg * 2;
-       else
-               reg_limit = ch_info->max_power_avg * 2;
-
-       if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
-           (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
-               if (band)
-                       reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
-               else
-                       reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
-       }
-
-       /* Interpolate txpower calibration values for this channel,
-        *   based on factory calibration tests on spaced channels. */
-       iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
-
-       /* calculate tx gain adjustment based on power supply voltage */
-       voltage = le16_to_cpu(priv->calib_info->voltage);
-       init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
-       voltage_compensation =
-           iwl4965_get_voltage_compensation(voltage, init_voltage);
-
-       IWL_DEBUG_TXPOWER(priv, "curr volt %d eeprom volt %d volt comp %d\n",
-                         init_voltage,
-                         voltage, voltage_compensation);
-
-       /* get current temperature (Celsius) */
-       current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
-       current_temp = min(current_temp, IWL_TX_POWER_TEMPERATURE_MAX);
-       current_temp = KELVIN_TO_CELSIUS(current_temp);
-
-       /* select thermal txpower adjustment params, based on channel group
-        *   (same frequency group used for mimo txatten adjustment) */
-       degrees_per_05db_num =
-           tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
-       degrees_per_05db_denom =
-           tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
-
-       /* get per-chain txpower values from factory measurements */
-       for (c = 0; c < 2; c++) {
-               measurement = &ch_eeprom_info.measurements[c][1];
-
-               /* txgain adjustment (in half-dB steps) based on difference
-                *   between factory and current temperature */
-               factory_temp = measurement->temperature;
-               iwl4965_math_div_round((current_temp - factory_temp) *
-                                      degrees_per_05db_denom,
-                                      degrees_per_05db_num,
-                                      &temperature_comp[c]);
-
-               factory_gain_index[c] = measurement->gain_idx;
-               factory_actual_pwr[c] = measurement->actual_pow;
-
-               IWL_DEBUG_TXPOWER(priv, "chain = %d\n", c);
-               IWL_DEBUG_TXPOWER(priv, "fctry tmp %d, "
-                                 "curr tmp %d, comp %d steps\n",
-                                 factory_temp, current_temp,
-                                 temperature_comp[c]);
-
-               IWL_DEBUG_TXPOWER(priv, "fctry idx %d, fctry pwr %d\n",
-                                 factory_gain_index[c],
-                                 factory_actual_pwr[c]);
-       }
-
-       /* for each of 33 bit-rates (including 1 for CCK) */
-       for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
-               u8 is_mimo_rate;
-               union iwl4965_tx_power_dual_stream tx_power;
-
-               /* for mimo, reduce each chain's txpower by half
-                * (3dB, 6 steps), so total output power is regulatory
-                * compliant. */
-               if (i & 0x8) {
-                       current_regulatory = reg_limit -
-                           IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
-                       is_mimo_rate = 1;
-               } else {
-                       current_regulatory = reg_limit;
-                       is_mimo_rate = 0;
-               }
-
-               /* find txpower limit, either hardware or regulatory */
-               power_limit = saturation_power - back_off_table[i];
-               if (power_limit > current_regulatory)
-                       power_limit = current_regulatory;
-
-               /* reduce user's txpower request if necessary
-                * for this rate on this channel */
-               target_power = user_target_power;
-               if (target_power > power_limit)
-                       target_power = power_limit;
-
-               IWL_DEBUG_TXPOWER(priv, "rate %d sat %d reg %d usr %d tgt %d\n",
-                                 i, saturation_power - back_off_table[i],
-                                 current_regulatory, user_target_power,
-                                 target_power);
-
-               /* for each of 2 Tx chains (radio transmitters) */
-               for (c = 0; c < 2; c++) {
-                       s32 atten_value;
-
-                       if (is_mimo_rate)
-                               atten_value =
-                                   (s32)le32_to_cpu(priv->card_alive_init.
-                                   tx_atten[txatten_grp][c]);
-                       else
-                               atten_value = 0;
-
-                       /* calculate index; higher index means lower txpower */
-                       power_index = (u8) (factory_gain_index[c] -
-                                           (target_power -
-                                            factory_actual_pwr[c]) -
-                                           temperature_comp[c] -
-                                           voltage_compensation +
-                                           atten_value);
-
-/*                     IWL_DEBUG_TXPOWER(priv, "calculated txpower index %d\n",
-                                               power_index); */
-
-                       if (power_index < get_min_power_index(i, band))
-                               power_index = get_min_power_index(i, band);
-
-                       /* adjust 5 GHz index to support negative indexes */
-                       if (!band)
-                               power_index += 9;
-
-                       /* CCK, rate 32, reduce txpower for CCK */
-                       if (i == POWER_TABLE_CCK_ENTRY)
-                               power_index +=
-                                   IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
-
-                       /* stay within the table! */
-                       if (power_index > 107) {
-                               IWL_WARN(priv, "txpower index %d > 107\n",
-                                           power_index);
-                               power_index = 107;
-                       }
-                       if (power_index < 0) {
-                               IWL_WARN(priv, "txpower index %d < 0\n",
-                                           power_index);
-                               power_index = 0;
-                       }
-
-                       /* fill txpower command for this rate/chain */
-                       tx_power.s.radio_tx_gain[c] =
-                               gain_table[band][power_index].radio;
-                       tx_power.s.dsp_predis_atten[c] =
-                               gain_table[band][power_index].dsp;
-
-                       IWL_DEBUG_TXPOWER(priv, "chain %d mimo %d index %d "
-                                         "gain 0x%02x dsp %d\n",
-                                         c, atten_value, power_index,
-                                       tx_power.s.radio_tx_gain[c],
-                                       tx_power.s.dsp_predis_atten[c]);
-               } /* for each chain */
-
-               tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
-
-       } /* for each rate */
-
-       return 0;
-}
-
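The per-chain gain index computed above boils down to an offset-and-clamp calculation on the factory calibration point. The standalone sketch below mirrors that arithmetic with hypothetical inputs and a made-up minimum index; it is not the driver's code, which takes these values from EEPROM calibration data and the "initialize" ALIVE response.

#include <stdio.h>

/* Standalone sketch of the per-chain txpower index arithmetic above.
 * All inputs are hypothetical placeholders. */
static int calc_power_index(int factory_gain_index, int factory_actual_pwr,
                            int target_power, int temperature_comp,
                            int voltage_comp, int atten_value, int min_index)
{
        /* higher index means lower txpower */
        int idx = factory_gain_index -
                  (target_power - factory_actual_pwr) -
                  temperature_comp - voltage_comp + atten_value;

        if (idx < min_index)
                idx = min_index;
        if (idx > 107)          /* stay within the 108-entry gain table */
                idx = 107;
        if (idx < 0)
                idx = 0;
        return idx;
}

int main(void)
{
        /* e.g. factory index 70 at 24 half-dBm, target 28 half-dBm */
        printf("power index = %d\n",
               calc_power_index(70, 24, 28, 2, 1, 0, 0));      /* 63 */
        return 0;
}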
-/**
- * iwl4965_send_tx_power - Configure the TXPOWER level user limit
- *
- * Uses the active RXON for channel, band, and characteristics (ht40, high)
- * The power limit is taken from priv->tx_power_user_lmt.
- */
-static int iwl4965_send_tx_power(struct iwl_priv *priv)
-{
-       struct iwl4965_txpowertable_cmd cmd = { 0 };
-       int ret;
-       u8 band = 0;
-       bool is_ht40 = false;
-       u8 ctrl_chan_high = 0;
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
-                     "TX Power requested while scanning!\n"))
-               return -EAGAIN;
-
-       band = priv->band == IEEE80211_BAND_2GHZ;
-
-       is_ht40 = is_ht40_channel(ctx->active.flags);
-
-       if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
-               ctrl_chan_high = 1;
-
-       cmd.band = band;
-       cmd.channel = ctx->active.channel;
-
-       ret = iwl4965_fill_txpower_tbl(priv, band,
-                               le16_to_cpu(ctx->active.channel),
-                               is_ht40, ctrl_chan_high, &cmd.tx_power);
-       if (ret)
-               goto out;
-
-       ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
-
-out:
-       return ret;
-}
-
-static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
-                                  struct iwl_rxon_context *ctx)
-{
-       int ret = 0;
-       struct iwl4965_rxon_assoc_cmd rxon_assoc;
-       const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
-       const struct iwl_rxon_cmd *rxon2 = &ctx->active;
-
-       if ((rxon1->flags == rxon2->flags) &&
-           (rxon1->filter_flags == rxon2->filter_flags) &&
-           (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
-           (rxon1->ofdm_ht_single_stream_basic_rates ==
-            rxon2->ofdm_ht_single_stream_basic_rates) &&
-           (rxon1->ofdm_ht_dual_stream_basic_rates ==
-            rxon2->ofdm_ht_dual_stream_basic_rates) &&
-           (rxon1->rx_chain == rxon2->rx_chain) &&
-           (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
-               IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC.  Not resending.\n");
-               return 0;
-       }
-
-       rxon_assoc.flags = ctx->staging.flags;
-       rxon_assoc.filter_flags = ctx->staging.filter_flags;
-       rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
-       rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
-       rxon_assoc.reserved = 0;
-       rxon_assoc.ofdm_ht_single_stream_basic_rates =
-           ctx->staging.ofdm_ht_single_stream_basic_rates;
-       rxon_assoc.ofdm_ht_dual_stream_basic_rates =
-           ctx->staging.ofdm_ht_dual_stream_basic_rates;
-       rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
-
-       ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
-                                    sizeof(rxon_assoc), &rxon_assoc, NULL);
-       if (ret)
-               return ret;
-
-       return ret;
-}
-
-static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-       /* cast away the const for active_rxon in this function */
-       struct iwl_rxon_cmd *active_rxon = (void *)&ctx->active;
-       int ret;
-       bool new_assoc =
-               !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
-
-       if (!iwl_is_alive(priv))
-               return -EBUSY;
-
-       if (!ctx->is_active)
-               return 0;
-
-       /* always get timestamp with Rx frame */
-       ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
-
-       ret = iwl_check_rxon_cmd(priv, ctx);
-       if (ret) {
-               IWL_ERR(priv, "Invalid RXON configuration.  Not committing.\n");
-               return -EINVAL;
-       }
-
-       /*
-        * receive commit_rxon request
-        * abort any previous channel switch if one is still in progress
-        */
-       if (priv->switch_rxon.switch_in_progress &&
-           (priv->switch_rxon.channel != ctx->staging.channel)) {
-               IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
-                     le16_to_cpu(priv->switch_rxon.channel));
-               iwl_chswitch_done(priv, false);
-       }
-
-       /* If we don't need to send a full RXON, we can use
-        * iwl_rxon_assoc_cmd which is used to reconfigure filter
-        * and other flags for the current radio configuration. */
-       if (!iwl_full_rxon_required(priv, ctx)) {
-               ret = iwl_send_rxon_assoc(priv, ctx);
-               if (ret) {
-                       IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
-                       return ret;
-               }
-
-               memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
-               iwl_print_rx_config_cmd(priv, ctx);
-               return 0;
-       }
-
-       /* If we are currently associated and the new config requires
-        * an RXON_ASSOC and the new config wants the associated mask enabled,
-        * we must clear the associated bit from the active configuration
-        * before we apply the new config */
-       if (iwl_is_associated_ctx(ctx) && new_assoc) {
-               IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
-               active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-
-               ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
-                                      sizeof(struct iwl_rxon_cmd),
-                                      active_rxon);
-
-               /* If the mask clearing failed then we set
-                * active_rxon back to what it was previously */
-               if (ret) {
-                       active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
-                       IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
-                       return ret;
-               }
-               iwl_clear_ucode_stations(priv, ctx);
-               iwl_restore_stations(priv, ctx);
-               ret = iwl_restore_default_wep_keys(priv, ctx);
-               if (ret) {
-                       IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
-                       return ret;
-               }
-       }
-
-       IWL_DEBUG_INFO(priv, "Sending RXON\n"
-                      "* with%s RXON_FILTER_ASSOC_MSK\n"
-                      "* channel = %d\n"
-                      "* bssid = %pM\n",
-                      (new_assoc ? "" : "out"),
-                      le16_to_cpu(ctx->staging.channel),
-                      ctx->staging.bssid_addr);
-
-       iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto);
-
-       /* Apply the new configuration
-        * RXON unassoc clears the station table in uCode so restoration of
-        * stations is needed after it (the RXON command) completes
-        */
-       if (!new_assoc) {
-               ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
-                             sizeof(struct iwl_rxon_cmd), &ctx->staging);
-               if (ret) {
-                       IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
-                       return ret;
-               }
-               IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
-               memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
-               iwl_clear_ucode_stations(priv, ctx);
-               iwl_restore_stations(priv, ctx);
-               ret = iwl_restore_default_wep_keys(priv, ctx);
-               if (ret) {
-                       IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
-                       return ret;
-               }
-       }
-       if (new_assoc) {
-               priv->start_calib = 0;
-               /* Apply the new configuration
-                * RXON assoc doesn't clear the station table in uCode,
-                */
-               ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
-                             sizeof(struct iwl_rxon_cmd), &ctx->staging);
-               if (ret) {
-                       IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
-                       return ret;
-               }
-               memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
-       }
-       iwl_print_rx_config_cmd(priv, ctx);
-
-       iwl_init_sensitivity(priv);
-
-       /* If we issue a new RXON command which required a tune then we must
-        * send a new TXPOWER command or we won't be able to Tx any frames */
-       ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
-       if (ret) {
-               IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
-                                    struct ieee80211_channel_switch *ch_switch)
-{
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       int rc;
-       u8 band = 0;
-       bool is_ht40 = false;
-       u8 ctrl_chan_high = 0;
-       struct iwl4965_channel_switch_cmd cmd;
-       const struct iwl_channel_info *ch_info;
-       u32 switch_time_in_usec, ucode_switch_time;
-       u16 ch;
-       u32 tsf_low;
-       u8 switch_count;
-       u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
-       struct ieee80211_vif *vif = ctx->vif;
-       band = priv->band == IEEE80211_BAND_2GHZ;
-
-       is_ht40 = is_ht40_channel(ctx->staging.flags);
-
-       if (is_ht40 &&
-           (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
-               ctrl_chan_high = 1;
-
-       cmd.band = band;
-       cmd.expect_beacon = 0;
-       ch = ch_switch->channel->hw_value;
-       cmd.channel = cpu_to_le16(ch);
-       cmd.rxon_flags = ctx->staging.flags;
-       cmd.rxon_filter_flags = ctx->staging.filter_flags;
-       switch_count = ch_switch->count;
-       tsf_low = ch_switch->timestamp & 0x0ffffffff;
-       /*
-        * calculate the ucode channel switch time
-        * adding TSF as one of the factors for when to switch
-        */
-       if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
-               if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
-                   beacon_interval)) {
-                       switch_count -= (priv->ucode_beacon_time -
-                               tsf_low) / beacon_interval;
-               } else
-                       switch_count = 0;
-       }
-       if (switch_count <= 1)
-               cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
-       else {
-               switch_time_in_usec =
-                       vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
-               ucode_switch_time = iwl_usecs_to_beacons(priv,
-                                                        switch_time_in_usec,
-                                                        beacon_interval);
-               cmd.switch_time = iwl_add_beacon_time(priv,
-                                                     priv->ucode_beacon_time,
-                                                     ucode_switch_time,
-                                                     beacon_interval);
-       }
-       IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
-                     cmd.switch_time);
-       ch_info = iwl_get_channel_info(priv, priv->band, ch);
-       if (ch_info)
-               cmd.expect_beacon = is_channel_radar(ch_info);
-       else {
-               IWL_ERR(priv, "invalid channel switch from %u to %u\n",
-                       ctx->active.channel, ch);
-               return -EFAULT;
-       }
-
-       rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40,
-                                     ctrl_chan_high, &cmd.tx_power);
-       if (rc) {
-               IWL_DEBUG_11H(priv, "error:%d  fill txpower_tbl\n", rc);
-               return rc;
-       }
-
-       priv->switch_rxon.channel = cmd.channel;
-       priv->switch_rxon.switch_in_progress = true;
-
-       return iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
-}
-
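The switch-time handling above first discounts beacons that have already elapsed between the CSA timestamp and the uCode beacon clock. Below is a minimal sketch of just that count adjustment, with made-up times kept in a single common unit (the driver itself juggles TSF microseconds and beacon time units).

#include <stdio.h>

/* Sketch of the switch-count adjustment above; numbers are invented. */
int main(void)
{
        unsigned int ucode_beacon_time = 10240; /* "now" on the uCode clock */
        unsigned int tsf_low = 10000;           /* CSA timestamp (low TSF) */
        unsigned int beacon_interval = 100;
        unsigned int switch_count = 5;          /* beacons until switch */

        if (ucode_beacon_time > tsf_low && beacon_interval) {
                unsigned int elapsed =
                        (ucode_beacon_time - tsf_low) / beacon_interval;

                /* beacons already gone by can no longer be waited for */
                switch_count = switch_count > elapsed ?
                               switch_count - elapsed : 0;
        }
        printf("beacons left before switch: %u\n", switch_count);      /* 3 */
        return 0;
}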
-/**
- * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
- */
-static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
-                                           struct iwl_tx_queue *txq,
-                                           u16 byte_cnt)
-{
-       struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
-       int txq_id = txq->q.id;
-       int write_ptr = txq->q.write_ptr;
-       int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
-       __le16 bc_ent;
-
-       WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
-
-       bc_ent = cpu_to_le16(len & 0xFFF);
-       /* Set up byte count within first 256 entries */
-       scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
-
-       /* If within first 64 entries, duplicate at end */
-       if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
-               scd_bc_tbl[txq_id].
-                       tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
-}
-
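The byte-count table above mirrors the first window of slots past the end of the queue so that wrapped reads need no modulo. A toy version with a plain array follows; the sizes are stand-ins for TFD_QUEUE_SIZE_MAX and TFD_QUEUE_SIZE_BC_DUP, and the array stands in for the scheduler's SRAM table.

#include <stdint.h>
#include <stdio.h>

#define QUEUE_SIZE      256     /* stand-in for TFD_QUEUE_SIZE_MAX */
#define DUP_WINDOW      64      /* stand-in for TFD_QUEUE_SIZE_BC_DUP */

static uint16_t bc_tbl[QUEUE_SIZE + DUP_WINDOW];

/* Record a frame's byte count at write_ptr; entries in the first
 * DUP_WINDOW slots are mirrored past the end of the table. */
static void update_byte_cnt(unsigned int write_ptr, unsigned int byte_cnt)
{
        uint16_t ent = byte_cnt & 0xFFF;

        bc_tbl[write_ptr] = ent;
        if (write_ptr < DUP_WINDOW)
                bc_tbl[QUEUE_SIZE + write_ptr] = ent;
}

int main(void)
{
        update_byte_cnt(5, 1500);
        printf("%u %u\n", (unsigned)bc_tbl[5],
               (unsigned)bc_tbl[QUEUE_SIZE + 5]);       /* 1500 1500 */
        return 0;
}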
-/**
- * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
- * @priv: driver private data; the reading comes from the uCode statistics
- *
- * A return of <0 indicates bogus data in the statistics
- */
-static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
-{
-       s32 temperature;
-       s32 vt;
-       s32 R1, R2, R3;
-       u32 R4;
-
-       if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
-           (priv->_agn.statistics.flag &
-                       STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
-               IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
-               R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
-               R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
-               R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
-               R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
-       } else {
-               IWL_DEBUG_TEMP(priv, "Running temperature calibration\n");
-               R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
-               R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
-               R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
-               R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
-       }
-
-       /*
-        * Temperature is only 23 bits, so sign extend out to 32.
-        *
-        * NOTE If we haven't received a statistics notification yet
-        * with an updated temperature, use R4 provided to us in the
-        * "initialize" ALIVE response.
-        */
-       if (!test_bit(STATUS_TEMPERATURE, &priv->status))
-               vt = sign_extend32(R4, 23);
-       else
-               vt = sign_extend32(le32_to_cpu(priv->_agn.statistics.
-                                general.common.temperature), 23);
-
-       IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
-
-       if (R3 == R1) {
-               IWL_ERR(priv, "Calibration conflict R1 == R3\n");
-               return -1;
-       }
-
-       /* Calculate temperature in degrees Kelvin, adjust by 97%.
-        * Add offset to center the adjustment around 0 degrees Centigrade. */
-       temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
-       temperature /= (R3 - R1);
-       temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
-
-       IWL_DEBUG_TEMP(priv, "Calibrated temperature: %dK, %dC\n",
-                       temperature, KELVIN_TO_CELSIUS(temperature));
-
-       return temperature;
-}
-
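The calibration above is a linear fit through factory reference points, scaled by 97% and re-centered. Here is a worked sketch of the same arithmetic; the CALIB_* constants and the R1..R3/voltage values are placeholders, not the driver's TEMPERATURE_CALIB_* definitions or real EEPROM data.

#include <stdio.h>

/* Placeholder constants; the real slope and Kelvin offset live in the
 * 4965 hardware header. */
#define CALIB_A_VAL             259
#define CALIB_KELVIN_OFFSET     8

/* Mirror of the arithmetic above: linear fit through the factory
 * calibration points, scaled by 97%, then offset. */
static int calibrated_temp(int vt, int r1, int r2, int r3)
{
        int temperature;

        if (r3 == r1)           /* would divide by zero */
                return -1;

        temperature = CALIB_A_VAL * (vt - r2);
        temperature /= (r3 - r1);
        return (temperature * 97) / 100 + CALIB_KELVIN_OFFSET;
}

int main(void)
{
        /* invented voltage reading and R1..R3 calibration values */
        printf("temp = %dK\n",
               calibrated_temp(-1000, -440, -5000, 3000));      /* 299 */
        return 0;
}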
-/* Adjust Txpower only if temperature variance is greater than threshold. */
-#define IWL_TEMPERATURE_THRESHOLD   3
-
-/**
- * iwl4965_is_temp_calib_needed - determines if new calibration is needed
- *
- * If the temperature has changed sufficiently, then a recalibration
- * is needed.
- *
- * Assumes the caller will replace priv->last_temperature once the
- * calibration has been executed.
- */
-static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
-{
-       int temp_diff;
-
-       if (!test_bit(STATUS_STATISTICS, &priv->status)) {
-               IWL_DEBUG_TEMP(priv, "Temperature not updated -- no statistics.\n");
-               return 0;
-       }
-
-       temp_diff = priv->temperature - priv->last_temperature;
-
-       /* get absolute value */
-       if (temp_diff < 0) {
-               IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
-               temp_diff = -temp_diff;
-       } else if (temp_diff == 0)
-               IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
-       else
-               IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);
-
-       if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
-               IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
-               return 0;
-       }
-
-       IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");
-
-       return 1;
-}
-
-static void iwl4965_temperature_calib(struct iwl_priv *priv)
-{
-       s32 temp;
-
-       temp = iwl4965_hw_get_temperature(priv);
-       if (temp < 0)
-               return;
-
-       if (priv->temperature != temp) {
-               if (priv->temperature)
-                       IWL_DEBUG_TEMP(priv, "Temperature changed "
-                                      "from %dC to %dC\n",
-                                      KELVIN_TO_CELSIUS(priv->temperature),
-                                      KELVIN_TO_CELSIUS(temp));
-               else
-                       IWL_DEBUG_TEMP(priv, "Temperature "
-                                      "initialized to %dC\n",
-                                      KELVIN_TO_CELSIUS(temp));
-       }
-
-       priv->temperature = temp;
-       iwl_tt_handler(priv);
-       set_bit(STATUS_TEMPERATURE, &priv->status);
-
-       if (!priv->disable_tx_power_cal &&
-            unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
-            iwl4965_is_temp_calib_needed(priv))
-               queue_work(priv->workqueue, &priv->txpower_work);
-}
-
-/**
- * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
- */
-static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
-                                           u16 txq_id)
-{
-       /* Simply stop the queue, but don't change any configuration;
-        * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
-       iwl_write_prph(priv,
-               IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
-               (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
-               (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
-}
-
-/**
- * iwl4965_txq_agg_disable - deactivate an aggregation Tx queue
- *
- * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE
- * priv->lock must be held by the caller
- */
-static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
-                                  u16 ssn_idx, u8 tx_fifo)
-{
-       if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
-           (IWL49_FIRST_AMPDU_QUEUE +
-               priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
-               IWL_WARN(priv,
-                       "queue number out of range: %d, must be %d to %d\n",
-                       txq_id, IWL49_FIRST_AMPDU_QUEUE,
-                       IWL49_FIRST_AMPDU_QUEUE +
-                       priv->cfg->base_params->num_of_ampdu_queues - 1);
-               return -EINVAL;
-       }
-
-       iwl4965_tx_queue_stop_scheduler(priv, txq_id);
-
-       iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
-
-       priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
-       priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
-       /* supposes that ssn_idx is valid (!= 0xFFF) */
-       iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
-
-       iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
-       iwl_txq_ctx_deactivate(priv, txq_id);
-       iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
-
-       return 0;
-}
-
-/**
- * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
- */
-static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
-                                       u16 txq_id)
-{
-       u32 tbl_dw_addr;
-       u32 tbl_dw;
-       u16 scd_q2ratid;
-
-       scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
-
-       tbl_dw_addr = priv->scd_base_addr +
-                       IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
-
-       tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
-
-       if (txq_id & 0x1)
-               tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
-       else
-               tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
-
-       iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
-
-       return 0;
-}
-
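The translate table written above stores two queue mappings per 32-bit word: odd queue numbers take the upper halfword, even ones the lower. The standalone sketch below shows that halfword packing with an ordinary array standing in for the scheduler SRAM; the table size and values are invented.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the scheduler's RA/TID translate table. */
static uint32_t translate_tbl[10];

/* Odd queue numbers occupy the upper 16 bits of a word, even ones the
 * lower 16 bits, so two queues share each table entry. */
static void set_q2ratid(uint16_t ra_tid, unsigned int txq_id)
{
        uint32_t *word = &translate_tbl[txq_id / 2];

        if (txq_id & 0x1)
                *word = ((uint32_t)ra_tid << 16) | (*word & 0x0000FFFF);
        else
                *word = ra_tid | (*word & 0xFFFF0000);
}

int main(void)
{
        set_q2ratid(0x0123, 6); /* even queue -> low halfword */
        set_q2ratid(0x4567, 7); /* odd queue  -> high halfword */
        printf("word[3] = 0x%08x\n",
               (unsigned int)translate_tbl[3]);  /* 0x45670123 */
        return 0;
}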
-
-/**
- * iwl4965_txq_agg_enable - Set up & enable aggregation for selected queue
- *
- * NOTE:  txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE,
- *        i.e. it must be one of the higher queues used for aggregation
- */
-static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
-                                 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
-{
-       unsigned long flags;
-       u16 ra_tid;
-       int ret;
-
-       if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
-           (IWL49_FIRST_AMPDU_QUEUE +
-               priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
-               IWL_WARN(priv,
-                       "queue number out of range: %d, must be %d to %d\n",
-                       txq_id, IWL49_FIRST_AMPDU_QUEUE,
-                       IWL49_FIRST_AMPDU_QUEUE +
-                       priv->cfg->base_params->num_of_ampdu_queues - 1);
-               return -EINVAL;
-       }
-
-       ra_tid = BUILD_RAxTID(sta_id, tid);
-
-       /* Modify device's station table to Tx this TID */
-       ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
-       if (ret)
-               return ret;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* Stop this Tx queue before configuring it */
-       iwl4965_tx_queue_stop_scheduler(priv, txq_id);
-
-       /* Map receiver-address / traffic-ID to this queue */
-       iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
-
-       /* Set this queue as a chain-building queue */
-       iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
-
-       /* Place first TFD at index corresponding to start sequence number.
-        * Assumes that ssn_idx is valid (!= 0xFFF) */
-       priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
-       priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
-       iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
-
-       /* Set up Tx window size and frame limit for this queue */
-       iwl_write_targ_mem(priv,
-               priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
-               (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
-               IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
-
-       iwl_write_targ_mem(priv, priv->scd_base_addr +
-               IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
-               (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
-               & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
-
-       iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
-
-       /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
-       iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       return 0;
-}
-
-
-static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
-{
-       switch (cmd_id) {
-       case REPLY_RXON:
-               return (u16) sizeof(struct iwl4965_rxon_cmd);
-       default:
-               return len;
-       }
-}
-
-static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
-{
-       struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
-       addsta->mode = cmd->mode;
-       memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
-       memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
-       addsta->station_flags = cmd->station_flags;
-       addsta->station_flags_msk = cmd->station_flags_msk;
-       addsta->tid_disable_tx = cmd->tid_disable_tx;
-       addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
-       addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
-       addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
-       addsta->sleep_tx_count = cmd->sleep_tx_count;
-       addsta->reserved1 = cpu_to_le16(0);
-       addsta->reserved2 = cpu_to_le16(0);
-
-       return (u16)sizeof(struct iwl4965_addsta_cmd);
-}
-
-static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
-{
-       return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
-}
-
-/**
- * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
- */
-static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
-                                     struct iwl_ht_agg *agg,
-                                     struct iwl4965_tx_resp *tx_resp,
-                                     int txq_id, u16 start_idx)
-{
-       u16 status;
-       struct agg_tx_status *frame_status = tx_resp->u.agg_status;
-       struct ieee80211_tx_info *info = NULL;
-       struct ieee80211_hdr *hdr = NULL;
-       u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
-       int i, sh, idx;
-       u16 seq;
-       if (agg->wait_for_ba)
-               IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
-
-       agg->frame_count = tx_resp->frame_count;
-       agg->start_idx = start_idx;
-       agg->rate_n_flags = rate_n_flags;
-       agg->bitmap = 0;
-
-       /* num frames attempted by Tx command */
-       if (agg->frame_count == 1) {
-               /* Only one frame was attempted; no block-ack will arrive */
-               status = le16_to_cpu(frame_status[0].status);
-               idx = start_idx;
-
-               /* FIXME: code repetition */
-               IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
-                                  agg->frame_count, agg->start_idx, idx);
-
-               info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
-               info->status.rates[0].count = tx_resp->failure_frame + 1;
-               info->flags &= ~IEEE80211_TX_CTL_AMPDU;
-               info->flags |= iwl_tx_status_to_mac80211(status);
-               iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
-               /* FIXME: code repetition end */
-
-               IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
-                                   status & 0xff, tx_resp->failure_frame);
-               IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
-
-               agg->wait_for_ba = 0;
-       } else {
-               /* Two or more frames were attempted; expect block-ack */
-               u64 bitmap = 0;
-               int start = agg->start_idx;
-
-               /* Construct bit-map of pending frames within Tx window */
-               for (i = 0; i < agg->frame_count; i++) {
-                       u16 sc;
-                       status = le16_to_cpu(frame_status[i].status);
-                       seq  = le16_to_cpu(frame_status[i].sequence);
-                       idx = SEQ_TO_INDEX(seq);
-                       txq_id = SEQ_TO_QUEUE(seq);
-
-                       if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
-                                     AGG_TX_STATE_ABORT_MSK))
-                               continue;
-
-                       IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
-                                          agg->frame_count, txq_id, idx);
-
-                       hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
-                       if (!hdr) {
-                               IWL_ERR(priv,
-                                       "BUG_ON idx doesn't point to valid skb"
-                                       " idx=%d, txq_id=%d\n", idx, txq_id);
-                               return -1;
-                       }
-
-                       sc = le16_to_cpu(hdr->seq_ctrl);
-                       if (idx != (SEQ_TO_SN(sc) & 0xff)) {
-                               IWL_ERR(priv,
-                                       "BUG_ON idx doesn't match seq control"
-                                       " idx=%d, seq_idx=%d, seq=%d\n",
-                                       idx, SEQ_TO_SN(sc), hdr->seq_ctrl);
-                               return -1;
-                       }
-
-                       IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
-                                          i, idx, SEQ_TO_SN(sc));
-
-                       sh = idx - start;
-                       if (sh > 64) {
-                               sh = (start - idx) + 0xff;
-                               bitmap = bitmap << sh;
-                               sh = 0;
-                               start = idx;
-                       } else if (sh < -64)
-                               sh  = 0xff - (start - idx);
-                       else if (sh < 0) {
-                               sh = start - idx;
-                               start = idx;
-                               bitmap = bitmap << sh;
-                               sh = 0;
-                       }
-                       bitmap |= 1ULL << sh;
-                       IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
-                                          start, (unsigned long long)bitmap);
-               }
-
-               agg->bitmap = bitmap;
-               agg->start_idx = start;
-               IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
-                                  agg->frame_count, agg->start_idx,
-                                  (unsigned long long)agg->bitmap);
-
-               if (bitmap)
-                       agg->wait_for_ba = 1;
-       }
-       return 0;
-}
-
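In the common case, the block-ack bookkeeping above reduces to setting one bit per attempted frame at its offset from the window start. The simplified sketch below uses invented frame indices; the real code also re-anchors (slides) the window start when an index precedes it, which the sketch omits.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const int start = 10;                   /* aggregation window start */
        const int idx[] = { 10, 11, 13, 14 };   /* invented frame indices */
        uint64_t bitmap = 0;
        unsigned int i;

        /* one bit per attempted frame, at its offset from the start */
        for (i = 0; i < sizeof(idx) / sizeof(idx[0]); i++)
                bitmap |= 1ULL << (idx[i] - start);

        printf("bitmap = 0x%llx\n", (unsigned long long)bitmap); /* 0x1b */
        return 0;
}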
-static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
-{
-       int i;
-       int start = 0;
-       int ret = IWL_INVALID_STATION;
-       unsigned long flags;
-
-       if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
-           (priv->iw_mode == NL80211_IFTYPE_AP))
-               start = IWL_STA_ID;
-
-       if (is_broadcast_ether_addr(addr))
-               return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       for (i = start; i < priv->hw_params.max_stations; i++)
-               if (priv->stations[i].used &&
-                   (!compare_ether_addr(priv->stations[i].sta.sta.addr,
-                                        addr))) {
-                       ret = i;
-                       goto out;
-               }
-
-       IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
-                             addr, priv->num_stations);
-
- out:
-       /*
-        * It may be possible that more commands interacting with stations
-        * arrive before we have finished processing the addition of the
-        * station
-        */
-       if (ret != IWL_INVALID_STATION &&
-           (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
-            ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
-             (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
-               IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
-                       ret);
-               ret = IWL_INVALID_STATION;
-       }
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-       return ret;
-}
-
-static int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
-{
-       if (priv->iw_mode == NL80211_IFTYPE_STATION) {
-               return IWL_AP_ID;
-       } else {
-               u8 *da = ieee80211_get_DA(hdr);
-               return iwl_find_station(priv, da);
-       }
-}
-
-/**
- * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
- */
-static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       u16 sequence = le16_to_cpu(pkt->hdr.sequence);
-       int txq_id = SEQ_TO_QUEUE(sequence);
-       int index = SEQ_TO_INDEX(sequence);
-       struct iwl_tx_queue *txq = &priv->txq[txq_id];
-       struct ieee80211_hdr *hdr;
-       struct ieee80211_tx_info *info;
-       struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
-       u32  status = le32_to_cpu(tx_resp->u.status);
-       int uninitialized_var(tid);
-       int sta_id;
-       int freed;
-       u8 *qc = NULL;
-       unsigned long flags;
-
-       if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
-               IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
-                         "is out of range [0-%d] %d %d\n", txq_id,
-                         index, txq->q.n_bd, txq->q.write_ptr,
-                         txq->q.read_ptr);
-               return;
-       }
-
-       txq->time_stamp = jiffies;
-       info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
-       memset(&info->status, 0, sizeof(info->status));
-
-       hdr = iwl_tx_queue_get_hdr(priv, txq_id, index);
-       if (ieee80211_is_data_qos(hdr->frame_control)) {
-               qc = ieee80211_get_qos_ctl(hdr);
-               tid = qc[0] & 0xf;
-       }
-
-       sta_id = iwl_get_ra_sta_id(priv, hdr);
-       if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
-               IWL_ERR(priv, "Station not known\n");
-               return;
-       }
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       if (txq->sched_retry) {
-               const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
-               struct iwl_ht_agg *agg = NULL;
-               WARN_ON(!qc);
-
-               agg = &priv->stations[sta_id].tid[tid].agg;
-
-               iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
-
-               /* check if BAR is needed */
-               if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
-                       info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
-
-               if (txq->q.read_ptr != (scd_ssn & 0xff)) {
-                       index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
-                       IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
-                                          "%d index %d\n", scd_ssn , index);
-                       freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
-                       if (qc)
-                               iwl_free_tfds_in_queue(priv, sta_id,
-                                                      tid, freed);
-
-                       if (priv->mac80211_registered &&
-                           (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
-                           (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
-                               iwl_wake_queue(priv, txq);
-               }
-       } else {
-               info->status.rates[0].count = tx_resp->failure_frame + 1;
-               info->flags |= iwl_tx_status_to_mac80211(status);
-               iwlagn_hwrate_to_tx_control(priv,
-                                       le32_to_cpu(tx_resp->rate_n_flags),
-                                       info);
-
-               IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
-                                  "rate_n_flags 0x%x retries %d\n",
-                                  txq_id,
-                                  iwl_get_tx_fail_reason(status), status,
-                                  le32_to_cpu(tx_resp->rate_n_flags),
-                                  tx_resp->failure_frame);
-
-               freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
-               if (qc && likely(sta_id != IWL_INVALID_STATION))
-                       iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
-               else if (sta_id == IWL_INVALID_STATION)
-                       IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
-
-               if (priv->mac80211_registered &&
-                   (iwl_queue_space(&txq->q) > txq->q.low_mark))
-                       iwl_wake_queue(priv, txq);
-       }
-       if (qc && likely(sta_id != IWL_INVALID_STATION))
-               iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
-
-       iwl_check_abort_status(priv, tx_resp->frame_count, status);
-
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-static int iwl4965_calc_rssi(struct iwl_priv *priv,
-                            struct iwl_rx_phy_res *rx_resp)
-{
-       /* data from PHY/DSP regarding signal strength, etc.,
-        *   contents are always there, not configurable by host.  */
-       struct iwl4965_rx_non_cfg_phy *ncphy =
-           (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
-       u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
-                       >> IWL49_AGC_DB_POS;
-
-       u32 valid_antennae =
-           (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
-                       >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
-       u8 max_rssi = 0;
-       u32 i;
-
-       /* Find max rssi among 3 possible receivers.
-        * These values are measured by the digital signal processor (DSP).
-        * They should stay fairly constant even as the signal strength varies,
-        *   if the radio's automatic gain control (AGC) is working right.
-        * AGC value (see below) will provide the "interesting" info. */
-       for (i = 0; i < 3; i++)
-               if (valid_antennae & (1 << i))
-                       max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
-
-       IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
-               ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
-               max_rssi, agc);
-
-       /* dBm = max_rssi dB - agc dB - constant.
-        * Higher AGC (higher radio gain) means lower signal. */
-       return max_rssi - agc - IWLAGN_RSSI_OFFSET;
-}
-
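The dBm conversion above takes the strongest per-antenna DSP rssi, then subtracts the AGC gain and a fixed offset. A user-space sketch follows; the offset constant and the readings are invented placeholders, not the driver's IWLAGN_RSSI_OFFSET or real PHY data.

#include <stdio.h>

#define RSSI_OFFSET     44      /* placeholder, not IWLAGN_RSSI_OFFSET */

/* Strongest valid-antenna rssi minus AGC gain minus a fixed offset;
 * higher AGC (more radio gain) means a weaker received signal. */
static int calc_rssi_dbm(const unsigned char rssi[3], unsigned int valid_mask,
                         unsigned int agc)
{
        unsigned int i, max_rssi = 0;

        for (i = 0; i < 3; i++)
                if ((valid_mask & (1 << i)) && rssi[i] > max_rssi)
                        max_rssi = rssi[i];

        return (int)max_rssi - (int)agc - RSSI_OFFSET;
}

int main(void)
{
        const unsigned char rssi[3] = { 70, 82, 0 };

        /* antennas A and B valid, 80 dB of AGC gain -> 82 - 80 - 44 */
        printf("%d dBm\n", calc_rssi_dbm(rssi, 0x3, 80));       /* -42 */
        return 0;
}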
-
-/* Set up 4965-specific Rx frame reply handlers */
-static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
-{
-       /* Legacy Rx frames */
-       priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx;
-       /* Tx response */
-       priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
-}
-
-static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
-{
-       INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
-}
-
-static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
-{
-       cancel_work_sync(&priv->txpower_work);
-}
-
-static struct iwl_hcmd_ops iwl4965_hcmd = {
-       .rxon_assoc = iwl4965_send_rxon_assoc,
-       .commit_rxon = iwl4965_commit_rxon,
-       .set_rxon_chain = iwlagn_set_rxon_chain,
-       .send_bt_config = iwl_send_bt_config,
-};
-
-static void iwl4965_post_scan(struct iwl_priv *priv)
-{
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       /*
-        * Since setting the RXON may have been deferred while
-        * performing the scan, fire one off if needed
-        */
-       if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
-               iwlcore_commit_rxon(priv, ctx);
-}
-
-static void iwl4965_post_associate(struct iwl_priv *priv)
-{
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       struct ieee80211_vif *vif = ctx->vif;
-       struct ieee80211_conf *conf = NULL;
-       int ret = 0;
-
-       if (!vif || !priv->is_open)
-               return;
-
-       if (vif->type == NL80211_IFTYPE_AP) {
-               IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
-               return;
-       }
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       iwl_scan_cancel_timeout(priv, 200);
-
-       conf = ieee80211_get_hw_conf(priv->hw);
-
-       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-       iwlcore_commit_rxon(priv, ctx);
-
-       ret = iwl_send_rxon_timing(priv, ctx);
-       if (ret)
-               IWL_WARN(priv, "RXON timing failed - "
-                           "Attempting to continue.\n");
-
-       ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-
-       iwl_set_rxon_ht(priv, &priv->current_ht_config);
-
-       if (priv->cfg->ops->hcmd->set_rxon_chain)
-               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
-       ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
-
-       IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
-                       vif->bss_conf.aid, vif->bss_conf.beacon_int);
-
-       if (vif->bss_conf.use_short_preamble)
-               ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
-       else
-               ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
-       if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
-               if (vif->bss_conf.use_short_slot)
-                       ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
-               else
-                       ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-       }
-
-       iwlcore_commit_rxon(priv, ctx);
-
-       IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
-                       vif->bss_conf.aid, ctx->active.bssid_addr);
-
-       switch (vif->type) {
-       case NL80211_IFTYPE_STATION:
-               break;
-       case NL80211_IFTYPE_ADHOC:
-               iwlagn_send_beacon_cmd(priv);
-               break;
-       default:
-               IWL_ERR(priv, "%s Should not be called in %d mode\n",
-                         __func__, vif->type);
-               break;
-       }
-
-       /* the chain noise calibration will enable PM upon completion.
-        * If chain noise has already been run, then we need to enable
-        * power management here */
-       if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
-               iwl_power_update_mode(priv, false);
-
-       /* Enable Rx differential gain and sensitivity calibrations */
-       iwl_chain_noise_reset(priv);
-       priv->start_calib = 1;
-}
-
-static void iwl4965_config_ap(struct iwl_priv *priv)
-{
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       struct ieee80211_vif *vif = ctx->vif;
-       int ret = 0;
-
-       lockdep_assert_held(&priv->mutex);
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       /* The following should be done only at AP bring up */
-       if (!iwl_is_associated_ctx(ctx)) {
-
-               /* RXON - unassoc (to set timing command) */
-               ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-               iwlcore_commit_rxon(priv, ctx);
-
-               /* RXON Timing */
-               ret = iwl_send_rxon_timing(priv, ctx);
-               if (ret)
-                       IWL_WARN(priv, "RXON timing failed - "
-                                       "Attempting to continue.\n");
-
-               /* AP has all antennas */
-               priv->chain_noise_data.active_chains =
-                       priv->hw_params.valid_rx_ant;
-               iwl_set_rxon_ht(priv, &priv->current_ht_config);
-               if (priv->cfg->ops->hcmd->set_rxon_chain)
-                       priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
-               ctx->staging.assoc_id = 0;
-
-               if (vif->bss_conf.use_short_preamble)
-                       ctx->staging.flags |=
-                               RXON_FLG_SHORT_PREAMBLE_MSK;
-               else
-                       ctx->staging.flags &=
-                               ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
-               if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
-                       if (vif->bss_conf.use_short_slot)
-                               ctx->staging.flags |=
-                                       RXON_FLG_SHORT_SLOT_MSK;
-                       else
-                               ctx->staging.flags &=
-                                       ~RXON_FLG_SHORT_SLOT_MSK;
-               }
-               /* need to send beacon cmd before committing assoc RXON! */
-               iwlagn_send_beacon_cmd(priv);
-               /* restore RXON assoc */
-               ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-               iwlcore_commit_rxon(priv, ctx);
-       }
-       iwlagn_send_beacon_cmd(priv);
-
-       /* FIXME - we need to add code here to detect a totally new
-        * configuration, reset the AP, unassoc, rxon timing, assoc,
-        * clear sta table, add BCAST sta... */
-}
-
-static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
-       .get_hcmd_size = iwl4965_get_hcmd_size,
-       .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
-       .chain_noise_reset = iwl4965_chain_noise_reset,
-       .gain_computation = iwl4965_gain_computation,
-       .tx_cmd_protection = iwl_legacy_tx_cmd_protection,
-       .calc_rssi = iwl4965_calc_rssi,
-       .request_scan = iwlagn_request_scan,
-       .post_scan = iwl4965_post_scan,
-};
-
-static struct iwl_lib_ops iwl4965_lib = {
-       .set_hw_params = iwl4965_hw_set_hw_params,
-       .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
-       .txq_set_sched = iwl4965_txq_set_sched,
-       .txq_agg_enable = iwl4965_txq_agg_enable,
-       .txq_agg_disable = iwl4965_txq_agg_disable,
-       .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
-       .txq_free_tfd = iwl_hw_txq_free_tfd,
-       .txq_init = iwl_hw_tx_queue_init,
-       .rx_handler_setup = iwl4965_rx_handler_setup,
-       .setup_deferred_work = iwl4965_setup_deferred_work,
-       .cancel_deferred_work = iwl4965_cancel_deferred_work,
-       .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
-       .alive_notify = iwl4965_alive_notify,
-       .init_alive_start = iwl4965_init_alive_start,
-       .load_ucode = iwl4965_load_bsm,
-       .dump_nic_event_log = iwl_dump_nic_event_log,
-       .dump_nic_error_log = iwl_dump_nic_error_log,
-       .dump_fh = iwl_dump_fh,
-       .set_channel_switch = iwl4965_hw_channel_switch,
-       .apm_ops = {
-               .init = iwl_apm_init,
-               .config = iwl4965_nic_config,
-       },
-       .eeprom_ops = {
-               .regulatory_bands = {
-                       EEPROM_REGULATORY_BAND_1_CHANNELS,
-                       EEPROM_REGULATORY_BAND_2_CHANNELS,
-                       EEPROM_REGULATORY_BAND_3_CHANNELS,
-                       EEPROM_REGULATORY_BAND_4_CHANNELS,
-                       EEPROM_REGULATORY_BAND_5_CHANNELS,
-                       EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
-                       EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
-               },
-               .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
-               .release_semaphore = iwlcore_eeprom_release_semaphore,
-               .calib_version = iwl4965_eeprom_calib_version,
-               .query_addr = iwlcore_eeprom_query_addr,
-       },
-       .send_tx_power  = iwl4965_send_tx_power,
-       .update_chain_flags = iwl_update_chain_flags,
-       .isr_ops = {
-               .isr = iwl_isr_legacy,
-       },
-       .temp_ops = {
-               .temperature = iwl4965_temperature_calib,
-       },
-       .debugfs_ops = {
-               .rx_stats_read = iwl_ucode_rx_stats_read,
-               .tx_stats_read = iwl_ucode_tx_stats_read,
-               .general_stats_read = iwl_ucode_general_stats_read,
-               .bt_stats_read = iwl_ucode_bt_stats_read,
-               .reply_tx_error = iwl_reply_tx_error_read,
-       },
-       .check_plcp_health = iwl_good_plcp_health,
-};
-
-static const struct iwl_legacy_ops iwl4965_legacy_ops = {
-       .post_associate = iwl4965_post_associate,
-       .config_ap = iwl4965_config_ap,
-       .manage_ibss_station = iwlagn_manage_ibss_station,
-       .update_bcast_stations = iwl_update_bcast_stations,
-};
-
-struct ieee80211_ops iwl4965_hw_ops = {
-       .tx = iwlagn_mac_tx,
-       .start = iwlagn_mac_start,
-       .stop = iwlagn_mac_stop,
-       .add_interface = iwl_mac_add_interface,
-       .remove_interface = iwl_mac_remove_interface,
-       .change_interface = iwl_mac_change_interface,
-       .config = iwl_legacy_mac_config,
-       .configure_filter = iwlagn_configure_filter,
-       .set_key = iwlagn_mac_set_key,
-       .update_tkip_key = iwlagn_mac_update_tkip_key,
-       .conf_tx = iwl_mac_conf_tx,
-       .reset_tsf = iwl_legacy_mac_reset_tsf,
-       .bss_info_changed = iwl_legacy_mac_bss_info_changed,
-       .ampdu_action = iwlagn_mac_ampdu_action,
-       .hw_scan = iwl_mac_hw_scan,
-       .sta_add = iwlagn_mac_sta_add,
-       .sta_remove = iwl_mac_sta_remove,
-       .channel_switch = iwlagn_mac_channel_switch,
-       .flush = iwlagn_mac_flush,
-       .tx_last_beacon = iwl_mac_tx_last_beacon,
-};
-
-static const struct iwl_ops iwl4965_ops = {
-       .lib = &iwl4965_lib,
-       .hcmd = &iwl4965_hcmd,
-       .utils = &iwl4965_hcmd_utils,
-       .led = &iwlagn_led_ops,
-       .legacy = &iwl4965_legacy_ops,
-       .ieee80211_ops = &iwl4965_hw_ops,
-};
-
-static struct iwl_base_params iwl4965_base_params = {
-       .eeprom_size = IWL4965_EEPROM_IMG_SIZE,
-       .num_of_queues = IWL49_NUM_QUEUES,
-       .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
-       .pll_cfg_val = 0,
-       .set_l0s = true,
-       .use_bsm = true,
-       .use_isr_legacy = true,
-       .broken_powersave = true,
-       .led_compensation = 61,
-       .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
-       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
-       .wd_timeout = IWL_DEF_WD_TIMEOUT,
-       .temperature_kelvin = true,
-       .max_event_log_size = 512,
-       .tx_power_by_driver = true,
-       .ucode_tracing = true,
-       .sensitivity_calib_by_driver = true,
-       .chain_noise_calib_by_driver = true,
-       .no_agg_framecnt_info = true,
-};
-
-struct iwl_cfg iwl4965_agn_cfg = {
-       .name = "Intel(R) Wireless WiFi Link 4965AGN",
-       .fw_name_pre = IWL4965_FW_PRE,
-       .ucode_api_max = IWL4965_UCODE_API_MAX,
-       .ucode_api_min = IWL4965_UCODE_API_MIN,
-       .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
-       .valid_tx_ant = ANT_AB,
-       .valid_rx_ant = ANT_ABC,
-       .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
-       .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
-       .ops = &iwl4965_ops,
-       .mod_params = &iwlagn_mod_params,
-       .base_params = &iwl4965_base_params,
-       .led_mode = IWL_LED_BLINK,
-       /*
-        * Force use of chains B and C for scan RX on 5 GHz band
-        * because the device has off-channel reception on chain A.
-        */
-       .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
-};
-
-/* Module firmware */
-MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
-
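The 4965 support removed above is wired together through nested tables of function pointers: iwl_lib_ops, iwl_hcmd_utils_ops, the legacy ops and the mac80211 ops are gathered into an iwl_ops structure that the per-device iwl_cfg points at, and core code only ever dispatches through priv->cfg->ops. A minimal stand-alone sketch of that composition pattern, using hypothetical demo_* names rather than the driver's real types:

	#include <stdio.h>

	/* Hypothetical miniature of the cfg -> ops -> lib composition seen above;
	 * the demo_* names are illustrative, not the driver's real types. */
	struct demo_lib_ops {
		void (*set_hw_params)(void);
	};

	struct demo_ops {
		const struct demo_lib_ops *lib;
	};

	struct demo_cfg {
		const char *name;
		const struct demo_ops *ops;
	};

	static void demo_set_hw_params(void)
	{
		printf("set_hw_params for this device flavour\n");
	}

	static const struct demo_lib_ops demo_lib = {
		.set_hw_params = demo_set_hw_params,
	};

	static const struct demo_ops demo_ops_table = {
		.lib = &demo_lib,
	};

	static const struct demo_cfg demo_cfg_4965_like = {
		.name = "demo device",
		.ops = &demo_ops_table,
	};

	int main(void)
	{
		/* Core code dispatches only through the cfg, never directly. */
		demo_cfg_4965_like.ops->lib->set_hw_params();
		return 0;
	}
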
index 537fb8c84e3af9dab42874a99f26bef07a6a8245..3ea31b659d1a5b620318d731c4036648e9ebd2b6 100644 (file)
@@ -402,8 +402,6 @@ static struct iwl_lib_ops iwl5000_lib = {
                .bt_stats_read = iwl_ucode_bt_stats_read,
                .reply_tx_error = iwl_reply_tx_error_read,
        },
-       .check_plcp_health = iwl_good_plcp_health,
-       .check_ack_health = iwl_good_ack_health,
        .txfifo_flush = iwlagn_txfifo_flush,
        .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
        .tt_ops = {
@@ -471,8 +469,6 @@ static struct iwl_lib_ops iwl5150_lib = {
                .bt_stats_read = iwl_ucode_bt_stats_read,
                .reply_tx_error = iwl_reply_tx_error_read,
        },
-       .check_plcp_health = iwl_good_plcp_health,
-       .check_ack_health = iwl_good_ack_health,
        .txfifo_flush = iwlagn_txfifo_flush,
        .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
        .tt_ops = {
index ef36aff1bb43e71a629c643e29ca9e02a5858adf..a745b01c0ec1b79086bf5179bad471b0ae446bc3 100644 (file)
 #define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode"
 #define IWL6050_MODULE_FIRMWARE(api) _IWL6050_MODULE_FIRMWARE(api)
 
-#define IWL6000G2A_FW_PRE "iwlwifi-6000g2a-"
-#define _IWL6000G2A_MODULE_FIRMWARE(api) IWL6000G2A_FW_PRE #api ".ucode"
-#define IWL6000G2A_MODULE_FIRMWARE(api) _IWL6000G2A_MODULE_FIRMWARE(api)
+#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
+#define _IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE #api ".ucode"
+#define IWL6005_MODULE_FIRMWARE(api) _IWL6005_MODULE_FIRMWARE(api)
 
-#define IWL6000G2B_FW_PRE "iwlwifi-6000g2b-"
-#define _IWL6000G2B_MODULE_FIRMWARE(api) IWL6000G2B_FW_PRE #api ".ucode"
-#define IWL6000G2B_MODULE_FIRMWARE(api) _IWL6000G2B_MODULE_FIRMWARE(api)
+#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
+#define _IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE #api ".ucode"
+#define IWL6030_MODULE_FIRMWARE(api) _IWL6030_MODULE_FIRMWARE(api)
 
 static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
 {
@@ -90,7 +90,7 @@ static void iwl6050_additional_nic_config(struct iwl_priv *priv)
                                CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
 }
 
-static void iwl6050g2_additional_nic_config(struct iwl_priv *priv)
+static void iwl6150_additional_nic_config(struct iwl_priv *priv)
 {
        /* Indicate calibration version to uCode. */
        if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6)
@@ -343,8 +343,6 @@ static struct iwl_lib_ops iwl6000_lib = {
                .bt_stats_read = iwl_ucode_bt_stats_read,
                .reply_tx_error = iwl_reply_tx_error_read,
        },
-       .check_plcp_health = iwl_good_plcp_health,
-       .check_ack_health = iwl_good_ack_health,
        .txfifo_flush = iwlagn_txfifo_flush,
        .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
        .tt_ops = {
@@ -354,7 +352,7 @@ static struct iwl_lib_ops iwl6000_lib = {
        }
 };
 
-static struct iwl_lib_ops iwl6000g2b_lib = {
+static struct iwl_lib_ops iwl6030_lib = {
        .set_hw_params = iwl6000_hw_set_hw_params,
        .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
        .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
@@ -415,8 +413,6 @@ static struct iwl_lib_ops iwl6000g2b_lib = {
                .bt_stats_read = iwl_ucode_bt_stats_read,
                .reply_tx_error = iwl_reply_tx_error_read,
        },
-       .check_plcp_health = iwl_good_plcp_health,
-       .check_ack_health = iwl_good_ack_health,
        .txfifo_flush = iwlagn_txfifo_flush,
        .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
        .tt_ops = {
@@ -430,8 +426,8 @@ static struct iwl_nic_ops iwl6050_nic_ops = {
        .additional_nic_config = &iwl6050_additional_nic_config,
 };
 
-static struct iwl_nic_ops iwl6050g2_nic_ops = {
-       .additional_nic_config = &iwl6050g2_additional_nic_config,
+static struct iwl_nic_ops iwl6150_nic_ops = {
+       .additional_nic_config = &iwl6150_additional_nic_config,
 };
 
 static const struct iwl_ops iwl6000_ops = {
@@ -451,17 +447,17 @@ static const struct iwl_ops iwl6050_ops = {
        .ieee80211_ops = &iwlagn_hw_ops,
 };
 
-static const struct iwl_ops iwl6050g2_ops = {
+static const struct iwl_ops iwl6150_ops = {
        .lib = &iwl6000_lib,
        .hcmd = &iwlagn_hcmd,
        .utils = &iwlagn_hcmd_utils,
        .led = &iwlagn_led_ops,
-       .nic = &iwl6050g2_nic_ops,
+       .nic = &iwl6150_nic_ops,
        .ieee80211_ops = &iwlagn_hw_ops,
 };
 
-static const struct iwl_ops iwl6000g2b_ops = {
-       .lib = &iwl6000g2b_lib,
+static const struct iwl_ops iwl6030_ops = {
+       .lib = &iwl6030_lib,
        .hcmd = &iwlagn_bt_hcmd,
        .utils = &iwlagn_hcmd_utils,
        .led = &iwlagn_led_ops,
@@ -479,7 +475,6 @@ static struct iwl_base_params iwl6000_base_params = {
        .shadow_ram_support = true,
        .led_compensation = 51,
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
-       .supports_idle = true,
        .adv_thermal_throttle = true,
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -503,7 +498,6 @@ static struct iwl_base_params iwl6050_base_params = {
        .shadow_ram_support = true,
        .led_compensation = 51,
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
-       .supports_idle = true,
        .adv_thermal_throttle = true,
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -526,7 +520,6 @@ static struct iwl_base_params iwl6000_g2_base_params = {
        .shadow_ram_support = true,
        .led_compensation = 57,
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
-       .supports_idle = true,
        .adv_thermal_throttle = true,
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -555,11 +548,11 @@ static struct iwl_bt_params iwl6000_bt_params = {
 };
 
 #define IWL_DEVICE_6005                                                \
-       .fw_name_pre = IWL6000G2A_FW_PRE,                       \
+       .fw_name_pre = IWL6005_FW_PRE,                  \
        .ucode_api_max = IWL6000G2_UCODE_API_MAX,               \
        .ucode_api_min = IWL6000G2_UCODE_API_MIN,               \
-       .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,             \
-       .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,     \
+       .eeprom_ver = EEPROM_6005_EEPROM_VERSION,               \
+       .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION,       \
        .ops = &iwl6000_ops,                                    \
        .mod_params = &iwlagn_mod_params,                       \
        .base_params = &iwl6000_g2_base_params,                 \
@@ -584,12 +577,12 @@ struct iwl_cfg iwl6005_2bg_cfg = {
 };
 
 #define IWL_DEVICE_6030                                                \
-       .fw_name_pre = IWL6000G2B_FW_PRE,                       \
+       .fw_name_pre = IWL6030_FW_PRE,                  \
        .ucode_api_max = IWL6000G2_UCODE_API_MAX,               \
        .ucode_api_min = IWL6000G2_UCODE_API_MIN,               \
-       .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,             \
-       .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,     \
-       .ops = &iwl6000g2b_ops,                                 \
+       .eeprom_ver = EEPROM_6030_EEPROM_VERSION,               \
+       .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION,       \
+       .ops = &iwl6030_ops,                                    \
        .mod_params = &iwlagn_mod_params,                       \
        .base_params = &iwl6000_g2_base_params,                 \
        .bt_params = &iwl6000_bt_params,                        \
@@ -708,9 +701,9 @@ struct iwl_cfg iwl6150_bgn_cfg = {
        .fw_name_pre = IWL6050_FW_PRE,
        .ucode_api_max = IWL6050_UCODE_API_MAX,
        .ucode_api_min = IWL6050_UCODE_API_MIN,
-       .eeprom_ver = EEPROM_6050G2_EEPROM_VERSION,
-       .eeprom_calib_ver = EEPROM_6050G2_TX_POWER_VERSION,
-       .ops = &iwl6050g2_ops,
+       .eeprom_ver = EEPROM_6150_EEPROM_VERSION,
+       .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,
+       .ops = &iwl6150_ops,
        .mod_params = &iwlagn_mod_params,
        .base_params = &iwl6050_base_params,
        .ht_params = &iwl6000_ht_params,
@@ -736,5 +729,5 @@ struct iwl_cfg iwl6000_3agn_cfg = {
 
 MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL6000G2B_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
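For reference, the firmware-name macros renamed in this file rely on two-level macro expansion plus preprocessor stringification: the outer macro lets an argument such as IWL6000G2_UCODE_API_MAX expand to its numeric value before the inner macro stringifies it and concatenates it between the prefix and the ".ucode" suffix. A small stand-alone sketch of the same pattern (the DEMO_* names are illustrative only):

	#include <stdio.h>

	/* Same two-level stringification trick as the IWL*_MODULE_FIRMWARE macros. */
	#define DEMO_FW_PRE "iwlwifi-6000g2a-"
	#define _DEMO_MODULE_FIRMWARE(api) DEMO_FW_PRE #api ".ucode"
	#define DEMO_MODULE_FIRMWARE(api) _DEMO_MODULE_FIRMWARE(api)

	#define DEMO_UCODE_API_MAX 5

	int main(void)
	{
		/* The indirection expands DEMO_UCODE_API_MAX to 5 before "#"
		 * stringifies it, so this prints: iwlwifi-6000g2a-5.ucode */
		printf("%s\n", DEMO_MODULE_FIRMWARE(DEMO_UCODE_API_MAX));
		return 0;
	}
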
index d16bb5ede01474d9b30631b1ee769890aa257a8f..9006293e740c4110e782f37c424114ad767ddac7 100644 (file)
@@ -631,8 +631,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv, void *resp)
        }
 
        spin_lock_irqsave(&priv->lock, flags);
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics) {
+       if (iwl_bt_statistics(priv)) {
                rx_info = &(((struct iwl_bt_notif_statistics *)resp)->
                              rx.general.common);
                ofdm = &(((struct iwl_bt_notif_statistics *)resp)->rx.ofdm);
@@ -897,8 +896,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
        }
 
        spin_lock_irqsave(&priv->lock, flags);
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics) {
+       if (iwl_bt_statistics(priv)) {
                rx_info = &(((struct iwl_bt_notif_statistics *)stat_resp)->
                              rx.general.common);
        } else {
@@ -913,8 +911,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
 
        rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
        rxon_chnum = le16_to_cpu(ctx->staging.channel);
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics) {
+       if (iwl_bt_statistics(priv)) {
                stat_band24 = !!(((struct iwl_bt_notif_statistics *)
                                 stat_resp)->flag &
                                 STATISTICS_REPLY_FLG_BAND_24G_MSK);
index a6dbd8983dacb32dda5a2cd15ab8cc0f26bfd2f6..b500aaae53ec1bcc0f0e3541e19e31cfa71bd71c 100644 (file)
@@ -39,8 +39,7 @@ static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
        int p = 0;
        u32 flag;
 
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics)
+       if (iwl_bt_statistics(priv))
                flag = le32_to_cpu(priv->_agn.statistics_bt.flag);
        else
                flag = le32_to_cpu(priv->_agn.statistics.flag);
@@ -89,8 +88,7 @@ ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
         * the last statistics notification from uCode
         * might not reflect the current uCode activity
         */
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics) {
+       if (iwl_bt_statistics(priv)) {
                ofdm = &priv->_agn.statistics_bt.rx.ofdm;
                cck = &priv->_agn.statistics_bt.rx.cck;
                general = &priv->_agn.statistics_bt.rx.general.common;
@@ -536,8 +534,7 @@ ssize_t iwl_ucode_tx_stats_read(struct file *file,
          * the last statistics notification from uCode
          * might not reflect the current uCode activity
          */
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics) {
+       if (iwl_bt_statistics(priv)) {
                tx = &priv->_agn.statistics_bt.tx;
                accum_tx = &priv->_agn.accum_statistics_bt.tx;
                delta_tx = &priv->_agn.delta_statistics_bt.tx;
@@ -737,8 +734,7 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
          * the last statistics notification from uCode
          * might not reflect the current uCode activity
          */
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics) {
+       if (iwl_bt_statistics(priv)) {
                general = &priv->_agn.statistics_bt.general.common;
                dbg = &priv->_agn.statistics_bt.general.common.dbg;
                div = &priv->_agn.statistics_bt.general.common.div;
index 366340f3fb0f076a2af46ae4a1c50325185803ba..41543ad4cb8499ba5a365bf54a1c6cd1a47d1f92 100644 (file)
@@ -305,7 +305,11 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
        cmd.slots[0].type = 0; /* BSS */
        cmd.slots[1].type = 1; /* PAN */
 
-       if (ctx_bss->vif && ctx_pan->vif) {
+       if (priv->_agn.hw_roc_channel) {
+               /* both contexts must be used for this to happen */
+               slot1 = priv->_agn.hw_roc_duration;
+               slot0 = IWL_MIN_SLOT_TIME;
+       } else if (ctx_bss->vif && ctx_pan->vif) {
                int bcnint = ctx_pan->vif->bss_conf.beacon_int;
                int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
 
@@ -330,12 +334,12 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
                if (test_bit(STATUS_SCAN_HW, &priv->status) ||
                    (!ctx_bss->vif->bss_conf.idle &&
                     !ctx_bss->vif->bss_conf.assoc)) {
-                       slot0 = dtim * bcnint * 3 - 20;
-                       slot1 = 20;
+                       slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
+                       slot1 = IWL_MIN_SLOT_TIME;
                } else if (!ctx_pan->vif->bss_conf.idle &&
                           !ctx_pan->vif->bss_conf.assoc) {
-                       slot1 = bcnint * 3 - 20;
-                       slot0 = 20;
+                       slot1 = bcnint * 3 - IWL_MIN_SLOT_TIME;
+                       slot0 = IWL_MIN_SLOT_TIME;
                }
        } else if (ctx_pan->vif) {
                slot0 = 0;
@@ -344,8 +348,8 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
                slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
 
                if (test_bit(STATUS_SCAN_HW, &priv->status)) {
-                       slot0 = slot1 * 3 - 20;
-                       slot1 = 20;
+                       slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
+                       slot1 = IWL_MIN_SLOT_TIME;
                }
        }
 
index 1a24946bc2030fade543ab154558295fd9c50de4..c1190d96561441f88d281f1f7be446041f96a7a2 100644 (file)
@@ -63,23 +63,11 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
 }
 
 /* Set led register off */
-static int iwl_led_on_reg(struct iwl_priv *priv)
+void iwlagn_led_enable(struct iwl_priv *priv)
 {
-       IWL_DEBUG_LED(priv, "led on\n");
        iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
-       return 0;
-}
-
-/* Set led register off */
-static int iwl_led_off_reg(struct iwl_priv *priv)
-{
-       IWL_DEBUG_LED(priv, "LED Reg off\n");
-       iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF);
-       return 0;
 }
 
 const struct iwl_led_ops iwlagn_led_ops = {
        .cmd = iwl_send_led_cmd,
-       .on = iwl_led_on_reg,
-       .off = iwl_led_off_reg,
 };
index a594e4fdc6b802ecd53830bbe76781afd8261b83..96f323dc5dd66c73247f3889872e07c216bf9c48 100644 (file)
@@ -28,5 +28,6 @@
 #define __iwl_agn_led_h__
 
 extern const struct iwl_led_ops iwlagn_led_ops;
+void iwlagn_led_enable(struct iwl_priv *priv);
 
 #endif /* __iwl_agn_led_h__ */
index 3dee87e8f55dbdf1f09633f81cc54e8280a61dd0..fd142bee91896ca1ba2a121193c4183f83b89d72 100644 (file)
@@ -473,6 +473,11 @@ void iwlagn_rx_handler_setup(struct iwl_priv *priv)
        priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
                                        iwlagn_rx_calib_complete;
        priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
+
+       /* set up notification wait support */
+       spin_lock_init(&priv->_agn.notif_wait_lock);
+       INIT_LIST_HEAD(&priv->_agn.notif_waits);
+       init_waitqueue_head(&priv->_agn.notif_waitq);
 }
 
 void iwlagn_setup_deferred_work(struct iwl_priv *priv)
@@ -604,6 +609,7 @@ const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
 struct iwl_mod_params iwlagn_mod_params = {
        .amsdu_size_8K = 1,
        .restart_fw = 1,
+       .plcp_check = true,
        /* the rest are 0 by default */
 };
 
@@ -1157,17 +1163,18 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv,
 
        /* rx_status carries information about the packet to mac80211 */
        rx_status.mactime = le64_to_cpu(phy_res->timestamp);
-       rx_status.freq =
-               ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
        rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
                                IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+       rx_status.freq =
+               ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
+                                              rx_status.band);
        rx_status.rate_idx =
                iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
        rx_status.flag = 0;
 
        /* TSF isn't reliable. In order to allow smooth user experience,
         * this W/A doesn't propagate it to the mac80211 */
-       /*rx_status.flag |= RX_FLAG_TSFT;*/
+       /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
 
        priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
 
@@ -1389,15 +1396,12 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
                u32 extra;
                u32 suspend_time = 100;
                u32 scan_suspend_time = 100;
-               unsigned long flags;
 
                IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
-               spin_lock_irqsave(&priv->lock, flags);
                if (priv->is_internal_short_scan)
                        interval = 0;
                else
                        interval = vif->bss_conf.beacon_int;
-               spin_unlock_irqrestore(&priv->lock, flags);
 
                scan->suspend_time = 0;
                scan->max_out_time = cpu_to_le32(200 * 1024);
@@ -1801,26 +1805,39 @@ static const __le32 iwlagn_concurrent_lookup[12] = {
 
 void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
 {
-       struct iwlagn_bt_cmd bt_cmd = {
+       struct iwl_basic_bt_cmd basic = {
                .max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
                .bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
                .bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
                .bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
        };
+       struct iwl6000_bt_cmd bt_cmd_6000;
+       struct iwl2000_bt_cmd bt_cmd_2000;
+       int ret;
 
        BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
-                       sizeof(bt_cmd.bt3_lookup_table));
-
-       if (priv->cfg->bt_params)
-               bt_cmd.prio_boost = priv->cfg->bt_params->bt_prio_boost;
-       else
-               bt_cmd.prio_boost = 0;
-       bt_cmd.kill_ack_mask = priv->kill_ack_mask;
-       bt_cmd.kill_cts_mask = priv->kill_cts_mask;
+                       sizeof(basic.bt3_lookup_table));
+
+       if (priv->cfg->bt_params) {
+               if (priv->cfg->bt_params->bt_session_2) {
+                       bt_cmd_2000.prio_boost = cpu_to_le32(
+                               priv->cfg->bt_params->bt_prio_boost);
+                       bt_cmd_2000.tx_prio_boost = 0;
+                       bt_cmd_2000.rx_prio_boost = 0;
+               } else {
+                       bt_cmd_6000.prio_boost =
+                               priv->cfg->bt_params->bt_prio_boost;
+                       bt_cmd_6000.tx_prio_boost = 0;
+                       bt_cmd_6000.rx_prio_boost = 0;
+               }
+       } else {
+               IWL_ERR(priv, "failed to construct BT Coex Config\n");
+               return;
+       }
 
-       bt_cmd.valid = priv->bt_valid;
-       bt_cmd.tx_prio_boost = 0;
-       bt_cmd.rx_prio_boost = 0;
+       basic.kill_ack_mask = priv->kill_ack_mask;
+       basic.kill_cts_mask = priv->kill_cts_mask;
+       basic.valid = priv->bt_valid;
 
        /*
         * Configure BT coex mode to "no coexistence" when the
@@ -1829,49 +1846,45 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
         * IBSS mode (no proper uCode support for coex then).
         */
        if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) {
-               bt_cmd.flags = 0;
+               basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
        } else {
-               bt_cmd.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
+               basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
                                        IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
                if (priv->cfg->bt_params &&
                    priv->cfg->bt_params->bt_sco_disable)
-                       bt_cmd.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
+                       basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
 
                if (priv->bt_ch_announce)
-                       bt_cmd.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
-               IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", bt_cmd.flags);
+                       basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
+               IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", basic.flags);
        }
-       priv->bt_enable_flag = bt_cmd.flags;
+       priv->bt_enable_flag = basic.flags;
        if (priv->bt_full_concurrent)
-               memcpy(bt_cmd.bt3_lookup_table, iwlagn_concurrent_lookup,
+               memcpy(basic.bt3_lookup_table, iwlagn_concurrent_lookup,
                        sizeof(iwlagn_concurrent_lookup));
        else
-               memcpy(bt_cmd.bt3_lookup_table, iwlagn_def_3w_lookup,
+               memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup,
                        sizeof(iwlagn_def_3w_lookup));
 
        IWL_DEBUG_INFO(priv, "BT coex %s in %s mode\n",
-                      bt_cmd.flags ? "active" : "disabled",
+                      basic.flags ? "active" : "disabled",
                       priv->bt_full_concurrent ?
                       "full concurrency" : "3-wire");
 
-       if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, sizeof(bt_cmd), &bt_cmd))
+       if (priv->cfg->bt_params->bt_session_2) {
+               memcpy(&bt_cmd_2000.basic, &basic,
+                       sizeof(basic));
+               ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+                       sizeof(bt_cmd_2000), &bt_cmd_2000);
+       } else {
+               memcpy(&bt_cmd_6000.basic, &basic,
+                       sizeof(basic));
+               ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+                       sizeof(bt_cmd_6000), &bt_cmd_6000);
+       }
+       if (ret)
                IWL_ERR(priv, "failed to send BT Coex Config\n");
 
-       /*
-        * When we are doing a restart, need to also reconfigure BT
-        * SCO to the device. If not doing a restart, bt_sco_active
-        * will always be false, so there's no need to have an extra
-        * variable to check for it.
-        */
-       if (priv->bt_sco_active) {
-               struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
-
-               if (priv->bt_sco_active)
-                       sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
-               if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_SCO,
-                                    sizeof(sco_cmd), &sco_cmd))
-                       IWL_ERR(priv, "failed to send BT SCO command\n");
-       }
 }
 
 static void iwlagn_bt_traffic_change_work(struct work_struct *work)
@@ -1881,6 +1894,11 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
        struct iwl_rxon_context *ctx;
        int smps_request = -1;
 
+       if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
+               /* bt coex disabled */
+               return;
+       }
+
        /*
         * Note: bt_traffic_load can be overridden by scan complete and
         * coex profile notifications. Ignore that since only bad consequence
@@ -1991,12 +2009,14 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
                (BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
                        BT_UART_MSG_FRAME6DISCOVERABLE_POS);
 
-       IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Inquiry/Page SR Mode = "
-                       "0x%X, Connectable = 0x%X",
+       IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Page = "
+                       "0x%X, Inquiry = 0x%X, Connectable = 0x%X",
                (BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
                        BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
-               (BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK & uart_msg->frame7) >>
-                       BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS,
+               (BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
+                       BT_UART_MSG_FRAME7PAGE_POS,
+               (BT_UART_MSG_FRAME7INQUIRY_MSK & uart_msg->frame7) >>
+                       BT_UART_MSG_FRAME7INQUIRY_POS,
                (BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
                        BT_UART_MSG_FRAME7CONNECTABLE_POS);
 }
@@ -2032,9 +2052,13 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
        unsigned long flags;
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
-       struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
        struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;
 
+       if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
+               /* bt coex disabled */
+               return;
+       }
+
        IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n");
        IWL_DEBUG_NOTIF(priv, "    status: %d\n", coex->bt_status);
        IWL_DEBUG_NOTIF(priv, "    traffic load: %d\n", coex->bt_traffic_load);
@@ -2063,15 +2087,6 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
                        queue_work(priv->workqueue,
                                   &priv->bt_traffic_change_work);
                }
-               if (priv->bt_sco_active !=
-                   (uart_msg->frame3 & BT_UART_MSG_FRAME3SCOESCO_MSK)) {
-                       priv->bt_sco_active = uart_msg->frame3 &
-                               BT_UART_MSG_FRAME3SCOESCO_MSK;
-                       if (priv->bt_sco_active)
-                               sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
-                       iwl_send_cmd_pdu_async(priv, REPLY_BT_COEX_SCO,
-                                      sizeof(sco_cmd), &sco_cmd, NULL);
-               }
        }
 
        iwlagn_set_kill_msk(priv, uart_msg);
@@ -2389,3 +2404,44 @@ int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
        }
        return 0;
 }
+
+/* notification wait support */
+void iwlagn_init_notification_wait(struct iwl_priv *priv,
+                                  struct iwl_notification_wait *wait_entry,
+                                  void (*fn)(struct iwl_priv *priv,
+                                             struct iwl_rx_packet *pkt),
+                                  u8 cmd)
+{
+       wait_entry->fn = fn;
+       wait_entry->cmd = cmd;
+       wait_entry->triggered = false;
+
+       spin_lock_bh(&priv->_agn.notif_wait_lock);
+       list_add(&wait_entry->list, &priv->_agn.notif_waits);
+       spin_unlock_bh(&priv->_agn.notif_wait_lock);
+}
+
+signed long iwlagn_wait_notification(struct iwl_priv *priv,
+                                    struct iwl_notification_wait *wait_entry,
+                                    unsigned long timeout)
+{
+       int ret;
+
+       ret = wait_event_timeout(priv->_agn.notif_waitq,
+                                &wait_entry->triggered,
+                                timeout);
+
+       spin_lock_bh(&priv->_agn.notif_wait_lock);
+       list_del(&wait_entry->list);
+       spin_unlock_bh(&priv->_agn.notif_wait_lock);
+
+       return ret;
+}
+
+void iwlagn_remove_notification(struct iwl_priv *priv,
+                               struct iwl_notification_wait *wait_entry)
+{
+       spin_lock_bh(&priv->_agn.notif_wait_lock);
+       list_del(&wait_entry->list);
+       spin_unlock_bh(&priv->_agn.notif_wait_lock);
+}
index 75fcd30a7c13fbbd22e7a276f8e612c096a2a53b..d03b4734c8921cdc20c4ef67c820ab24d9f7fa3b 100644 (file)
@@ -179,31 +179,31 @@ static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
 };
 
 static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
-       {0, 0, 0, 0, 42, 0,  76, 102, 124, 158, 183, 193, 202}, /* Norm */
-       {0, 0, 0, 0, 46, 0,  82, 110, 132, 167, 192, 202, 210}, /* SGI */
-       {0, 0, 0, 0, 48, 0,  93, 135, 176, 251, 319, 351, 381}, /* AGG */
-       {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
+       {0, 0, 0, 0, 42, 0,  76, 102, 124, 159, 183, 193, 202}, /* Norm */
+       {0, 0, 0, 0, 46, 0,  82, 110, 132, 168, 192, 202, 210}, /* SGI */
+       {0, 0, 0, 0, 47, 0,  91, 133, 171, 242, 305, 334, 362}, /* AGG */
+       {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
 };
 
 static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
        {0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
        {0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
-       {0, 0, 0, 0,  96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
-       {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
+       {0, 0, 0, 0,  94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
+       {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
 };
 
 static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
-       {0, 0, 0, 0,  74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
-       {0, 0, 0, 0,  81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
-       {0, 0, 0, 0,  92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
-       {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
+       {0, 0, 0, 0,  74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
+       {0, 0, 0, 0,  81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
+       {0, 0, 0, 0,  89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
+       {0, 0, 0, 0,  97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
 };
 
 static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
        {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
        {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
-       {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
-       {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
+       {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
+       {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
 };
 
 static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
@@ -2890,6 +2890,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
        u8 ant_toggle_cnt = 0;
        u8 use_ht_possible = 1;
        u8 valid_tx_ant = 0;
+       struct iwl_station_priv *sta_priv =
+               container_of(lq_sta, struct iwl_station_priv, lq_sta);
        struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
 
        /* Override starting rate (index 0) if needed for debug purposes */
@@ -3008,7 +3010,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
                repeat_rate--;
        }
 
-       lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+       lq_cmd->agg_params.agg_frame_cnt_limit =
+               sta_priv->max_agg_bufsize ?: LINK_QUAL_AGG_FRAME_LIMIT_DEF;
        lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
 
        lq_cmd->agg_params.agg_time_limit =
index 75e50d33ecb3a14a9d304b0ffe0342dc0d81a81d..184828c72b311099ec8c56e8a8a132644b421d3f 100644 (file)
@@ -213,6 +213,7 @@ enum {
         IWL_CCK_BASIC_RATES_MASK)
 
 #define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
+#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
 
 #define IWL_INVALID_VALUE    -1
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
deleted file mode 100644 (file)
index bbd40b7..0000000
+++ /dev/null
@@ -1,356 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-agn-calib.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-agn-hw.h"
-#include "iwl-agn.h"
-
-void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb)
-
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_missed_beacon_notif *missed_beacon;
-
-       missed_beacon = &pkt->u.missed_beacon;
-       if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
-           priv->missed_beacon_threshold) {
-               IWL_DEBUG_CALIB(priv,
-                   "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
-                   le32_to_cpu(missed_beacon->consecutive_missed_beacons),
-                   le32_to_cpu(missed_beacon->total_missed_becons),
-                   le32_to_cpu(missed_beacon->num_recvd_beacons),
-                   le32_to_cpu(missed_beacon->num_expected_beacons));
-               if (!test_bit(STATUS_SCANNING, &priv->status))
-                       iwl_init_sensitivity(priv);
-       }
-}
-
-/* Calculate noise level, based on measurements during network silence just
- *   before arriving beacon.  This measurement can be done only if we know
- *   exactly when to expect beacons, therefore only when we're associated. */
-static void iwl_rx_calc_noise(struct iwl_priv *priv)
-{
-       struct statistics_rx_non_phy *rx_info;
-       int num_active_rx = 0;
-       int total_silence = 0;
-       int bcn_silence_a, bcn_silence_b, bcn_silence_c;
-       int last_rx_noise;
-
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics)
-               rx_info = &(priv->_agn.statistics_bt.rx.general.common);
-       else
-               rx_info = &(priv->_agn.statistics.rx.general);
-       bcn_silence_a =
-               le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
-       bcn_silence_b =
-               le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
-       bcn_silence_c =
-               le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
-
-       if (bcn_silence_a) {
-               total_silence += bcn_silence_a;
-               num_active_rx++;
-       }
-       if (bcn_silence_b) {
-               total_silence += bcn_silence_b;
-               num_active_rx++;
-       }
-       if (bcn_silence_c) {
-               total_silence += bcn_silence_c;
-               num_active_rx++;
-       }
-
-       /* Average among active antennas */
-       if (num_active_rx)
-               last_rx_noise = (total_silence / num_active_rx) - 107;
-       else
-               last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
-
-       IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
-                       bcn_silence_a, bcn_silence_b, bcn_silence_c,
-                       last_rx_noise);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-/*
- *  based on the assumption of all statistics counter are in DWORD
- *  FIXME: This function is for debugging, do not deal with
- *  the case of counters roll-over.
- */
-static void iwl_accumulative_statistics(struct iwl_priv *priv,
-                                       __le32 *stats)
-{
-       int i, size;
-       __le32 *prev_stats;
-       u32 *accum_stats;
-       u32 *delta, *max_delta;
-       struct statistics_general_common *general, *accum_general;
-       struct statistics_tx *tx, *accum_tx;
-
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics) {
-               prev_stats = (__le32 *)&priv->_agn.statistics_bt;
-               accum_stats = (u32 *)&priv->_agn.accum_statistics_bt;
-               size = sizeof(struct iwl_bt_notif_statistics);
-               general = &priv->_agn.statistics_bt.general.common;
-               accum_general = &priv->_agn.accum_statistics_bt.general.common;
-               tx = &priv->_agn.statistics_bt.tx;
-               accum_tx = &priv->_agn.accum_statistics_bt.tx;
-               delta = (u32 *)&priv->_agn.delta_statistics_bt;
-               max_delta = (u32 *)&priv->_agn.max_delta_bt;
-       } else {
-               prev_stats = (__le32 *)&priv->_agn.statistics;
-               accum_stats = (u32 *)&priv->_agn.accum_statistics;
-               size = sizeof(struct iwl_notif_statistics);
-               general = &priv->_agn.statistics.general.common;
-               accum_general = &priv->_agn.accum_statistics.general.common;
-               tx = &priv->_agn.statistics.tx;
-               accum_tx = &priv->_agn.accum_statistics.tx;
-               delta = (u32 *)&priv->_agn.delta_statistics;
-               max_delta = (u32 *)&priv->_agn.max_delta;
-       }
-       for (i = sizeof(__le32); i < size;
-            i += sizeof(__le32), stats++, prev_stats++, delta++,
-            max_delta++, accum_stats++) {
-               if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
-                       *delta = (le32_to_cpu(*stats) -
-                               le32_to_cpu(*prev_stats));
-                       *accum_stats += *delta;
-                       if (*delta > *max_delta)
-                               *max_delta = *delta;
-               }
-       }
-
-       /* reset accumulative statistics for "no-counter" type statistics */
-       accum_general->temperature = general->temperature;
-       accum_general->temperature_m = general->temperature_m;
-       accum_general->ttl_timestamp = general->ttl_timestamp;
-       accum_tx->tx_power.ant_a = tx->tx_power.ant_a;
-       accum_tx->tx_power.ant_b = tx->tx_power.ant_b;
-       accum_tx->tx_power.ant_c = tx->tx_power.ant_c;
-}
-#endif
-
-#define REG_RECALIB_PERIOD (60)
-
-/**
- * iwl_good_plcp_health - checks for plcp error.
- *
- * When the plcp error is exceeding the thresholds, reset the radio
- * to improve the throughput.
- */
-bool iwl_good_plcp_health(struct iwl_priv *priv,
-                               struct iwl_rx_packet *pkt)
-{
-       bool rc = true;
-       int combined_plcp_delta;
-       unsigned int plcp_msec;
-       unsigned long plcp_received_jiffies;
-
-       if (priv->cfg->base_params->plcp_delta_threshold ==
-           IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
-               IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
-               return rc;
-       }
-
-       /*
-        * check for plcp_err and trigger radio reset if it exceeds
-        * the plcp error threshold plcp_delta.
-        */
-       plcp_received_jiffies = jiffies;
-       plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
-                                       (long) priv->plcp_jiffies);
-       priv->plcp_jiffies = plcp_received_jiffies;
-       /*
-        * check to make sure plcp_msec is not 0 to prevent division
-        * by zero.
-        */
-       if (plcp_msec) {
-               struct statistics_rx_phy *ofdm;
-               struct statistics_rx_ht_phy *ofdm_ht;
-
-               if (priv->cfg->bt_params &&
-                   priv->cfg->bt_params->bt_statistics) {
-                       ofdm = &pkt->u.stats_bt.rx.ofdm;
-                       ofdm_ht = &pkt->u.stats_bt.rx.ofdm_ht;
-                       combined_plcp_delta =
-                          (le32_to_cpu(ofdm->plcp_err) -
-                          le32_to_cpu(priv->_agn.statistics_bt.
-                                      rx.ofdm.plcp_err)) +
-                          (le32_to_cpu(ofdm_ht->plcp_err) -
-                          le32_to_cpu(priv->_agn.statistics_bt.
-                                      rx.ofdm_ht.plcp_err));
-               } else {
-                       ofdm = &pkt->u.stats.rx.ofdm;
-                       ofdm_ht = &pkt->u.stats.rx.ofdm_ht;
-                       combined_plcp_delta =
-                           (le32_to_cpu(ofdm->plcp_err) -
-                           le32_to_cpu(priv->_agn.statistics.
-                                       rx.ofdm.plcp_err)) +
-                           (le32_to_cpu(ofdm_ht->plcp_err) -
-                           le32_to_cpu(priv->_agn.statistics.
-                                       rx.ofdm_ht.plcp_err));
-               }
-
-               if ((combined_plcp_delta > 0) &&
-                   ((combined_plcp_delta * 100) / plcp_msec) >
-                       priv->cfg->base_params->plcp_delta_threshold) {
-                       /*
-                        * if plcp_err exceed the threshold,
-                        * the following data is printed in csv format:
-                        *    Text: plcp_err exceeded %d,
-                        *    Received ofdm.plcp_err,
-                        *    Current ofdm.plcp_err,
-                        *    Received ofdm_ht.plcp_err,
-                        *    Current ofdm_ht.plcp_err,
-                        *    combined_plcp_delta,
-                        *    plcp_msec
-                        */
-                       IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
-                               "%u, %u, %u, %u, %d, %u mSecs\n",
-                               priv->cfg->base_params->plcp_delta_threshold,
-                               le32_to_cpu(ofdm->plcp_err),
-                               le32_to_cpu(ofdm->plcp_err),
-                               le32_to_cpu(ofdm_ht->plcp_err),
-                               le32_to_cpu(ofdm_ht->plcp_err),
-                               combined_plcp_delta, plcp_msec);
-
-                       rc = false;
-               }
-       }
-       return rc;
-}
-
-void iwl_rx_statistics(struct iwl_priv *priv,
-                             struct iwl_rx_mem_buffer *rxb)
-{
-       int change;
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics) {
-               IWL_DEBUG_RX(priv,
-                            "Statistics notification received (%d vs %d).\n",
-                            (int)sizeof(struct iwl_bt_notif_statistics),
-                            le32_to_cpu(pkt->len_n_flags) &
-                            FH_RSCSR_FRAME_SIZE_MSK);
-
-               change = ((priv->_agn.statistics_bt.general.common.temperature !=
-                          pkt->u.stats_bt.general.common.temperature) ||
-                          ((priv->_agn.statistics_bt.flag &
-                          STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
-                          (pkt->u.stats_bt.flag &
-                          STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-               iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt);
-#endif
-
-       } else {
-               IWL_DEBUG_RX(priv,
-                            "Statistics notification received (%d vs %d).\n",
-                            (int)sizeof(struct iwl_notif_statistics),
-                            le32_to_cpu(pkt->len_n_flags) &
-                            FH_RSCSR_FRAME_SIZE_MSK);
-
-               change = ((priv->_agn.statistics.general.common.temperature !=
-                          pkt->u.stats.general.common.temperature) ||
-                          ((priv->_agn.statistics.flag &
-                          STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
-                          (pkt->u.stats.flag &
-                          STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-               iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
-#endif
-
-       }
-
-       iwl_recover_from_statistics(priv, pkt);
-
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->bt_statistics)
-               memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
-                       sizeof(priv->_agn.statistics_bt));
-       else
-               memcpy(&priv->_agn.statistics, &pkt->u.stats,
-                       sizeof(priv->_agn.statistics));
-
-       set_bit(STATUS_STATISTICS, &priv->status);
-
-       /* Reschedule the statistics timer to occur in
-        * REG_RECALIB_PERIOD seconds to ensure we get a
-        * thermal update even if the uCode doesn't give
-        * us one */
-       mod_timer(&priv->statistics_periodic, jiffies +
-                 msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
-
-       if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
-           (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
-               iwl_rx_calc_noise(priv);
-               queue_work(priv->workqueue, &priv->run_time_calib_work);
-       }
-       if (priv->cfg->ops->lib->temp_ops.temperature && change)
-               priv->cfg->ops->lib->temp_ops.temperature(priv);
-}
-
-void iwl_reply_statistics(struct iwl_priv *priv,
-                             struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
-       if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-               memset(&priv->_agn.accum_statistics, 0,
-                       sizeof(struct iwl_notif_statistics));
-               memset(&priv->_agn.delta_statistics, 0,
-                       sizeof(struct iwl_notif_statistics));
-               memset(&priv->_agn.max_delta, 0,
-                       sizeof(struct iwl_notif_statistics));
-               memset(&priv->_agn.accum_statistics_bt, 0,
-                       sizeof(struct iwl_bt_notif_statistics));
-               memset(&priv->_agn.delta_statistics_bt, 0,
-                       sizeof(struct iwl_bt_notif_statistics));
-               memset(&priv->_agn.max_delta_bt, 0,
-                       sizeof(struct iwl_bt_notif_statistics));
-#endif
-               IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
-       }
-       iwl_rx_statistics(priv, rxb);
-}
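The iwl_rx_calc_noise() routine in the file deleted above averages the beacon-silence RSSI over whichever receive chains reported a non-zero value and subtracts a fixed offset of 107 to produce a dBm-scale noise figure. A worked example of that arithmetic, with made-up readings of 35 and 40 on two active chains and a third chain inactive:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical beacon-silence readings for chains A, B, C;
		 * zero means the chain reported nothing. */
		int silence[3] = { 35, 40, 0 };
		int total = 0, active = 0, i;

		for (i = 0; i < 3; i++) {
			if (silence[i]) {
				total += silence[i];
				active++;
			}
		}

		/* (35 + 40) / 2 = 37 (integer division), 37 - 107 = -70 dBm */
		if (active)
			printf("noise = %d dBm\n", total / active - 107);
		else
			printf("noise not available\n");
		return 0;
	}
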
index 6d140bd5329187ed7a2d8a9b713933c00e835e80..dfdbea6e8f99530f05ce3c7fec062f2a6ff1a381 100644 (file)
@@ -52,10 +52,14 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
                              struct iwl_rxon_context *ctx,
                              struct iwl_rxon_cmd *send)
 {
+       struct iwl_notification_wait disable_wait;
        __le32 old_filter = send->filter_flags;
        u8 old_dev_type = send->dev_type;
        int ret;
 
+       iwlagn_init_notification_wait(priv, &disable_wait, NULL,
+                                     REPLY_WIPAN_DEACTIVATION_COMPLETE);
+
        send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
        send->dev_type = RXON_DEV_TYPE_P2P;
        ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send);
@@ -63,11 +67,18 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
        send->filter_flags = old_filter;
        send->dev_type = old_dev_type;
 
-       if (ret)
+       if (ret) {
                IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
-
-       /* FIXME: WAIT FOR PAN DISABLE */
-       msleep(300);
+               iwlagn_remove_notification(priv, &disable_wait);
+       } else {
+               signed long wait_res;
+
+               wait_res = iwlagn_wait_notification(priv, &disable_wait, HZ);
+               if (wait_res == 0) {
+                       IWL_ERR(priv, "Timed out waiting for PAN disable\n");
+                       ret = -EIO;
+               }
+       }
 
        return ret;
 }
@@ -145,6 +156,23 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        /* always get timestamp with Rx frame */
        ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
 
+       if (ctx->ctxid == IWL_RXON_CTX_PAN && priv->_agn.hw_roc_channel) {
+               struct ieee80211_channel *chan = priv->_agn.hw_roc_channel;
+
+               iwl_set_rxon_channel(priv, chan, ctx);
+               iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
+               ctx->staging.filter_flags |=
+                       RXON_FILTER_ASSOC_MSK |
+                       RXON_FILTER_PROMISC_MSK |
+                       RXON_FILTER_CTL2HOST_MSK;
+               ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
+               new_assoc = true;
+
+               if (memcmp(&ctx->staging, &ctx->active,
+                          sizeof(ctx->staging)) == 0)
+                       return 0;
+       }
+
        if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
            !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
                ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -288,10 +316,9 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
         * If we issue a new RXON command which required a tune then we must
         * send a new TXPOWER command or we won't be able to Tx any frames.
         *
-        * FIXME: which RXON requires a tune? Can we optimise this out in
-        *        some cases?
+        * It's expected we set power here if channel is changing.
         */
-       ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
+       ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
        if (ret) {
                IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
                return ret;
@@ -444,6 +471,7 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
        struct iwl_rxon_context *tmp;
        struct ieee80211_sta *sta;
        struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+       struct ieee80211_sta_ht_cap *ht_cap;
        bool need_multiple;
 
        lockdep_assert_held(&priv->mutex);
@@ -452,23 +480,7 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
        case NL80211_IFTYPE_STATION:
                rcu_read_lock();
                sta = ieee80211_find_sta(vif, bss_conf->bssid);
-               if (sta) {
-                       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
-                       int maxstreams;
-
-                       maxstreams = (ht_cap->mcs.tx_params &
-                                     IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
-                                       >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
-                       maxstreams += 1;
-
-                       need_multiple = true;
-
-                       if ((ht_cap->mcs.rx_mask[1] == 0) &&
-                           (ht_cap->mcs.rx_mask[2] == 0))
-                               need_multiple = false;
-                       if (maxstreams <= 1)
-                               need_multiple = false;
-               } else {
+               if (!sta) {
                        /*
                         * If at all, this can only happen through a race
                         * when the AP disconnects us while we're still
@@ -476,7 +488,46 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
                         * will soon tell us about that.
                         */
                        need_multiple = false;
+                       rcu_read_unlock();
+                       break;
+               }
+
+               ht_cap = &sta->ht_cap;
+
+               need_multiple = true;
+
+               /*
+                * If the peer advertises no support for receiving 2 and 3
+                * stream MCS rates, it can't be transmitting them either.
+                */
+               if (ht_cap->mcs.rx_mask[1] == 0 &&
+                   ht_cap->mcs.rx_mask[2] == 0) {
+                       need_multiple = false;
+               } else if (!(ht_cap->mcs.tx_params &
+                                               IEEE80211_HT_MCS_TX_DEFINED)) {
+                       /* If it can't TX MCS at all ... */
+                       need_multiple = false;
+               } else if (ht_cap->mcs.tx_params &
+                                               IEEE80211_HT_MCS_TX_RX_DIFF) {
+                       int maxstreams;
+
+                       /*
+                        * But if it can receive them, it might still not
+                        * be able to transmit them, which is what we need
+                        * to check here -- so check the number of streams
+                        * it advertises for TX (if different from RX).
+                        */
+
+                       maxstreams = (ht_cap->mcs.tx_params &
+                                IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK);
+                       maxstreams >>=
+                               IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+                       maxstreams += 1;
+
+                       if (maxstreams <= 1)
+                               need_multiple = false;
                }
+
                rcu_read_unlock();
                break;
        case NL80211_IFTYPE_ADHOC:
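
/*
 * Editor's sketch (not driver code) of the chain-selection logic added
 * above: decide from the peer's HT MCS set whether it can actually
 * transmit more than one spatial stream.  The IEEE80211_HT_MCS_TX_*
 * constants are redefined locally so this builds standalone; treat the
 * numeric values as assumptions and check include/linux/ieee80211.h for
 * the authoritative definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HT_MCS_TX_DEFINED               0x01
#define HT_MCS_TX_RX_DIFF               0x02
#define HT_MCS_TX_MAX_STREAMS_MASK      0x0C
#define HT_MCS_TX_MAX_STREAMS_SHIFT     2

struct ht_mcs_info {
        uint8_t rx_mask[10];    /* RX MCS bitmap, one byte per stream */
        uint8_t tx_params;
};

static bool peer_needs_multiple_chains(const struct ht_mcs_info *mcs)
{
        int maxstreams;

        /* No 2- or 3-stream RX support implies no multi-stream TX either. */
        if (mcs->rx_mask[1] == 0 && mcs->rx_mask[2] == 0)
                return false;

        /* Peer does not describe its TX MCS set at all. */
        if (!(mcs->tx_params & HT_MCS_TX_DEFINED))
                return false;

        /* TX set equals RX set, which we already know is multi-stream. */
        if (!(mcs->tx_params & HT_MCS_TX_RX_DIFF))
                return true;

        /* TX set differs from RX: check the advertised TX stream count. */
        maxstreams = (mcs->tx_params & HT_MCS_TX_MAX_STREAMS_MASK)
                        >> HT_MCS_TX_MAX_STREAMS_SHIFT;
        maxstreams += 1;        /* the field is zero-based */

        return maxstreams > 1;
}

int main(void)
{
        struct ht_mcs_info two_stream = {
                .rx_mask = { 0xff, 0xff },      /* MCS 0-15 */
                .tx_params = HT_MCS_TX_DEFINED, /* TX set == RX set */
        };

        printf("need multiple chains: %d\n",
               peer_needs_multiple_chains(&two_stream));
        return 0;
}
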
@@ -546,12 +597,10 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
 
        if (changes & BSS_CHANGED_ASSOC) {
                if (bss_conf->assoc) {
-                       iwl_led_associate(priv);
                        priv->timestamp = bss_conf->timestamp;
                        ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
                } else {
                        ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-                       iwl_led_disassociate(priv);
                }
        }
 
index 24a11b8f73bc1f707de2a5b528e61d7c7b4c6b82..a709d05c5868f3e6a19d586bcdffd560cb69bf6f 100644 (file)
@@ -539,7 +539,14 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
        unsigned long flags;
        bool is_agg = false;
 
-       if (info->control.vif)
+       /*
+        * If the frame needs to go out off-channel, we'll have
+        * put the PAN context on that channel, so make the frame
+        * go out there.
+        */
+       if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
+               ctx = &priv->contexts[IWL_RXON_CTX_PAN];
+       else if (info->control.vif)
                ctx = iwl_rxon_ctx_from_vif(info->control.vif);
 
        spin_lock_irqsave(&priv->lock, flags);
@@ -940,7 +947,7 @@ void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
  */
 void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
 {
-       int ch;
+       int ch, txq_id;
        unsigned long flags;
 
        /* Turn off all Tx DMA fifos */
@@ -959,6 +966,16 @@ void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
                            iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
        }
        spin_unlock_irqrestore(&priv->lock, flags);
+
+       if (!priv->txq)
+               return;
+
+       /* Unmap DMA from host system and free skb's */
+       for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+               if (txq_id == priv->cmd_queue)
+                       iwl_cmd_queue_unmap(priv);
+               else
+                       iwl_tx_queue_unmap(priv, txq_id);
 }
 
 /*
index 24dabcd2a36c6af8e4bb29f88ee395e5ebd94445..d807e5e2b7180056639fd0b012eb775865568b17 100644 (file)
@@ -308,14 +308,6 @@ void iwlagn_init_alive_start(struct iwl_priv *priv)
 {
        int ret = 0;
 
-       /* Check alive response for "valid" sign from uCode */
-       if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
-               /* We had an error bringing up the hardware, so take it
-                * all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
-               goto restart;
-       }
-
        /* initialize uCode was loaded... verify inst image.
         * This is a paranoid check, because we would not have gotten the
         * "initialize" alive if code weren't properly loaded.  */
index c1cfd9952e520a332667f9eda3e7fa1920f46aa8..f189bbe78fa677cf31b4eab3b6e6f760e08272b3 100644 (file)
@@ -59,6 +59,7 @@
 #include "iwl-sta.h"
 #include "iwl-agn-calib.h"
 #include "iwl-agn.h"
+#include "iwl-agn-led.h"
 
 
 /******************************************************************************
@@ -85,7 +86,6 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
 MODULE_VERSION(DRV_VERSION);
 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("iwl4965");
 
 static int iwlagn_ant_coupling;
 static bool iwlagn_bt_ch_announce = 1;
@@ -461,8 +461,21 @@ static void iwl_rx_reply_alive(struct iwl_priv *priv,
        if (palive->is_valid == UCODE_VALID_OK)
                queue_delayed_work(priv->workqueue, pwork,
                                   msecs_to_jiffies(5));
-       else
-               IWL_WARN(priv, "uCode did not respond OK.\n");
+       else {
+               IWL_WARN(priv, "%s uCode did not respond OK.\n",
+                       (palive->ver_subtype == INITIALIZE_SUBTYPE) ?
+                       "init" : "runtime");
+               /*
+                * If the init uCode failed to load, try loading it
+                * again.  We should never get into this situation,
+                * but if it does happen we must not move on to the
+                * "runtime" image without properly calibrating the
+                * device first.
+                */
+               if (palive->ver_subtype == INITIALIZE_SUBTYPE)
+                       priv->ucode_type = UCODE_NONE;
+               queue_work(priv->workqueue, &priv->restart);
+       }
 }
 
 static void iwl_bg_beacon_update(struct work_struct *work)
@@ -699,18 +712,18 @@ static void iwl_bg_ucode_trace(unsigned long data)
        }
 }
 
-static void iwl_rx_beacon_notif(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb)
+static void iwlagn_rx_beacon_notif(struct iwl_priv *priv,
+                                  struct iwl_rx_mem_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl4965_beacon_notif *beacon =
-               (struct iwl4965_beacon_notif *)pkt->u.raw;
+       struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
 #ifdef CONFIG_IWLWIFI_DEBUG
+       u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
        u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
 
-       IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
-               "tsf %d %d rate %d\n",
-               le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
+       IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
+               "tsf:0x%.8x%.8x rate:%d\n",
+               status & TX_STATUS_MSK,
                beacon->beacon_notify_hdr.failure_frame,
                le32_to_cpu(beacon->ibss_mgr_status),
                le32_to_cpu(beacon->high_tsf),
@@ -813,7 +826,7 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
        priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
        priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
            iwl_rx_pm_debug_statistics_notif;
-       priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
+       priv->rx_handlers[BEACON_NOTIFICATION] = iwlagn_rx_beacon_notif;
 
        /*
         * The same handler is used for both the REPLY to a discrete
@@ -846,7 +859,7 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
  * the appropriate handlers, including command responses,
  * frame-received notifications, and other notifications.
  */
-void iwl_rx_handle(struct iwl_priv *priv)
+static void iwl_rx_handle(struct iwl_priv *priv)
 {
        struct iwl_rx_mem_buffer *rxb;
        struct iwl_rx_packet *pkt;
@@ -910,6 +923,27 @@ void iwl_rx_handle(struct iwl_priv *priv)
                        (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
                        (pkt->hdr.cmd != REPLY_TX);
 
+               /*
+                * Do the notification wait before RX handlers so
+                * even if the RX handler consumes the RXB we have
+                * access to it in the notification wait entry.
+                */
+               if (!list_empty(&priv->_agn.notif_waits)) {
+                       struct iwl_notification_wait *w;
+
+                       spin_lock(&priv->_agn.notif_wait_lock);
+                       list_for_each_entry(w, &priv->_agn.notif_waits, list) {
+                               if (w->cmd == pkt->hdr.cmd) {
+                                       w->triggered = true;
+                                       if (w->fn)
+                                               w->fn(priv, pkt);
+                               }
+                       }
+                       spin_unlock(&priv->_agn.notif_wait_lock);
+
+                       wake_up_all(&priv->_agn.notif_waitq);
+               }
+
                /* Based on type of command response or notification,
                 *   handle those that need handling via function in
                 *   rx_handlers table.  See iwl_setup_rx_handlers() */
@@ -1379,66 +1413,6 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
                iwl_enable_rfkill_int(priv);
 }
 
-/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
-#define ACK_CNT_RATIO (50)
-#define BA_TIMEOUT_CNT (5)
-#define BA_TIMEOUT_MAX (16)
-
-/**
- * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
- *
- * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding
- * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
- * operation state.
- */
-bool iwl_good_ack_health(struct iwl_priv *priv,
-                               struct iwl_rx_packet *pkt)
-{
-       bool rc = true;
-       int actual_ack_cnt_delta, expected_ack_cnt_delta;
-       int ba_timeout_delta;
-
-       actual_ack_cnt_delta =
-               le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
-               le32_to_cpu(priv->_agn.statistics.tx.actual_ack_cnt);
-       expected_ack_cnt_delta =
-               le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) -
-               le32_to_cpu(priv->_agn.statistics.tx.expected_ack_cnt);
-       ba_timeout_delta =
-               le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) -
-               le32_to_cpu(priv->_agn.statistics.tx.agg.ba_timeout);
-       if ((priv->_agn.agg_tids_count > 0) &&
-           (expected_ack_cnt_delta > 0) &&
-           (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta)
-               < ACK_CNT_RATIO) &&
-           (ba_timeout_delta > BA_TIMEOUT_CNT)) {
-               IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d,"
-                               " expected_ack_cnt = %d\n",
-                               actual_ack_cnt_delta, expected_ack_cnt_delta);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-               /*
-                * This is ifdef'ed on DEBUGFS because otherwise the
-                * statistics aren't available. If DEBUGFS is set but
-                * DEBUG is not, these will just compile out.
-                */
-               IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n",
-                               priv->_agn.delta_statistics.tx.rx_detected_cnt);
-               IWL_DEBUG_RADIO(priv,
-                               "ack_or_ba_timeout_collision delta = %d\n",
-                               priv->_agn.delta_statistics.tx.
-                               ack_or_ba_timeout_collision);
-#endif
-               IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
-                               ba_timeout_delta);
-               if (!actual_ack_cnt_delta &&
-                   (ba_timeout_delta >= BA_TIMEOUT_MAX))
-                       rc = false;
-       }
-       return rc;
-}
-
-
 /*****************************************************************************
  *
  * sysfs attributes
@@ -2632,13 +2606,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
 
        IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
 
-       if (priv->card_alive.is_valid != UCODE_VALID_OK) {
-               /* We had an error bringing up the hardware, so take it
-                * all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Alive failed.\n");
-               goto restart;
-       }
-
        /* Initialize uCode has loaded Runtime uCode ... verify inst image.
         * This is a paranoid check, because we would not have gotten the
         * "runtime" alive if code weren't properly loaded.  */
@@ -2710,9 +2677,11 @@ static void iwl_alive_start(struct iwl_priv *priv)
                        priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
        }
 
-       if (priv->cfg->bt_params &&
-           !priv->cfg->bt_params->advanced_bt_coexist) {
-               /* Configure Bluetooth device coexistence support */
+       if (!priv->cfg->bt_params || (priv->cfg->bt_params &&
+           !priv->cfg->bt_params->advanced_bt_coexist)) {
+               /*
+                * default is 2-wire BT coexistence support
+                */
                priv->cfg->ops->hcmd->send_bt_config(priv);
        }
 
@@ -2726,8 +2695,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
        /* At this point, the NIC is initialized and operational */
        iwl_rf_kill_ct_config(priv);
 
-       iwl_leds_init(priv);
-
        IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
        wake_up_interruptible(&priv->wait_command_queue);
 
@@ -2769,7 +2736,6 @@ static void __iwl_down(struct iwl_priv *priv)
                         priv->cfg->bt_params->bt_init_traffic_load;
        else
                priv->bt_traffic_load = 0;
-       priv->bt_sco_active = false;
        priv->bt_full_concurrent = false;
        priv->bt_ci_compliance = 0;
 
@@ -3063,8 +3029,7 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
        }
 
        if (priv->start_calib) {
-               if (priv->cfg->bt_params &&
-                   priv->cfg->bt_params->bt_statistics) {
+               if (iwl_bt_statistics(priv)) {
                        iwl_chain_noise_calibration(priv,
                                        (void *)&priv->_agn.statistics_bt);
                        iwl_sensitivity_calibration(priv,
@@ -3089,7 +3054,7 @@ static void iwl_bg_restart(struct work_struct *data)
 
        if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
                struct iwl_rxon_context *ctx;
-               bool bt_sco, bt_full_concurrent;
+               bool bt_full_concurrent;
                u8 bt_ci_compliance;
                u8 bt_load;
                u8 bt_status;
@@ -3108,7 +3073,6 @@ static void iwl_bg_restart(struct work_struct *data)
                 * re-configure the hw when we reconfigure the BT
                 * command.
                 */
-               bt_sco = priv->bt_sco_active;
                bt_full_concurrent = priv->bt_full_concurrent;
                bt_ci_compliance = priv->bt_ci_compliance;
                bt_load = priv->bt_traffic_load;
@@ -3116,7 +3080,6 @@ static void iwl_bg_restart(struct work_struct *data)
 
                __iwl_down(priv);
 
-               priv->bt_sco_active = bt_sco;
                priv->bt_full_concurrent = bt_full_concurrent;
                priv->bt_ci_compliance = bt_ci_compliance;
                priv->bt_traffic_load = bt_load;
@@ -3178,6 +3141,8 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
                    IEEE80211_HW_SPECTRUM_MGMT |
                    IEEE80211_HW_REPORTS_TX_ACK_STATUS;
 
+       hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+
        if (!priv->cfg->base_params->broken_powersave)
                hw->flags |= IEEE80211_HW_SUPPORTS_PS |
                             IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
@@ -3194,8 +3159,11 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
                hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
        }
 
+       hw->wiphy->max_remain_on_channel_duration = 1000;
+
        hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
-                           WIPHY_FLAG_DISABLE_BEACON_HINTS;
+                           WIPHY_FLAG_DISABLE_BEACON_HINTS |
+                           WIPHY_FLAG_IBSS_RSN;
 
        /*
         * For now, disable PS by default because it affects
@@ -3219,6 +3187,8 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
                priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
                        &priv->bands[IEEE80211_BAND_5GHZ];
 
+       iwl_leds_init(priv);
+
        ret = ieee80211_register_hw(priv->hw);
        if (ret) {
                IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
@@ -3263,7 +3233,7 @@ int iwlagn_mac_start(struct ieee80211_hw *hw)
                }
        }
 
-       iwl_led_start(priv);
+       iwlagn_led_enable(priv);
 
 out:
        priv->is_open = 1;
@@ -3294,7 +3264,7 @@ void iwlagn_mac_stop(struct ieee80211_hw *hw)
        IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
-int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct iwl_priv *priv = hw->priv;
 
@@ -3307,7 +3277,6 @@ int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                dev_kfree_skb_any(skb);
 
        IWL_DEBUG_MACDUMP(priv, "leave\n");
-       return NETDEV_TX_OK;
 }
 
 void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
@@ -3345,6 +3314,14 @@ int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                return -EOPNOTSUPP;
        }
 
+       /*
+        * To support IBSS RSN, don't program group keys in IBSS; the
+        * hardware will then not attempt to decrypt the frames.
+        */
+       if (vif->type == NL80211_IFTYPE_ADHOC &&
+           !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+               return -EOPNOTSUPP;
+
        sta_id = iwl_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
        if (sta_id == IWL_INVALID_STATION)
                return -EINVAL;
@@ -3399,10 +3376,12 @@ int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
                            struct ieee80211_vif *vif,
                            enum ieee80211_ampdu_mlme_action action,
-                           struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+                           struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                           u8 buf_size)
 {
        struct iwl_priv *priv = hw->priv;
        int ret = -EINVAL;
+       struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
 
        IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
                     sta->addr, tid);
@@ -3457,11 +3436,28 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
                }
                break;
        case IEEE80211_AMPDU_TX_OPERATIONAL:
+               /*
+                * If the limit is 0, then it wasn't initialised yet;
+                * use the default. We can do that since we take the
+                * minimum below, and we don't want to go above our
+                * default due to hardware restrictions.
+                */
+               if (sta_priv->max_agg_bufsize == 0)
+                       sta_priv->max_agg_bufsize =
+                               LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+
+               /*
+                * Even though in theory the peer could have different
+                * aggregation reorder buffer sizes for different sessions,
+                * our ucode doesn't allow for that and has a global limit
+                * for each station. Therefore, use the minimum of all the
+                * aggregation sessions and our default value.
+                */
+               sta_priv->max_agg_bufsize =
+                       min(sta_priv->max_agg_bufsize, buf_size);
+
                if (priv->cfg->ht_params &&
                    priv->cfg->ht_params->use_rts_for_aggregation) {
-                       struct iwl_station_priv *sta_priv =
-                               (void *) sta->drv_priv;
-
                        /*
                         * switch to RTS/CTS if it is the prefer protection
                         * method for HT traffic
@@ -3469,9 +3465,13 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
 
                        sta_priv->lq_sta.lq.general_params.flags |=
                                LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
-                       iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
-                                       &sta_priv->lq_sta.lq, CMD_ASYNC, false);
                }
+
+               sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
+                       sta_priv->max_agg_bufsize;
+
+               iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
+                               &sta_priv->lq_sta.lq, CMD_ASYNC, false);
                ret = 0;
                break;
        }
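
/*
 * Editor's sketch (not driver code) of the aggregation-limit clamping in
 * the IEEE80211_AMPDU_TX_OPERATIONAL case above: the frame limit sent to
 * the uCode is the minimum of every session's advertised reorder buffer
 * size (buf_size) and the driver default, because the uCode keeps a
 * single limit per station.  The default of 63 is an assumption for this
 * example; the driver uses LINK_QUAL_AGG_FRAME_LIMIT_DEF.
 */
#include <stdint.h>
#include <stdio.h>

#define AGG_FRAME_LIMIT_DEFAULT 63      /* assumed default for the example */

static uint8_t agg_frame_limit(uint8_t current_limit, uint8_t peer_buf_size)
{
        /* 0 means "not initialised yet": start from the default. */
        if (current_limit == 0)
                current_limit = AGG_FRAME_LIMIT_DEFAULT;

        /* Never exceed what the peer's reorder buffer can hold. */
        return peer_buf_size < current_limit ? peer_buf_size : current_limit;
}

int main(void)
{
        uint8_t limit = 0;

        limit = agg_frame_limit(limit, 32);     /* first session: 63 -> 32 */
        limit = agg_frame_limit(limit, 64);     /* later session keeps 32 */
        printf("agg_frame_cnt_limit = %u\n", limit);
        return 0;
}
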
@@ -3709,6 +3709,95 @@ done:
        IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
+static void iwlagn_disable_roc(struct iwl_priv *priv)
+{
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
+       struct ieee80211_channel *chan = ACCESS_ONCE(priv->hw->conf.channel);
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (!ctx->is_active)
+               return;
+
+       ctx->staging.dev_type = RXON_DEV_TYPE_2STA;
+       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       iwl_set_rxon_channel(priv, chan, ctx);
+       iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
+
+       priv->_agn.hw_roc_channel = NULL;
+
+       iwlcore_commit_rxon(priv, ctx);
+
+       ctx->is_active = false;
+}
+
+static void iwlagn_bg_roc_done(struct work_struct *work)
+{
+       struct iwl_priv *priv = container_of(work, struct iwl_priv,
+                                            _agn.hw_roc_work.work);
+
+       mutex_lock(&priv->mutex);
+       ieee80211_remain_on_channel_expired(priv->hw);
+       iwlagn_disable_roc(priv);
+       mutex_unlock(&priv->mutex);
+}
+
+static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw,
+                                    struct ieee80211_channel *channel,
+                                    enum nl80211_channel_type channel_type,
+                                    int duration)
+{
+       struct iwl_priv *priv = hw->priv;
+       int err = 0;
+
+       if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+               return -EOPNOTSUPP;
+
+       if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
+                                       BIT(NL80211_IFTYPE_P2P_CLIENT)))
+               return -EOPNOTSUPP;
+
+       mutex_lock(&priv->mutex);
+
+       if (priv->contexts[IWL_RXON_CTX_PAN].is_active ||
+           test_bit(STATUS_SCAN_HW, &priv->status)) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       priv->contexts[IWL_RXON_CTX_PAN].is_active = true;
+       priv->_agn.hw_roc_channel = channel;
+       priv->_agn.hw_roc_chantype = channel_type;
+       priv->_agn.hw_roc_duration = DIV_ROUND_UP(duration * 1000, 1024);
+       iwlcore_commit_rxon(priv, &priv->contexts[IWL_RXON_CTX_PAN]);
+       queue_delayed_work(priv->workqueue, &priv->_agn.hw_roc_work,
+                          msecs_to_jiffies(duration + 20));
+
+       msleep(IWL_MIN_SLOT_TIME); /* TU is almost ms */
+       ieee80211_ready_on_channel(priv->hw);
+
+ out:
+       mutex_unlock(&priv->mutex);
+
+       return err;
+}
+
+static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+       struct iwl_priv *priv = hw->priv;
+
+       if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+               return -EOPNOTSUPP;
+
+       cancel_delayed_work_sync(&priv->_agn.hw_roc_work);
+
+       mutex_lock(&priv->mutex);
+       iwlagn_disable_roc(priv);
+       mutex_unlock(&priv->mutex);
+
+       return 0;
+}
+
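
/*
 * Editor's sketch (not driver code) of the duration conversion used in
 * iwl_mac_remain_on_channel() above: mac80211 passes the requested
 * remain-on-channel duration in milliseconds, while the device is
 * programmed in TU (1 TU = 1024 microseconds), hence the round-up
 * division by 1024 after scaling to microseconds.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

static unsigned int ms_to_tu(unsigned int duration_ms)
{
        return DIV_ROUND_UP(duration_ms * 1000, 1024);
}

int main(void)
{
        /* 500 ms -> 489 TU: 500000 us / 1024, rounded up. */
        printf("%u ms = %u TU\n", 500u, ms_to_tu(500));
        return 0;
}
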
 /*****************************************************************************
  *
  * driver setup and teardown
@@ -3730,6 +3819,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
        INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
        INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
        INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start);
+       INIT_DELAYED_WORK(&priv->_agn.hw_roc_work, iwlagn_bg_roc_done);
 
        iwl_setup_scan_deferred_work(priv);
 
@@ -3876,7 +3966,6 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
        kfree(priv->scan_cmd);
 }
 
-#ifdef CONFIG_IWL5000
 struct ieee80211_ops iwlagn_hw_ops = {
        .tx = iwlagn_mac_tx,
        .start = iwlagn_mac_start,
@@ -3898,14 +3987,15 @@ struct ieee80211_ops iwlagn_hw_ops = {
        .channel_switch = iwlagn_mac_channel_switch,
        .flush = iwlagn_mac_flush,
        .tx_last_beacon = iwl_mac_tx_last_beacon,
+       .remain_on_channel = iwl_mac_remain_on_channel,
+       .cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel,
 };
-#endif
 
 static void iwl_hw_detect(struct iwl_priv *priv)
 {
        priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
        priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
-       pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
+       priv->rev_id = priv->pci_dev->revision;
        IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
 }
 
@@ -3967,12 +4057,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (cfg->mod_params->disable_hw_scan) {
                dev_printk(KERN_DEBUG, &(pdev->dev),
                        "sw scan support is deprecated\n");
-#ifdef CONFIG_IWL5000
                iwlagn_hw_ops.hw_scan = NULL;
-#endif
-#ifdef CONFIG_IWL4965
-               iwl4965_hw_ops.hw_scan = NULL;
-#endif
        }
 
        hw = iwl_alloc_all(cfg);
@@ -4025,6 +4110,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
        priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
                BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
+#ifdef CONFIG_IWL_P2P
+       priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
+               BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
+#endif
        priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
        priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
        priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
@@ -4272,6 +4361,9 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
         * we need to set STATUS_EXIT_PENDING bit.
         */
        set_bit(STATUS_EXIT_PENDING, &priv->status);
+
+       iwl_leds_exit(priv);
+
        if (priv->mac80211_registered) {
                ieee80211_unregister_hw(priv->hw);
                priv->mac80211_registered = 0;
@@ -4344,12 +4436,6 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
 
 /* Hardware specific file defines the PCI IDs table for that hardware module */
 static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
-#ifdef CONFIG_IWL4965
-       {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
-       {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
-#endif /* CONFIG_IWL4965 */
-#ifdef CONFIG_IWL5000
-/* 5100 Series WiFi */
        {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
        {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
        {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
@@ -4492,7 +4578,48 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
        {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)},
        {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)},
 
-#endif /* CONFIG_IWL5000 */
+/* 2x00 Series */
+       {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
+       {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
+       {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
+
+/* 2x30 Series */
+       {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)},
+       {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)},
+       {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)},
+
+/* 6x35 Series */
+       {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
+       {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
+       {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
+       {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)},
+       {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)},
+       {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)},
+       {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)},
+       {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)},
+       {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)},
+
+/* 200 Series */
+       {IWL_PCI_DEVICE(0x0894, 0x0022, iwl200_bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0895, 0x0222, iwl200_bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0894, 0x0422, iwl200_bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0894, 0x0026, iwl200_bg_cfg)},
+       {IWL_PCI_DEVICE(0x0895, 0x0226, iwl200_bg_cfg)},
+       {IWL_PCI_DEVICE(0x0894, 0x0426, iwl200_bg_cfg)},
+
+/* 230 Series */
+       {IWL_PCI_DEVICE(0x0892, 0x0062, iwl230_bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0893, 0x0262, iwl230_bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0892, 0x0462, iwl230_bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0892, 0x0066, iwl230_bg_cfg)},
+       {IWL_PCI_DEVICE(0x0893, 0x0266, iwl230_bg_cfg)},
+       {IWL_PCI_DEVICE(0x0892, 0x0466, iwl230_bg_cfg)},
 
        {0}
 };
@@ -4592,3 +4719,9 @@ MODULE_PARM_DESC(antenna_coupling,
 module_param_named(bt_ch_inhibition, iwlagn_bt_ch_announce, bool, S_IRUGO);
 MODULE_PARM_DESC(bt_ch_inhibition,
                 "Disable BT channel inhibition (default: enable)");
+
+module_param_named(plcp_check, iwlagn_mod_params.plcp_check, bool, S_IRUGO);
+MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
+
+module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
+MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
index da303585f801851037bc8397f98c52bd70d36d06..b5a169be48e2d8cc9d1df6703389335aa71f9182 100644 (file)
@@ -96,6 +96,17 @@ extern struct iwl_cfg iwl100_bgn_cfg;
 extern struct iwl_cfg iwl100_bg_cfg;
 extern struct iwl_cfg iwl130_bgn_cfg;
 extern struct iwl_cfg iwl130_bg_cfg;
+extern struct iwl_cfg iwl2000_2bgn_cfg;
+extern struct iwl_cfg iwl2000_2bg_cfg;
+extern struct iwl_cfg iwl2030_2bgn_cfg;
+extern struct iwl_cfg iwl2030_2bg_cfg;
+extern struct iwl_cfg iwl6035_2agn_cfg;
+extern struct iwl_cfg iwl6035_2abg_cfg;
+extern struct iwl_cfg iwl6035_2bg_cfg;
+extern struct iwl_cfg iwl200_bg_cfg;
+extern struct iwl_cfg iwl200_bgn_cfg;
+extern struct iwl_cfg iwl230_bg_cfg;
+extern struct iwl_cfg iwl230_bgn_cfg;
 
 extern struct iwl_mod_params iwlagn_mod_params;
 extern struct iwl_hcmd_ops iwlagn_hcmd;
@@ -110,8 +121,6 @@ void iwl_disable_ict(struct iwl_priv *priv);
 int iwl_alloc_isr_ict(struct iwl_priv *priv);
 void iwl_free_isr_ict(struct iwl_priv *priv);
 irqreturn_t iwl_isr_ict(int irq, void *data);
-bool iwl_good_ack_health(struct iwl_priv *priv,
-                        struct iwl_rx_packet *pkt);
 
 /* tx queue */
 void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
@@ -185,7 +194,6 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv,
                     struct iwl_rx_mem_buffer *rxb);
 void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
                         struct iwl_rx_mem_buffer *rxb);
-void iwl_rx_handle(struct iwl_priv *priv);
 
 /* tx */
 void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
@@ -238,8 +246,6 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
 /* rx */
 void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
                                struct iwl_rx_mem_buffer *rxb);
-bool iwl_good_plcp_health(struct iwl_priv *priv,
-                         struct iwl_rx_packet *pkt);
 void iwl_rx_statistics(struct iwl_priv *priv,
                       struct iwl_rx_mem_buffer *rxb);
 void iwl_reply_statistics(struct iwl_priv *priv,
@@ -330,8 +336,23 @@ void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
 int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv);
 void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv);
 
+/* notification wait support */
+void __acquires(wait_entry)
+iwlagn_init_notification_wait(struct iwl_priv *priv,
+                             struct iwl_notification_wait *wait_entry,
+                             void (*fn)(struct iwl_priv *priv,
+                                        struct iwl_rx_packet *pkt),
+                             u8 cmd);
+signed long __releases(wait_entry)
+iwlagn_wait_notification(struct iwl_priv *priv,
+                        struct iwl_notification_wait *wait_entry,
+                        unsigned long timeout);
+void __releases(wait_entry)
+iwlagn_remove_notification(struct iwl_priv *priv,
+                          struct iwl_notification_wait *wait_entry);
+
 /* mac80211 handlers (for 4965) */
-int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
 int iwlagn_mac_start(struct ieee80211_hw *hw);
 void iwlagn_mac_stop(struct ieee80211_hw *hw);
 void iwlagn_configure_filter(struct ieee80211_hw *hw,
@@ -349,7 +370,8 @@ void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
 int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
                            struct ieee80211_vif *vif,
                            enum ieee80211_ampdu_mlme_action action,
-                           struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+                           struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                           u8 buf_size);
 int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
                       struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta);
index f893d4a6aa876a69b2869c3fbb0edaa642b7dfed..03cfb74da2bc41213a2de882268a1bd2bd402033 100644 (file)
@@ -178,7 +178,6 @@ enum {
        REPLY_BT_COEX_PRIO_TABLE = 0xcc,
        REPLY_BT_COEX_PROT_ENV = 0xcd,
        REPLY_BT_COEX_PROFILE_NOTIF = 0xce,
-       REPLY_BT_COEX_SCO = 0xcf,
 
        /* PAN commands */
        REPLY_WIPAN_PARAMS = 0xb2,
@@ -189,6 +188,7 @@ enum {
        REPLY_WIPAN_WEPKEY = 0xb8,      /* use REPLY_WEPKEY structure */
        REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9,
        REPLY_WIPAN_NOA_NOTIFICATION = 0xbc,
+       REPLY_WIPAN_DEACTIVATION_COMPLETE = 0xbd,
 
        REPLY_MAX = 0xff
 };
@@ -2477,7 +2477,7 @@ struct iwl_bt_cmd {
                                        IWLAGN_BT_VALID_BT4_TIMES | \
                                        IWLAGN_BT_VALID_3W_LUT)
 
-struct iwlagn_bt_cmd {
+struct iwl_basic_bt_cmd {
        u8 flags;
        u8 ledtime; /* unused */
        u8 max_kill;
@@ -2490,6 +2490,10 @@ struct iwlagn_bt_cmd {
        __le32 bt3_lookup_table[12];
        __le16 bt4_decision_time; /* unused */
        __le16 valid;
+};
+
+struct iwl6000_bt_cmd {
+       struct iwl_basic_bt_cmd basic;
        u8 prio_boost;
        /*
         * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask
@@ -2499,6 +2503,18 @@ struct iwlagn_bt_cmd {
        __le16 rx_prio_boost;   /* SW boost of WiFi rx priority */
 };
 
+struct iwl2000_bt_cmd {
+       struct iwl_basic_bt_cmd basic;
+       __le32 prio_boost;
+       /*
+        * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask
+        * if configuring the following patterns
+        */
+       u8 reserved;
+       u8 tx_prio_boost;       /* SW boost of WiFi tx priority */
+       __le16 rx_prio_boost;   /* SW boost of WiFi rx priority */
+};
+
 #define IWLAGN_BT_SCO_ACTIVE   cpu_to_le32(BIT(0))
 
 struct iwlagn_bt_sco_cmd {
@@ -3082,6 +3098,13 @@ struct iwl4965_beacon_notif {
        __le32 ibss_mgr_status;
 } __packed;
 
+struct iwlagn_beacon_notif {
+       struct iwlagn_tx_resp beacon_notify_hdr;
+       __le32 low_tsf;
+       __le32 high_tsf;
+       __le32 ibss_mgr_status;
+} __packed;
+
 /*
  * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
  */
@@ -4143,6 +4166,10 @@ enum iwl_bt_coex_profile_traffic_load {
  */
 };
 
+#define BT_SESSION_ACTIVITY_1_UART_MSG         0x1
+#define BT_SESSION_ACTIVITY_2_UART_MSG         0x2
+
+/* BT UART message - Share Part (BT -> WiFi) */
 #define BT_UART_MSG_FRAME1MSGTYPE_POS          (0)
 #define BT_UART_MSG_FRAME1MSGTYPE_MSK          \
                (0x7 << BT_UART_MSG_FRAME1MSGTYPE_POS)
@@ -4227,9 +4254,12 @@ enum iwl_bt_coex_profile_traffic_load {
 #define BT_UART_MSG_FRAME7SNIFFACTIVITY_POS    (0)
 #define BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK    \
                (0x7 << BT_UART_MSG_FRAME7SNIFFACTIVITY_POS)
-#define BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS        (3)
-#define BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK        \
-               (0x3 << BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS)
+#define BT_UART_MSG_FRAME7PAGE_POS             (3)
+#define BT_UART_MSG_FRAME7PAGE_MSK             \
+               (0x1 << BT_UART_MSG_FRAME7PAGE_POS)
+#define BT_UART_MSG_FRAME7INQUIRY_POS          (4)
+#define BT_UART_MSG_FRAME7INQUIRY_MSK          \
+               (0x1 << BT_UART_MSG_FRAME7INQUIRY_POS)
 #define BT_UART_MSG_FRAME7CONNECTABLE_POS      (5)
 #define BT_UART_MSG_FRAME7CONNECTABLE_MSK      \
                (0x1 << BT_UART_MSG_FRAME7CONNECTABLE_POS)
@@ -4237,6 +4267,83 @@ enum iwl_bt_coex_profile_traffic_load {
 #define BT_UART_MSG_FRAME7RESERVED_MSK         \
                (0x3 << BT_UART_MSG_FRAME7RESERVED_POS)
 
+/* BT Session Activity 2 UART message (BT -> WiFi) */
+#define BT_UART_MSG_2_FRAME1RESERVED1_POS      (5)
+#define BT_UART_MSG_2_FRAME1RESERVED1_MSK      \
+               (0x1<<BT_UART_MSG_2_FRAME1RESERVED1_POS)
+#define BT_UART_MSG_2_FRAME1RESERVED2_POS      (6)
+#define BT_UART_MSG_2_FRAME1RESERVED2_MSK      \
+               (0x3<<BT_UART_MSG_2_FRAME1RESERVED2_POS)
+
+#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS (0)
+#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_MSK \
+               (0x3F<<BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS)
+#define BT_UART_MSG_2_FRAME2RESERVED_POS       (6)
+#define BT_UART_MSG_2_FRAME2RESERVED_MSK       \
+               (0x3<<BT_UART_MSG_2_FRAME2RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS  (0)
+#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_MSK  \
+               (0xF<<BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS)
+#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS  (4)
+#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_MSK  \
+               (0x1<<BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS)
+#define BT_UART_MSG_2_FRAME3LEMASTER_POS       (5)
+#define BT_UART_MSG_2_FRAME3LEMASTER_MSK       \
+               (0x1<<BT_UART_MSG_2_FRAME3LEMASTER_POS)
+#define BT_UART_MSG_2_FRAME3RESERVED_POS       (6)
+#define BT_UART_MSG_2_FRAME3RESERVED_MSK       \
+               (0x3<<BT_UART_MSG_2_FRAME3RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS  (0)
+#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_MSK  \
+               (0xF<<BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS)
+#define BT_UART_MSG_2_FRAME4NUMLECONN_POS      (4)
+#define BT_UART_MSG_2_FRAME4NUMLECONN_MSK      \
+               (0x3<<BT_UART_MSG_2_FRAME4NUMLECONN_POS)
+#define BT_UART_MSG_2_FRAME4RESERVED_POS       (6)
+#define BT_UART_MSG_2_FRAME4RESERVED_MSK       \
+               (0x3<<BT_UART_MSG_2_FRAME4RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME5BTMINRSSI_POS      (0)
+#define BT_UART_MSG_2_FRAME5BTMINRSSI_MSK      \
+               (0xF<<BT_UART_MSG_2_FRAME5BTMINRSSI_POS)
+#define BT_UART_MSG_2_FRAME5LESCANINITMODE_POS (4)
+#define BT_UART_MSG_2_FRAME5LESCANINITMODE_MSK \
+               (0x1<<BT_UART_MSG_2_FRAME5LESCANINITMODE_POS)
+#define BT_UART_MSG_2_FRAME5LEADVERMODE_POS    (5)
+#define BT_UART_MSG_2_FRAME5LEADVERMODE_MSK    \
+               (0x1<<BT_UART_MSG_2_FRAME5LEADVERMODE_POS)
+#define BT_UART_MSG_2_FRAME5RESERVED_POS       (6)
+#define BT_UART_MSG_2_FRAME5RESERVED_MSK       \
+               (0x3<<BT_UART_MSG_2_FRAME5RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS (0)
+#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_MSK \
+               (0x1F<<BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS)
+#define BT_UART_MSG_2_FRAME6RFU_POS            (5)
+#define BT_UART_MSG_2_FRAME6RFU_MSK            \
+               (0x1<<BT_UART_MSG_2_FRAME6RFU_POS)
+#define BT_UART_MSG_2_FRAME6RESERVED_POS       (6)
+#define BT_UART_MSG_2_FRAME6RESERVED_MSK       \
+               (0x3<<BT_UART_MSG_2_FRAME6RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS (0)
+#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_MSK \
+               (0x7<<BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS)
+#define BT_UART_MSG_2_FRAME7LEPROFILE1_POS     (3)
+#define BT_UART_MSG_2_FRAME7LEPROFILE1_MSK     \
+               (0x1<<BT_UART_MSG_2_FRAME7LEPROFILE1_POS)
+#define BT_UART_MSG_2_FRAME7LEPROFILE2_POS     (4)
+#define BT_UART_MSG_2_FRAME7LEPROFILE2_MSK     \
+               (0x1<<BT_UART_MSG_2_FRAME7LEPROFILE2_POS)
+#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS (5)
+#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_MSK \
+               (0x1<<BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS)
+#define BT_UART_MSG_2_FRAME7RESERVED_POS       (6)
+#define BT_UART_MSG_2_FRAME7RESERVED_MSK       \
+               (0x3<<BT_UART_MSG_2_FRAME7RESERVED_POS)
+
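
/*
 * Editor's sketch (not driver code) showing how the position/mask pairs
 * defined above are intended to be used: each field of a BT UART message
 * frame byte is isolated by masking and shifting.  The shorter macro
 * names and the sample frame value below are made up for the example,
 * but the positions match BT_UART_MSG_2_FRAME3INQPAGESRMODE/LEMASTER.
 */
#include <stdint.h>
#include <stdio.h>

#define FRAME3_INQPAGESRMODE_POS        4
#define FRAME3_INQPAGESRMODE_MSK        (0x1 << FRAME3_INQPAGESRMODE_POS)
#define FRAME3_LEMASTER_POS             5
#define FRAME3_LEMASTER_MSK             (0x1 << FRAME3_LEMASTER_POS)

static uint8_t field_get(uint8_t frame, uint8_t msk, uint8_t pos)
{
        return (frame & msk) >> pos;
}

int main(void)
{
        uint8_t frame3 = 0x30;  /* hypothetical frame with both bits set */

        printf("inq/page SR mode: %u, LE master: %u\n",
               field_get(frame3, FRAME3_INQPAGESRMODE_MSK,
                         FRAME3_INQPAGESRMODE_POS),
               field_get(frame3, FRAME3_LEMASTER_MSK, FRAME3_LEMASTER_POS));
        return 0;
}
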
 
 struct iwl_bt_uart_msg {
        u8 header;
@@ -4369,6 +4476,11 @@ int iwl_agn_check_rxon_cmd(struct iwl_priv *priv);
  * REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification)
  */
 
+/*
+ * Minimum slot time in TU
+ */
+#define IWL_MIN_SLOT_TIME      20
+
 /**
  * struct iwl_wipan_slot
  * @width: Time in TU
index efbde1f1a8bfca29924465c0c56ba57cf4c74e17..4bd342060254d93069dea376cc5498f0dfe1d1a0 100644 (file)
 #include "iwl-helpers.h"
 
 
-MODULE_DESCRIPTION("iwl core");
-MODULE_VERSION(IWLWIFI_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-
 /*
  * set bt_coex_active to true, uCode will do kill/defer
  * every time the priority line is asserted (BT is sending signals on the
@@ -65,15 +60,12 @@ MODULE_LICENSE("GPL");
  * default: bt_coex_active = true (BT_COEX_ENABLE)
  */
 bool bt_coex_active = true;
-EXPORT_SYMBOL_GPL(bt_coex_active);
 module_param(bt_coex_active, bool, S_IRUGO);
 MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
 
 u32 iwl_debug_level;
-EXPORT_SYMBOL(iwl_debug_level);
 
 const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
-EXPORT_SYMBOL(iwl_bcast_addr);
 
 
 /* This function both allocates and initializes hw and priv. */
@@ -98,7 +90,6 @@ struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
 out:
        return hw;
 }
-EXPORT_SYMBOL(iwl_alloc_all);
 
 #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
 #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
@@ -219,15 +210,12 @@ int iwlcore_init_geos(struct iwl_priv *priv)
                if (!is_channel_valid(ch))
                        continue;
 
-               if (is_channel_a_band(ch))
-                       sband =  &priv->bands[IEEE80211_BAND_5GHZ];
-               else
-                       sband =  &priv->bands[IEEE80211_BAND_2GHZ];
+               sband =  &priv->bands[ch->band];
 
                geo_ch = &sband->channels[sband->n_channels++];
 
                geo_ch->center_freq =
-                               ieee80211_channel_to_frequency(ch->channel);
+                       ieee80211_channel_to_frequency(ch->channel, ch->band);
                geo_ch->max_power = ch->max_power_avg;
                geo_ch->max_antenna_gain = 0xff;
                geo_ch->hw_value = ch->channel;
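
/*
 * Editor's sketch (not the mac80211 implementation): the hunk above
 * switches to ieee80211_channel_to_frequency(ch->channel, ch->band)
 * because a bare channel number does not map to a unique frequency
 * across bands.  A simplified version of the usual mapping:
 */
#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

static int channel_to_freq_mhz(int chan, enum band band)
{
        if (band == BAND_2GHZ) {
                if (chan == 14)
                        return 2484;
                return 2407 + chan * 5; /* channels 1..13 */
        }
        return 5000 + chan * 5;         /* 5 GHz channels */
}

int main(void)
{
        printf("2.4 GHz ch 6  -> %d MHz\n", channel_to_freq_mhz(6, BAND_2GHZ));
        printf("5 GHz   ch 36 -> %d MHz\n", channel_to_freq_mhz(36, BAND_5GHZ));
        return 0;
}
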
@@ -275,7 +263,6 @@ int iwlcore_init_geos(struct iwl_priv *priv)
 
        return 0;
 }
-EXPORT_SYMBOL(iwlcore_init_geos);
 
 /*
  * iwlcore_free_geos - undo allocations in iwlcore_init_geos
@@ -286,7 +273,6 @@ void iwlcore_free_geos(struct iwl_priv *priv)
        kfree(priv->ieee_rates);
        clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
 }
-EXPORT_SYMBOL(iwlcore_free_geos);
 
 static bool iwl_is_channel_extension(struct iwl_priv *priv,
                                     enum ieee80211_band band,
@@ -331,7 +317,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
                        le16_to_cpu(ctx->staging.channel),
                        ctx->ht.extension_chan_offset);
 }
-EXPORT_SYMBOL(iwl_is_ht40_tx_allowed);
 
 static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
 {
@@ -432,7 +417,6 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        return iwl_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
                                sizeof(ctx->timing), &ctx->timing);
 }
-EXPORT_SYMBOL(iwl_send_rxon_timing);
 
 void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
                           int hw_decrypt)
@@ -445,7 +429,6 @@ void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
                rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
 
 }
-EXPORT_SYMBOL(iwl_set_rxon_hwcrypto);
 
 /* validate RXON structure is valid */
 int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
@@ -518,7 +501,6 @@ int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        }
        return 0;
 }
-EXPORT_SYMBOL(iwl_check_rxon_cmd);
 
 /**
  * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
@@ -582,7 +564,6 @@ int iwl_full_rxon_required(struct iwl_priv *priv,
 
        return 0;
 }
-EXPORT_SYMBOL(iwl_full_rxon_required);
 
 u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
                            struct iwl_rxon_context *ctx)
@@ -596,7 +577,6 @@ u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
        else
                return IWL_RATE_6M_PLCP;
 }
-EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);
 
 static void _iwl_set_rxon_ht(struct iwl_priv *priv,
                             struct iwl_ht_config *ht_conf,
@@ -673,7 +653,6 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
        for_each_context(priv, ctx)
                _iwl_set_rxon_ht(priv, ht_conf, ctx);
 }
-EXPORT_SYMBOL(iwl_set_rxon_ht);
 
 /* Return valid, unused, channel for a passive scan to reset the RF */
 u8 iwl_get_single_channel_number(struct iwl_priv *priv,
@@ -714,7 +693,6 @@ u8 iwl_get_single_channel_number(struct iwl_priv *priv,
 
        return channel;
 }
-EXPORT_SYMBOL(iwl_get_single_channel_number);
 
 /**
  * iwl_set_rxon_channel - Set the band and channel values in staging RXON
@@ -745,7 +723,6 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
 
        return 0;
 }
-EXPORT_SYMBOL(iwl_set_rxon_channel);
 
 void iwl_set_flags_for_band(struct iwl_priv *priv,
                            struct iwl_rxon_context *ctx,
@@ -769,7 +746,6 @@ void iwl_set_flags_for_band(struct iwl_priv *priv,
                ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
        }
 }
-EXPORT_SYMBOL(iwl_set_flags_for_band);
 
 /*
  * initialize rxon structure with default values from eeprom
@@ -841,7 +817,6 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv,
        ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
        ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
 }
-EXPORT_SYMBOL(iwl_connection_init_rx_config);
 
 void iwl_set_rate(struct iwl_priv *priv)
 {
@@ -874,7 +849,6 @@ void iwl_set_rate(struct iwl_priv *priv)
                   (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
        }
 }
-EXPORT_SYMBOL(iwl_set_rate);
 
 void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
 {
@@ -894,7 +868,6 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
                mutex_unlock(&priv->mutex);
        }
 }
-EXPORT_SYMBOL(iwl_chswitch_done);
 
 void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 {
@@ -922,7 +895,6 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
                }
        }
 }
-EXPORT_SYMBOL(iwl_rx_csa);
 
 #ifdef CONFIG_IWLWIFI_DEBUG
 void iwl_print_rx_config_cmd(struct iwl_priv *priv,
@@ -944,13 +916,15 @@ void iwl_print_rx_config_cmd(struct iwl_priv *priv,
        IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
        IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
 }
-EXPORT_SYMBOL(iwl_print_rx_config_cmd);
 #endif
 /**
  * iwl_irq_handle_error - called for HW or SW error interrupt from card
  */
 void iwl_irq_handle_error(struct iwl_priv *priv)
 {
+       unsigned int reload_msec;
+       unsigned long reload_jiffies;
+
        /* Set the FW error flag -- cleared on iwl_down */
        set_bit(STATUS_FW_ERROR, &priv->status);
 
@@ -994,6 +968,25 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
         * commands by clearing the INIT status bit */
        clear_bit(STATUS_READY, &priv->status);
 
+       /*
+        * If the firmware keeps reloading, it indicates that
+        * something is seriously wrong and that the firmware cannot
+        * recover from it.  Instead of retrying forever, which fills
+        * the syslog and hangs the system, just stop restarting it.
+        */
+       reload_jiffies = jiffies;
+       reload_msec = jiffies_to_msecs((long) reload_jiffies -
+                               (long) priv->reload_jiffies);
+       priv->reload_jiffies = reload_jiffies;
+       if (reload_msec <= IWL_MIN_RELOAD_DURATION) {
+               priv->reload_count++;
+               if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) {
+                       IWL_ERR(priv, "BUG_ON, Stop restarting\n");
+                       return;
+               }
+       } else
+               priv->reload_count = 0;
+
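
/*
 * Editor's sketch (not driver code) modelling the reload throttling
 * added above: count firmware errors that arrive back-to-back and stop
 * queueing restarts after a few of them instead of looping forever.
 * The thresholds below are assumptions for the example; the driver
 * takes IWL_MIN_RELOAD_DURATION and IWL_MAX_CONTINUE_RELOAD_CNT from
 * its own headers and measures time in jiffies rather than ms.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define MIN_RELOAD_DURATION_MS  1000    /* assumed threshold */
#define MAX_CONTINUE_RELOAD_CNT 4       /* assumed limit */

struct reload_state {
        long long last_ms;
        unsigned int count;
};

static long long now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (long long)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Returns true if a restart should still be attempted. */
static bool reload_allowed(struct reload_state *st)
{
        long long now = now_ms();
        long long delta = now - st->last_ms;

        st->last_ms = now;

        if (delta <= MIN_RELOAD_DURATION_MS) {
                if (++st->count >= MAX_CONTINUE_RELOAD_CNT)
                        return false;   /* too many back-to-back errors */
        } else {
                st->count = 0;
        }
        return true;
}

int main(void)
{
        struct reload_state st = { .last_ms = now_ms(), .count = 0 };
        int i;

        for (i = 0; i < 6; i++)
                printf("error %d: restart %s\n", i,
                       reload_allowed(&st) ? "queued" : "suppressed");
        return 0;
}
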
        if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
                IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
                          "Restarting adapter due to uCode error.\n");
@@ -1002,7 +995,6 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
                        queue_work(priv->workqueue, &priv->restart);
        }
 }
-EXPORT_SYMBOL(iwl_irq_handle_error);
 
 static int iwl_apm_stop_master(struct iwl_priv *priv)
 {
@@ -1039,7 +1031,6 @@ void iwl_apm_stop(struct iwl_priv *priv)
         */
        iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
 }
-EXPORT_SYMBOL(iwl_apm_stop);
 
 
 /*
@@ -1154,13 +1145,14 @@ int iwl_apm_init(struct iwl_priv *priv)
 out:
        return ret;
 }
-EXPORT_SYMBOL(iwl_apm_init);
 
 
 int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
 {
        int ret;
        s8 prev_tx_power;
+       bool defer;
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 
        lockdep_assert_held(&priv->mutex);
 
@@ -1188,10 +1180,15 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
        if (!iwl_is_ready_rf(priv))
                return -EIO;
 
-       /* scan complete use tx_power_next, need to be updated */
+       /* scan complete and commit_rxon use the tx_power_next value,
+        * so it always needs to be updated with the newest request */
        priv->tx_power_next = tx_power;
-       if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
-               IWL_DEBUG_INFO(priv, "Deferring tx power set while scanning\n");
+
+       /* do not set tx power when scanning or channel changing */
+       defer = test_bit(STATUS_SCANNING, &priv->status) ||
+               memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
+       if (defer && !force) {
+               IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
                return 0;
        }
 
@@ -1207,7 +1204,6 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
        }
        return ret;
 }
-EXPORT_SYMBOL(iwl_set_tx_power);
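
/*
 * Editor's sketch (not driver code) of the defer-and-remember pattern in
 * iwl_set_tx_power() above: the newest requested power always lands in
 * tx_power_next, but the command is only sent when the device is neither
 * scanning nor in the middle of an RXON change; commit_rxon later picks
 * up the stored value.  Names below are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct power_state {
        int tx_power_next;      /* latest requested value */
        int tx_power_active;    /* value actually programmed */
};

static void set_tx_power(struct power_state *ps, int power,
                         bool busy, bool force)
{
        ps->tx_power_next = power;      /* always remember the newest request */

        if (busy && !force) {
                printf("deferring tx power set\n");
                return;
        }

        ps->tx_power_active = ps->tx_power_next;
        printf("tx power set to %d dBm\n", ps->tx_power_active);
}

int main(void)
{
        struct power_state ps = { 0, 0 };

        set_tx_power(&ps, 14, true, false);     /* scanning: deferred */
        set_tx_power(&ps, 14, false, false);    /* idle: applied */
        return 0;
}
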
 
 void iwl_send_bt_config(struct iwl_priv *priv)
 {
@@ -1231,7 +1227,6 @@ void iwl_send_bt_config(struct iwl_priv *priv)
                             sizeof(struct iwl_bt_cmd), &bt_cmd))
                IWL_ERR(priv, "failed to send BT Coex Config\n");
 }
-EXPORT_SYMBOL(iwl_send_bt_config);
 
 int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
 {
@@ -1249,7 +1244,6 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
                                        sizeof(struct iwl_statistics_cmd),
                                        &statistics_cmd);
 }
-EXPORT_SYMBOL(iwl_send_statistics_request);
 
 void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
                           struct iwl_rx_mem_buffer *rxb)
@@ -1261,7 +1255,6 @@ void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
                     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
 #endif
 }
-EXPORT_SYMBOL(iwl_rx_pm_sleep_notif);
 
 void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
                                      struct iwl_rx_mem_buffer *rxb)
@@ -1273,7 +1266,6 @@ void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
                        get_cmd_string(pkt->hdr.cmd));
        iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
 }
-EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif);
 
 void iwl_rx_reply_error(struct iwl_priv *priv,
                        struct iwl_rx_mem_buffer *rxb)
@@ -1288,7 +1280,6 @@ void iwl_rx_reply_error(struct iwl_priv *priv,
                le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
                le32_to_cpu(pkt->u.err_resp.error_info));
 }
-EXPORT_SYMBOL(iwl_rx_reply_error);
 
 void iwl_clear_isr_stats(struct iwl_priv *priv)
 {
@@ -1340,7 +1331,6 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
        IWL_DEBUG_MAC80211(priv, "leave\n");
        return 0;
 }
-EXPORT_SYMBOL(iwl_mac_conf_tx);
 
 int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
 {
@@ -1348,7 +1338,6 @@ int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
 
        return priv->ibss_manager == IWL_IBSS_MANAGER;
 }
-EXPORT_SYMBOL_GPL(iwl_mac_tx_last_beacon);
 
 static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 {
@@ -1403,9 +1392,10 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
        struct iwl_rxon_context *tmp, *ctx = NULL;
        int err;
+       enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
 
        IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
-                          vif->type, vif->addr);
+                          viftype, vif->addr);
 
        mutex_lock(&priv->mutex);
 
@@ -1429,7 +1419,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
                        continue;
                }
 
-               if (!(possible_modes & BIT(vif->type)))
+               if (!(possible_modes & BIT(viftype)))
                        continue;
 
                /* have maybe usable context w/o interface */
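The switch from vif->type to ieee80211_vif_type_p2p(vif) matters because mac80211 folds the vif's p2p flag into the reported type: a P2P client or GO is matched against contexts whose allowed-mode bitmask advertises the P2P interface types rather than plain station/AP. A minimal sketch of that check; ctx_supports_vif() is a hypothetical helper name, not part of this patch:

#include <linux/bitops.h>
#include <net/mac80211.h>

/* Hypothetical helper: test a vif against a context's allowed
 * interface-mode bitmask using the P2P-aware type, mirroring the
 * BIT(viftype) test in iwl_mac_add_interface() above. */
static bool ctx_supports_vif(u32 possible_modes, struct ieee80211_vif *vif)
{
        /* for a P2P client/GO this yields NL80211_IFTYPE_P2P_CLIENT/_GO
         * instead of the plain station/AP type stored in vif->type */
        enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);

        return !!(possible_modes & BIT(viftype));
}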
@@ -1457,7 +1447,6 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        IWL_DEBUG_MAC80211(priv, "leave\n");
        return err;
 }
-EXPORT_SYMBOL(iwl_mac_add_interface);
 
 static void iwl_teardown_interface(struct iwl_priv *priv,
                                   struct ieee80211_vif *vif,
@@ -1510,7 +1499,6 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
        IWL_DEBUG_MAC80211(priv, "leave\n");
 
 }
-EXPORT_SYMBOL(iwl_mac_remove_interface);
 
 int iwl_alloc_txq_mem(struct iwl_priv *priv)
 {
@@ -1525,14 +1513,12 @@ int iwl_alloc_txq_mem(struct iwl_priv *priv)
        }
        return 0;
 }
-EXPORT_SYMBOL(iwl_alloc_txq_mem);
 
 void iwl_free_txq_mem(struct iwl_priv *priv)
 {
        kfree(priv->txq);
        priv->txq = NULL;
 }
-EXPORT_SYMBOL(iwl_free_txq_mem);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 
@@ -1571,7 +1557,6 @@ int iwl_alloc_traffic_mem(struct iwl_priv *priv)
        iwl_reset_traffic_log(priv);
        return 0;
 }
-EXPORT_SYMBOL(iwl_alloc_traffic_mem);
 
 void iwl_free_traffic_mem(struct iwl_priv *priv)
 {
@@ -1581,7 +1566,6 @@ void iwl_free_traffic_mem(struct iwl_priv *priv)
        kfree(priv->rx_traffic);
        priv->rx_traffic = NULL;
 }
-EXPORT_SYMBOL(iwl_free_traffic_mem);
 
 void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
                      u16 length, struct ieee80211_hdr *header)
@@ -1606,7 +1590,6 @@ void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
                        (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
        }
 }
-EXPORT_SYMBOL(iwl_dbg_log_tx_data_frame);
 
 void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
                      u16 length, struct ieee80211_hdr *header)
@@ -1631,7 +1614,6 @@ void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
                        (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
        }
 }
-EXPORT_SYMBOL(iwl_dbg_log_rx_data_frame);
 
 const char *get_mgmt_string(int cmd)
 {
@@ -1675,7 +1657,6 @@ void iwl_clear_traffic_stats(struct iwl_priv *priv)
 {
        memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
        memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
-       priv->led_tpt = 0;
 }
 
 /*
@@ -1768,9 +1749,7 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
                stats->data_cnt++;
                stats->data_bytes += len;
        }
-       iwl_leds_background(priv);
 }
-EXPORT_SYMBOL(iwl_update_stats);
 #endif
 
 static void iwl_force_rf_reset(struct iwl_priv *priv)
@@ -1909,7 +1888,6 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        mutex_unlock(&priv->mutex);
        return err;
 }
-EXPORT_SYMBOL(iwl_mac_change_interface);
 
 /*
  * On every watchdog tick we check (latest) time stamp. If it does not
@@ -1981,7 +1959,6 @@ void iwl_bg_watchdog(unsigned long data)
        mod_timer(&priv->watchdog, jiffies +
                  msecs_to_jiffies(IWL_WD_TICK(timeout)));
 }
-EXPORT_SYMBOL(iwl_bg_watchdog);
 
 void iwl_setup_watchdog(struct iwl_priv *priv)
 {
@@ -1993,7 +1970,6 @@ void iwl_setup_watchdog(struct iwl_priv *priv)
        else
                del_timer(&priv->watchdog);
 }
-EXPORT_SYMBOL(iwl_setup_watchdog);
 
 /*
  * extended beacon time format
@@ -2019,7 +1995,6 @@ u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)
 
        return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
 }
-EXPORT_SYMBOL(iwl_usecs_to_beacons);
 
 /* base is usually what we get from ucode with each received frame,
  * the same as HW timer counter counting down
@@ -2047,7 +2022,6 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
 
        return cpu_to_le32(res);
 }
-EXPORT_SYMBOL(iwl_add_beacon_time);
 
 #ifdef CONFIG_PM
 
@@ -2067,7 +2041,6 @@ int iwl_pci_suspend(struct device *device)
 
        return 0;
 }
-EXPORT_SYMBOL(iwl_pci_suspend);
 
 int iwl_pci_resume(struct device *device)
 {
@@ -2096,7 +2069,6 @@ int iwl_pci_resume(struct device *device)
 
        return 0;
 }
-EXPORT_SYMBOL(iwl_pci_resume);
 
 const struct dev_pm_ops iwl_pm_ops = {
        .suspend = iwl_pci_suspend,
@@ -2106,6 +2078,5 @@ const struct dev_pm_ops iwl_pm_ops = {
        .poweroff = iwl_pci_suspend,
        .restore = iwl_pci_resume,
 };
-EXPORT_SYMBOL(iwl_pm_ops);
 
 #endif /* CONFIG_PM */
index a3474376fdbc72afe78225f62551adf44cb7786e..d47f3a87fce49cd4bd1c959eb9c9b9a5cf67e99b 100644 (file)
@@ -210,12 +210,7 @@ struct iwl_lib_ops {
 
        /* temperature */
        struct iwl_temp_ops temp_ops;
-       /* check for plcp health */
-       bool (*check_plcp_health)(struct iwl_priv *priv,
-                                       struct iwl_rx_packet *pkt);
-       /* check for ack health */
-       bool (*check_ack_health)(struct iwl_priv *priv,
-                                       struct iwl_rx_packet *pkt);
+
        int (*txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
        void (*dev_txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
 
@@ -227,8 +222,6 @@ struct iwl_lib_ops {
 
 struct iwl_led_ops {
        int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
-       int (*on)(struct iwl_priv *priv);
-       int (*off)(struct iwl_priv *priv);
 };
 
 /* NIC specific ops */
@@ -263,6 +256,8 @@ struct iwl_mod_params {
        int amsdu_size_8K;      /* def: 1 = enable 8K amsdu size */
        int antenna;            /* def: 0 = both antennas (use diversity) */
        int restart_fw;         /* def: 1 = restart firmware */
+       bool plcp_check;        /* def: true = enable plcp health check */
+       bool ack_check;         /* def: false = disable ack health check */
 };
 
 /*
@@ -307,7 +302,6 @@ struct iwl_base_params {
        u16 led_compensation;
        const bool broken_powersave;
        int chain_noise_num_beacons;
-       const bool supports_idle;
        bool adv_thermal_throttle;
        bool support_ct_kill_exit;
        const bool support_wimax_coexist;
@@ -342,6 +336,7 @@ struct iwl_bt_params {
        u8 ampdu_factor;
        u8 ampdu_density;
        bool bt_sco_disable;
+       bool bt_session_2;
 };
 /*
  * @use_rts_for_aggregation: use rts/cts protection for HT traffic
@@ -366,6 +361,7 @@ struct iwl_ht_params {
  * @adv_pm: advance power management
  * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
  * @internal_wimax_coex: internal wifi/wimax combo device
+ * @iq_invert: I/Q inversion
  *
  * We enable the driver to be backward compatible wrt API version. The
  * driver specifies which APIs it supports (with @ucode_api_max being the
@@ -415,6 +411,7 @@ struct iwl_cfg {
        const bool adv_pm;
        const bool rx_with_siso_diversity;
        const bool internal_wimax_coex;
+       const bool iq_invert;
 };
 
 /***************************
@@ -494,18 +491,6 @@ static inline void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
 static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx,
                                    __le16 fc, u16 len)
 {
-       struct traffic_stats    *stats;
-
-       if (is_tx)
-               stats = &priv->tx_stats;
-       else
-               stats = &priv->rx_stats;
-
-       if (ieee80211_is_data(fc)) {
-               /* data */
-               stats->data_bytes += len;
-       }
-       iwl_leds_background(priv);
 }
 #endif
 /*****************************************************
@@ -522,6 +507,7 @@ void iwl_rx_reply_error(struct iwl_priv *priv,
 * RX
 ******************************************************/
 void iwl_cmd_queue_free(struct iwl_priv *priv);
+void iwl_cmd_queue_unmap(struct iwl_priv *priv);
 int iwl_rx_queue_alloc(struct iwl_priv *priv);
 void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
                                  struct iwl_rx_queue *q);
@@ -530,8 +516,6 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
 /* Handlers */
 void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
                                          struct iwl_rx_mem_buffer *rxb);
-void iwl_recover_from_statistics(struct iwl_priv *priv,
-                               struct iwl_rx_packet *pkt);
 void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
 void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
 
@@ -546,6 +530,7 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                        int slots_num, u32 txq_id);
 void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
+void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
 void iwl_setup_watchdog(struct iwl_priv *priv);
 /*****************************************************
  * TX power
@@ -755,6 +740,17 @@ static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
        return priv->hw->wiphy->bands[band];
 }
 
+static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
+{
+       return priv->cfg->bt_params &&
+              priv->cfg->bt_params->advanced_bt_coexist;
+}
+
+static inline bool iwl_bt_statistics(struct iwl_priv *priv)
+{
+       return priv->cfg->bt_params && priv->cfg->bt_params->bt_statistics;
+}
+
 extern bool bt_coex_active;
 extern bool bt_siso_mode;
 
index b80bf7dff55bda64556ddb63de0c816bdddfe546..f52bc040bcbf96fb5d059c279f8acc8a8f19140f 100644 (file)
 
 
 /* HW REV */
-#define CSR_HW_REV_TYPE_MSK            (0x00000F0)
+#define CSR_HW_REV_TYPE_MSK            (0x00001F0)
 #define CSR_HW_REV_TYPE_3945           (0x00000D0)
 #define CSR_HW_REV_TYPE_4965           (0x0000000)
 #define CSR_HW_REV_TYPE_5300           (0x0000020)
 #define CSR_HW_REV_TYPE_1000           (0x0000060)
 #define CSR_HW_REV_TYPE_6x00           (0x0000070)
 #define CSR_HW_REV_TYPE_6x50           (0x0000080)
-#define CSR_HW_REV_TYPE_6x50g2         (0x0000084)
-#define CSR_HW_REV_TYPE_6x00g2         (0x00000B0)
-#define CSR_HW_REV_TYPE_NONE           (0x00000F0)
+#define CSR_HW_REV_TYPE_6150           (0x0000084)
+#define CSR_HW_REV_TYPE_6x05          (0x00000B0)
+#define CSR_HW_REV_TYPE_6x30          CSR_HW_REV_TYPE_6x05
+#define CSR_HW_REV_TYPE_6x35          CSR_HW_REV_TYPE_6x05
+#define CSR_HW_REV_TYPE_2x30          (0x00000C0)
+#define CSR_HW_REV_TYPE_2x00          (0x0000100)
+#define CSR_HW_REV_TYPE_200           (0x0000110)
+#define CSR_HW_REV_TYPE_230           (0x0000120)
+#define CSR_HW_REV_TYPE_NONE           (0x00001F0)
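The type mask above had to grow together with the new device IDs: 0x100, 0x110 and 0x120 all set bit 8, which the old 0x00000F0 mask would discard, making a 2x00-series part indistinguishable from a 4965 (type 0x000). A small self-contained check of that arithmetic, assuming the type is extracted by masking the HW revision register as the macro name suggests:

#include <stdio.h>

#define OLD_CSR_HW_REV_TYPE_MSK 0x00000F0
#define NEW_CSR_HW_REV_TYPE_MSK 0x00001F0
#define CSR_HW_REV_TYPE_2x00    0x0000100

int main(void)
{
        unsigned int hw_rev = CSR_HW_REV_TYPE_2x00;

        /* old mask drops bit 8: 0x100 & 0x0F0 == 0x000, i.e. looks like 4965 */
        printf("old mask: 0x%03x\n", hw_rev & OLD_CSR_HW_REV_TYPE_MSK);
        /* new mask keeps it:   0x100 & 0x1F0 == 0x100 */
        printf("new mask: 0x%03x\n", hw_rev & NEW_CSR_HW_REV_TYPE_MSK);
        return 0;
}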
 
 /* EEPROM REG */
 #define CSR_EEPROM_REG_READ_VALID_MSK  (0x00000001)
 #define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6       (0x00000004)
 #define CSR_GP_DRIVER_REG_BIT_6050_1x2             (0x00000008)
 
+#define CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER       (0x00000080)
+
 /* GIO Chicken Bits (PCI Express bus link power management) */
 #define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX  (0x00800000)
 #define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER  (0x20000000)
index 6fe80b5e7a159ab0b91450314e1cd89d7feedcd5..8842411f1cf312019c5edc75987efc198720760c 100644 (file)
@@ -207,18 +207,19 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
        return ret;
 }
 
-#define BYTE1_MASK 0x000000ff;
-#define BYTE2_MASK 0x0000ffff;
-#define BYTE3_MASK 0x00ffffff;
 static ssize_t iwl_dbgfs_sram_read(struct file *file,
                                        char __user *user_buf,
                                        size_t count, loff_t *ppos)
 {
-       u32 val;
+       u32 val = 0;
        char *buf;
        ssize_t ret;
-       int i;
+       int i = 0;
+       bool device_format = false;
+       int offset = 0;
+       int len = 0;
        int pos = 0;
+       int sram;
        struct iwl_priv *priv = file->private_data;
        size_t bufsz;
 
@@ -230,35 +231,62 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
                else
                        priv->dbgfs_sram_len = priv->ucode_data.len;
        }
-       bufsz =  30 + priv->dbgfs_sram_len * sizeof(char) * 10;
+       len = priv->dbgfs_sram_len;
+
+       if (len == -4) {
+               device_format = true;
+               len = 4;
+       }
+
+       bufsz =  50 + len * 4;
        buf = kmalloc(bufsz, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
+
        pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
-                       priv->dbgfs_sram_len);
+                        len);
        pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
                        priv->dbgfs_sram_offset);
-       for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
-               val = iwl_read_targ_mem(priv, priv->dbgfs_sram_offset + \
-                                       priv->dbgfs_sram_len - i);
-               if (i < 4) {
-                       switch (i) {
-                       case 1:
-                               val &= BYTE1_MASK;
-                               break;
-                       case 2:
-                               val &= BYTE2_MASK;
-                               break;
-                       case 3:
-                               val &= BYTE3_MASK;
-                               break;
-                       }
+
+       /* adjust sram address since reads are only on even u32 boundaries */
+       offset = priv->dbgfs_sram_offset & 0x3;
+       sram = priv->dbgfs_sram_offset & ~0x3;
+
+       /* read the first u32 from sram */
+       val = iwl_read_targ_mem(priv, sram);
+
+       for (; len; len--) {
+               /* put the address at the start of every line */
+               if (i == 0)
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                               "%08X: ", sram + offset);
+
+               if (device_format)
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                               "%02x", (val >> (8 * (3 - offset))) & 0xff);
+               else
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                               "%02x ", (val >> (8 * offset)) & 0xff);
+
+               /* if all bytes processed, read the next u32 from sram */
+               if (++offset == 4) {
+                       sram += 4;
+                       offset = 0;
+                       val = iwl_read_targ_mem(priv, sram);
                }
-               if (!(i % 16))
+
+               /* put in extra spaces and split lines for human readability */
+               if (++i == 16) {
+                       i = 0;
                        pos += scnprintf(buf + pos, bufsz - pos, "\n");
-               pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
+               } else if (!(i & 7)) {
+                       pos += scnprintf(buf + pos, bufsz - pos, "   ");
+               } else if (!(i & 3)) {
+                       pos += scnprintf(buf + pos, bufsz - pos, " ");
+               }
        }
-       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+       if (i)
+               pos += scnprintf(buf + pos, bufsz - pos, "\n");
 
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
        kfree(buf);
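The rewritten dump loop above prints one byte per iteration, pulling bytes out of the cached u32 either in host order ("%02x " with a trailing space) or, when the write handler below is given a bare offset and sets dbgfs_sram_len to -4, in device order ("%02x" packed). A self-contained illustration of the two extraction expressions, using an arbitrary example word:

#include <stdio.h>

int main(void)
{
        unsigned int val = 0xAABBCCDD;  /* arbitrary example word */
        int offset;

        /* host-order dump: low byte first -> "dd cc bb aa" */
        for (offset = 0; offset < 4; offset++)
                printf("%02x ", (val >> (8 * offset)) & 0xff);
        printf("\n");

        /* device-format dump: high byte first, packed -> "aabbccdd" */
        for (offset = 0; offset < 4; offset++)
                printf("%02x", (val >> (8 * (3 - offset))) & 0xff);
        printf("\n");
        return 0;
}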
@@ -282,6 +310,9 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
        if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
                priv->dbgfs_sram_offset = offset;
                priv->dbgfs_sram_len = len;
+       } else if (sscanf(buf, "%x", &offset) == 1) {
+               priv->dbgfs_sram_offset = offset;
+               priv->dbgfs_sram_len = -4;
        } else {
                priv->dbgfs_sram_offset = 0;
                priv->dbgfs_sram_len = 0;
@@ -668,29 +699,6 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
-static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
-                                 size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       int pos = 0;
-       char buf[256];
-       const size_t bufsz = sizeof(buf);
-
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "allow blinking: %s\n",
-                        (priv->allow_blinking) ? "True" : "False");
-       if (priv->allow_blinking) {
-               pos += scnprintf(buf + pos, bufsz - pos,
-                                "Led blinking rate: %u\n",
-                                priv->last_blink_rate);
-               pos += scnprintf(buf + pos, bufsz - pos,
-                                "Last blink time: %lu\n",
-                                priv->last_blink_time);
-       }
-
-       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
 static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
                                char __user *user_buf,
                                size_t count, loff_t *ppos)
@@ -856,7 +864,6 @@ DEBUGFS_READ_FILE_OPS(channels);
 DEBUGFS_READ_FILE_OPS(status);
 DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
 DEBUGFS_READ_FILE_OPS(qos);
-DEBUGFS_READ_FILE_OPS(led);
 DEBUGFS_READ_FILE_OPS(thermal_throttling);
 DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
@@ -1580,10 +1587,9 @@ static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
                         "last traffic notif: %d\n",
                priv->bt_status ? "On" : "Off", priv->last_bt_traffic_load);
        pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, "
-                        "sco_active: %d, kill_ack_mask: %x, "
-                        "kill_cts_mask: %x\n",
-               priv->bt_ch_announce, priv->bt_sco_active,
-               priv->kill_ack_mask, priv->kill_cts_mask);
+                        "kill_ack_mask: %x, kill_cts_mask: %x\n",
+               priv->bt_ch_announce, priv->kill_ack_mask,
+               priv->kill_cts_mask);
 
        pos += scnprintf(buf + pos, bufsz - pos, "bluetooth traffic load: ");
        switch (priv->bt_traffic_load) {
@@ -1725,7 +1731,6 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
        DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
        DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
        DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
-       DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR);
        if (!priv->cfg->base_params->broken_powersave) {
                DEBUGFS_ADD_FILE(sleep_level_override, dir_data,
                                 S_IWUSR | S_IRUSR);
@@ -1759,13 +1764,13 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
                DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
        if (priv->cfg->base_params->ucode_tracing)
                DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
-       if (priv->cfg->bt_params && priv->cfg->bt_params->bt_statistics)
+       if (iwl_bt_statistics(priv))
                DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR);
        DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR);
        DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
        DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
        DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
-       if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
+       if (iwl_advanced_bt_coexist(priv))
                DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
        if (priv->cfg->base_params->sensitivity_calib_by_driver)
                DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
@@ -1783,7 +1788,6 @@ err:
        iwl_dbgfs_unregister(priv);
        return -ENOMEM;
 }
-EXPORT_SYMBOL(iwl_dbgfs_register);
 
 /**
  * Remove the debugfs files and directories
@@ -1797,7 +1801,6 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
        debugfs_remove_recursive(priv->debugfs_dir);
        priv->debugfs_dir = NULL;
 }
-EXPORT_SYMBOL(iwl_dbgfs_unregister);
 
 
 
index 8dda67850af45174ed371b820934ccadd5dba1b4..58165c769cf1b976cfb213558717c765b1bbce28 100644 (file)
@@ -34,6 +34,8 @@
 
 #include <linux/pci.h> /* for struct pci_device_id */
 #include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/leds.h>
 #include <net/ieee80211_radiotap.h>
 
 #include "iwl-eeprom.h"
 #include "iwl-prph.h"
 #include "iwl-fh.h"
 #include "iwl-debug.h"
-#include "iwl-4965-hw.h"
-#include "iwl-3945-hw.h"
 #include "iwl-agn-hw.h"
 #include "iwl-led.h"
 #include "iwl-power.h"
 #include "iwl-agn-rs.h"
 #include "iwl-agn-tt.h"
 
+#define U32_PAD(n)             ((4-(n))&0x3)
+
 struct iwl_tx_queue;
 
 /* CT-KILL constants */
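The new U32_PAD() macro added above computes how many bytes are needed to round a length up to the next u32 boundary. A quick self-contained check of a few values (exact multiples of four need no padding):

#include <stdio.h>

#define U32_PAD(n)      ((4 - (n)) & 0x3)

int main(void)
{
        int n;

        /* prints: 0->0 1->3 2->2 3->1 4->0 5->3 6->2 7->1 */
        for (n = 0; n <= 7; n++)
                printf("%d->%d ", n, U32_PAD(n));
        printf("\n");
        return 0;
}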
@@ -136,7 +138,7 @@ struct iwl_queue {
                                * space more than this */
        int high_mark;         /* high watermark, stop queue if free
                                * space less than this */
-} __packed;
+};
 
 /* One for each TFD */
 struct iwl_tx_info {
@@ -507,6 +509,7 @@ struct iwl_station_priv {
        atomic_t pending_frames;
        bool client;
        bool asleep;
+       u8 max_agg_bufsize;
 };
 
 /**
@@ -995,7 +998,6 @@ struct reply_agg_tx_error_statistics {
        u32 unknown;
 };
 
-#ifdef CONFIG_IWLWIFI_DEBUGFS
 /* management statistics */
 enum iwl_mgmt_stats {
        MANAGEMENT_ASSOC_REQ = 0,
@@ -1026,16 +1028,13 @@ enum iwl_ctrl_stats {
 };
 
 struct traffic_stats {
+#ifdef CONFIG_IWLWIFI_DEBUGFS
        u32 mgmt[MANAGEMENT_MAX];
        u32 ctrl[CONTROL_MAX];
        u32 data_cnt;
        u64 data_bytes;
-};
-#else
-struct traffic_stats {
-       u64 data_bytes;
-};
 #endif
+};
 
 /*
  * iwl_switch_rxon: "channel switch" structure
@@ -1111,6 +1110,11 @@ struct iwl_event_log {
 /* BT Antenna Coupling Threshold (dB) */
 #define IWL_BT_ANTENNA_COUPLING_THRESHOLD      (35)
 
+/* Firmware reload counter and timestamp */
+#define IWL_MIN_RELOAD_DURATION                1000 /* 1000 ms */
+#define IWL_MAX_CONTINUE_RELOAD_CNT    4
+
+
 enum iwl_reset {
        IWL_RF_RESET = 0,
        IWL_FW_RESET,
@@ -1139,6 +1143,33 @@ struct iwl_force_reset {
  */
 #define IWLAGN_EXT_BEACON_TIME_POS     22
 
+/**
+ * struct iwl_notification_wait - notification wait entry
+ * @list: list head for global list
+ * @fn: function called with the notification
+ * @cmd: command ID
+ *
+ * This structure is not used directly; to wait for a
+ * notification, declare it on the stack and call
+ * iwlagn_init_notification_wait() with the appropriate
+ * parameters. Then do whatever will cause the ucode
+ * to notify the driver, and call
+ * iwlagn_wait_notification() to wait for that notification.
+ *
+ * Each notification is one-shot. If at some point we
+ * need to support multi-shot notifications (which
+ * can't be allocated on the stack) we need to modify
+ * the code for them.
+ */
+struct iwl_notification_wait {
+       struct list_head list;
+
+       void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt);
+
+       u8 cmd;
+       bool triggered;
+};
+
 enum iwl_rxon_context_id {
        IWL_RXON_CTX_BSS,
        IWL_RXON_CTX_PAN,
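To make the kernel-doc on struct iwl_notification_wait above concrete, here is a minimal sketch of the described pattern. The signatures of iwlagn_init_notification_wait() and iwlagn_wait_notification(), the timeout argument and REPLY_EXAMPLE_NOTIF are assumptions for illustration only; just the function names and the on-stack, one-shot usage come from the patch.

/* All signatures below are assumed for illustration. */
static void example_notif_fn(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
{
        /* runs when the ucode sends the awaited notification;
         * stash whatever the caller needs before it wakes up */
}

static int example_wait_for_notif(struct iwl_priv *priv)
{
        struct iwl_notification_wait wait;      /* declared on the stack */

        iwlagn_init_notification_wait(priv, &wait, REPLY_EXAMPLE_NOTIF,
                                      example_notif_fn);

        /* ... send the host command that triggers the notification ... */

        /* blocks until example_notif_fn has run (or a timeout elapses) */
        return iwlagn_wait_notification(priv, &wait, HZ);
}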
@@ -1236,6 +1267,10 @@ struct iwl_priv {
        /* force reset */
        struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
 
+       /* firmware reload counter and timestamp */
+       unsigned long reload_jiffies;
+       int reload_count;
+
        /* we allocate array of iwl_channel_info for NIC's valid channels.
         *    Access via channel # using indirect index array */
        struct iwl_channel_info *channel_info;  /* channel info array */
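The new reload_jiffies/reload_count fields pair with IWL_MIN_RELOAD_DURATION and IWL_MAX_CONTINUE_RELOAD_CNT defined earlier in this patch. How they are consumed is not shown in this hunk; the following is only a plausible sketch of such throttling, with iwl_check_fw_reload() a hypothetical name:

/* Hypothetical sketch (not from this patch): refuse to keep reloading the
 * firmware once restarts have arrived back-to-back faster than
 * IWL_MIN_RELOAD_DURATION more than IWL_MAX_CONTINUE_RELOAD_CNT times. */
static bool iwl_check_fw_reload(struct iwl_priv *priv)
{
        if (time_after(jiffies, priv->reload_jiffies +
                       msecs_to_jiffies(IWL_MIN_RELOAD_DURATION))) {
                /* enough time has passed, treat this as an isolated restart */
                priv->reload_count = 0;
        } else if (++priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) {
                return false;   /* too many rapid reloads, give up */
        }

        priv->reload_jiffies = jiffies;
        return true;
}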
@@ -1310,11 +1345,6 @@ struct iwl_priv {
        struct iwl_init_alive_resp card_alive_init;
        struct iwl_alive_resp card_alive;
 
-       unsigned long last_blink_time;
-       u8 last_blink_rate;
-       u8 allow_blinking;
-       u64 led_tpt;
-
        u16 active_rate;
 
        u8 start_calib;
@@ -1463,6 +1493,17 @@ struct iwl_priv {
                        struct iwl_bt_notif_statistics delta_statistics_bt;
                        struct iwl_bt_notif_statistics max_delta_bt;
 #endif
+
+                       /* notification wait support */
+                       struct list_head notif_waits;
+                       spinlock_t notif_wait_lock;
+                       wait_queue_head_t notif_waitq;
+
+                       /* remain-on-channel offload support */
+                       struct ieee80211_channel *hw_roc_channel;
+                       struct delayed_work hw_roc_work;
+                       enum nl80211_channel_type hw_roc_chantype;
+                       int hw_roc_duration;
                } _agn;
 #endif
        };
@@ -1472,7 +1513,6 @@ struct iwl_priv {
        u8 bt_status;
        u8 bt_traffic_load, last_bt_traffic_load;
        bool bt_ch_announce;
-       bool bt_sco_active;
        bool bt_full_concurrent;
        bool bt_ant_couple_ok;
        __le32 kill_ack_mask;
@@ -1547,6 +1587,10 @@ struct iwl_priv {
        bool hw_ready;
 
        struct iwl_event_log event_log;
+
+       struct led_classdev led;
+       unsigned long blink_on, blink_off;
+       bool led_registered;
 }; /*iwl_priv */
 
 static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
index 358cfd7e5af190bf8c17cf059329d73d279e71a6..833194a2c6397dab4a7c41b78cbada46a1622d87 100644 (file)
@@ -222,7 +222,6 @@ const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
        BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
        return &priv->eeprom[offset];
 }
-EXPORT_SYMBOL(iwlcore_eeprom_query_addr);
 
 static int iwl_init_otp_access(struct iwl_priv *priv)
 {
@@ -382,7 +381,6 @@ const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
 {
        return priv->cfg->ops->lib->eeprom_ops.query_addr(priv, offset);
 }
-EXPORT_SYMBOL(iwl_eeprom_query_addr);
 
 u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
 {
@@ -390,7 +388,6 @@ u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
                return 0;
        return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
 }
-EXPORT_SYMBOL(iwl_eeprom_query16);
 
 /**
  * iwl_eeprom_init - read EEPROM contents
@@ -509,14 +506,12 @@ err:
 alloc_err:
        return ret;
 }
-EXPORT_SYMBOL(iwl_eeprom_init);
 
 void iwl_eeprom_free(struct iwl_priv *priv)
 {
        kfree(priv->eeprom);
        priv->eeprom = NULL;
 }
-EXPORT_SYMBOL(iwl_eeprom_free);
 
 static void iwl_init_band_reference(const struct iwl_priv *priv,
                        int eep_band, int *eeprom_ch_count,
@@ -779,7 +774,6 @@ int iwl_init_channel_map(struct iwl_priv *priv)
 
        return 0;
 }
-EXPORT_SYMBOL(iwl_init_channel_map);
 
 /*
  * iwl_free_channel_map - undo allocations in iwl_init_channel_map
@@ -789,7 +783,6 @@ void iwl_free_channel_map(struct iwl_priv *priv)
        kfree(priv->channel_info);
        priv->channel_count = 0;
 }
-EXPORT_SYMBOL(iwl_free_channel_map);
 
 /**
  * iwl_get_channel_info - Find driver's private channel info
@@ -818,4 +811,3 @@ const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
 
        return NULL;
 }
-EXPORT_SYMBOL(iwl_get_channel_info);
index 9e6f31355eee802b3c3be5f9c81ce977a162450c..98aa8af01192bbe561b1bbf97d208dd3bdd5a54f 100644 (file)
@@ -247,13 +247,26 @@ struct iwl_eeprom_enhanced_txpwr {
 #define EEPROM_6050_TX_POWER_VERSION    (4)
 #define EEPROM_6050_EEPROM_VERSION     (0x532)
 
-/* 6x50g2 Specific */
-#define EEPROM_6050G2_TX_POWER_VERSION    (6)
-#define EEPROM_6050G2_EEPROM_VERSION   (0x553)
+/* 6150 Specific */
+#define EEPROM_6150_TX_POWER_VERSION    (6)
+#define EEPROM_6150_EEPROM_VERSION     (0x553)
+
+/* 6x05 Specific */
+#define EEPROM_6005_TX_POWER_VERSION    (6)
+#define EEPROM_6005_EEPROM_VERSION     (0x709)
+
+/* 6x30 Specific */
+#define EEPROM_6030_TX_POWER_VERSION    (6)
+#define EEPROM_6030_EEPROM_VERSION     (0x709)
+
+/* 2x00 Specific */
+#define EEPROM_2000_TX_POWER_VERSION    (6)
+#define EEPROM_2000_EEPROM_VERSION     (0x805)
+
+/* 6x35 Specific */
+#define EEPROM_6035_TX_POWER_VERSION    (6)
+#define EEPROM_6035_EEPROM_VERSION     (0x753)
 
-/* 6x00g2 Specific */
-#define EEPROM_6000G2_TX_POWER_VERSION    (6)
-#define EEPROM_6000G2_EEPROM_VERSION   (0x709)
 
 /* OTP */
 /* lower blocks contain EEPROM image and calibration data */
@@ -264,6 +277,7 @@ struct iwl_eeprom_enhanced_txpwr {
 #define OTP_MAX_LL_ITEMS_1000          (3)     /* OTP blocks for 1000 */
 #define OTP_MAX_LL_ITEMS_6x00          (4)     /* OTP blocks for 6x00 */
 #define OTP_MAX_LL_ITEMS_6x50          (7)     /* OTP blocks for 6x50 */
+#define OTP_MAX_LL_ITEMS_2x00          (4)     /* OTP blocks for 2x00 */
 
 /* 2.4 GHz */
 extern const u8 iwl_eeprom_band_1[14];
index c373b53babeaee1cd1c4e7781151e683e305f2ba..02499f684683eea87d2f962da3d985f3ec346a8a 100644 (file)
@@ -108,12 +108,12 @@ const char *get_cmd_string(u8 cmd)
                IWL_CMD(REPLY_WIPAN_WEPKEY);
                IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
                IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
+               IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
        default:
                return "UNKNOWN";
 
        }
 }
-EXPORT_SYMBOL(get_cmd_string);
 
 #define HOST_COMPLETE_TIMEOUT (HZ / 2)
 
@@ -252,7 +252,6 @@ out:
        mutex_unlock(&priv->sync_cmd_mutex);
        return ret;
 }
-EXPORT_SYMBOL(iwl_send_cmd_sync);
 
 int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 {
@@ -261,7 +260,6 @@ int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 
        return iwl_send_cmd_sync(priv, cmd);
 }
-EXPORT_SYMBOL(iwl_send_cmd);
 
 int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
 {
@@ -273,7 +271,6 @@ int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
 
        return iwl_send_cmd_sync(priv, &cmd);
 }
-EXPORT_SYMBOL(iwl_send_cmd_pdu);
 
 int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
                           u8 id, u16 len, const void *data,
@@ -292,4 +289,3 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
 
        return iwl_send_cmd_async(priv, &cmd);
 }
-EXPORT_SYMBOL(iwl_send_cmd_pdu_async);
index 46ccdf406e8e781318760e40a3a5cae54d2992ed..d7f2a0bb32c9e2ec8018eec464db7c0becb5df1d 100644 (file)
@@ -48,31 +48,19 @@ module_param(led_mode, int, S_IRUGO);
 MODULE_PARM_DESC(led_mode, "0=system default, "
                "1=On(RF On)/Off(RF Off), 2=blinking");
 
-static const struct {
-       u16 tpt;        /* Mb/s */
-       u8 on_time;
-       u8 off_time;
-} blink_tbl[] =
-{
-       {300, 25, 25},
-       {200, 40, 40},
-       {100, 55, 55},
-       {70, 65, 65},
-       {50, 75, 75},
-       {20, 85, 85},
-       {10, 95, 95},
-       {5, 110, 110},
-       {1, 130, 130},
-       {0, 167, 167},
-       /* SOLID_ON */
-       {-1, IWL_LED_SOLID, 0}
+static const struct ieee80211_tpt_blink iwl_blink[] = {
+       { .throughput = 0 * 1024 - 1, .blink_time = 334 },
+       { .throughput = 1 * 1024 - 1, .blink_time = 260 },
+       { .throughput = 5 * 1024 - 1, .blink_time = 220 },
+       { .throughput = 10 * 1024 - 1, .blink_time = 190 },
+       { .throughput = 20 * 1024 - 1, .blink_time = 170 },
+       { .throughput = 50 * 1024 - 1, .blink_time = 150 },
+       { .throughput = 70 * 1024 - 1, .blink_time = 130 },
+       { .throughput = 100 * 1024 - 1, .blink_time = 110 },
+       { .throughput = 200 * 1024 - 1, .blink_time = 80 },
+       { .throughput = 300 * 1024 - 1, .blink_time = 50 },
 };
 
-#define IWL_1MB_RATE (128 * 1024)
-#define IWL_LED_THRESHOLD (16)
-#define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /* exclude SOLID_ON */
-#define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1)
-
 /*
  * Adjust led blink rate to compensate on a MAC Clock difference on every HW
  * Led blink rate analysis showed an average deviation of 0% on 3945,
@@ -97,133 +85,102 @@ static inline u8 iwl_blink_compensation(struct iwl_priv *priv,
 }
 
 /* Set led pattern command */
-static int iwl_led_pattern(struct iwl_priv *priv, unsigned int idx)
+static int iwl_led_cmd(struct iwl_priv *priv,
+                      unsigned long on,
+                      unsigned long off)
 {
        struct iwl_led_cmd led_cmd = {
                .id = IWL_LED_LINK,
                .interval = IWL_DEF_LED_INTRVL
        };
+       int ret;
+
+       if (!test_bit(STATUS_READY, &priv->status))
+               return -EBUSY;
 
-       BUG_ON(idx > IWL_MAX_BLINK_TBL);
+       if (priv->blink_on == on && priv->blink_off == off)
+               return 0;
 
-       IWL_DEBUG_LED(priv, "Led blink time compensation= %u\n",
+       IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
                        priv->cfg->base_params->led_compensation);
-       led_cmd.on =
-               iwl_blink_compensation(priv, blink_tbl[idx].on_time,
+       led_cmd.on = iwl_blink_compensation(priv, on,
                                priv->cfg->base_params->led_compensation);
-       led_cmd.off =
-               iwl_blink_compensation(priv, blink_tbl[idx].off_time,
+       led_cmd.off = iwl_blink_compensation(priv, off,
                                priv->cfg->base_params->led_compensation);
 
-       return priv->cfg->ops->led->cmd(priv, &led_cmd);
+       ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
+       if (!ret) {
+               priv->blink_on = on;
+               priv->blink_off = off;
+       }
+       return ret;
 }
 
-int iwl_led_start(struct iwl_priv *priv)
+static void iwl_led_brightness_set(struct led_classdev *led_cdev,
+                                  enum led_brightness brightness)
 {
-       return priv->cfg->ops->led->on(priv);
-}
-EXPORT_SYMBOL(iwl_led_start);
+       struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
+       unsigned long on = 0;
 
-int iwl_led_associate(struct iwl_priv *priv)
-{
-       IWL_DEBUG_LED(priv, "Associated\n");
-       if (priv->cfg->led_mode == IWL_LED_BLINK)
-               priv->allow_blinking = 1;
-       priv->last_blink_time = jiffies;
+       if (brightness > 0)
+               on = IWL_LED_SOLID;
 
-       return 0;
+       iwl_led_cmd(priv, on, 0);
 }
-EXPORT_SYMBOL(iwl_led_associate);
 
-int iwl_led_disassociate(struct iwl_priv *priv)
+static int iwl_led_blink_set(struct led_classdev *led_cdev,
+                            unsigned long *delay_on,
+                            unsigned long *delay_off)
 {
-       priv->allow_blinking = 0;
+       struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
 
-       return 0;
+       return iwl_led_cmd(priv, *delay_on, *delay_off);
 }
-EXPORT_SYMBOL(iwl_led_disassociate);
 
-/*
- * calculate blink rate according to last second Tx/Rx activities
- */
-static int iwl_get_blink_rate(struct iwl_priv *priv)
-{
-       int i;
-       /* count both tx and rx traffic to be able to
-        * handle traffic in either direction
-        */
-       u64 current_tpt = priv->tx_stats.data_bytes +
-                         priv->rx_stats.data_bytes;
-       s64 tpt = current_tpt - priv->led_tpt;
-
-       if (tpt < 0) /* wraparound */
-               tpt = -tpt;
-
-       IWL_DEBUG_LED(priv, "tpt %lld current_tpt %llu\n",
-               (long long)tpt,
-               (unsigned long long)current_tpt);
-       priv->led_tpt = current_tpt;
-
-       if (!priv->allow_blinking)
-               i = IWL_MAX_BLINK_TBL;
-       else
-               for (i = 0; i < IWL_MAX_BLINK_TBL; i++)
-                       if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE))
-                               break;
-
-       IWL_DEBUG_LED(priv, "LED BLINK IDX=%d\n", i);
-       return i;
-}
-
-/*
- * this function called from handler. Since setting Led command can
- * happen very frequent we postpone led command to be called from
- * REPLY handler so we know ucode is up
- */
-void iwl_leds_background(struct iwl_priv *priv)
+void iwl_leds_init(struct iwl_priv *priv)
 {
-       u8 blink_idx;
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
-               priv->last_blink_time = 0;
-               return;
-       }
-       if (iwl_is_rfkill(priv)) {
-               priv->last_blink_time = 0;
-               return;
+       int mode = led_mode;
+       int ret;
+
+       if (mode == IWL_LED_DEFAULT)
+               mode = priv->cfg->led_mode;
+
+       priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
+                                  wiphy_name(priv->hw->wiphy));
+       priv->led.brightness_set = iwl_led_brightness_set;
+       priv->led.blink_set = iwl_led_blink_set;
+       priv->led.max_brightness = 1;
+
+       switch (mode) {
+       case IWL_LED_DEFAULT:
+               WARN_ON(1);
+               break;
+       case IWL_LED_BLINK:
+               priv->led.default_trigger =
+                       ieee80211_create_tpt_led_trigger(priv->hw,
+                                       IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
+                                       iwl_blink, ARRAY_SIZE(iwl_blink));
+               break;
+       case IWL_LED_RF_STATE:
+               priv->led.default_trigger =
+                       ieee80211_get_radio_led_name(priv->hw);
+               break;
        }
 
-       if (!priv->allow_blinking) {
-               priv->last_blink_time = 0;
-               if (priv->last_blink_rate != IWL_SOLID_BLINK_IDX) {
-                       priv->last_blink_rate = IWL_SOLID_BLINK_IDX;
-                       iwl_led_pattern(priv, IWL_SOLID_BLINK_IDX);
-               }
+       ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
+       if (ret) {
+               kfree(priv->led.name);
                return;
        }
-       if (!priv->last_blink_time ||
-           !time_after(jiffies, priv->last_blink_time +
-                       msecs_to_jiffies(1000)))
-               return;
-
-       blink_idx = iwl_get_blink_rate(priv);
-
-       /* call only if blink rate change */
-       if (blink_idx != priv->last_blink_rate)
-               iwl_led_pattern(priv, blink_idx);
 
-       priv->last_blink_time = jiffies;
-       priv->last_blink_rate = blink_idx;
+       priv->led_registered = true;
 }
-EXPORT_SYMBOL(iwl_leds_background);
 
-void iwl_leds_init(struct iwl_priv *priv)
+void iwl_leds_exit(struct iwl_priv *priv)
 {
-       priv->last_blink_rate = 0;
-       priv->last_blink_time = 0;
-       priv->allow_blinking = 0;
-       if (led_mode != IWL_LED_DEFAULT &&
-           led_mode != priv->cfg->led_mode)
-               priv->cfg->led_mode = led_mode;
+       if (!priv->led_registered)
+               return;
+
+       led_classdev_unregister(&priv->led);
+       kfree(priv->led.name);
 }
-EXPORT_SYMBOL(iwl_leds_init);
index 9079b33486efcb95425d4711a1ff55671a6f7823..101eef12b3bba9e35f6f95172f667751d579779c 100644 (file)
 struct iwl_priv;
 
 #define IWL_LED_SOLID 11
-#define IWL_LED_NAME_LEN 31
 #define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
 
 #define IWL_LED_ACTIVITY       (0<<1)
 #define IWL_LED_LINK           (1<<1)
 
-enum led_type {
-       IWL_LED_TRG_TX,
-       IWL_LED_TRG_RX,
-       IWL_LED_TRG_ASSOC,
-       IWL_LED_TRG_RADIO,
-       IWL_LED_TRG_MAX,
-};
-
 /*
  * LED mode
- *    IWL_LED_DEFAULT:  use system default
+ *    IWL_LED_DEFAULT:  use device default
  *    IWL_LED_RF_STATE: turn LED on/off based on RF state
  *                     LED ON  = RF ON
  *                     LED OFF = RF OFF
@@ -60,9 +51,6 @@ enum iwl_led_mode {
 };
 
 void iwl_leds_init(struct iwl_priv *priv);
-void iwl_leds_background(struct iwl_priv *priv);
-int iwl_led_start(struct iwl_priv *priv);
-int iwl_led_associate(struct iwl_priv *priv);
-int iwl_led_disassociate(struct iwl_priv *priv);
+void iwl_leds_exit(struct iwl_priv *priv);
 
 #endif /* __iwl_leds_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.c b/drivers/net/wireless/iwlwifi/iwl-legacy.c
deleted file mode 100644 (file)
index bb1a742..0000000
+++ /dev/null
@@ -1,662 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <net/mac80211.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-helpers.h"
-#include "iwl-legacy.h"
-
-static void iwl_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       if (!ctx->is_active)
-               return;
-
-       ctx->qos_data.def_qos_parm.qos_flags = 0;
-
-       if (ctx->qos_data.qos_active)
-               ctx->qos_data.def_qos_parm.qos_flags |=
-                       QOS_PARAM_FLG_UPDATE_EDCA_MSK;
-
-       if (ctx->ht.enabled)
-               ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
-
-       IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
-                     ctx->qos_data.qos_active,
-                     ctx->qos_data.def_qos_parm.qos_flags);
-
-       iwl_send_cmd_pdu_async(priv, ctx->qos_cmd,
-                              sizeof(struct iwl_qosparam_cmd),
-                              &ctx->qos_data.def_qos_parm, NULL);
-}
-
-/**
- * iwl_legacy_mac_config - mac80211 config callback
- */
-int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
-{
-       struct iwl_priv *priv = hw->priv;
-       const struct iwl_channel_info *ch_info;
-       struct ieee80211_conf *conf = &hw->conf;
-       struct ieee80211_channel *channel = conf->channel;
-       struct iwl_ht_config *ht_conf = &priv->current_ht_config;
-       struct iwl_rxon_context *ctx;
-       unsigned long flags = 0;
-       int ret = 0;
-       u16 ch;
-       int scan_active = 0;
-       bool ht_changed[NUM_IWL_RXON_CTX] = {};
-
-       if (WARN_ON(!priv->cfg->ops->legacy))
-               return -EOPNOTSUPP;
-
-       mutex_lock(&priv->mutex);
-
-       IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
-                                       channel->hw_value, changed);
-
-       if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
-                       test_bit(STATUS_SCANNING, &priv->status))) {
-               scan_active = 1;
-               IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
-       }
-
-       if (changed & (IEEE80211_CONF_CHANGE_SMPS |
-                      IEEE80211_CONF_CHANGE_CHANNEL)) {
-               /* mac80211 uses static for non-HT which is what we want */
-               priv->current_ht_config.smps = conf->smps_mode;
-
-               /*
-                * Recalculate chain counts.
-                *
-                * If monitor mode is enabled then mac80211 will
-                * set up the SM PS mode to OFF if an HT channel is
-                * configured.
-                */
-               if (priv->cfg->ops->hcmd->set_rxon_chain)
-                       for_each_context(priv, ctx)
-                               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-       }
-
-       /* during scanning mac80211 will delay channel setting until
-        * scan finish with changed = 0
-        */
-       if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
-               if (scan_active)
-                       goto set_ch_out;
-
-               ch = channel->hw_value;
-               ch_info = iwl_get_channel_info(priv, channel->band, ch);
-               if (!is_channel_valid(ch_info)) {
-                       IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
-                       ret = -EINVAL;
-                       goto set_ch_out;
-               }
-
-               spin_lock_irqsave(&priv->lock, flags);
-
-               for_each_context(priv, ctx) {
-                       /* Configure HT40 channels */
-                       if (ctx->ht.enabled != conf_is_ht(conf)) {
-                               ctx->ht.enabled = conf_is_ht(conf);
-                               ht_changed[ctx->ctxid] = true;
-                       }
-                       if (ctx->ht.enabled) {
-                               if (conf_is_ht40_minus(conf)) {
-                                       ctx->ht.extension_chan_offset =
-                                               IEEE80211_HT_PARAM_CHA_SEC_BELOW;
-                                       ctx->ht.is_40mhz = true;
-                               } else if (conf_is_ht40_plus(conf)) {
-                                       ctx->ht.extension_chan_offset =
-                                               IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-                                       ctx->ht.is_40mhz = true;
-                               } else {
-                                       ctx->ht.extension_chan_offset =
-                                               IEEE80211_HT_PARAM_CHA_SEC_NONE;
-                                       ctx->ht.is_40mhz = false;
-                               }
-                       } else
-                               ctx->ht.is_40mhz = false;
-
-                       /*
-                        * Default to no protection. Protection mode will
-                        * later be set from BSS config in iwl_ht_conf
-                        */
-                       ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
-
-                       /* if we are switching from ht to 2.4 clear flags
-                        * from any ht related info since 2.4 does not
-                        * support ht */
-                       if ((le16_to_cpu(ctx->staging.channel) != ch))
-                               ctx->staging.flags = 0;
-
-                       iwl_set_rxon_channel(priv, channel, ctx);
-                       iwl_set_rxon_ht(priv, ht_conf);
-
-                       iwl_set_flags_for_band(priv, ctx, channel->band,
-                                              ctx->vif);
-               }
-
-               spin_unlock_irqrestore(&priv->lock, flags);
-
-               if (priv->cfg->ops->legacy->update_bcast_stations)
-                       ret = priv->cfg->ops->legacy->update_bcast_stations(priv);
-
- set_ch_out:
-               /* The list of supported rates and rate mask can be different
-                * for each band; since the band may have changed, reset
-                * the rate mask to what mac80211 lists */
-               iwl_set_rate(priv);
-       }
-
-       if (changed & (IEEE80211_CONF_CHANGE_PS |
-                       IEEE80211_CONF_CHANGE_IDLE)) {
-               ret = iwl_power_update_mode(priv, false);
-               if (ret)
-                       IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
-       }
-
-       if (changed & IEEE80211_CONF_CHANGE_POWER) {
-               IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
-                       priv->tx_power_user_lmt, conf->power_level);
-
-               iwl_set_tx_power(priv, conf->power_level, false);
-       }
-
-       if (!iwl_is_ready(priv)) {
-               IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
-               goto out;
-       }
-
-       if (scan_active)
-               goto out;
-
-       for_each_context(priv, ctx) {
-               if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
-                       iwlcore_commit_rxon(priv, ctx);
-               else
-                       IWL_DEBUG_INFO(priv,
-                               "Not re-sending same RXON configuration.\n");
-               if (ht_changed[ctx->ctxid])
-                       iwl_update_qos(priv, ctx);
-       }
-
-out:
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-       mutex_unlock(&priv->mutex);
-       return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_config);
-
-void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
-{
-       struct iwl_priv *priv = hw->priv;
-       unsigned long flags;
-       /* IBSS can only be the IWL_RXON_CTX_BSS context */
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       if (WARN_ON(!priv->cfg->ops->legacy))
-               return;
-
-       mutex_lock(&priv->mutex);
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       spin_lock_irqsave(&priv->lock, flags);
-       memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* new association get rid of ibss beacon skb */
-       if (priv->beacon_skb)
-               dev_kfree_skb(priv->beacon_skb);
-
-       priv->beacon_skb = NULL;
-
-       priv->timestamp = 0;
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       iwl_scan_cancel_timeout(priv, 100);
-       if (!iwl_is_ready_rf(priv)) {
-               IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
-               mutex_unlock(&priv->mutex);
-               return;
-       }
-
-       /* we are restarting association process
-        * clear RXON_FILTER_ASSOC_MSK bit
-        */
-       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-       iwlcore_commit_rxon(priv, ctx);
-
-       iwl_set_rate(priv);
-
-       mutex_unlock(&priv->mutex);
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
-
-static void iwl_ht_conf(struct iwl_priv *priv,
-                       struct ieee80211_vif *vif)
-{
-       struct iwl_ht_config *ht_conf = &priv->current_ht_config;
-       struct ieee80211_sta *sta;
-       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
-       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
-       IWL_DEBUG_ASSOC(priv, "enter:\n");
-
-       if (!ctx->ht.enabled)
-               return;
-
-       ctx->ht.protection =
-               bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
-       ctx->ht.non_gf_sta_present =
-               !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
-
-       ht_conf->single_chain_sufficient = false;
-
-       switch (vif->type) {
-       case NL80211_IFTYPE_STATION:
-               rcu_read_lock();
-               sta = ieee80211_find_sta(vif, bss_conf->bssid);
-               if (sta) {
-                       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
-                       int maxstreams;
-
-                       maxstreams = (ht_cap->mcs.tx_params &
-                                     IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
-                                       >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
-                       maxstreams += 1;
-
-                       if ((ht_cap->mcs.rx_mask[1] == 0) &&
-                           (ht_cap->mcs.rx_mask[2] == 0))
-                               ht_conf->single_chain_sufficient = true;
-                       if (maxstreams <= 1)
-                               ht_conf->single_chain_sufficient = true;
-               } else {
-                       /*
-                        * If at all, this can only happen through a race
-                        * when the AP disconnects us while we're still
-                        * setting up the connection, in that case mac80211
-                        * will soon tell us about that.
-                        */
-                       ht_conf->single_chain_sufficient = true;
-               }
-               rcu_read_unlock();
-               break;
-       case NL80211_IFTYPE_ADHOC:
-               ht_conf->single_chain_sufficient = true;
-               break;
-       default:
-               break;
-       }
-
-       IWL_DEBUG_ASSOC(priv, "leave\n");
-}
-
-static inline void iwl_set_no_assoc(struct iwl_priv *priv,
-                                   struct ieee80211_vif *vif)
-{
-       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
-       iwl_led_disassociate(priv);
-       /*
-        * inform the ucode that there is no longer an
-        * association and that no more packets should be
-        * sent
-        */
-       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-       ctx->staging.assoc_id = 0;
-       iwlcore_commit_rxon(priv, ctx);
-}
-
-static void iwlcore_beacon_update(struct ieee80211_hw *hw,
-                                 struct ieee80211_vif *vif)
-{
-       struct iwl_priv *priv = hw->priv;
-       unsigned long flags;
-       __le64 timestamp;
-       struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
-
-       if (!skb)
-               return;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       lockdep_assert_held(&priv->mutex);
-
-       if (!priv->beacon_ctx) {
-               IWL_ERR(priv, "update beacon but no beacon context!\n");
-               dev_kfree_skb(skb);
-               return;
-       }
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       if (priv->beacon_skb)
-               dev_kfree_skb(priv->beacon_skb);
-
-       priv->beacon_skb = skb;
-
-       timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
-       priv->timestamp = le64_to_cpu(timestamp);
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       if (!iwl_is_ready_rf(priv)) {
-               IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
-               return;
-       }
-
-       priv->cfg->ops->legacy->post_associate(priv);
-}
-
-void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
-                                    struct ieee80211_vif *vif,
-                                    struct ieee80211_bss_conf *bss_conf,
-                                    u32 changes)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-       int ret;
-
-       if (WARN_ON(!priv->cfg->ops->legacy))
-               return;
-
-       IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
-
-       if (!iwl_is_alive(priv))
-               return;
-
-       mutex_lock(&priv->mutex);
-
-       if (changes & BSS_CHANGED_QOS) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&priv->lock, flags);
-               ctx->qos_data.qos_active = bss_conf->qos;
-               iwl_update_qos(priv, ctx);
-               spin_unlock_irqrestore(&priv->lock, flags);
-       }
-
-       if (changes & BSS_CHANGED_BEACON_ENABLED) {
-               /*
-                * the add_interface code must make sure we only ever
-                * have a single interface that could be beaconing at
-                * any time.
-                */
-               if (vif->bss_conf.enable_beacon)
-                       priv->beacon_ctx = ctx;
-               else
-                       priv->beacon_ctx = NULL;
-       }
-
-       if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
-               dev_kfree_skb(priv->beacon_skb);
-               priv->beacon_skb = ieee80211_beacon_get(hw, vif);
-       }
-
-       if (changes & BSS_CHANGED_BEACON_INT && vif->type == NL80211_IFTYPE_AP)
-               iwl_send_rxon_timing(priv, ctx);
-
-       if (changes & BSS_CHANGED_BSSID) {
-               IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
-
-               /*
-                * If there is currently a HW scan going on in the
-                * background then we need to cancel it else the RXON
-                * below/in post_associate will fail.
-                */
-               if (iwl_scan_cancel_timeout(priv, 100)) {
-                       IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
-                       IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
-                       mutex_unlock(&priv->mutex);
-                       return;
-               }
-
-               /* mac80211 only sets assoc when in STATION mode */
-               if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
-                       memcpy(ctx->staging.bssid_addr,
-                              bss_conf->bssid, ETH_ALEN);
-
-                       /* currently needed in a few places */
-                       memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
-               } else {
-                       ctx->staging.filter_flags &=
-                               ~RXON_FILTER_ASSOC_MSK;
-               }
-
-       }
-
-       /*
-        * This needs to be after setting the BSSID in case
-        * mac80211 decides to do both changes at once because
-        * it will invoke post_associate.
-        */
-       if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
-               iwlcore_beacon_update(hw, vif);
-
-       if (changes & BSS_CHANGED_ERP_PREAMBLE) {
-               IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
-                                  bss_conf->use_short_preamble);
-               if (bss_conf->use_short_preamble)
-                       ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
-               else
-                       ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-       }
-
-       if (changes & BSS_CHANGED_ERP_CTS_PROT) {
-               IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
-               if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
-                       ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
-               else
-                       ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
-               if (bss_conf->use_cts_prot)
-                       ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
-               else
-                       ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
-       }
-
-       if (changes & BSS_CHANGED_BASIC_RATES) {
-               /* XXX use this information
-                *
-                * To do that, remove code from iwl_set_rate() and put something
-                * like this here:
-                *
-               if (A-band)
-                       ctx->staging.ofdm_basic_rates =
-                               bss_conf->basic_rates;
-               else
-                       ctx->staging.ofdm_basic_rates =
-                               bss_conf->basic_rates >> 4;
-                       ctx->staging.cck_basic_rates =
-                               bss_conf->basic_rates & 0xF;
-                */
-       }
-
-       if (changes & BSS_CHANGED_HT) {
-               iwl_ht_conf(priv, vif);
-
-               if (priv->cfg->ops->hcmd->set_rxon_chain)
-                       priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-       }
-
-       if (changes & BSS_CHANGED_ASSOC) {
-               IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
-               if (bss_conf->assoc) {
-                       priv->timestamp = bss_conf->timestamp;
-
-                       iwl_led_associate(priv);
-
-                       if (!iwl_is_rfkill(priv))
-                               priv->cfg->ops->legacy->post_associate(priv);
-               } else
-                       iwl_set_no_assoc(priv, vif);
-       }
-
-       if (changes && iwl_is_associated_ctx(ctx) && bss_conf->aid) {
-               IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
-                                  changes);
-               ret = iwl_send_rxon_assoc(priv, ctx);
-               if (!ret) {
-                       /* Sync active_rxon with latest change. */
-                       memcpy((void *)&ctx->active,
-                               &ctx->staging,
-                               sizeof(struct iwl_rxon_cmd));
-               }
-       }
-
-       if (changes & BSS_CHANGED_BEACON_ENABLED) {
-               if (vif->bss_conf.enable_beacon) {
-                       memcpy(ctx->staging.bssid_addr,
-                              bss_conf->bssid, ETH_ALEN);
-                       memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
-                       iwl_led_associate(priv);
-                       priv->cfg->ops->legacy->config_ap(priv);
-               } else
-                       iwl_set_no_assoc(priv, vif);
-       }
-
-       if (changes & BSS_CHANGED_IBSS) {
-               ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
-                                                       bss_conf->ibss_joined);
-               if (ret)
-                       IWL_ERR(priv, "failed to %s IBSS station %pM\n",
-                               bss_conf->ibss_joined ? "add" : "remove",
-                               bss_conf->bssid);
-       }
-
-       mutex_unlock(&priv->mutex);
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
-
-irqreturn_t iwl_isr_legacy(int irq, void *data)
-{
-       struct iwl_priv *priv = data;
-       u32 inta, inta_mask;
-       u32 inta_fh;
-       unsigned long flags;
-       if (!priv)
-               return IRQ_NONE;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* Disable (but don't clear!) interrupts here to avoid
-        *    back-to-back ISRs and sporadic interrupts from our NIC.
-        * If we have something to service, the tasklet will re-enable ints.
-        * If we *don't* have something, we'll re-enable before leaving here. */
-       inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
-       iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
-       /* Discover which interrupts are active/pending */
-       inta = iwl_read32(priv, CSR_INT);
-       inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
-
-       /* Ignore interrupt if there's nothing in NIC to service.
-        * This may be due to IRQ shared with another device,
-        * or due to sporadic interrupts thrown from our NIC. */
-       if (!inta && !inta_fh) {
-               IWL_DEBUG_ISR(priv,
-                       "Ignore interrupt, inta == 0, inta_fh == 0\n");
-               goto none;
-       }
-
-       if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
-               /* Hardware disappeared. It might have already raised
-                * an interrupt */
-               IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
-               goto unplugged;
-       }
-
-       IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
-                     inta, inta_mask, inta_fh);
-
-       inta &= ~CSR_INT_BIT_SCD;
-
-       /* iwl_irq_tasklet() will service interrupts and re-enable them */
-       if (likely(inta || inta_fh))
-               tasklet_schedule(&priv->irq_tasklet);
-
-unplugged:
-       spin_unlock_irqrestore(&priv->lock, flags);
-       return IRQ_HANDLED;
-
-none:
-       /* re-enable interrupts here since we don't have anything to service. */
-       /* only Re-enable if disabled by irq */
-       if (test_bit(STATUS_INT_ENABLED, &priv->status))
-               iwl_enable_interrupts(priv);
-       spin_unlock_irqrestore(&priv->lock, flags);
-       return IRQ_NONE;
-}
-EXPORT_SYMBOL(iwl_isr_legacy);
-
-/*
- *  iwl_legacy_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
- *  function.
- */
-void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
-                              struct ieee80211_tx_info *info,
-                              __le16 fc, __le32 *tx_flags)
-{
-       if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
-               *tx_flags |= TX_CMD_FLG_RTS_MSK;
-               *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
-               *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
-
-               if (!ieee80211_is_mgmt(fc))
-                       return;
-
-               switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
-               case cpu_to_le16(IEEE80211_STYPE_AUTH):
-               case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
-               case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
-               case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
-                       *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
-                       *tx_flags |= TX_CMD_FLG_CTS_MSK;
-                       break;
-               }
-       } else if (info->control.rates[0].flags &
-                  IEEE80211_TX_RC_USE_CTS_PROTECT) {
-               *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
-               *tx_flags |= TX_CMD_FLG_CTS_MSK;
-               *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
-       }
-}
-EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
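
For reference, the protection logic removed above reduces to a small decision over three flags. Below is a standalone sketch that mirrors iwl_legacy_tx_cmd_protection(); the flag values and the rc_*/is_mgmt_* inputs are illustrative stand-ins, not part of this patch or the driver's headers.

#include <stdbool.h>
#include <stdio.h>

#define FLG_RTS        0x1u  /* stand-in for TX_CMD_FLG_RTS_MSK */
#define FLG_CTS        0x2u  /* stand-in for TX_CMD_FLG_CTS_MSK */
#define FLG_FULL_TXOP  0x4u  /* stand-in for TX_CMD_FLG_FULL_TXOP_PROT_MSK */

static unsigned int protection_flags(bool rc_wants_rts, bool rc_wants_cts,
				      bool is_mgmt_that_prefers_cts)
{
	unsigned int flags = 0;

	if (rc_wants_rts) {
		/* Default: RTS/CTS protecting the full TXOP ... */
		flags = FLG_RTS | FLG_FULL_TXOP;
		/* ... except short management exchanges (auth, deauth,
		 * (re)assoc requests), which fall back to CTS-to-self. */
		if (is_mgmt_that_prefers_cts)
			flags = FLG_CTS | FLG_FULL_TXOP;
	} else if (rc_wants_cts) {
		flags = FLG_CTS | FLG_FULL_TXOP;
	}
	return flags;
}

int main(void)
{
	printf("data, RTS requested: 0x%x\n", protection_flags(true, false, false));
	printf("auth, RTS requested: 0x%x\n", protection_flags(true, false, true));
	printf("CTS protection only: 0x%x\n", protection_flags(false, true, false));
	return 0;
}
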
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.h b/drivers/net/wireless/iwlwifi/iwl-legacy.h
deleted file mode 100644 (file)
index 9f7b2f9..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_legacy_h__
-#define __iwl_legacy_h__
-
-/* mac80211 handlers */
-int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
-void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw);
-void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
-                                    struct ieee80211_vif *vif,
-                                    struct ieee80211_bss_conf *bss_conf,
-                                    u32 changes);
-void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
-                               struct ieee80211_tx_info *info,
-                               __le16 fc, __le32 *tx_flags);
-
-irqreturn_t iwl_isr_legacy(int irq, void *data);
-
-#endif /* __iwl_legacy_h__ */
index 1eec18d909d8622680877985fbb797565f84ad6c..576795e2c75b0f978c7623c40c514851bbadf5fc 100644 (file)
@@ -226,8 +226,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
        else
                cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
 
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->advanced_bt_coexist) {
+       if (iwl_advanced_bt_coexist(priv)) {
                if (!priv->cfg->bt_params->bt_sco_disable)
                        cmd->flags |= IWL_POWER_BT_SCO_ENA;
                else
@@ -313,8 +312,7 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
        else
                cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
 
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->advanced_bt_coexist) {
+       if (iwl_advanced_bt_coexist(priv)) {
                if (!priv->cfg->bt_params->bt_sco_disable)
                        cmd->flags |= IWL_POWER_BT_SCO_ENA;
                else
@@ -358,8 +356,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
 
        if (priv->cfg->base_params->broken_powersave)
                iwl_power_sleep_cam_cmd(priv, cmd);
-       else if (priv->cfg->base_params->supports_idle &&
-                priv->hw->conf.flags & IEEE80211_CONF_IDLE)
+       else if (priv->hw->conf.flags & IEEE80211_CONF_IDLE)
                iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
        else if (priv->cfg->ops->lib->tt_ops.lower_power_detection &&
                 priv->cfg->ops->lib->tt_ops.tt_power_mode &&
@@ -428,7 +425,6 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
 
        return ret;
 }
-EXPORT_SYMBOL(iwl_power_set_mode);
 
 int iwl_power_update_mode(struct iwl_priv *priv, bool force)
 {
@@ -437,7 +433,6 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
        iwl_power_build_cmd(priv, &cmd);
        return iwl_power_set_mode(priv, &cmd, force);
 }
-EXPORT_SYMBOL(iwl_power_update_mode);
 
 /* initialize to default */
 void iwl_power_initialize(struct iwl_priv *priv)
@@ -451,4 +446,3 @@ void iwl_power_initialize(struct iwl_priv *priv)
        memset(&priv->power_data.sleep_cmd, 0,
                sizeof(priv->power_data.sleep_cmd));
 }
-EXPORT_SYMBOL(iwl_power_initialize);
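
The repeated "bt_params && bt_params->advanced_bt_coexist" tests in this file are folded into iwl_advanced_bt_coexist(). A minimal standalone sketch of the kind of NULL-safe accessor this presumably is, using stand-in structures rather than the driver's actual definitions:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct bt_params { bool advanced_bt_coexist; };
struct cfg       { const struct bt_params *bt_params; };
struct priv      { const struct cfg *cfg; };

static inline bool advanced_bt_coexist(const struct priv *priv)
{
	/* Collapse the per-call-site NULL check plus flag test into one helper. */
	return priv->cfg->bt_params &&
	       priv->cfg->bt_params->advanced_bt_coexist;
}

int main(void)
{
	struct bt_params bt = { .advanced_bt_coexist = true };
	struct cfg cfg_with = { .bt_params = &bt };
	struct cfg cfg_without = { .bt_params = NULL };
	struct priv a = { .cfg = &cfg_with };
	struct priv b = { .cfg = &cfg_without };

	printf("with bt_params: %d, without: %d\n",
	       advanced_bt_coexist(&a), advanced_bt_coexist(&b));
	return 0;
}
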
index 87a6fd84d4d25827e7c363638fb31b3b5fbef3c8..566e2d979ce37d3767231ed5fd7b66a413d62850 100644 (file)
@@ -37,6 +37,7 @@
 #include "iwl-sta.h"
 #include "iwl-io.h"
 #include "iwl-helpers.h"
+#include "iwl-agn-calib.h"
 /************************** RX-FUNCTIONS ****************************/
 /*
  * Rx theory of operation
@@ -118,7 +119,6 @@ int iwl_rx_queue_space(const struct iwl_rx_queue *q)
                s = 0;
        return s;
 }
-EXPORT_SYMBOL(iwl_rx_queue_space);
 
 /**
  * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
@@ -170,7 +170,6 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q
  exit_unlock:
        spin_unlock_irqrestore(&q->lock, flags);
 }
-EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
 
 int iwl_rx_queue_alloc(struct iwl_priv *priv)
 {
@@ -211,8 +210,6 @@ err_rb:
 err_bd:
        return -ENOMEM;
 }
-EXPORT_SYMBOL(iwl_rx_queue_alloc);
-
 
 void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
                                          struct iwl_rx_mem_buffer *rxb)
@@ -229,40 +226,397 @@ void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
        memcpy(&priv->measure_report, report, sizeof(*report));
        priv->measurement_status |= MEASUREMENT_READY;
 }
-EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);
 
-void iwl_recover_from_statistics(struct iwl_priv *priv,
-                               struct iwl_rx_packet *pkt)
+/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
+#define ACK_CNT_RATIO (50)
+#define BA_TIMEOUT_CNT (5)
+#define BA_TIMEOUT_MAX (16)
+
+/**
+ * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
+ *
+ * When the ACK count ratio is low and the aggregated BA timeout retry count
+ * exceeds BA_TIMEOUT_MAX, reload the firmware to bring the system back to a
+ * normal operational state.
+ */
+static bool iwl_good_ack_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
 {
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-       if (iwl_is_any_associated(priv)) {
-               if (priv->cfg->ops->lib->check_ack_health) {
-                       if (!priv->cfg->ops->lib->check_ack_health(
-                           priv, pkt)) {
-                               /*
-                                * low ack count detected
-                                * restart Firmware
-                                */
-                               IWL_ERR(priv, "low ack count detected, "
-                                       "restart firmware\n");
-                               if (!iwl_force_reset(priv, IWL_FW_RESET, false))
-                                       return;
-                       }
+       int actual_delta, expected_delta, ba_timeout_delta;
+       struct statistics_tx *cur, *old;
+
+       if (priv->_agn.agg_tids_count)
+               return true;
+
+       if (iwl_bt_statistics(priv)) {
+               cur = &pkt->u.stats_bt.tx;
+               old = &priv->_agn.statistics_bt.tx;
+       } else {
+               cur = &pkt->u.stats.tx;
+               old = &priv->_agn.statistics.tx;
+       }
+
+       actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
+                      le32_to_cpu(old->actual_ack_cnt);
+       expected_delta = le32_to_cpu(cur->expected_ack_cnt) -
+                        le32_to_cpu(old->expected_ack_cnt);
+
+       /* Values should not be negative, but we do not trust the firmware */
+       if (actual_delta <= 0 || expected_delta <= 0)
+               return true;
+
+       ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) -
+                          le32_to_cpu(old->agg.ba_timeout);
+
+       if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
+           ba_timeout_delta > BA_TIMEOUT_CNT) {
+               IWL_DEBUG_RADIO(priv, "deltas: actual %d expected %d ba_timeout %d\n",
+                               actual_delta, expected_delta, ba_timeout_delta);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+               /*
+                * This is ifdef'ed on DEBUGFS because otherwise the
+                * statistics aren't available. If DEBUGFS is set but
+                * DEBUG is not, these will just compile out.
+                */
+               IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
+                               priv->_agn.delta_statistics.tx.rx_detected_cnt);
+               IWL_DEBUG_RADIO(priv,
+                               "ack_or_ba_timeout_collision delta %d\n",
+                               priv->_agn.delta_statistics.tx.ack_or_ba_timeout_collision);
+#endif
+
+               if (ba_timeout_delta >= BA_TIMEOUT_MAX)
+                       return false;
+       }
+
+       return true;
+}
+
+/**
+ * iwl_good_plcp_health - check the PLCP error rate.
+ *
+ * When the PLCP error rate exceeds the configured threshold, reset the
+ * radio to recover throughput.
+ */
+static bool iwl_good_plcp_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
+{
+       bool rc = true;
+       int combined_plcp_delta;
+       unsigned int plcp_msec;
+       unsigned long plcp_received_jiffies;
+
+       if (priv->cfg->base_params->plcp_delta_threshold ==
+           IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
+               IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
+               return rc;
+       }
+
+       /*
+        * check for plcp_err and trigger radio reset if it exceeds
+        * the plcp error threshold plcp_delta.
+        */
+       plcp_received_jiffies = jiffies;
+       plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
+                                       (long) priv->plcp_jiffies);
+       priv->plcp_jiffies = plcp_received_jiffies;
+       /*
+        * check to make sure plcp_msec is not 0 to prevent division
+        * by zero.
+        */
+       if (plcp_msec) {
+               struct statistics_rx_phy *ofdm;
+               struct statistics_rx_ht_phy *ofdm_ht;
+
+               if (iwl_bt_statistics(priv)) {
+                       ofdm = &pkt->u.stats_bt.rx.ofdm;
+                       ofdm_ht = &pkt->u.stats_bt.rx.ofdm_ht;
+                       combined_plcp_delta =
+                          (le32_to_cpu(ofdm->plcp_err) -
+                          le32_to_cpu(priv->_agn.statistics_bt.
+                                      rx.ofdm.plcp_err)) +
+                          (le32_to_cpu(ofdm_ht->plcp_err) -
+                          le32_to_cpu(priv->_agn.statistics_bt.
+                                      rx.ofdm_ht.plcp_err));
+               } else {
+                       ofdm = &pkt->u.stats.rx.ofdm;
+                       ofdm_ht = &pkt->u.stats.rx.ofdm_ht;
+                       combined_plcp_delta =
+                           (le32_to_cpu(ofdm->plcp_err) -
+                           le32_to_cpu(priv->_agn.statistics.
+                                       rx.ofdm.plcp_err)) +
+                           (le32_to_cpu(ofdm_ht->plcp_err) -
+                           le32_to_cpu(priv->_agn.statistics.
+                                       rx.ofdm_ht.plcp_err));
                }
-               if (priv->cfg->ops->lib->check_plcp_health) {
-                       if (!priv->cfg->ops->lib->check_plcp_health(
-                           priv, pkt)) {
-                               /*
-                                * high plcp error detected
-                                * reset Radio
-                                */
-                               iwl_force_reset(priv, IWL_RF_RESET, false);
-                       }
+
+               if ((combined_plcp_delta > 0) &&
+                   ((combined_plcp_delta * 100) / plcp_msec) >
+                       priv->cfg->base_params->plcp_delta_threshold) {
+                       /*
+                        * If plcp_err exceeds the threshold,
+                        * the following data is printed in csv format:
+                        *    Text: plcp_err exceeded %d,
+                        *    Received ofdm.plcp_err,
+                        *    Current ofdm.plcp_err,
+                        *    Received ofdm_ht.plcp_err,
+                        *    Current ofdm_ht.plcp_err,
+                        *    combined_plcp_delta,
+                        *    plcp_msec
+                        */
+                       IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
+                               "%u, %u, %u, %u, %d, %u mSecs\n",
+                               priv->cfg->base_params->plcp_delta_threshold,
+                               le32_to_cpu(ofdm->plcp_err),
+                               le32_to_cpu(ofdm->plcp_err),
+                               le32_to_cpu(ofdm_ht->plcp_err),
+                               le32_to_cpu(ofdm_ht->plcp_err),
+                               combined_plcp_delta, plcp_msec);
+
+                       rc = false;
                }
        }
+       return rc;
+}
+
+static void iwl_recover_from_statistics(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
+{
+       const struct iwl_mod_params *mod_params = priv->cfg->mod_params;
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+           !iwl_is_any_associated(priv))
+               return;
+
+       if (mod_params->ack_check && !iwl_good_ack_health(priv, pkt)) {
+               IWL_ERR(priv, "low ack count detected, restart firmware\n");
+               if (!iwl_force_reset(priv, IWL_FW_RESET, false))
+                       return;
+       }
+
+       if (mod_params->plcp_check && !iwl_good_plcp_health(priv, pkt))
+               iwl_force_reset(priv, IWL_RF_RESET, false);
+}
+
+/* Calculate the noise level from measurements taken during network silence
+ * just before the arriving beacon.  This can only be done when we know
+ * exactly when to expect beacons, i.e. only while associated. */
+static void iwl_rx_calc_noise(struct iwl_priv *priv)
+{
+       struct statistics_rx_non_phy *rx_info;
+       int num_active_rx = 0;
+       int total_silence = 0;
+       int bcn_silence_a, bcn_silence_b, bcn_silence_c;
+       int last_rx_noise;
+
+       if (iwl_bt_statistics(priv))
+               rx_info = &(priv->_agn.statistics_bt.rx.general.common);
+       else
+               rx_info = &(priv->_agn.statistics.rx.general);
+       bcn_silence_a =
+               le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+       bcn_silence_b =
+               le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+       bcn_silence_c =
+               le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+
+       if (bcn_silence_a) {
+               total_silence += bcn_silence_a;
+               num_active_rx++;
+       }
+       if (bcn_silence_b) {
+               total_silence += bcn_silence_b;
+               num_active_rx++;
+       }
+       if (bcn_silence_c) {
+               total_silence += bcn_silence_c;
+               num_active_rx++;
+       }
+
+       /* Average among active antennas */
+       if (num_active_rx)
+               last_rx_noise = (total_silence / num_active_rx) - 107;
+       else
+               last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
+
+       IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
+                       bcn_silence_a, bcn_silence_b, bcn_silence_c,
+                       last_rx_noise);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+/*
+ *  Based on the assumption that all statistics counters are DWORD-sized.
+ *  FIXME: this function is for debugging only and does not handle
+ *  counter roll-over.
+ */
+static void iwl_accumulative_statistics(struct iwl_priv *priv,
+                                       __le32 *stats)
+{
+       int i, size;
+       __le32 *prev_stats;
+       u32 *accum_stats;
+       u32 *delta, *max_delta;
+       struct statistics_general_common *general, *accum_general;
+       struct statistics_tx *tx, *accum_tx;
+
+       if (iwl_bt_statistics(priv)) {
+               prev_stats = (__le32 *)&priv->_agn.statistics_bt;
+               accum_stats = (u32 *)&priv->_agn.accum_statistics_bt;
+               size = sizeof(struct iwl_bt_notif_statistics);
+               general = &priv->_agn.statistics_bt.general.common;
+               accum_general = &priv->_agn.accum_statistics_bt.general.common;
+               tx = &priv->_agn.statistics_bt.tx;
+               accum_tx = &priv->_agn.accum_statistics_bt.tx;
+               delta = (u32 *)&priv->_agn.delta_statistics_bt;
+               max_delta = (u32 *)&priv->_agn.max_delta_bt;
+       } else {
+               prev_stats = (__le32 *)&priv->_agn.statistics;
+               accum_stats = (u32 *)&priv->_agn.accum_statistics;
+               size = sizeof(struct iwl_notif_statistics);
+               general = &priv->_agn.statistics.general.common;
+               accum_general = &priv->_agn.accum_statistics.general.common;
+               tx = &priv->_agn.statistics.tx;
+               accum_tx = &priv->_agn.accum_statistics.tx;
+               delta = (u32 *)&priv->_agn.delta_statistics;
+               max_delta = (u32 *)&priv->_agn.max_delta;
+       }
+       for (i = sizeof(__le32); i < size;
+            i += sizeof(__le32), stats++, prev_stats++, delta++,
+            max_delta++, accum_stats++) {
+               if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+                       *delta = (le32_to_cpu(*stats) -
+                               le32_to_cpu(*prev_stats));
+                       *accum_stats += *delta;
+                       if (*delta > *max_delta)
+                               *max_delta = *delta;
+               }
+       }
+
+       /* reset accumulative statistics for "no-counter" type statistics */
+       accum_general->temperature = general->temperature;
+       accum_general->temperature_m = general->temperature_m;
+       accum_general->ttl_timestamp = general->ttl_timestamp;
+       accum_tx->tx_power.ant_a = tx->tx_power.ant_a;
+       accum_tx->tx_power.ant_b = tx->tx_power.ant_b;
+       accum_tx->tx_power.ant_c = tx->tx_power.ant_c;
+}
+#endif
+
+#define REG_RECALIB_PERIOD (60)
+
+void iwl_rx_statistics(struct iwl_priv *priv,
+                             struct iwl_rx_mem_buffer *rxb)
+{
+       int change;
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+       if (iwl_bt_statistics(priv)) {
+               IWL_DEBUG_RX(priv,
+                            "Statistics notification received (%d vs %d).\n",
+                            (int)sizeof(struct iwl_bt_notif_statistics),
+                            le32_to_cpu(pkt->len_n_flags) &
+                            FH_RSCSR_FRAME_SIZE_MSK);
+
+               change = ((priv->_agn.statistics_bt.general.common.temperature !=
+                          pkt->u.stats_bt.general.common.temperature) ||
+                          ((priv->_agn.statistics_bt.flag &
+                          STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+                          (pkt->u.stats_bt.flag &
+                          STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+               iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt);
+#endif
+
+       } else {
+               IWL_DEBUG_RX(priv,
+                            "Statistics notification received (%d vs %d).\n",
+                            (int)sizeof(struct iwl_notif_statistics),
+                            le32_to_cpu(pkt->len_n_flags) &
+                            FH_RSCSR_FRAME_SIZE_MSK);
+
+               change = ((priv->_agn.statistics.general.common.temperature !=
+                          pkt->u.stats.general.common.temperature) ||
+                          ((priv->_agn.statistics.flag &
+                          STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+                          (pkt->u.stats.flag &
+                          STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+               iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
+#endif
+
+       }
+
+       iwl_recover_from_statistics(priv, pkt);
+
+       if (iwl_bt_statistics(priv))
+               memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
+                       sizeof(priv->_agn.statistics_bt));
+       else
+               memcpy(&priv->_agn.statistics, &pkt->u.stats,
+                       sizeof(priv->_agn.statistics));
+
+       set_bit(STATUS_STATISTICS, &priv->status);
+
+       /* Reschedule the statistics timer to occur in
+        * REG_RECALIB_PERIOD seconds to ensure we get a
+        * thermal update even if the uCode doesn't give
+        * us one */
+       mod_timer(&priv->statistics_periodic, jiffies +
+                 msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
+
+       if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
+           (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
+               iwl_rx_calc_noise(priv);
+               queue_work(priv->workqueue, &priv->run_time_calib_work);
+       }
+       if (priv->cfg->ops->lib->temp_ops.temperature && change)
+               priv->cfg->ops->lib->temp_ops.temperature(priv);
+}
+
+void iwl_reply_statistics(struct iwl_priv *priv,
+                             struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+       if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+               memset(&priv->_agn.accum_statistics, 0,
+                       sizeof(struct iwl_notif_statistics));
+               memset(&priv->_agn.delta_statistics, 0,
+                       sizeof(struct iwl_notif_statistics));
+               memset(&priv->_agn.max_delta, 0,
+                       sizeof(struct iwl_notif_statistics));
+               memset(&priv->_agn.accum_statistics_bt, 0,
+                       sizeof(struct iwl_bt_notif_statistics));
+               memset(&priv->_agn.delta_statistics_bt, 0,
+                       sizeof(struct iwl_bt_notif_statistics));
+               memset(&priv->_agn.max_delta_bt, 0,
+                       sizeof(struct iwl_bt_notif_statistics));
+#endif
+               IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
+       }
+       iwl_rx_statistics(priv, rxb);
+}
+
+void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
+                               struct iwl_rx_mem_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_missed_beacon_notif *missed_beacon;
+
+       missed_beacon = &pkt->u.missed_beacon;
+       if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+           priv->missed_beacon_threshold) {
+               IWL_DEBUG_CALIB(priv,
+                   "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
+                   le32_to_cpu(missed_beacon->consecutive_missed_beacons),
+                   le32_to_cpu(missed_beacon->total_missed_becons),
+                   le32_to_cpu(missed_beacon->num_recvd_beacons),
+                   le32_to_cpu(missed_beacon->num_expected_beacons));
+               if (!test_bit(STATUS_SCANNING, &priv->status))
+                       iwl_init_sensitivity(priv);
+       }
 }
-EXPORT_SYMBOL(iwl_recover_from_statistics);
 
 /*
  * returns non-zero if packet should be dropped
@@ -315,4 +669,3 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
        }
        return 0;
 }
-EXPORT_SYMBOL(iwl_set_decrypted_flag);
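
The ACK-health test moved into iwl-rx.c above compares per-interval deltas against ACK_CNT_RATIO and the BA timeout limits. The following is a standalone sketch of that ratio check using plain ints in place of the statistics structures, with sample numbers to show when a firmware reload would be triggered:

#include <stdbool.h>
#include <stdio.h>

#define ACK_CNT_RATIO   50   /* percent of expected ACKs actually seen */
#define BA_TIMEOUT_CNT   5
#define BA_TIMEOUT_MAX  16

static bool ack_health_ok(int actual_delta, int expected_delta,
			  int ba_timeout_delta)
{
	/* Distrust non-positive deltas, as the driver does. */
	if (actual_delta <= 0 || expected_delta <= 0)
		return true;

	/* Low ACK ratio combined with many aggregated-BA timeouts is only
	 * treated as fatal once the timeout count reaches BA_TIMEOUT_MAX. */
	if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
	    ba_timeout_delta > BA_TIMEOUT_CNT)
		return ba_timeout_delta < BA_TIMEOUT_MAX;

	return true;
}

int main(void)
{
	/* 40% ACK ratio and 20 BA timeouts -> unhealthy, firmware reload. */
	printf("case 1 healthy: %d\n", ack_health_ok(40, 100, 20));
	/* Same ratio but only 3 BA timeouts -> still considered healthy. */
	printf("case 2 healthy: %d\n", ack_health_ok(40, 100, 3));
	return 0;
}
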
index 12d9363d0afe161774abd5c9b3fd3d95a9e31aa0..faa6d34cb6586b2ac8da23f6e1fd886ddecfe802 100644 (file)
@@ -155,7 +155,6 @@ int iwl_scan_cancel(struct iwl_priv *priv)
        queue_work(priv->workqueue, &priv->abort_scan);
        return 0;
 }
-EXPORT_SYMBOL(iwl_scan_cancel);
 
 /**
  * iwl_scan_cancel_timeout - Cancel any currently executing HW scan
@@ -180,7 +179,6 @@ int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
 
        return test_bit(STATUS_SCAN_HW, &priv->status);
 }
-EXPORT_SYMBOL(iwl_scan_cancel_timeout);
 
 /* Service response to REPLY_SCAN_CMD (0x80) */
 static void iwl_rx_reply_scan(struct iwl_priv *priv,
@@ -257,8 +255,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
        queue_work(priv->workqueue, &priv->scan_completed);
 
        if (priv->iw_mode != NL80211_IFTYPE_ADHOC &&
-           priv->cfg->bt_params &&
-           priv->cfg->bt_params->advanced_bt_coexist &&
+           iwl_advanced_bt_coexist(priv) &&
            priv->bt_status != scan_notif->bt_status) {
                if (scan_notif->bt_status) {
                        /* BT on */
@@ -289,7 +286,6 @@ void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
        priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
                                        iwl_rx_scan_complete_notif;
 }
-EXPORT_SYMBOL(iwl_setup_rx_scan_handlers);
 
 inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
                                     enum ieee80211_band band,
@@ -302,7 +298,6 @@ inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
                return IWL_ACTIVE_DWELL_TIME_24 +
                        IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
 }
-EXPORT_SYMBOL(iwl_get_active_dwell_time);
 
 u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
                               enum ieee80211_band band,
@@ -334,7 +329,6 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
 
        return passive;
 }
-EXPORT_SYMBOL(iwl_get_passive_dwell_time);
 
 void iwl_init_scan_params(struct iwl_priv *priv)
 {
@@ -344,7 +338,6 @@ void iwl_init_scan_params(struct iwl_priv *priv)
        if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
                priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
 }
-EXPORT_SYMBOL(iwl_init_scan_params);
 
 static int __must_check iwl_scan_initiate(struct iwl_priv *priv,
                                          struct ieee80211_vif *vif,
@@ -440,7 +433,6 @@ out_unlock:
 
        return ret;
 }
-EXPORT_SYMBOL(iwl_mac_hw_scan);
 
 /*
  * internal short scan, this function should only been called while associated.
@@ -537,7 +529,6 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
 
        return (u16)len;
 }
-EXPORT_SYMBOL(iwl_fill_probe_req);
 
 static void iwl_bg_abort_scan(struct work_struct *work)
 {
@@ -622,7 +613,6 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
        INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
        INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
 }
-EXPORT_SYMBOL(iwl_setup_scan_deferred_work);
 
 void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
 {
@@ -636,4 +626,3 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
                mutex_unlock(&priv->mutex);
        }
 }
-EXPORT_SYMBOL(iwl_cancel_scan_deferred_work);
index 49493d176515b8f1a01509ac99ebb125e20482b0..bc90a12408a3e850d8a02fc8c2dc5469f2f6b50c 100644 (file)
@@ -169,7 +169,6 @@ int iwl_send_add_sta(struct iwl_priv *priv,
 
        return ret;
 }
-EXPORT_SYMBOL(iwl_send_add_sta);
 
 static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
                                   struct ieee80211_sta *sta,
@@ -316,7 +315,6 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
        return sta_id;
 
 }
-EXPORT_SYMBOL_GPL(iwl_prep_station);
 
 #define STA_WAIT_TIMEOUT (HZ/2)
 
@@ -379,7 +377,6 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
        *sta_id_r = sta_id;
        return ret;
 }
-EXPORT_SYMBOL(iwl_add_station_common);
 
 /**
  * iwl_sta_ucode_deactivate - deactivate ucode status for a station
@@ -513,7 +510,6 @@ out_err:
        spin_unlock_irqrestore(&priv->sta_lock, flags);
        return -EINVAL;
 }
-EXPORT_SYMBOL_GPL(iwl_remove_station);
 
 /**
  * iwl_clear_ucode_stations - clear ucode station table bits
@@ -548,7 +544,6 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv,
        if (!cleared)
                IWL_DEBUG_INFO(priv, "No active stations found to be cleared\n");
 }
-EXPORT_SYMBOL(iwl_clear_ucode_stations);
 
 /**
  * iwl_restore_stations() - Restore driver known stations to device
@@ -625,7 +620,6 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        else
                IWL_DEBUG_INFO(priv, "Restoring all known stations .... complete.\n");
 }
-EXPORT_SYMBOL(iwl_restore_stations);
 
 void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 {
@@ -668,7 +662,6 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
                        priv->stations[sta_id].sta.sta.addr, ret);
        iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
 }
-EXPORT_SYMBOL(iwl_reprogram_ap_sta);
 
 int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
 {
@@ -680,7 +673,6 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
 
        return WEP_INVALID_OFFSET;
 }
-EXPORT_SYMBOL(iwl_get_free_ucode_key_index);
 
 void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
 {
@@ -700,7 +692,6 @@ void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
        }
        spin_unlock_irqrestore(&priv->sta_lock, flags);
 }
-EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_stations);
 
 #ifdef CONFIG_IWLWIFI_DEBUG
 static void iwl_dump_lq_cmd(struct iwl_priv *priv,
@@ -810,7 +801,6 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
        }
        return ret;
 }
-EXPORT_SYMBOL(iwl_send_lq_cmd);
 
 int iwl_mac_sta_remove(struct ieee80211_hw *hw,
                       struct ieee80211_vif *vif,
@@ -832,4 +822,3 @@ int iwl_mac_sta_remove(struct ieee80211_hw *hw,
        mutex_unlock(&priv->mutex);
        return ret;
 }
-EXPORT_SYMBOL(iwl_mac_sta_remove);
index 073b6ce6141c274880d8b6ea78bb1181cbc5f6a0..277c9175dcf66a2dfeb36855659d2d21b79cde5a 100644 (file)
@@ -84,7 +84,23 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
        }
        txq->need_update = 0;
 }
-EXPORT_SYMBOL(iwl_txq_update_write_ptr);
+
+/**
+ * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skbs
+ */
+void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
+{
+       struct iwl_tx_queue *txq = &priv->txq[txq_id];
+       struct iwl_queue *q = &txq->q;
+
+       if (q->n_bd == 0)
+               return;
+
+       while (q->write_ptr != q->read_ptr) {
+               priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+               q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
+       }
+}
 
 /**
  * iwl_tx_queue_free - Deallocate DMA queue.
@@ -97,17 +113,10 @@ EXPORT_SYMBOL(iwl_txq_update_write_ptr);
 void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 {
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
-       struct iwl_queue *q = &txq->q;
        struct device *dev = &priv->pci_dev->dev;
        int i;
 
-       if (q->n_bd == 0)
-               return;
-
-       /* first, empty all BD's */
-       for (; q->write_ptr != q->read_ptr;
-            q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
-               priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+       iwl_tx_queue_unmap(priv, txq_id);
 
        /* De-alloc array of command/tx buffers */
        for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
@@ -131,42 +140,35 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
 }
-EXPORT_SYMBOL(iwl_tx_queue_free);
 
 /**
- * iwl_cmd_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
+ * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
  */
-void iwl_cmd_queue_free(struct iwl_priv *priv)
+void iwl_cmd_queue_unmap(struct iwl_priv *priv)
 {
        struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
        struct iwl_queue *q = &txq->q;
-       struct device *dev = &priv->pci_dev->dev;
        int i;
        bool huge = false;
 
        if (q->n_bd == 0)
                return;
 
-       for (; q->read_ptr != q->write_ptr;
-            q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+       while (q->read_ptr != q->write_ptr) {
                /* we have no way to tell if it is a huge cmd ATM */
                i = get_cmd_index(q, q->read_ptr, 0);
 
-               if (txq->meta[i].flags & CMD_SIZE_HUGE) {
+               if (txq->meta[i].flags & CMD_SIZE_HUGE)
                        huge = true;
-                       continue;
-               }
+               else
+                       pci_unmap_single(priv->pci_dev,
+                                        dma_unmap_addr(&txq->meta[i], mapping),
+                                        dma_unmap_len(&txq->meta[i], len),
+                                        PCI_DMA_BIDIRECTIONAL);
 
-               pci_unmap_single(priv->pci_dev,
-                                dma_unmap_addr(&txq->meta[i], mapping),
-                                dma_unmap_len(&txq->meta[i], len),
-                                PCI_DMA_BIDIRECTIONAL);
+               q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
        }
+
        if (huge) {
                i = q->n_window;
                pci_unmap_single(priv->pci_dev,
@@ -174,6 +176,23 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
                                 dma_unmap_len(&txq->meta[i], len),
                                 PCI_DMA_BIDIRECTIONAL);
        }
+}
+
+/**
+ * iwl_cmd_queue_free - Deallocate DMA queue.
+ * @priv: driver private data (the command queue lives at priv->txq[priv->cmd_queue])
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void iwl_cmd_queue_free(struct iwl_priv *priv)
+{
+       struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+       struct device *dev = &priv->pci_dev->dev;
+       int i;
+
+       iwl_cmd_queue_unmap(priv);
 
        /* De-alloc array of command/tx buffers */
        for (i = 0; i <= TFD_CMD_SLOTS; i++)
@@ -193,7 +212,6 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
 }
-EXPORT_SYMBOL(iwl_cmd_queue_free);
 
 /*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
  * DMA services
@@ -233,7 +251,6 @@ int iwl_queue_space(const struct iwl_queue *q)
                s = 0;
        return s;
 }
-EXPORT_SYMBOL(iwl_queue_space);
 
 
 /**
@@ -384,7 +401,6 @@ out_free_arrays:
 
        return -ENOMEM;
 }
-EXPORT_SYMBOL(iwl_tx_queue_init);
 
 void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                        int slots_num, u32 txq_id)
@@ -404,7 +420,6 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
        /* Tell device where to find queue */
        priv->cfg->ops->lib->txq_init(priv, txq);
 }
-EXPORT_SYMBOL(iwl_tx_queue_reset);
 
 /*************** HOST COMMAND QUEUE FUNCTIONS   *****/
 
@@ -641,4 +656,3 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
        }
        meta->flags = 0;
 }
-EXPORT_SYMBOL(iwl_tx_cmd_complete);
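
The iwl-tx.c hunks above split queue teardown into an unmap step and a free step so the unmap can also run on its own (for example across a firmware restart) without releasing the queue's memory. A standalone sketch of the same split with stand-in types, not the driver's API:

#include <stdio.h>
#include <stdlib.h>

struct queue {
	int *entries;
	int read_ptr, write_ptr, n_bd;
};

static void queue_unmap(struct queue *q)
{
	if (q->n_bd == 0)
		return;
	/* Drain every in-flight entry; in the driver this is where the
	 * per-TFD DMA unmapping happens. */
	while (q->read_ptr != q->write_ptr) {
		printf("unmapping entry %d\n", q->read_ptr);
		q->read_ptr = (q->read_ptr + 1) % q->n_bd;
	}
}

static void queue_free(struct queue *q)
{
	queue_unmap(q);          /* first drop in-flight/DMA state ... */
	free(q->entries);        /* ... then release backing storage */
	q->entries = NULL;
	q->n_bd = 0;
}

int main(void)
{
	struct queue q = { .entries = calloc(8, sizeof(int)),
			   .read_ptr = 2, .write_ptr = 5, .n_bd = 8 };
	queue_free(&q);
	return 0;
}
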
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
deleted file mode 100644 (file)
index 371abbf..0000000
+++ /dev/null
@@ -1,4327 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci-aspm.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/wireless.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-
-#include <net/ieee80211_radiotap.h>
-#include <net/mac80211.h>
-
-#include <asm/div64.h>
-
-#define DRV_NAME       "iwl3945"
-
-#include "iwl-fh.h"
-#include "iwl-3945-fh.h"
-#include "iwl-commands.h"
-#include "iwl-sta.h"
-#include "iwl-3945.h"
-#include "iwl-core.h"
-#include "iwl-helpers.h"
-#include "iwl-dev.h"
-#include "iwl-spectrum.h"
-#include "iwl-legacy.h"
-
-/*
- * module name, copyright, version, etc.
- */
-
-#define DRV_DESCRIPTION        \
-"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-#define VD "d"
-#else
-#define VD
-#endif
-
-/*
- * add "s" to indicate spectrum measurement included.
- * we add it here to be consistent with previous releases in which
- * this was configurable.
- */
-#define DRV_VERSION  IWLWIFI_VERSION VD "s"
-#define DRV_COPYRIGHT  "Copyright(c) 2003-2010 Intel Corporation"
-#define DRV_AUTHOR     "<ilw@linux.intel.com>"
-
-MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_VERSION(DRV_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-
- /* module parameters */
-struct iwl_mod_params iwl3945_mod_params = {
-       .sw_crypto = 1,
-       .restart_fw = 1,
-       /* the rest are 0 by default */
-};
-
-/**
- * iwl3945_get_antenna_flags - Get antenna flags for RXON command
- * @priv: eeprom and antenna fields are used to determine antenna flags
- *
- * priv->eeprom39  is used to determine if antenna AUX/MAIN are reversed
- * iwl3945_mod_params.antenna specifies the antenna diversity mode:
- *
- * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
- * IWL_ANTENNA_MAIN      - Force MAIN antenna
- * IWL_ANTENNA_AUX       - Force AUX antenna
- */
-__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv)
-{
-       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-
-       switch (iwl3945_mod_params.antenna) {
-       case IWL_ANTENNA_DIVERSITY:
-               return 0;
-
-       case IWL_ANTENNA_MAIN:
-               if (eeprom->antenna_switch_type)
-                       return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
-               return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
-
-       case IWL_ANTENNA_AUX:
-               if (eeprom->antenna_switch_type)
-                       return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
-               return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
-       }
-
-       /* bad antenna selector value */
-       IWL_ERR(priv, "Bad antenna selector value (0x%x)\n",
-               iwl3945_mod_params.antenna);
-
-       return 0;               /* "diversity" is default if error */
-}
-
-static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
-                                  struct ieee80211_key_conf *keyconf,
-                                  u8 sta_id)
-{
-       unsigned long flags;
-       __le16 key_flags = 0;
-       int ret;
-
-       key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
-       key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
-
-       if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
-               key_flags |= STA_KEY_MULTICAST_MSK;
-
-       keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-       keyconf->hw_key_idx = keyconf->keyidx;
-       key_flags &= ~STA_KEY_FLG_INVALID;
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
-       priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
-       memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
-              keyconf->keylen);
-
-       memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
-              keyconf->keylen);
-
-       if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
-                       == STA_KEY_FLG_NO_ENC)
-               priv->stations[sta_id].sta.key.key_offset =
-                                iwl_get_free_ucode_key_index(priv);
-       /* else, we are overriding an existing key => no need to allocated room
-       * in uCode. */
-
-       WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
-               "no space for a new key");
-
-       priv->stations[sta_id].sta.key.key_flags = key_flags;
-       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
-       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
-       IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
-
-       ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
-
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-       return ret;
-}
-
-static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv,
-                                 struct ieee80211_key_conf *keyconf,
-                                 u8 sta_id)
-{
-       return -EOPNOTSUPP;
-}
-
-static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
-                                 struct ieee80211_key_conf *keyconf,
-                                 u8 sta_id)
-{
-       return -EOPNOTSUPP;
-}
-
-static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
-{
-       unsigned long flags;
-       struct iwl_addsta_cmd sta_cmd;
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
-       memset(&priv->stations[sta_id].sta.key, 0,
-               sizeof(struct iwl4965_keyinfo));
-       priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
-       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
-       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-       memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-       IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
-       return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
-                       struct ieee80211_key_conf *keyconf, u8 sta_id)
-{
-       int ret = 0;
-
-       keyconf->hw_key_idx = HW_KEY_DYNAMIC;
-
-       switch (keyconf->cipher) {
-       case WLAN_CIPHER_SUITE_CCMP:
-               ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
-               break;
-       case WLAN_CIPHER_SUITE_TKIP:
-               ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
-               break;
-       case WLAN_CIPHER_SUITE_WEP40:
-       case WLAN_CIPHER_SUITE_WEP104:
-               ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id);
-               break;
-       default:
-               IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__,
-                       keyconf->cipher);
-               ret = -EINVAL;
-       }
-
-       IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
-                     keyconf->cipher, keyconf->keylen, keyconf->keyidx,
-                     sta_id, ret);
-
-       return ret;
-}
-
-static int iwl3945_remove_static_key(struct iwl_priv *priv)
-{
-       int ret = -EOPNOTSUPP;
-
-       return ret;
-}
-
-static int iwl3945_set_static_key(struct iwl_priv *priv,
-                               struct ieee80211_key_conf *key)
-{
-       if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
-           key->cipher == WLAN_CIPHER_SUITE_WEP104)
-               return -EOPNOTSUPP;
-
-       IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher);
-       return -EINVAL;
-}
-
-static void iwl3945_clear_free_frames(struct iwl_priv *priv)
-{
-       struct list_head *element;
-
-       IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
-                      priv->frames_count);
-
-       while (!list_empty(&priv->free_frames)) {
-               element = priv->free_frames.next;
-               list_del(element);
-               kfree(list_entry(element, struct iwl3945_frame, list));
-               priv->frames_count--;
-       }
-
-       if (priv->frames_count) {
-               IWL_WARN(priv, "%d frames still in use.  Did we lose one?\n",
-                           priv->frames_count);
-               priv->frames_count = 0;
-       }
-}
-
-static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
-{
-       struct iwl3945_frame *frame;
-       struct list_head *element;
-       if (list_empty(&priv->free_frames)) {
-               frame = kzalloc(sizeof(*frame), GFP_KERNEL);
-               if (!frame) {
-                       IWL_ERR(priv, "Could not allocate frame!\n");
-                       return NULL;
-               }
-
-               priv->frames_count++;
-               return frame;
-       }
-
-       element = priv->free_frames.next;
-       list_del(element);
-       return list_entry(element, struct iwl3945_frame, list);
-}
-
-static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
-{
-       memset(frame, 0, sizeof(*frame));
-       list_add(&frame->list, &priv->free_frames);
-}
-
-unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
-                               struct ieee80211_hdr *hdr,
-                               int left)
-{
-
-       if (!iwl_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
-               return 0;
-
-       if (priv->beacon_skb->len > left)
-               return 0;
-
-       memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
-
-       return priv->beacon_skb->len;
-}
-
-static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
-{
-       struct iwl3945_frame *frame;
-       unsigned int frame_size;
-       int rc;
-       u8 rate;
-
-       frame = iwl3945_get_free_frame(priv);
-
-       if (!frame) {
-               IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
-                         "command.\n");
-               return -ENOMEM;
-       }
-
-       rate = iwl_rate_get_lowest_plcp(priv,
-                               &priv->contexts[IWL_RXON_CTX_BSS]);
-
-       frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
-
-       rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
-                             &frame->u.cmd[0]);
-
-       iwl3945_free_frame(priv, frame);
-
-       return rc;
-}
-
-static void iwl3945_unset_hw_params(struct iwl_priv *priv)
-{
-       if (priv->_3945.shared_virt)
-               dma_free_coherent(&priv->pci_dev->dev,
-                                 sizeof(struct iwl3945_shared),
-                                 priv->_3945.shared_virt,
-                                 priv->_3945.shared_phys);
-}
-
-static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
-                                     struct ieee80211_tx_info *info,
-                                     struct iwl_device_cmd *cmd,
-                                     struct sk_buff *skb_frag,
-                                     int sta_id)
-{
-       struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
-       struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
-
-       tx_cmd->sec_ctl = 0;
-
-       switch (keyinfo->cipher) {
-       case WLAN_CIPHER_SUITE_CCMP:
-               tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
-               memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
-               IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
-               break;
-
-       case WLAN_CIPHER_SUITE_TKIP:
-               break;
-
-       case WLAN_CIPHER_SUITE_WEP104:
-               tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
-               /* fall through */
-       case WLAN_CIPHER_SUITE_WEP40:
-               tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
-                   (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
-
-               memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
-
-               IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
-                            "with key %d\n", info->control.hw_key->hw_key_idx);
-               break;
-
-       default:
-               IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher);
-               break;
-       }
-}
-
-/*
- * handle build REPLY_TX command notification.
- */
-static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
-                                 struct iwl_device_cmd *cmd,
-                                 struct ieee80211_tx_info *info,
-                                 struct ieee80211_hdr *hdr, u8 std_id)
-{
-       struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
-       __le32 tx_flags = tx_cmd->tx_flags;
-       __le16 fc = hdr->frame_control;
-
-       tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-       if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
-               tx_flags |= TX_CMD_FLG_ACK_MSK;
-               if (ieee80211_is_mgmt(fc))
-                       tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
-               if (ieee80211_is_probe_resp(fc) &&
-                   !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
-                       tx_flags |= TX_CMD_FLG_TSF_MSK;
-       } else {
-               tx_flags &= (~TX_CMD_FLG_ACK_MSK);
-               tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
-       }
-
-       tx_cmd->sta_id = std_id;
-       if (ieee80211_has_morefrags(fc))
-               tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
-
-       if (ieee80211_is_data_qos(fc)) {
-               u8 *qc = ieee80211_get_qos_ctl(hdr);
-               tx_cmd->tid_tspec = qc[0] & 0xf;
-               tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
-       } else {
-               tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
-       }
-
-       priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags);
-
-       tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
-       if (ieee80211_is_mgmt(fc)) {
-               if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
-                       tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
-               else
-                       tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
-       } else {
-               tx_cmd->timeout.pm_frame_timeout = 0;
-       }
-
-       tx_cmd->driver_txop = 0;
-       tx_cmd->tx_flags = tx_flags;
-       tx_cmd->next_frame_len = 0;
-}
-
-/*
- * start REPLY_TX command process
- */
-static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
-{
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct iwl3945_tx_cmd *tx_cmd;
-       struct iwl_tx_queue *txq = NULL;
-       struct iwl_queue *q = NULL;
-       struct iwl_device_cmd *out_cmd;
-       struct iwl_cmd_meta *out_meta;
-       dma_addr_t phys_addr;
-       dma_addr_t txcmd_phys;
-       int txq_id = skb_get_queue_mapping(skb);
-       u16 len, idx, hdr_len;
-       u8 id;
-       u8 unicast;
-       u8 sta_id;
-       u8 tid = 0;
-       __le16 fc;
-       u8 wait_write_ptr = 0;
-       unsigned long flags;
-
-       spin_lock_irqsave(&priv->lock, flags);
-       if (iwl_is_rfkill(priv)) {
-               IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
-               goto drop_unlock;
-       }
-
-       if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
-               IWL_ERR(priv, "ERROR: No TX rate available.\n");
-               goto drop_unlock;
-       }
-
-       unicast = !is_multicast_ether_addr(hdr->addr1);
-       id = 0;
-
-       fc = hdr->frame_control;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-       if (ieee80211_is_auth(fc))
-               IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
-       else if (ieee80211_is_assoc_req(fc))
-               IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
-       else if (ieee80211_is_reassoc_req(fc))
-               IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
-#endif
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       hdr_len = ieee80211_hdrlen(fc);
-
-       /* Find index into station table for destination station */
-       sta_id = iwl_sta_id_or_broadcast(
-                       priv, &priv->contexts[IWL_RXON_CTX_BSS],
-                       info->control.sta);
-       if (sta_id == IWL_INVALID_STATION) {
-               IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
-                              hdr->addr1);
-               goto drop;
-       }
-
-       IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id);
-
-       if (ieee80211_is_data_qos(fc)) {
-               u8 *qc = ieee80211_get_qos_ctl(hdr);
-               tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
-               if (unlikely(tid >= MAX_TID_COUNT))
-                       goto drop;
-       }
-
-       /* Descriptor for chosen Tx queue */
-       txq = &priv->txq[txq_id];
-       q = &txq->q;
-
-       if ((iwl_queue_space(q) < q->high_mark))
-               goto drop;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       idx = get_cmd_index(q, q->write_ptr, 0);
-
-       /* Set up driver data for this TFD */
-       memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
-       txq->txb[q->write_ptr].skb = skb;
-       txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       /* Init first empty entry in queue's array of Tx/cmd buffers */
-       out_cmd = txq->cmd[idx];
-       out_meta = &txq->meta[idx];
-       tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
-       memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
-       memset(tx_cmd, 0, sizeof(*tx_cmd));
-
-       /*
-        * Set up the Tx-command (not MAC!) header.
-        * Store the chosen Tx queue and TFD index within the sequence field;
-        * after Tx, uCode's Tx response will return this value so driver can
-        * locate the frame within the tx queue and do post-tx processing.
-        */
-       out_cmd->hdr.cmd = REPLY_TX;
-       out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
-                               INDEX_TO_SEQ(q->write_ptr)));
-
-       /* Copy MAC header from skb into command buffer */
-       memcpy(tx_cmd->hdr, hdr, hdr_len);
-
-
-       if (info->control.hw_key)
-               iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
-
-       /* TODO need this for burst mode later on */
-       iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
-
-       /* set is_hcca to 0; it probably will never be implemented */
-       iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
-
-       /* Total # bytes to be transmitted */
-       len = (u16)skb->len;
-       tx_cmd->len = cpu_to_le16(len);
-
-       iwl_dbg_log_tx_data_frame(priv, len, hdr);
-       iwl_update_stats(priv, true, fc, len);
-       tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
-       tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
-
-       if (!ieee80211_has_morefrags(hdr->frame_control)) {
-               txq->need_update = 1;
-       } else {
-               wait_write_ptr = 1;
-               txq->need_update = 0;
-       }
-
-       IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
-                    le16_to_cpu(out_cmd->hdr.sequence));
-       IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
-       iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
-       iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
-                          ieee80211_hdrlen(fc));
-
-       /*
-        * Use the first empty entry in this queue's command buffer array
-        * to contain the Tx command and MAC header concatenated together
-        * (payload data will be in another buffer).
-        * Size of this varies, due to varying MAC header length.
-        * If end is not dword aligned, we'll have 2 extra bytes at the end
-        * of the MAC header (device reads on dword boundaries).
-        * We'll tell device about this padding later.
-        */
-       len = sizeof(struct iwl3945_tx_cmd) +
-                       sizeof(struct iwl_cmd_header) + hdr_len;
-       len = (len + 3) & ~3;
-
-       /* Physical address of this Tx command's header (not MAC header!),
-        * within command buffer array. */
-       txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
-                                   len, PCI_DMA_TODEVICE);
-       /* we do not map meta data ... so we can safely access the address to
-        * provide to the unmap command */
-       dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
-       dma_unmap_len_set(out_meta, len, len);
-
-       /* Add buffer containing Tx command and MAC(!) header to TFD's
-        * first entry */
-       priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-                                                  txcmd_phys, len, 1, 0);
-
-
-       /* Set up TFD's 2nd entry to point directly to remainder of skb,
-        * if any (802.11 null frames have no payload). */
-       len = skb->len - hdr_len;
-       if (len) {
-               phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
-                                          len, PCI_DMA_TODEVICE);
-               priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-                                                          phys_addr, len,
-                                                          0, U32_PAD(len));
-       }
-
-
-       /* Tell device the write index *just past* this latest filled TFD */
-       q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
-       iwl_txq_update_write_ptr(priv, txq);
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       if ((iwl_queue_space(q) < q->high_mark)
-           && priv->mac80211_registered) {
-               if (wait_write_ptr) {
-                       spin_lock_irqsave(&priv->lock, flags);
-                       txq->need_update = 1;
-                       iwl_txq_update_write_ptr(priv, txq);
-                       spin_unlock_irqrestore(&priv->lock, flags);
-               }
-
-               iwl_stop_queue(priv, txq);
-       }
-
-       return 0;
-
-drop_unlock:
-       spin_unlock_irqrestore(&priv->lock, flags);
-drop:
-       return -1;
-}
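
Two bits of arithmetic in the routine above are easy to check in isolation: packing the Tx queue id and write index into the 16-bit sequence field, and rounding the command-plus-MAC-header length up to a dword boundary with (len + 3) & ~3. The sketch below is standalone; the field widths and macro names are assumptions for illustration, not the driver's real QUEUE_TO_SEQ/INDEX_TO_SEQ layout.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: queue id in the high byte, TFD index in the low byte. */
#define SEQ_TO_QUEUE_SHIFT	8
#define SEQ_INDEX_MASK		0xff

static uint16_t pack_sequence(unsigned int txq_id, unsigned int write_ptr)
{
	return (uint16_t)((txq_id << SEQ_TO_QUEUE_SHIFT) |
			  (write_ptr & SEQ_INDEX_MASK));
}

/* Round a byte length up to the next multiple of 4 (dword boundary). */
static unsigned int dword_align(unsigned int len)
{
	return (len + 3) & ~3u;
}

int main(void)
{
	uint16_t seq = pack_sequence(2, 57);

	printf("sequence = 0x%04x (queue %u, index %u)\n",
	       seq, seq >> SEQ_TO_QUEUE_SHIFT, seq & SEQ_INDEX_MASK);
	printf("align 26 -> %u, 28 -> %u\n", dword_align(26), dword_align(28));
	return 0;
}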
-
-static int iwl3945_get_measurement(struct iwl_priv *priv,
-                              struct ieee80211_measurement_params *params,
-                              u8 type)
-{
-       struct iwl_spectrum_cmd spectrum;
-       struct iwl_rx_packet *pkt;
-       struct iwl_host_cmd cmd = {
-               .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
-               .data = (void *)&spectrum,
-               .flags = CMD_WANT_SKB,
-       };
-       u32 add_time = le64_to_cpu(params->start_time);
-       int rc;
-       int spectrum_resp_status;
-       int duration = le16_to_cpu(params->duration);
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       if (iwl_is_associated(priv, IWL_RXON_CTX_BSS))
-               add_time = iwl_usecs_to_beacons(priv,
-                       le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
-                       le16_to_cpu(ctx->timing.beacon_interval));
-
-       memset(&spectrum, 0, sizeof(spectrum));
-
-       spectrum.channel_count = cpu_to_le16(1);
-       spectrum.flags =
-           RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
-       spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
-       cmd.len = sizeof(spectrum);
-       spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
-
-       if (iwl_is_associated(priv, IWL_RXON_CTX_BSS))
-               spectrum.start_time =
-                       iwl_add_beacon_time(priv,
-                               priv->_3945.last_beacon_time, add_time,
-                               le16_to_cpu(ctx->timing.beacon_interval));
-       else
-               spectrum.start_time = 0;
-
-       spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
-       spectrum.channels[0].channel = params->channel;
-       spectrum.channels[0].type = type;
-       if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
-               spectrum.flags |= RXON_FLG_BAND_24G_MSK |
-                   RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
-
-       rc = iwl_send_cmd_sync(priv, &cmd);
-       if (rc)
-               return rc;
-
-       pkt = (struct iwl_rx_packet *)cmd.reply_page;
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(priv, "Bad return from REPLY_SPECTRUM_MEASUREMENT_CMD\n");
-               rc = -EIO;
-       }
-
-       spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
-       switch (spectrum_resp_status) {
-       case 0:         /* Command will be handled */
-               if (pkt->u.spectrum.id != 0xff) {
-                       IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
-                                               pkt->u.spectrum.id);
-                       priv->measurement_status &= ~MEASUREMENT_READY;
-               }
-               priv->measurement_status |= MEASUREMENT_ACTIVE;
-               rc = 0;
-               break;
-
-       case 1:         /* Command will not be handled */
-               rc = -EAGAIN;
-               break;
-       }
-
-       iwl_free_pages(priv, cmd.reply_page);
-
-       return rc;
-}
-
-static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
-                              struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_alive_resp *palive;
-       struct delayed_work *pwork;
-
-       palive = &pkt->u.alive_frame;
-
-       IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
-                      "0x%01X 0x%01X\n",
-                      palive->is_valid, palive->ver_type,
-                      palive->ver_subtype);
-
-       if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
-               IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
-               memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
-                      sizeof(struct iwl_alive_resp));
-               pwork = &priv->init_alive_start;
-       } else {
-               IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
-               memcpy(&priv->card_alive, &pkt->u.alive_frame,
-                      sizeof(struct iwl_alive_resp));
-               pwork = &priv->alive_start;
-               iwl3945_disable_events(priv);
-       }
-
-       /* We delay the ALIVE response by 5ms to
-        * give the HW RF Kill time to activate... */
-       if (palive->is_valid == UCODE_VALID_OK)
-               queue_delayed_work(priv->workqueue, pwork,
-                                  msecs_to_jiffies(5));
-       else
-               IWL_WARN(priv, "uCode did not respond OK.\n");
-}
-
-static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
-                                struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_DEBUG
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-#endif
-
-       IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
-}
-
-static void iwl3945_bg_beacon_update(struct work_struct *work)
-{
-       struct iwl_priv *priv =
-               container_of(work, struct iwl_priv, beacon_update);
-       struct sk_buff *beacon;
-
-       /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
-       beacon = ieee80211_beacon_get(priv->hw,
-                       priv->contexts[IWL_RXON_CTX_BSS].vif);
-
-       if (!beacon) {
-               IWL_ERR(priv, "update beacon failed\n");
-               return;
-       }
-
-       mutex_lock(&priv->mutex);
-       /* new beacon skb is allocated every time; dispose previous. */
-       if (priv->beacon_skb)
-               dev_kfree_skb(priv->beacon_skb);
-
-       priv->beacon_skb = beacon;
-       mutex_unlock(&priv->mutex);
-
-       iwl3945_send_beacon_cmd(priv);
-}
-
-static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
-#ifdef CONFIG_IWLWIFI_DEBUG
-       u8 rate = beacon->beacon_notify_hdr.rate;
-
-       IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
-               "tsf %d %d rate %d\n",
-               le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
-               beacon->beacon_notify_hdr.failure_frame,
-               le32_to_cpu(beacon->ibss_mgr_status),
-               le32_to_cpu(beacon->high_tsf),
-               le32_to_cpu(beacon->low_tsf), rate);
-#endif
-
-       priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-
-       if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
-           (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
-               queue_work(priv->workqueue, &priv->beacon_update);
-}
-
-/* Handle notification from uCode that card's power state is changing
- * due to software, hardware, or critical temperature RFKILL */
-static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
-                                   struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
-       unsigned long status = priv->status;
-
-       IWL_WARN(priv, "Card state received: HW:%s SW:%s\n",
-                         (flags & HW_CARD_DISABLED) ? "Kill" : "On",
-                         (flags & SW_CARD_DISABLED) ? "Kill" : "On");
-
-       iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
-                   CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
-       if (flags & HW_CARD_DISABLED)
-               set_bit(STATUS_RF_KILL_HW, &priv->status);
-       else
-               clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
-
-       iwl_scan_cancel(priv);
-
-       if ((test_bit(STATUS_RF_KILL_HW, &status) !=
-            test_bit(STATUS_RF_KILL_HW, &priv->status)))
-               wiphy_rfkill_set_hw_state(priv->hw->wiphy,
-                               test_bit(STATUS_RF_KILL_HW, &priv->status));
-       else
-               wake_up_interruptible(&priv->wait_command_queue);
-}
-
-/**
- * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks
- *
- * Setup the RX handlers for each of the reply types sent from the uCode
- * to the host.
- *
- * This function chains into the hardware specific files for them to setup
- * any hardware specific handlers as well.
- */
-static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
-{
-       priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
-       priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
-       priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
-       priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
-       priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
-                       iwl_rx_spectrum_measure_notif;
-       priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
-       priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
-           iwl_rx_pm_debug_statistics_notif;
-       priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
-
-       /*
-        * The same handler is used for both the REPLY to a discrete
-        * statistics request from the host as well as for the periodic
-        * statistics notifications (after received beacons) from the uCode.
-        */
-       priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
-       priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
-
-       iwl_setup_rx_scan_handlers(priv);
-       priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
-
-       /* Set up hardware specific Rx handlers */
-       iwl3945_hw_rx_handler_setup(priv);
-}
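
iwl3945_setup_rx_handlers() fills a function-pointer table indexed by the reply's command id, and iwl3945_rx_handle() later dispatches through it. A minimal standalone sketch of that dispatch pattern (the command ids and handler names here are invented for illustration):

#include <stdio.h>

#define REPLY_MAX 0x100

struct packet {
	unsigned char cmd;
	const char *payload;
};

typedef void (*rx_handler_t)(const struct packet *pkt);

static rx_handler_t rx_handlers[REPLY_MAX];

static void handle_alive(const struct packet *pkt)
{
	printf("alive: %s\n", pkt->payload);
}

static void dispatch(const struct packet *pkt)
{
	if (rx_handlers[pkt->cmd])
		rx_handlers[pkt->cmd](pkt);	/* table-driven handling */
	else
		printf("no handler for cmd 0x%02x\n", pkt->cmd);
}

int main(void)
{
	struct packet p1 = { .cmd = 0x01, .payload = "ucode is up" };
	struct packet p2 = { .cmd = 0x42, .payload = "ignored" };

	rx_handlers[0x01] = handle_alive;	/* e.g. an "alive" reply */
	dispatch(&p1);
	dispatch(&p2);
	return 0;
}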
-
-/************************** RX-FUNCTIONS ****************************/
-/*
- * Rx theory of operation
- *
- * The host allocates 32 DMA target addresses and passes the host address
- * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
- * 0 to 31
- *
- * Rx Queue Indexes
- * The host/firmware share two index registers for managing the Rx buffers.
- *
- * The READ index maps to the first position that the firmware may be writing
- * to -- the driver can read up to (but not including) this position and get
- * good data.
- * The READ index is managed by the firmware once the card is enabled.
- *
- * The WRITE index maps to the last position the driver has read from -- the
- * position preceding WRITE is the last slot the firmware can place a packet.
- *
- * The queue is empty (no good data) if WRITE = READ - 1, and is full if
- * WRITE = READ.
- *
- * During initialization, the host sets up the READ queue position to the first
- * INDEX position, and WRITE to the last (READ - 1 wrapped)
- *
- * When the firmware places a packet in a buffer, it will advance the READ index
- * and fire the RX interrupt.  The driver can then query the READ index and
- * process as many packets as possible, moving the WRITE index forward as it
- * resets the Rx queue buffers with new memory.
- *
- * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
- *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- *   to replenish the iwl->rxq->rx_free.
- * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the
- *   iwl->rxq is replenished and the READ INDEX is updated (updating the
- *   'processed' and 'read' driver indexes as well)
- * + A received packet is processed and handed to the kernel network stack,
- *   detached from the iwl->rxq.  The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
- *   were enough free buffers and RX_STALLED is set it is cleared.
- *
- *
- * Driver sequence:
- *
- * iwl3945_rx_replenish()     Replenishes rx_free list from rx_used, and calls
- *                            iwl3945_rx_queue_restock
- * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
- *                            queue, updates firmware pointers, and updates
- *                            the WRITE index.  If insufficient rx_free buffers
- *                            are available, schedules iwl3945_rx_replenish
- *
- * -- enable interrupts --
- * ISR - iwl3945_rx()         Detach iwl_rx_mem_buffers from pool up to the
- *                            READ INDEX, detaching the SKB from the pool.
- *                            Moves the packet buffer from queue to rx_used.
- *                            Calls iwl3945_rx_queue_restock to refill any empty
- *                            slots.
- * ...
- *
- */
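
The READ/WRITE relationship described above is ordinary power-of-two ring arithmetic. As a standalone illustration (a hypothetical 32-entry queue, not the driver's iwl_rx_queue), the number of filled buffers the driver may still process is one expression, and the "empty" and "full" cases from the comment fall out of it:

#include <stdio.h>

#define QUEUE_SIZE 32u			/* must be a power of two */
#define QUEUE_MASK (QUEUE_SIZE - 1)

/*
 * Filled, not-yet-processed buffers: the slots after WRITE and before READ.
 * One slot is always left unused so "full" and "empty" stay distinguishable.
 */
static unsigned int rx_pending(unsigned int read, unsigned int write)
{
	return (read - write - 1) & QUEUE_MASK;
}

int main(void)
{
	unsigned int read = 5, write;

	write = (read - 1) & QUEUE_MASK;	/* WRITE == READ - 1: empty */
	printf("empty:   pending = %u\n", rx_pending(read, write));

	write = read;				/* WRITE == READ: full */
	printf("full:    pending = %u\n", rx_pending(read, write));

	write = (read - 9) & QUEUE_MASK;	/* partly filled */
	printf("partial: pending = %u\n", rx_pending(read, write));
	return 0;
}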
-
-/**
- * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
-                                         dma_addr_t dma_addr)
-{
-       return cpu_to_le32((u32)dma_addr);
-}
-
-/**
- * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
-{
-       struct iwl_rx_queue *rxq = &priv->rxq;
-       struct list_head *element;
-       struct iwl_rx_mem_buffer *rxb;
-       unsigned long flags;
-       int write;
-
-       spin_lock_irqsave(&rxq->lock, flags);
-       write = rxq->write & ~0x7;
-       while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
-               /* Get next free Rx buffer, remove from free list */
-               element = rxq->rx_free.next;
-               rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
-               list_del(element);
-
-               /* Point to Rx buffer via next RBD in circular buffer */
-               rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
-               rxq->queue[rxq->write] = rxb;
-               rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
-               rxq->free_count--;
-       }
-       spin_unlock_irqrestore(&rxq->lock, flags);
-       /* If the pre-allocated buffer pool is dropping low, schedule to
-        * refill it */
-       if (rxq->free_count <= RX_LOW_WATERMARK)
-               queue_work(priv->workqueue, &priv->rx_replenish);
-
-
-       /* If we've added more space for the firmware to place data, tell it.
-        * Increment device's write pointer in multiples of 8. */
-       if ((rxq->write_actual != (rxq->write & ~0x7))
-           || (abs(rxq->write - rxq->read) > 7)) {
-               spin_lock_irqsave(&rxq->lock, flags);
-               rxq->need_update = 1;
-               spin_unlock_irqrestore(&rxq->lock, flags);
-               iwl_rx_queue_update_write_ptr(priv, rxq);
-       }
-}
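
The restock path above only tells the device about new buffers once the write index, rounded down to a multiple of 8, has moved (or once the indexes have drifted more than 7 apart). A small sketch of that rounding and update test, written as plain host code rather than driver code:

#include <stdio.h>
#include <stdlib.h>

/* Round an index down to the previous multiple of 8, as in "write & ~0x7". */
static unsigned int round_down8(unsigned int idx)
{
	return idx & ~0x7u;
}

/* Mirror of the update condition used in iwl3945_rx_queue_restock() above. */
static int need_device_update(unsigned int write_actual, unsigned int write,
			      unsigned int read)
{
	return (write_actual != round_down8(write)) ||
	       (abs((int)write - (int)read) > 7);
}

int main(void)
{
	printf("round_down8(13) = %u\n", round_down8(13));	/* 8 */
	printf("update? %d\n", need_device_update(8, 13, 10));	/* 0: too few */
	printf("update? %d\n", need_device_update(8, 17, 10));	/* 1: 8 crossed */
	return 0;
}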
-
-/**
- * iwl3945_rx_replenish - Move all used packets from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
- *
- * Also restock the Rx queue via iwl3945_rx_queue_restock.
- * This is called as a scheduled work item (except during initialization)
- */
-static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
-{
-       struct iwl_rx_queue *rxq = &priv->rxq;
-       struct list_head *element;
-       struct iwl_rx_mem_buffer *rxb;
-       struct page *page;
-       unsigned long flags;
-       gfp_t gfp_mask = priority;
-
-       while (1) {
-               spin_lock_irqsave(&rxq->lock, flags);
-
-               if (list_empty(&rxq->rx_used)) {
-                       spin_unlock_irqrestore(&rxq->lock, flags);
-                       return;
-               }
-               spin_unlock_irqrestore(&rxq->lock, flags);
-
-               if (rxq->free_count > RX_LOW_WATERMARK)
-                       gfp_mask |= __GFP_NOWARN;
-
-               if (priv->hw_params.rx_page_order > 0)
-                       gfp_mask |= __GFP_COMP;
-
-               /* Alloc a new receive buffer */
-               page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
-               if (!page) {
-                       if (net_ratelimit())
-                               IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
-                       if ((rxq->free_count <= RX_LOW_WATERMARK) &&
-                           net_ratelimit())
-                               IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
-                                        priority == GFP_ATOMIC ?  "GFP_ATOMIC" : "GFP_KERNEL",
-                                        rxq->free_count);
-                       /* We don't reschedule replenish work here -- we will
-                        * call the restock method and if it still needs
-                        * more buffers it will schedule replenish */
-                       break;
-               }
-
-               spin_lock_irqsave(&rxq->lock, flags);
-               if (list_empty(&rxq->rx_used)) {
-                       spin_unlock_irqrestore(&rxq->lock, flags);
-                       __free_pages(page, priv->hw_params.rx_page_order);
-                       return;
-               }
-               element = rxq->rx_used.next;
-               rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
-               list_del(element);
-               spin_unlock_irqrestore(&rxq->lock, flags);
-
-               rxb->page = page;
-               /* Get physical address of RB/SKB */
-               rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
-                               PAGE_SIZE << priv->hw_params.rx_page_order,
-                               PCI_DMA_FROMDEVICE);
-
-               spin_lock_irqsave(&rxq->lock, flags);
-
-               list_add_tail(&rxb->list, &rxq->rx_free);
-               rxq->free_count++;
-               priv->alloc_rxb_page++;
-
-               spin_unlock_irqrestore(&rxq->lock, flags);
-       }
-}
-
-void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
-       unsigned long flags;
-       int i;
-       spin_lock_irqsave(&rxq->lock, flags);
-       INIT_LIST_HEAD(&rxq->rx_free);
-       INIT_LIST_HEAD(&rxq->rx_used);
-       /* Fill the rx_used queue with _all_ of the Rx buffers */
-       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
-               /* In the reset function, these buffers may have been allocated
-                * to an SKB, so we need to unmap and free potential storage */
-               if (rxq->pool[i].page != NULL) {
-                       pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
-                               PAGE_SIZE << priv->hw_params.rx_page_order,
-                               PCI_DMA_FROMDEVICE);
-                       __iwl_free_pages(priv, rxq->pool[i].page);
-                       rxq->pool[i].page = NULL;
-               }
-               list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
-       }
-
-       /* Set us so that we have processed and used all buffers, but have
-        * not restocked the Rx queue with fresh buffers */
-       rxq->read = rxq->write = 0;
-       rxq->write_actual = 0;
-       rxq->free_count = 0;
-       spin_unlock_irqrestore(&rxq->lock, flags);
-}
-
-void iwl3945_rx_replenish(void *data)
-{
-       struct iwl_priv *priv = data;
-       unsigned long flags;
-
-       iwl3945_rx_allocate(priv, GFP_KERNEL);
-
-       spin_lock_irqsave(&priv->lock, flags);
-       iwl3945_rx_queue_restock(priv);
-       spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static void iwl3945_rx_replenish_now(struct iwl_priv *priv)
-{
-       iwl3945_rx_allocate(priv, GFP_ATOMIC);
-
-       iwl3945_rx_queue_restock(priv);
-}
-
-
-/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
- * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
- * This free routine walks the list of POOL entries and, if the SKB is set to
- * non-NULL, it is unmapped and freed.
- */
-static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
-       int i;
-       for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
-               if (rxq->pool[i].page != NULL) {
-                       pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
-                               PAGE_SIZE << priv->hw_params.rx_page_order,
-                               PCI_DMA_FROMDEVICE);
-                       __iwl_free_pages(priv, rxq->pool[i].page);
-                       rxq->pool[i].page = NULL;
-               }
-       }
-
-       dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
-                         rxq->bd_dma);
-       dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
-                         rxq->rb_stts, rxq->rb_stts_dma);
-       rxq->bd = NULL;
-       rxq->rb_stts  = NULL;
-}
-
-
-/* Convert linear signal-to-noise ratio into dB */
-static u8 ratio2dB[100] = {
-/*      0   1   2   3   4   5   6   7   8   9 */
-        0,  0,  6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
-       20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
-       26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
-       29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
-       32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
-       34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
-       36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
-       37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
-       38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
-       39, 39, 39, 39, 39, 40, 40, 40, 40, 40  /* 90 - 99 */
-};
-
-/* Calculates a relative dB value from a ratio of linear
- *   (i.e. not dB) signal levels.
- * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
-int iwl3945_calc_db_from_ratio(int sig_ratio)
-{
-       /* 1000:1 or higher just report as 60 dB */
-       if (sig_ratio >= 1000)
-               return 60;
-
-       /* 100:1 or higher, divide by 10 and use table,
-        *   add 20 dB to make up for divide by 10 */
-       if (sig_ratio >= 100)
-               return 20 + (int)ratio2dB[sig_ratio/10];
-
-       /* We shouldn't see this */
-       if (sig_ratio < 1)
-               return 0;
-
-       /* Use table for ratios 1:1 - 99:1 */
-       return (int)ratio2dB[sig_ratio];
-}
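
Since the comment states the conversion assumes voltage ratios (20*log), the ratio2dB table can be sanity-checked against the math library. A standalone check, not part of the driver, comparing a few of the table values quoted above with the exact result:

#include <math.h>
#include <stdio.h>

/* Voltage ratio to dB: dB = 20 * log10(ratio). */
static double ratio_to_db(double sig_ratio)
{
	return 20.0 * log10(sig_ratio);
}

int main(void)
{
	/* Sample ratios and the rounded values from the ratio2dB table. */
	const int ratios[] = { 2, 10, 50, 99, 1000 };
	const int table[]  = { 6, 20, 34, 40, 60 };
	unsigned int i;

	for (i = 0; i < sizeof(ratios) / sizeof(ratios[0]); i++)
		printf("ratio %4d: exact %5.1f dB, table %2d dB\n",
		       ratios[i], ratio_to_db(ratios[i]), table[i]);
	return 0;	/* link with -lm */
}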
-
-/**
- * iwl3945_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
- */
-static void iwl3945_rx_handle(struct iwl_priv *priv)
-{
-       struct iwl_rx_mem_buffer *rxb;
-       struct iwl_rx_packet *pkt;
-       struct iwl_rx_queue *rxq = &priv->rxq;
-       u32 r, i;
-       int reclaim;
-       unsigned long flags;
-       u8 fill_rx = 0;
-       u32 count = 8;
-       int total_empty = 0;
-
-       /* uCode's read index (stored in shared DRAM) indicates the last Rx
-        * buffer that the driver may process (last buffer filled by ucode). */
-       r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
-       i = rxq->read;
-
-       /* calculate total frames that need to be restocked after handling RX */
-       total_empty = r - rxq->write_actual;
-       if (total_empty < 0)
-               total_empty += RX_QUEUE_SIZE;
-
-       if (total_empty > (RX_QUEUE_SIZE / 2))
-               fill_rx = 1;
-       /* Rx interrupt, but nothing sent from uCode */
-       if (i == r)
-               IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
-
-       while (i != r) {
-               int len;
-
-               rxb = rxq->queue[i];
-
-               /* If an RXB doesn't have a Rx queue slot associated with it,
-                * then a bug has been introduced in the queue refilling
-                * routines -- catch it here */
-               BUG_ON(rxb == NULL);
-
-               rxq->queue[i] = NULL;
-
-               pci_unmap_page(priv->pci_dev, rxb->page_dma,
-                              PAGE_SIZE << priv->hw_params.rx_page_order,
-                              PCI_DMA_FROMDEVICE);
-               pkt = rxb_addr(rxb);
-
-               len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-               len += sizeof(u32); /* account for status word */
-               trace_iwlwifi_dev_rx(priv, pkt, len);
-
-               /* Reclaim a command buffer only if this packet is a response
-                *   to a (driver-originated) command.
-                * If the packet (e.g. Rx frame) originated from uCode,
-                *   there is no command buffer to reclaim.
-                * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
-                *   but apparently a few don't get set; catch them here. */
-               reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
-                       (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
-                       (pkt->hdr.cmd != REPLY_TX);
-
-               /* Based on type of command response or notification,
-                *   handle those that need handling via function in
-                *   rx_handlers table.  See iwl3945_setup_rx_handlers() */
-               if (priv->rx_handlers[pkt->hdr.cmd]) {
-                       IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
-                               get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
-                       priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
-                       priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
-               } else {
-                       /* No handling needed */
-                       IWL_DEBUG_RX(priv,
-                               "r %d i %d No handler needed for %s, 0x%02x\n",
-                               r, i, get_cmd_string(pkt->hdr.cmd),
-                               pkt->hdr.cmd);
-               }
-
-               /*
-                * XXX: After here, we should always check rxb->page
-                * against NULL before touching it or its virtual
-                * memory (pkt). Because some rx_handler might have
-                * already taken or freed the pages.
-                */
-
-               if (reclaim) {
-                       /* Invoke any callbacks, transfer the buffer to caller,
-                        * and fire off the (possibly) blocking iwl_send_cmd()
-                        * as we reclaim the driver command queue */
-                       if (rxb->page)
-                               iwl_tx_cmd_complete(priv, rxb);
-                       else
-                               IWL_WARN(priv, "Claim null rxb?\n");
-               }
-
-               /* Reuse the page if possible. For notification packets and
-                * SKBs that fail to Rx correctly, add them back into the
-                * rx_free list for reuse later. */
-               spin_lock_irqsave(&rxq->lock, flags);
-               if (rxb->page != NULL) {
-                       rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
-                               0, PAGE_SIZE << priv->hw_params.rx_page_order,
-                               PCI_DMA_FROMDEVICE);
-                       list_add_tail(&rxb->list, &rxq->rx_free);
-                       rxq->free_count++;
-               } else
-                       list_add_tail(&rxb->list, &rxq->rx_used);
-
-               spin_unlock_irqrestore(&rxq->lock, flags);
-
-               i = (i + 1) & RX_QUEUE_MASK;
-               /* If there are a lot of unused frames,
-                * restock the Rx queue so ucode won't assert. */
-               if (fill_rx) {
-                       count++;
-                       if (count >= 8) {
-                               rxq->read = i;
-                               iwl3945_rx_replenish_now(priv);
-                               count = 0;
-                       }
-               }
-       }
-
-       /* Backtrack one entry */
-       rxq->read = i;
-       if (fill_rx)
-               iwl3945_rx_replenish_now(priv);
-       else
-               iwl3945_rx_queue_restock(priv);
-}
-
-/* call this function to flush any scheduled tasklet */
-static inline void iwl_synchronize_irq(struct iwl_priv *priv)
-{
-       /* wait to make sure we flush pending tasklet */
-       synchronize_irq(priv->pci_dev->irq);
-       tasklet_kill(&priv->irq_tasklet);
-}
-
-static const char *desc_lookup(int i)
-{
-       switch (i) {
-       case 1:
-               return "FAIL";
-       case 2:
-               return "BAD_PARAM";
-       case 3:
-               return "BAD_CHECKSUM";
-       case 4:
-               return "NMI_INTERRUPT";
-       case 5:
-               return "SYSASSERT";
-       case 6:
-               return "FATAL_ERROR";
-       }
-
-       return "UNKNOWN";
-}
-
-#define ERROR_START_OFFSET  (1 * sizeof(u32))
-#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
-
-void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
-{
-       u32 i;
-       u32 desc, time, count, base, data1;
-       u32 blink1, blink2, ilink1, ilink2;
-
-       base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
-
-       if (!iwl3945_hw_valid_rtc_data_addr(base)) {
-               IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
-               return;
-       }
-
-
-       count = iwl_read_targ_mem(priv, base);
-
-       if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
-               IWL_ERR(priv, "Start IWL Error Log Dump:\n");
-               IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
-                       priv->status, count);
-       }
-
-       IWL_ERR(priv, "Desc       Time       asrtPC  blink2 "
-                 "ilink1  nmiPC   Line\n");
-       for (i = ERROR_START_OFFSET;
-            i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
-            i += ERROR_ELEM_SIZE) {
-               desc = iwl_read_targ_mem(priv, base + i);
-               time =
-                   iwl_read_targ_mem(priv, base + i + 1 * sizeof(u32));
-               blink1 =
-                   iwl_read_targ_mem(priv, base + i + 2 * sizeof(u32));
-               blink2 =
-                   iwl_read_targ_mem(priv, base + i + 3 * sizeof(u32));
-               ilink1 =
-                   iwl_read_targ_mem(priv, base + i + 4 * sizeof(u32));
-               ilink2 =
-                   iwl_read_targ_mem(priv, base + i + 5 * sizeof(u32));
-               data1 =
-                   iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32));
-
-               IWL_ERR(priv,
-                       "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
-                       desc_lookup(desc), desc, time, blink1, blink2,
-                       ilink1, ilink2, data1);
-               trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0,
-                                       0, blink1, blink2, ilink1, ilink2);
-       }
-}
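
The dump loop above walks a log laid out as one count word followed by count entries of seven u32s each, which is exactly what ERROR_START_OFFSET and ERROR_ELEM_SIZE encode. A sketch of that layout over an in-memory array (fabricated sample values) instead of device SRAM reads:

#include <stdint.h>
#include <stdio.h>

#define ERR_START_OFFSET  (1 * sizeof(uint32_t))	/* skip the count word */
#define ERR_ELEM_SIZE     (7 * sizeof(uint32_t))	/* u32s per entry */

/* Fake error-log image: count = 2, followed by two 7-word entries. */
static const uint32_t log_image[] = {
	2,
	/* desc, time, blink1, blink2, ilink1, ilink2, data1 */
	5, 1000, 0x11, 0x22, 0x33, 0x44, 7,
	2, 2000, 0x55, 0x66, 0x77, 0x88, 9,
};

int main(void)
{
	uint32_t count = log_image[0];
	size_t i;

	for (i = ERR_START_OFFSET;
	     i < count * ERR_ELEM_SIZE + ERR_START_OFFSET;
	     i += ERR_ELEM_SIZE) {
		const uint32_t *e = &log_image[i / sizeof(uint32_t)];

		printf("desc %u time %u data1 %u\n", e[0], e[1], e[6]);
	}
	return 0;
}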
-
-#define EVENT_START_OFFSET  (6 * sizeof(u32))
-
-/**
- * iwl3945_print_event_log - Dump error event log to syslog
- *
- */
-static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
-                                 u32 num_events, u32 mode,
-                                 int pos, char **buf, size_t bufsz)
-{
-       u32 i;
-       u32 base;       /* SRAM byte address of event log header */
-       u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
-       u32 ptr;        /* SRAM byte address of log data */
-       u32 ev, time, data; /* event log data */
-       unsigned long reg_flags;
-
-       if (num_events == 0)
-               return pos;
-
-       base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
-
-       if (mode == 0)
-               event_size = 2 * sizeof(u32);
-       else
-               event_size = 3 * sizeof(u32);
-
-       ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
-
-       /* Make sure device is powered up for SRAM reads */
-       spin_lock_irqsave(&priv->reg_lock, reg_flags);
-       iwl_grab_nic_access(priv);
-
-       /* Set starting address; reads will auto-increment */
-       _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
-       rmb();
-
-       /* "time" is actually "data" for mode 0 (no timestamp).
-        * place event id # at far right for easier visual parsing. */
-       for (i = 0; i < num_events; i++) {
-               ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-               time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-               if (mode == 0) {
-                       /* data, ev */
-                       if (bufsz) {
-                               pos += scnprintf(*buf + pos, bufsz - pos,
-                                               "0x%08x:%04u\n",
-                                               time, ev);
-                       } else {
-                               IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
-                               trace_iwlwifi_dev_ucode_event(priv, 0,
-                                                             time, ev);
-                       }
-               } else {
-                       data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-                       if (bufsz) {
-                               pos += scnprintf(*buf + pos, bufsz - pos,
-                                               "%010u:0x%08x:%04u\n",
-                                                time, data, ev);
-                       } else {
-                               IWL_ERR(priv, "%010u\t0x%08x\t%04u\n",
-                                       time, data, ev);
-                               trace_iwlwifi_dev_ucode_event(priv, time,
-                                                             data, ev);
-                       }
-               }
-       }
-
-       /* Allow device to power down */
-       iwl_release_nic_access(priv);
-       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-       return pos;
-}
-
-/**
- * iwl3945_print_last_event_logs - Dump the newest entries of the event log to syslog
- */
-static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
-                                     u32 num_wraps, u32 next_entry,
-                                     u32 size, u32 mode,
-                                     int pos, char **buf, size_t bufsz)
-{
-       /*
-        * display the newest DEFAULT_LOG_ENTRIES entries
-        * i.e. the entries just before the next one that uCode would fill.
-        */
-       if (num_wraps) {
-               if (next_entry < size) {
-                       pos = iwl3945_print_event_log(priv,
-                                            capacity - (size - next_entry),
-                                            size - next_entry, mode,
-                                            pos, buf, bufsz);
-                       pos = iwl3945_print_event_log(priv, 0,
-                                                     next_entry, mode,
-                                                     pos, buf, bufsz);
-               } else
-                       pos = iwl3945_print_event_log(priv, next_entry - size,
-                                                     size, mode,
-                                                     pos, buf, bufsz);
-       } else {
-               if (next_entry < size)
-                       pos = iwl3945_print_event_log(priv, 0,
-                                                     next_entry, mode,
-                                                     pos, buf, bufsz);
-               else
-                       pos = iwl3945_print_event_log(priv, next_entry - size,
-                                                     size, mode,
-                                                     pos, buf, bufsz);
-       }
-       return pos;
-}
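
The wrap handling above reduces to picking which index ranges hold the newest 'size' entries of a circular log. The following standalone sketch mirrors the same case split and just prints the (start, count) segments that would be dumped:

#include <stdio.h>

/*
 * Given a circular log with 'capacity' slots, a write index 'next_entry'
 * and a wrap flag, print which index ranges hold the newest 'size' entries.
 */
static void newest_entries(unsigned int capacity, unsigned int num_wraps,
			   unsigned int next_entry, unsigned int size)
{
	if (num_wraps && next_entry < size) {
		/* newest entries straddle the wrap point */
		printf("segment 1: start %u, count %u\n",
		       capacity - (size - next_entry), size - next_entry);
		printf("segment 2: start 0, count %u\n", next_entry);
	} else if (next_entry < size) {
		/* never wrapped and fewer than 'size' entries logged */
		printf("segment 1: start 0, count %u\n", next_entry);
	} else {
		/* newest 'size' entries sit just before the write index */
		printf("segment 1: start %u, count %u\n",
		       next_entry - size, size);
	}
}

int main(void)
{
	newest_entries(128, 1, 5, 20);	/* wrapped: tail of log + head */
	newest_entries(128, 0, 5, 20);	/* not wrapped, short log */
	newest_entries(128, 1, 60, 20);	/* wrapped, no straddle needed */
	return 0;
}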
-
-#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
-
-int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
-                           char **buf, bool display)
-{
-       u32 base;       /* SRAM byte address of event log header */
-       u32 capacity;   /* event log capacity in # entries */
-       u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
-       u32 num_wraps;  /* # times uCode wrapped to top of log */
-       u32 next_entry; /* index of next entry to be written by uCode */
-       u32 size;       /* # entries that we'll print */
-       int pos = 0;
-       size_t bufsz = 0;
-
-       base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
-       if (!iwl3945_hw_valid_rtc_data_addr(base)) {
-               IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
-               return  -EINVAL;
-       }
-
-       /* event log header */
-       capacity = iwl_read_targ_mem(priv, base);
-       mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
-       num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
-       next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
-
-       if (capacity > priv->cfg->base_params->max_event_log_size) {
-               IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
-                       capacity, priv->cfg->base_params->max_event_log_size);
-               capacity = priv->cfg->base_params->max_event_log_size;
-       }
-
-       if (next_entry > priv->cfg->base_params->max_event_log_size) {
-               IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
-                       next_entry, priv->cfg->base_params->max_event_log_size);
-               next_entry = priv->cfg->base_params->max_event_log_size;
-       }
-
-       size = num_wraps ? capacity : next_entry;
-
-       /* bail out if nothing in log */
-       if (size == 0) {
-               IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
-               return pos;
-       }
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-       if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
-               size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
-                       ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
-#else
-       size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
-               ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
-#endif
-
-       IWL_ERR(priv, "Start IWL Event Log Dump: display last %d count\n",
-                 size);
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-       if (display) {
-               if (full_log)
-                       bufsz = capacity * 48;
-               else
-                       bufsz = size * 48;
-               *buf = kmalloc(bufsz, GFP_KERNEL);
-               if (!*buf)
-                       return -ENOMEM;
-       }
-       if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
-               /* if uCode has wrapped back to top of log,
-                * start at the oldest entry,
-                * i.e. the next one that uCode would fill.
-                */
-               if (num_wraps)
-                       pos = iwl3945_print_event_log(priv, next_entry,
-                                               capacity - next_entry, mode,
-                                               pos, buf, bufsz);
-
-               /* (then/else) start at top of log */
-               pos = iwl3945_print_event_log(priv, 0, next_entry, mode,
-                                             pos, buf, bufsz);
-       } else
-               pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
-                                                   next_entry, size, mode,
-                                                   pos, buf, bufsz);
-#else
-       pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
-                                           next_entry, size, mode,
-                                           pos, buf, bufsz);
-#endif
-       return pos;
-}
-
-static void iwl3945_irq_tasklet(struct iwl_priv *priv)
-{
-       u32 inta, handled = 0;
-       u32 inta_fh;
-       unsigned long flags;
-#ifdef CONFIG_IWLWIFI_DEBUG
-       u32 inta_mask;
-#endif
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* Ack/clear/reset pending uCode interrupts.
-        * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
-        *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
-       inta = iwl_read32(priv, CSR_INT);
-       iwl_write32(priv, CSR_INT, inta);
-
-       /* Ack/clear/reset pending flow-handler (DMA) interrupts.
-        * Any new interrupts that happen after this, either while we're
-        * in this tasklet, or later, will show up in next ISR/tasklet. */
-       inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
-       iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-       if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
-               /* just for debug */
-               inta_mask = iwl_read32(priv, CSR_INT_MASK);
-               IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
-                             inta, inta_mask, inta_fh);
-       }
-#endif
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
-        * atomic, make sure that inta covers all the interrupts that
-        * we've discovered, even if FH interrupt came in just after
-        * reading CSR_INT. */
-       if (inta_fh & CSR39_FH_INT_RX_MASK)
-               inta |= CSR_INT_BIT_FH_RX;
-       if (inta_fh & CSR39_FH_INT_TX_MASK)
-               inta |= CSR_INT_BIT_FH_TX;
-
-       /* Now service all interrupt bits discovered above. */
-       if (inta & CSR_INT_BIT_HW_ERR) {
-               IWL_ERR(priv, "Hardware error detected.  Restarting.\n");
-
-               /* Tell the device to stop sending interrupts */
-               iwl_disable_interrupts(priv);
-
-               priv->isr_stats.hw++;
-               iwl_irq_handle_error(priv);
-
-               handled |= CSR_INT_BIT_HW_ERR;
-
-               return;
-       }
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-       if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
-               /* NIC fires this, but we don't use it, redundant with WAKEUP */
-               if (inta & CSR_INT_BIT_SCD) {
-                       IWL_DEBUG_ISR(priv, "Scheduler finished transmitting "
-                                     "the frame(s).\n");
-                       priv->isr_stats.sch++;
-               }
-
-               /* Alive notification via Rx interrupt will do the real work */
-               if (inta & CSR_INT_BIT_ALIVE) {
-                       IWL_DEBUG_ISR(priv, "Alive interrupt\n");
-                       priv->isr_stats.alive++;
-               }
-       }
-#endif
-       /* Safely ignore these bits for debug checks below */
-       inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
-
-       /* Error detected by uCode */
-       if (inta & CSR_INT_BIT_SW_ERR) {
-               IWL_ERR(priv, "Microcode SW error detected. "
-                       "Restarting 0x%X.\n", inta);
-               priv->isr_stats.sw++;
-               iwl_irq_handle_error(priv);
-               handled |= CSR_INT_BIT_SW_ERR;
-       }
-
-       /* uCode wakes up after power-down sleep */
-       if (inta & CSR_INT_BIT_WAKEUP) {
-               IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
-               iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
-               iwl_txq_update_write_ptr(priv, &priv->txq[0]);
-               iwl_txq_update_write_ptr(priv, &priv->txq[1]);
-               iwl_txq_update_write_ptr(priv, &priv->txq[2]);
-               iwl_txq_update_write_ptr(priv, &priv->txq[3]);
-               iwl_txq_update_write_ptr(priv, &priv->txq[4]);
-               iwl_txq_update_write_ptr(priv, &priv->txq[5]);
-
-               priv->isr_stats.wakeup++;
-               handled |= CSR_INT_BIT_WAKEUP;
-       }
-
-       /* All uCode command responses, including Tx command responses,
-        * Rx "responses" (frame-received notification), and other
-        * notifications from uCode come through here*/
-       if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
-               iwl3945_rx_handle(priv);
-               priv->isr_stats.rx++;
-               handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
-       }
-
-       if (inta & CSR_INT_BIT_FH_TX) {
-               IWL_DEBUG_ISR(priv, "Tx interrupt\n");
-               priv->isr_stats.tx++;
-
-               iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
-               iwl_write_direct32(priv, FH39_TCSR_CREDIT
-                                       (FH39_SRVC_CHNL), 0x0);
-               handled |= CSR_INT_BIT_FH_TX;
-       }
-
-       if (inta & ~handled) {
-               IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
-               priv->isr_stats.unhandled++;
-       }
-
-       if (inta & ~priv->inta_mask) {
-               IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
-                        inta & ~priv->inta_mask);
-               IWL_WARN(priv, "   with FH_INT = 0x%08x\n", inta_fh);
-       }
-
-       /* Re-enable all interrupts, but only if they were disabled by irq */
-       if (test_bit(STATUS_INT_ENABLED, &priv->status))
-               iwl_enable_interrupts(priv);
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-       if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
-               inta = iwl_read32(priv, CSR_INT);
-               inta_mask = iwl_read32(priv, CSR_INT_MASK);
-               inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
-               IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
-                       "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
-       }
-#endif
-}
-
-static int iwl3945_get_single_channel_for_scan(struct iwl_priv *priv,
-                                              struct ieee80211_vif *vif,
-                                              enum ieee80211_band band,
-                                              struct iwl3945_scan_channel *scan_ch)
-{
-       const struct ieee80211_supported_band *sband;
-       u16 passive_dwell = 0;
-       u16 active_dwell = 0;
-       int added = 0;
-       u8 channel = 0;
-
-       sband = iwl_get_hw_mode(priv, band);
-       if (!sband) {
-               IWL_ERR(priv, "invalid band\n");
-               return added;
-       }
-
-       active_dwell = iwl_get_active_dwell_time(priv, band, 0);
-       passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
-
-       if (passive_dwell <= active_dwell)
-               passive_dwell = active_dwell + 1;
-
-
-       channel = iwl_get_single_channel_number(priv, band);
-
-       if (channel) {
-               scan_ch->channel = channel;
-               scan_ch->type = 0;      /* passive */
-               scan_ch->active_dwell = cpu_to_le16(active_dwell);
-               scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
-               /* Set txpower levels to defaults */
-               scan_ch->tpc.dsp_atten = 110;
-               if (band == IEEE80211_BAND_5GHZ)
-                       scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
-               else
-                       scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
-               added++;
-       } else
-               IWL_ERR(priv, "no valid channel found\n");
-       return added;
-}
-
-static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
-                                        enum ieee80211_band band,
-                                    u8 is_active, u8 n_probes,
-                                    struct iwl3945_scan_channel *scan_ch,
-                                    struct ieee80211_vif *vif)
-{
-       struct ieee80211_channel *chan;
-       const struct ieee80211_supported_band *sband;
-       const struct iwl_channel_info *ch_info;
-       u16 passive_dwell = 0;
-       u16 active_dwell = 0;
-       int added, i;
-
-       sband = iwl_get_hw_mode(priv, band);
-       if (!sband)
-               return 0;
-
-       active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
-       passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
-
-       if (passive_dwell <= active_dwell)
-               passive_dwell = active_dwell + 1;
-
-       for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
-               chan = priv->scan_request->channels[i];
-
-               if (chan->band != band)
-                       continue;
-
-               scan_ch->channel = chan->hw_value;
-
-               ch_info = iwl_get_channel_info(priv, band, scan_ch->channel);
-               if (!is_channel_valid(ch_info)) {
-                       IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
-                                      scan_ch->channel);
-                       continue;
-               }
-
-               scan_ch->active_dwell = cpu_to_le16(active_dwell);
-               scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
-               /* If passive, set up for auto-switch
-                * and use long active_dwell time.
-                */
-               if (!is_active || is_channel_passive(ch_info) ||
-                   (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
-                       scan_ch->type = 0;      /* passive */
-                       if (IWL_UCODE_API(priv->ucode_ver) == 1)
-                               scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
-               } else {
-                       scan_ch->type = 1;      /* active */
-               }
-
-               /* Set direct probe bits. These may be used either for active
-                * scan channels (probes get sent right away),
-                * or for passive channels (probes get sent only after
-                * hearing a clear Rx packet). */
-               if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
-                       if (n_probes)
-                               scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
-               } else {
-                       /* uCode v1 does not allow setting direct probe bits on
-                        * passive channel. */
-                       if ((scan_ch->type & 1) && n_probes)
-                               scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
-               }
-
-               /* Set txpower levels to defaults */
-               scan_ch->tpc.dsp_atten = 110;
-               /* scan_pwr_info->tpc.dsp_atten; */
-
-               /*scan_pwr_info->tpc.tx_gain; */
-               if (band == IEEE80211_BAND_5GHZ)
-                       scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
-               else {
-                       scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
-                       /* NOTE: if we were doing 6Mb OFDM for scans we'd use
-                        * power level:
-                        * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
-                        */
-               }
-
-               IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n",
-                              scan_ch->channel,
-                              (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
-                              (scan_ch->type & 1) ?
-                              active_dwell : passive_dwell);
-
-               scan_ch++;
-               added++;
-       }
-
-       IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
-       return added;
-}
-
-static void iwl3945_init_hw_rates(struct iwl_priv *priv,
-                             struct ieee80211_rate *rates)
-{
-       int i;
-
-       for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
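-               /* Note: mac80211 expects bitrate in units of 100 kbps, while
-                * the rate table's .ieee field uses the IEEE 500 kbps
-                * encoding, hence the factor of 5 below. */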
-               rates[i].bitrate = iwl3945_rates[i].ieee * 5;
-               rates[i].hw_value = i; /* Rate scaling will work on indexes */
-               rates[i].hw_value_short = i;
-               rates[i].flags = 0;
-               if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
-                       /*
-                        * If CCK != 1M then set short preamble rate flag.
-                        */
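-                       /* Here plcp == 10 corresponds to the 1 Mbps CCK rate
-                        * (IWL_RATE_1M_PLCP), which never uses short
-                        * preamble. */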
-                       rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
-                               0 : IEEE80211_RATE_SHORT_PREAMBLE;
-               }
-       }
-}
-
-/******************************************************************************
- *
- * uCode download functions
- *
- ******************************************************************************/
-
-static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
-{
-       iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code);
-       iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data);
-       iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
-       iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init);
-       iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
-       iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
-}
-
-/**
- * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
- *     looking at all data.
- */
-static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
-{
-       u32 val;
-       u32 save_len = len;
-       int rc = 0;
-       u32 errcnt;
-
-       IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
-       iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
-                              IWL39_RTC_INST_LOWER_BOUND);
-
-       errcnt = 0;
-       for (; len > 0; len -= sizeof(u32), image++) {
-               /* read data comes through single port, auto-incr addr */
-               /* NOTE: Use the debugless read so we don't flood kernel log
-                * if IWL_DL_IO is set */
-               val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-               if (val != le32_to_cpu(*image)) {
-                       IWL_ERR(priv, "uCode INST section is invalid at "
-                                 "offset 0x%x, is 0x%x, s/b 0x%x\n",
-                                 save_len - len, val, le32_to_cpu(*image));
-                       rc = -EIO;
-                       errcnt++;
-                       if (errcnt >= 20)
-                               break;
-               }
-       }
-
-
-       if (!errcnt)
-               IWL_DEBUG_INFO(priv,
-                       "ucode image in INSTRUCTION memory is good\n");
-
-       return rc;
-}
-
-
-/**
- * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
- *   using sample data 100 bytes apart.  If these sample points are good,
- *   it's a pretty good bet that everything between them is good, too.
- */
-static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
-{
-       u32 val;
-       int rc = 0;
-       u32 errcnt = 0;
-       u32 i;
-
-       IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
-       for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
-               /* read data comes through single port, auto-incr addr */
-               /* NOTE: Use the debugless read so we don't flood kernel log
-                * if IWL_DL_IO is set */
-               iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
-                       i + IWL39_RTC_INST_LOWER_BOUND);
-               val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-               if (val != le32_to_cpu(*image)) {
-#if 0 /* Enable this if you want to see details */
-                       IWL_ERR(priv, "uCode INST section is invalid at "
-                                 "offset 0x%x, is 0x%x, s/b 0x%x\n",
-                                 i, val, *image);
-#endif
-                       rc = -EIO;
-                       errcnt++;
-                       if (errcnt >= 3)
-                               break;
-               }
-       }
-
-       return rc;
-}
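-
-/*
- * Illustrative note on the sparse check above: the loop steps by 100 bytes
- * per iteration (image += 100/sizeof(u32), i.e. 25 words), so an image of
- * len bytes costs roughly len/100 indirect reads instead of the len/4 reads
- * that a full word-by-word verification would require.
- */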
-
-
-/**
- * iwl3945_verify_ucode - determine which instruction image is in SRAM,
- *    and verify its contents
- */
-static int iwl3945_verify_ucode(struct iwl_priv *priv)
-{
-       __le32 *image;
-       u32 len;
-       int rc = 0;
-
-       /* Try bootstrap */
-       image = (__le32 *)priv->ucode_boot.v_addr;
-       len = priv->ucode_boot.len;
-       rc = iwl3945_verify_inst_sparse(priv, image, len);
-       if (rc == 0) {
-               IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
-               return 0;
-       }
-
-       /* Try initialize */
-       image = (__le32 *)priv->ucode_init.v_addr;
-       len = priv->ucode_init.len;
-       rc = iwl3945_verify_inst_sparse(priv, image, len);
-       if (rc == 0) {
-               IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
-               return 0;
-       }
-
-       /* Try runtime/protocol */
-       image = (__le32 *)priv->ucode_code.v_addr;
-       len = priv->ucode_code.len;
-       rc = iwl3945_verify_inst_sparse(priv, image, len);
-       if (rc == 0) {
-               IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
-               return 0;
-       }
-
-       IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
-
-       /* Since nothing seems to match, show first several data entries in
-        * instruction SRAM, so maybe visual inspection will give a clue.
-        * Selection of bootstrap image (vs. other images) is arbitrary. */
-       image = (__le32 *)priv->ucode_boot.v_addr;
-       len = priv->ucode_boot.len;
-       rc = iwl3945_verify_inst_full(priv, image, len);
-
-       return rc;
-}
-
-static void iwl3945_nic_start(struct iwl_priv *priv)
-{
-       /* Remove all resets to allow NIC to operate */
-       iwl_write32(priv, CSR_RESET, 0);
-}
-
-#define IWL3945_UCODE_GET(item)                                                \
-static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
-{                                                                      \
-       return le32_to_cpu(ucode->u.v1.item);                           \
-}
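-
-/*
- * Illustration: IWL3945_UCODE_GET(inst_size) above defines
- * iwl3945_ucode_get_inst_size(), which returns
- * le32_to_cpu(ucode->u.v1.inst_size) from the v1 firmware header.
- */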
-
-static u32 iwl3945_ucode_get_header_size(u32 api_ver)
-{
-       return 24;
-}
-
-static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
-{
-       return (u8 *) ucode->u.v1.data;
-}
-
-IWL3945_UCODE_GET(inst_size);
-IWL3945_UCODE_GET(data_size);
-IWL3945_UCODE_GET(init_size);
-IWL3945_UCODE_GET(init_data_size);
-IWL3945_UCODE_GET(boot_size);
-
-/**
- * iwl3945_read_ucode - Read uCode images from disk file.
- *
- * Copy into buffers for card to fetch via bus-mastering
- */
-static int iwl3945_read_ucode(struct iwl_priv *priv)
-{
-       const struct iwl_ucode_header *ucode;
-       int ret = -EINVAL, index;
-       const struct firmware *ucode_raw;
-       /* firmware file name contains uCode/driver compatibility version */
-       const char *name_pre = priv->cfg->fw_name_pre;
-       const unsigned int api_max = priv->cfg->ucode_api_max;
-       const unsigned int api_min = priv->cfg->ucode_api_min;
-       char buf[25];
-       u8 *src;
-       size_t len;
-       u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
-
-       /* Ask kernel firmware_class module to get the boot firmware off disk.
-        * request_firmware() is synchronous, file is in memory on return. */
-       for (index = api_max; index >= api_min; index--) {
-               sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
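-               /* For illustration (assuming fw_name_pre is "iwlwifi-3945-",
-                * which is not shown here): index 2 yields the file name
-                * "iwlwifi-3945-2.ucode". */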
-               ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
-               if (ret < 0) {
-                       IWL_ERR(priv, "%s firmware file req failed: %d\n",
-                                 buf, ret);
-                       if (ret == -ENOENT)
-                               continue;
-                       else
-                               goto error;
-               } else {
-                       if (index < api_max)
-                               IWL_ERR(priv, "Loaded firmware %s, "
-                                       "which is deprecated. "
-                                       "Please use API v%u instead.\n",
-                                         buf, api_max);
-                       IWL_DEBUG_INFO(priv, "Got firmware '%s' file "
-                                      "(%zd bytes) from disk\n",
-                                      buf, ucode_raw->size);
-                       break;
-               }
-       }
-
-       if (ret < 0)
-               goto error;
-
-       /* Make sure that we got at least our header! */
-       if (ucode_raw->size <  iwl3945_ucode_get_header_size(1)) {
-               IWL_ERR(priv, "File size way too small!\n");
-               ret = -EINVAL;
-               goto err_release;
-       }
-
-       /* Data from ucode file:  header followed by uCode images */
-       ucode = (struct iwl_ucode_header *)ucode_raw->data;
-
-       priv->ucode_ver = le32_to_cpu(ucode->ver);
-       api_ver = IWL_UCODE_API(priv->ucode_ver);
-       inst_size = iwl3945_ucode_get_inst_size(ucode);
-       data_size = iwl3945_ucode_get_data_size(ucode);
-       init_size = iwl3945_ucode_get_init_size(ucode);
-       init_data_size = iwl3945_ucode_get_init_data_size(ucode);
-       boot_size = iwl3945_ucode_get_boot_size(ucode);
-       src = iwl3945_ucode_get_data(ucode);
-
-       /* api_ver should match the api version forming part of the
-        * firmware filename ... but we don't check for that and only rely
-        * on the API version read from firmware header from here on forward */
-
-       if (api_ver < api_min || api_ver > api_max) {
-               IWL_ERR(priv, "Driver unable to support your firmware API. "
-                         "Driver supports v%u, firmware is v%u.\n",
-                         api_max, api_ver);
-               priv->ucode_ver = 0;
-               ret = -EINVAL;
-               goto err_release;
-       }
-       if (api_ver != api_max)
-               IWL_ERR(priv, "Firmware has old API version. Expected %u, "
-                         "got %u. New firmware can be obtained "
-                         "from http://www.intellinuxwireless.org.\n",
-                         api_max, api_ver);
-
-       IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
-               IWL_UCODE_MAJOR(priv->ucode_ver),
-               IWL_UCODE_MINOR(priv->ucode_ver),
-               IWL_UCODE_API(priv->ucode_ver),
-               IWL_UCODE_SERIAL(priv->ucode_ver));
-
-       snprintf(priv->hw->wiphy->fw_version,
-                sizeof(priv->hw->wiphy->fw_version),
-                "%u.%u.%u.%u",
-                IWL_UCODE_MAJOR(priv->ucode_ver),
-                IWL_UCODE_MINOR(priv->ucode_ver),
-                IWL_UCODE_API(priv->ucode_ver),
-                IWL_UCODE_SERIAL(priv->ucode_ver));
-
-       IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
-                      priv->ucode_ver);
-       IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
-                      inst_size);
-       IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n",
-                      data_size);
-       IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n",
-                      init_size);
-       IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n",
-                      init_data_size);
-       IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
-                      boot_size);
-
-
-       /* Verify size of file vs. image size info in file's header */
-       if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) +
-               inst_size + data_size + init_size +
-               init_data_size + boot_size) {
-
-               IWL_DEBUG_INFO(priv,
-                       "uCode file size %zd does not match expected size\n",
-                       ucode_raw->size);
-               ret = -EINVAL;
-               goto err_release;
-       }
-
-       /* Verify that uCode images will fit in card's SRAM */
-       if (inst_size > IWL39_MAX_INST_SIZE) {
-               IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in card SRAM\n",
-                              inst_size);
-               ret = -EINVAL;
-               goto err_release;
-       }
-
-       if (data_size > IWL39_MAX_DATA_SIZE) {
-               IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in card SRAM\n",
-                              data_size);
-               ret = -EINVAL;
-               goto err_release;
-       }
-       if (init_size > IWL39_MAX_INST_SIZE) {
-               IWL_DEBUG_INFO(priv,
-                               "uCode init instr len %d too large to fit in card SRAM\n",
-                               init_size);
-               ret = -EINVAL;
-               goto err_release;
-       }
-       if (init_data_size > IWL39_MAX_DATA_SIZE) {
-               IWL_DEBUG_INFO(priv,
-                               "uCode init data len %d too large to fit in card SRAM\n",
-                               init_data_size);
-               ret = -EINVAL;
-               goto err_release;
-       }
-       if (boot_size > IWL39_MAX_BSM_SIZE) {
-               IWL_DEBUG_INFO(priv,
-                               "uCode boot instr len %d too large to fit in card SRAM\n",
-                               boot_size);
-               ret = -EINVAL;
-               goto err_release;
-       }
-
-       /* Allocate ucode buffers for card's bus-master loading ... */
-
-       /* Runtime instructions and 2 copies of data:
-        * 1) unmodified from disk
-        * 2) backup cache for save/restore during power-downs */
-       priv->ucode_code.len = inst_size;
-       iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
-
-       priv->ucode_data.len = data_size;
-       iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
-
-       priv->ucode_data_backup.len = data_size;
-       iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
-
-       if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
-           !priv->ucode_data_backup.v_addr)
-               goto err_pci_alloc;
-
-       /* Initialization instructions and data */
-       if (init_size && init_data_size) {
-               priv->ucode_init.len = init_size;
-               iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
-
-               priv->ucode_init_data.len = init_data_size;
-               iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
-
-               if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
-                       goto err_pci_alloc;
-       }
-
-       /* Bootstrap (instructions only, no data) */
-       if (boot_size) {
-               priv->ucode_boot.len = boot_size;
-               iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
-
-               if (!priv->ucode_boot.v_addr)
-                       goto err_pci_alloc;
-       }
-
-       /* Copy images into buffers for card's bus-master reads ... */
-
-       /* Runtime instructions (first block of data in file) */
-       len = inst_size;
-       IWL_DEBUG_INFO(priv,
-               "Copying (but not loading) uCode instr len %zd\n", len);
-       memcpy(priv->ucode_code.v_addr, src, len);
-       src += len;
-
-       IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
-               priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
-
-       /* Runtime data (2nd block)
-        * NOTE: Copy into the backup buffer is done in __iwl3945_up() */
-       len = data_size;
-       IWL_DEBUG_INFO(priv,
-               "Copying (but not loading) uCode data len %zd\n", len);
-       memcpy(priv->ucode_data.v_addr, src, len);
-       memcpy(priv->ucode_data_backup.v_addr, src, len);
-       src += len;
-
-       /* Initialization instructions (3rd block) */
-       if (init_size) {
-               len = init_size;
-               IWL_DEBUG_INFO(priv,
-                       "Copying (but not loading) init instr len %zd\n", len);
-               memcpy(priv->ucode_init.v_addr, src, len);
-               src += len;
-       }
-
-       /* Initialization data (4th block) */
-       if (init_data_size) {
-               len = init_data_size;
-               IWL_DEBUG_INFO(priv,
-                       "Copying (but not loading) init data len %zd\n", len);
-               memcpy(priv->ucode_init_data.v_addr, src, len);
-               src += len;
-       }
-
-       /* Bootstrap instructions (5th block) */
-       len = boot_size;
-       IWL_DEBUG_INFO(priv,
-               "Copying (but not loading) boot instr len %zd\n", len);
-       memcpy(priv->ucode_boot.v_addr, src, len);
-
-       /* We have our copies now, allow the OS to release its copy */
-       release_firmware(ucode_raw);
-       return 0;
-
- err_pci_alloc:
-       IWL_ERR(priv, "failed to allocate pci memory\n");
-       ret = -ENOMEM;
-       iwl3945_dealloc_ucode_pci(priv);
-
- err_release:
-       release_firmware(ucode_raw);
-
- error:
-       return ret;
-}
-
-
-/**
- * iwl3945_set_ucode_ptrs - Set uCode address location
- *
- * Tell initialization uCode where to find runtime uCode.
- *
- * BSM registers initially contain pointers to initialization uCode.
- * We need to replace them to load runtime uCode inst and data,
- * and to save runtime data when powering down.
- */
-static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
-{
-       dma_addr_t pinst;
-       dma_addr_t pdata;
-
-       /* bits 31:0 for 3945 */
-       pinst = priv->ucode_code.p_addr;
-       pdata = priv->ucode_data_backup.p_addr;
-
-       /* Tell bootstrap uCode where to find image to load */
-       iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
-       iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
-       iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
-                                priv->ucode_data.len);
-
-       /* Inst byte count must be last to set up, bit 31 signals uCode
-        *   that all new ptr/size info is in place */
-       iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
-                                priv->ucode_code.len | BSM_DRAM_INST_LOAD);
-
-       IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
-
-       return 0;
-}
-
-/**
- * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received
- *
- * Called after REPLY_ALIVE notification received from "initialize" uCode.
- *
- * Tell "initialize" uCode to go ahead and load the runtime uCode.
- */
-static void iwl3945_init_alive_start(struct iwl_priv *priv)
-{
-       /* Check alive response for "valid" sign from uCode */
-       if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
-               /* We had an error bringing up the hardware, so take it
-                * all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
-               goto restart;
-       }
-
-       /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
-        * This is a paranoid check, because we would not have gotten the
-        * "initialize" alive if code weren't properly loaded.  */
-       if (iwl3945_verify_ucode(priv)) {
-               /* Runtime instruction load was bad;
-                * take it all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
-               goto restart;
-       }
-
-       /* Send pointers to protocol/runtime uCode image ... init code will
-        * load and launch runtime uCode, which will send us another "Alive"
-        * notification. */
-       IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
-       if (iwl3945_set_ucode_ptrs(priv)) {
-               /* Runtime instruction load won't happen;
-                * take it all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
-               goto restart;
-       }
-       return;
-
- restart:
-       queue_work(priv->workqueue, &priv->restart);
-}
-
-/**
- * iwl3945_alive_start - called after REPLY_ALIVE notification received
- *                   from protocol/runtime uCode (initialization uCode's
- *                   Alive gets handled by iwl3945_init_alive_start()).
- */
-static void iwl3945_alive_start(struct iwl_priv *priv)
-{
-       int thermal_spin = 0;
-       u32 rfkill;
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
-
-       if (priv->card_alive.is_valid != UCODE_VALID_OK) {
-               /* We had an error bringing up the hardware, so take it
-                * all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Alive failed.\n");
-               goto restart;
-       }
-
-       /* Initialize uCode has loaded Runtime uCode ... verify inst image.
-        * This is a paranoid check, because we would not have gotten the
-        * "runtime" alive if code weren't properly loaded.  */
-       if (iwl3945_verify_ucode(priv)) {
-               /* Runtime instruction load was bad;
-                * take it all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
-               goto restart;
-       }
-
-       rfkill = iwl_read_prph(priv, APMG_RFKILL_REG);
-       IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
-
-       if (rfkill & 0x1) {
-               clear_bit(STATUS_RF_KILL_HW, &priv->status);
-               /* if RFKILL is not on, then wait for thermal
-                * sensor in adapter to kick in */
-               while (iwl3945_hw_get_temperature(priv) == 0) {
-                       thermal_spin++;
-                       udelay(10);
-               }
-
-               if (thermal_spin)
-                       IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n",
-                                      thermal_spin * 10);
-       } else
-               set_bit(STATUS_RF_KILL_HW, &priv->status);
-
-       /* After the ALIVE response, we can send commands to 3945 uCode */
-       set_bit(STATUS_ALIVE, &priv->status);
-
-       /* Enable watchdog to monitor the driver tx queues */
-       iwl_setup_watchdog(priv);
-
-       if (iwl_is_rfkill(priv))
-               return;
-
-       ieee80211_wake_queues(priv->hw);
-
-       priv->active_rate = IWL_RATES_MASK;
-
-       iwl_power_update_mode(priv, true);
-
-       if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
-               struct iwl3945_rxon_cmd *active_rxon =
-                               (struct iwl3945_rxon_cmd *)(&ctx->active);
-
-               ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-               active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-       } else {
-               /* Initialize our rx_config data */
-               iwl_connection_init_rx_config(priv, ctx);
-       }
-
-       /* Configure Bluetooth device coexistence support */
-       priv->cfg->ops->hcmd->send_bt_config(priv);
-
-       /* Configure the adapter for unassociated operation */
-       iwl3945_commit_rxon(priv, ctx);
-
-       iwl3945_reg_txpower_periodic(priv);
-
-       iwl_leds_init(priv);
-
-       IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
-       set_bit(STATUS_READY, &priv->status);
-       wake_up_interruptible(&priv->wait_command_queue);
-
-       return;
-
- restart:
-       queue_work(priv->workqueue, &priv->restart);
-}
-
-static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
-
-static void __iwl3945_down(struct iwl_priv *priv)
-{
-       unsigned long flags;
-       int exit_pending;
-
-       IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
-
-       iwl_scan_cancel_timeout(priv, 200);
-
-       exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
-
-       /* Stop the TX queue watchdog. STATUS_EXIT_PENDING must be set here
-        * to prevent the timer from being re-armed */
-       del_timer_sync(&priv->watchdog);
-
-       /* Station information will now be cleared in device */
-       iwl_clear_ucode_stations(priv, NULL);
-       iwl_dealloc_bcast_stations(priv);
-       iwl_clear_driver_stations(priv);
-
-       /* Unblock any waiting calls */
-       wake_up_interruptible_all(&priv->wait_command_queue);
-
-       /* Wipe out the EXIT_PENDING status bit if we are not actually
-        * exiting the module */
-       if (!exit_pending)
-               clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
-       /* stop and reset the on-board processor */
-       iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
-       /* tell the device to stop sending interrupts */
-       spin_lock_irqsave(&priv->lock, flags);
-       iwl_disable_interrupts(priv);
-       spin_unlock_irqrestore(&priv->lock, flags);
-       iwl_synchronize_irq(priv);
-
-       if (priv->mac80211_registered)
-               ieee80211_stop_queues(priv->hw);
-
-       /* If we have not previously called iwl3945_init() then
-        * clear all bits but the RF Kill bits and return */
-       if (!iwl_is_init(priv)) {
-               priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
-                                       STATUS_RF_KILL_HW |
-                              test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
-                                       STATUS_GEO_CONFIGURED |
-                               test_bit(STATUS_EXIT_PENDING, &priv->status) <<
-                                       STATUS_EXIT_PENDING;
-               goto exit;
-       }
-
-       /* ...otherwise clear out all the status bits but the RF Kill
-        * bit and continue taking the NIC down. */
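-       /* Each test_bit() below evaluates to 0 or 1; shifting that value back
-        * by its bit number rebuilds a mask holding only the flags we want to
-        * preserve across the shutdown. */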
-       priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
-                               STATUS_RF_KILL_HW |
-                       test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
-                               STATUS_GEO_CONFIGURED |
-                       test_bit(STATUS_FW_ERROR, &priv->status) <<
-                               STATUS_FW_ERROR |
-                       test_bit(STATUS_EXIT_PENDING, &priv->status) <<
-                               STATUS_EXIT_PENDING;
-
-       iwl3945_hw_txq_ctx_stop(priv);
-       iwl3945_hw_rxq_stop(priv);
-
-       /* Power-down device's busmaster DMA clocks */
-       iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
-       udelay(5);
-
-       /* Stop the device, and put it in low power state */
-       iwl_apm_stop(priv);
-
- exit:
-       memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
-
-       if (priv->beacon_skb)
-               dev_kfree_skb(priv->beacon_skb);
-       priv->beacon_skb = NULL;
-
-       /* clear out any free frames */
-       iwl3945_clear_free_frames(priv);
-}
-
-static void iwl3945_down(struct iwl_priv *priv)
-{
-       mutex_lock(&priv->mutex);
-       __iwl3945_down(priv);
-       mutex_unlock(&priv->mutex);
-
-       iwl3945_cancel_deferred_work(priv);
-}
-
-#define MAX_HW_RESTARTS 5
-
-static int iwl3945_alloc_bcast_station(struct iwl_priv *priv)
-{
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       unsigned long flags;
-       u8 sta_id;
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
-       if (sta_id == IWL_INVALID_STATION) {
-               IWL_ERR(priv, "Unable to prepare broadcast station\n");
-               spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-               return -EINVAL;
-       }
-
-       priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
-       priv->stations[sta_id].used |= IWL_STA_BCAST;
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-       return 0;
-}
-
-static int __iwl3945_up(struct iwl_priv *priv)
-{
-       int rc, i;
-
-       rc = iwl3945_alloc_bcast_station(priv);
-       if (rc)
-               return rc;
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
-               IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
-               return -EIO;
-       }
-
-       if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
-               IWL_ERR(priv, "ucode not available for device bring up\n");
-               return -EIO;
-       }
-
-       /* If platform's RF_KILL switch is NOT set to KILL */
-       if (iwl_read32(priv, CSR_GP_CNTRL) &
-                               CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
-               clear_bit(STATUS_RF_KILL_HW, &priv->status);
-       else {
-               set_bit(STATUS_RF_KILL_HW, &priv->status);
-               IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
-               return -ENODEV;
-       }
-
-       iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-
-       rc = iwl3945_hw_nic_init(priv);
-       if (rc) {
-               IWL_ERR(priv, "Unable to init nic\n");
-               return rc;
-       }
-
-       /* make sure rfkill handshake bits are cleared */
-       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
-                   CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
-       /* clear (again), then enable host interrupts */
-       iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-       iwl_enable_interrupts(priv);
-
-       /* really make sure rfkill handshake bits are cleared */
-       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-
-       /* Copy original ucode data image from disk into backup cache.
-        * This will be used to initialize the on-board processor's
-        * data SRAM for a clean start when the runtime program first loads. */
-       memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
-              priv->ucode_data.len);
-
-       /* We return success when we resume from suspend and rf_kill is on. */
-       if (test_bit(STATUS_RF_KILL_HW, &priv->status))
-               return 0;
-
-       for (i = 0; i < MAX_HW_RESTARTS; i++) {
-
-               /* load bootstrap state machine,
-                * load bootstrap program into processor's memory,
-                * prepare to load the "initialize" uCode */
-               rc = priv->cfg->ops->lib->load_ucode(priv);
-
-               if (rc) {
-                       IWL_ERR(priv,
-                               "Unable to set up bootstrap uCode: %d\n", rc);
-                       continue;
-               }
-
-               /* start card; "initialize" will load runtime ucode */
-               iwl3945_nic_start(priv);
-
-               IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
-
-               return 0;
-       }
-
-       set_bit(STATUS_EXIT_PENDING, &priv->status);
-       __iwl3945_down(priv);
-       clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
-       /* tried to restart and config the device for as long as our
-        * patience could withstand */
-       IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
-       return -EIO;
-}
-
-
-/*****************************************************************************
- *
- * Workqueue callbacks
- *
- *****************************************************************************/
-
-static void iwl3945_bg_init_alive_start(struct work_struct *data)
-{
-       struct iwl_priv *priv =
-           container_of(data, struct iwl_priv, init_alive_start.work);
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       mutex_lock(&priv->mutex);
-       iwl3945_init_alive_start(priv);
-       mutex_unlock(&priv->mutex);
-}
-
-static void iwl3945_bg_alive_start(struct work_struct *data)
-{
-       struct iwl_priv *priv =
-           container_of(data, struct iwl_priv, alive_start.work);
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       mutex_lock(&priv->mutex);
-       iwl3945_alive_start(priv);
-       mutex_unlock(&priv->mutex);
-}
-
-/*
- * 3945 cannot interrupt driver when hardware rf kill switch toggles;
- * driver must poll the CSR_GP_CNTRL register for changes.  This register
- * *is* readable even when device has been SW_RESET into low power mode
- * (e.g. during RF KILL).
- */
-static void iwl3945_rfkill_poll(struct work_struct *data)
-{
-       struct iwl_priv *priv =
-           container_of(data, struct iwl_priv, _3945.rfkill_poll.work);
-       bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
-       bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
-                       & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
-
-       if (new_rfkill != old_rfkill) {
-               if (new_rfkill)
-                       set_bit(STATUS_RF_KILL_HW, &priv->status);
-               else
-                       clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
-               wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
-
-               IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
-                               new_rfkill ? "disable radio" : "enable radio");
-       }
-
-       /* Keep this running, even if radio now enabled.  This will be
-        * cancelled in mac_start() if system decides to start again */
-       queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
-                          round_jiffies_relative(2 * HZ));
-
-}
-
-int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
-{
-       struct iwl_host_cmd cmd = {
-               .id = REPLY_SCAN_CMD,
-               .len = sizeof(struct iwl3945_scan_cmd),
-               .flags = CMD_SIZE_HUGE,
-       };
-       struct iwl3945_scan_cmd *scan;
-       u8 n_probes = 0;
-       enum ieee80211_band band;
-       bool is_active = false;
-       int ret;
-
-       lockdep_assert_held(&priv->mutex);
-
-       if (!priv->scan_cmd) {
-               priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
-                                        IWL_MAX_SCAN_SIZE, GFP_KERNEL);
-               if (!priv->scan_cmd) {
-                       IWL_DEBUG_SCAN(priv, "Failed to allocate scan memory\n");
-                       return -ENOMEM;
-               }
-       }
-       scan = priv->scan_cmd;
-       memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
-
-       scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
-       scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
-
-       if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
-               u16 interval = 0;
-               u32 extra;
-               u32 suspend_time = 100;
-               u32 scan_suspend_time = 100;
-               unsigned long flags;
-
-               IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
-
-               spin_lock_irqsave(&priv->lock, flags);
-               if (priv->is_internal_short_scan)
-                       interval = 0;
-               else
-                       interval = vif->bss_conf.beacon_int;
-               spin_unlock_irqrestore(&priv->lock, flags);
-
-               scan->suspend_time = 0;
-               scan->max_out_time = cpu_to_le32(200 * 1024);
-               if (!interval)
-                       interval = suspend_time;
-               /*
-                * suspend time format:
-                *  0-19: beacon interval in usec (time before exec.)
-                * 20-23: 0
-                * 24-31: number of beacons (suspend between channels)
-                */
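-               /* Worked example (illustrative): suspend_time = 100 with a
-                * beacon interval of 100 gives extra = (100/100) << 24 =
-                * 0x01000000 and a remainder term of (100 % 100) * 1024 = 0,
-                * so scan_suspend_time becomes 0x01000000, i.e. suspend for
-                * one beacon between channels. */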
-
-               extra = (suspend_time / interval) << 24;
-               scan_suspend_time = 0xFF0FFFFF &
-                   (extra | ((suspend_time % interval) * 1024));
-
-               scan->suspend_time = cpu_to_le32(scan_suspend_time);
-               IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
-                              scan_suspend_time, interval);
-       }
-
-       if (priv->is_internal_short_scan) {
-               IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
-       } else if (priv->scan_request->n_ssids) {
-               int i, p = 0;
-               IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
-               for (i = 0; i < priv->scan_request->n_ssids; i++) {
-                       /* always does wildcard anyway */
-                       if (!priv->scan_request->ssids[i].ssid_len)
-                               continue;
-                       scan->direct_scan[p].id = WLAN_EID_SSID;
-                       scan->direct_scan[p].len =
-                               priv->scan_request->ssids[i].ssid_len;
-                       memcpy(scan->direct_scan[p].ssid,
-                              priv->scan_request->ssids[i].ssid,
-                              priv->scan_request->ssids[i].ssid_len);
-                       n_probes++;
-                       p++;
-               }
-               is_active = true;
-       } else
-               IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n");
-
-       /* We don't build a direct scan probe request; the uCode will do
-        * that based on the direct_mask added to each channel entry */
-       scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
-       scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
-       scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
-       /* flags + rate selection */
-
-       switch (priv->scan_band) {
-       case IEEE80211_BAND_2GHZ:
-               scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
-               scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
-               band = IEEE80211_BAND_2GHZ;
-               break;
-       case IEEE80211_BAND_5GHZ:
-               scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
-               band = IEEE80211_BAND_5GHZ;
-               break;
-       default:
-               IWL_WARN(priv, "Invalid scan band\n");
-               return -EIO;
-       }
-
-       /*
-        * If active scanning is requested but a certain channel
-        * is marked passive, we can do active scanning if we
-        * detect transmissions.
-        */
-       scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
-                                       IWL_GOOD_CRC_TH_DISABLED;
-
-       if (!priv->is_internal_short_scan) {
-               scan->tx_cmd.len = cpu_to_le16(
-                       iwl_fill_probe_req(priv,
-                               (struct ieee80211_mgmt *)scan->data,
-                               vif->addr,
-                               priv->scan_request->ie,
-                               priv->scan_request->ie_len,
-                               IWL_MAX_SCAN_SIZE - sizeof(*scan)));
-       } else {
-               /* use bcast addr, will not be transmitted but must be valid */
-               scan->tx_cmd.len = cpu_to_le16(
-                       iwl_fill_probe_req(priv,
-                               (struct ieee80211_mgmt *)scan->data,
-                               iwl_bcast_addr, NULL, 0,
-                               IWL_MAX_SCAN_SIZE - sizeof(*scan)));
-       }
-       /* select Rx antennas */
-       scan->flags |= iwl3945_get_antenna_flags(priv);
-
-       if (priv->is_internal_short_scan) {
-               scan->channel_count =
-                       iwl3945_get_single_channel_for_scan(priv, vif, band,
-                               (void *)&scan->data[le16_to_cpu(
-                               scan->tx_cmd.len)]);
-       } else {
-               scan->channel_count =
-                       iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
-                               (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)], vif);
-       }
-
-       if (scan->channel_count == 0) {
-               IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
-               return -EIO;
-       }
-
-       cmd.len += le16_to_cpu(scan->tx_cmd.len) +
-           scan->channel_count * sizeof(struct iwl3945_scan_channel);
-       cmd.data = scan;
-       scan->len = cpu_to_le16(cmd.len);
-
-       set_bit(STATUS_SCAN_HW, &priv->status);
-       ret = iwl_send_cmd_sync(priv, &cmd);
-       if (ret)
-               clear_bit(STATUS_SCAN_HW, &priv->status);
-       return ret;
-}
-
-void iwl3945_post_scan(struct iwl_priv *priv)
-{
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       /*
-        * Since setting the RXON may have been deferred while
-        * performing the scan, fire one off if needed
-        */
-       if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
-               iwl3945_commit_rxon(priv, ctx);
-}
-
-static void iwl3945_bg_restart(struct work_struct *data)
-{
-       struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
-               struct iwl_rxon_context *ctx;
-               mutex_lock(&priv->mutex);
-               for_each_context(priv, ctx)
-                       ctx->vif = NULL;
-               priv->is_open = 0;
-               mutex_unlock(&priv->mutex);
-               iwl3945_down(priv);
-               ieee80211_restart_hw(priv->hw);
-       } else {
-               iwl3945_down(priv);
-
-               if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-                       return;
-
-               mutex_lock(&priv->mutex);
-               __iwl3945_up(priv);
-               mutex_unlock(&priv->mutex);
-       }
-}
-
-static void iwl3945_bg_rx_replenish(struct work_struct *data)
-{
-       struct iwl_priv *priv =
-           container_of(data, struct iwl_priv, rx_replenish);
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       mutex_lock(&priv->mutex);
-       iwl3945_rx_replenish(priv);
-       mutex_unlock(&priv->mutex);
-}
-
-void iwl3945_post_associate(struct iwl_priv *priv)
-{
-       int rc = 0;
-       struct ieee80211_conf *conf = NULL;
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       if (!ctx->vif || !priv->is_open)
-               return;
-
-       if (ctx->vif->type == NL80211_IFTYPE_AP) {
-               IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
-               return;
-       }
-
-       IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
-                       ctx->vif->bss_conf.aid, ctx->active.bssid_addr);
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       iwl_scan_cancel_timeout(priv, 200);
-
-       conf = ieee80211_get_hw_conf(priv->hw);
-
-       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-       iwl3945_commit_rxon(priv, ctx);
-
-       rc = iwl_send_rxon_timing(priv, ctx);
-       if (rc)
-               IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
-                           "Attempting to continue.\n");
-
-       ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-
-       ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
-
-       IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
-                       ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int);
-
-       if (ctx->vif->bss_conf.use_short_preamble)
-               ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
-       else
-               ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
-       if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
-               if (ctx->vif->bss_conf.use_short_slot)
-                       ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
-               else
-                       ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-       }
-
-       iwl3945_commit_rxon(priv, ctx);
-
-       switch (ctx->vif->type) {
-       case NL80211_IFTYPE_STATION:
-               iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
-               break;
-       case NL80211_IFTYPE_ADHOC:
-               iwl3945_send_beacon_cmd(priv);
-               break;
-       default:
-               IWL_ERR(priv, "%s Should not be called in %d mode\n",
-                       __func__, ctx->vif->type);
-               break;
-       }
-}
-
-/*****************************************************************************
- *
- * mac80211 entry point functions
- *
- *****************************************************************************/
-
-#define UCODE_READY_TIMEOUT    (2 * HZ)
-
-static int iwl3945_mac_start(struct ieee80211_hw *hw)
-{
-       struct iwl_priv *priv = hw->priv;
-       int ret;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       /* we should be verifying the device is ready to be opened */
-       mutex_lock(&priv->mutex);
-
-       /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
-        * ucode filename and max sizes are card-specific. */
-
-       if (!priv->ucode_code.len) {
-               ret = iwl3945_read_ucode(priv);
-               if (ret) {
-                       IWL_ERR(priv, "Could not read microcode: %d\n", ret);
-                       mutex_unlock(&priv->mutex);
-                       goto out_release_irq;
-               }
-       }
-
-       ret = __iwl3945_up(priv);
-
-       mutex_unlock(&priv->mutex);
-
-       if (ret)
-               goto out_release_irq;
-
-       IWL_DEBUG_INFO(priv, "Start UP work.\n");
-
-       /* Wait for START_ALIVE from ucode. Otherwise callbacks from
-        * mac80211 will not be run successfully. */
-       ret = wait_event_interruptible_timeout(priv->wait_command_queue,
-                       test_bit(STATUS_READY, &priv->status),
-                       UCODE_READY_TIMEOUT);
-       if (!ret) {
-               if (!test_bit(STATUS_READY, &priv->status)) {
-                       IWL_ERR(priv,
-                               "Wait for START_ALIVE timeout after %dms.\n",
-                               jiffies_to_msecs(UCODE_READY_TIMEOUT));
-                       ret = -ETIMEDOUT;
-                       goto out_release_irq;
-               }
-       }
-
-       /* ucode is running and will send rfkill notifications,
-        * no need to poll the killswitch state anymore */
-       cancel_delayed_work(&priv->_3945.rfkill_poll);
-
-       iwl_led_start(priv);
-
-       priv->is_open = 1;
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-       return 0;
-
-out_release_irq:
-       priv->is_open = 0;
-       IWL_DEBUG_MAC80211(priv, "leave - failed\n");
-       return ret;
-}
-
-static void iwl3945_mac_stop(struct ieee80211_hw *hw)
-{
-       struct iwl_priv *priv = hw->priv;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       if (!priv->is_open) {
-               IWL_DEBUG_MAC80211(priv, "leave - skip\n");
-               return;
-       }
-
-       priv->is_open = 0;
-
-       iwl3945_down(priv);
-
-       flush_workqueue(priv->workqueue);
-
-       /* start polling the killswitch state again */
-       queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
-                          round_jiffies_relative(2 * HZ));
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
-       struct iwl_priv *priv = hw->priv;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
-                    ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
-
-       if (iwl3945_tx_skb(priv, skb))
-               dev_kfree_skb_any(skb);
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-       return NETDEV_TX_OK;
-}
-
-void iwl3945_config_ap(struct iwl_priv *priv)
-{
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       struct ieee80211_vif *vif = ctx->vif;
-       int rc = 0;
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       /* The following should be done only at AP bring up */
-       if (!(iwl_is_associated(priv, IWL_RXON_CTX_BSS))) {
-
-               /* RXON - unassoc (to set timing command) */
-               ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-               iwl3945_commit_rxon(priv, ctx);
-
-               /* RXON Timing */
-               rc = iwl_send_rxon_timing(priv, ctx);
-               if (rc)
-                       IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
-                                       "Attempting to continue.\n");
-
-               ctx->staging.assoc_id = 0;
-
-               if (vif->bss_conf.use_short_preamble)
-                       ctx->staging.flags |=
-                               RXON_FLG_SHORT_PREAMBLE_MSK;
-               else
-                       ctx->staging.flags &=
-                               ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
-               if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
-                       if (vif->bss_conf.use_short_slot)
-                               ctx->staging.flags |=
-                                       RXON_FLG_SHORT_SLOT_MSK;
-                       else
-                               ctx->staging.flags &=
-                                       ~RXON_FLG_SHORT_SLOT_MSK;
-               }
-               /* restore RXON assoc */
-               ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-               iwl3945_commit_rxon(priv, ctx);
-       }
-       iwl3945_send_beacon_cmd(priv);
-
-       /* FIXME - we need to add code here to detect a totally new
-        * configuration, reset the AP, unassoc, rxon timing, assoc,
-        * clear sta table, add BCAST sta... */
-}
-
-static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
-                              struct ieee80211_vif *vif,
-                              struct ieee80211_sta *sta,
-                              struct ieee80211_key_conf *key)
-{
-       struct iwl_priv *priv = hw->priv;
-       int ret = 0;
-       u8 sta_id = IWL_INVALID_STATION;
-       u8 static_key;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       if (iwl3945_mod_params.sw_crypto) {
-               IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
-               return -EOPNOTSUPP;
-       }
-
-       static_key = !iwl_is_associated(priv, IWL_RXON_CTX_BSS);
-
-       if (!static_key) {
-               sta_id = iwl_sta_id_or_broadcast(
-                               priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
-               if (sta_id == IWL_INVALID_STATION)
-                       return -EINVAL;
-       }
-
-       mutex_lock(&priv->mutex);
-       iwl_scan_cancel_timeout(priv, 100);
-
-       switch (cmd) {
-       case SET_KEY:
-               if (static_key)
-                       ret = iwl3945_set_static_key(priv, key);
-               else
-                       ret = iwl3945_set_dynamic_key(priv, key, sta_id);
-               IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
-               break;
-       case DISABLE_KEY:
-               if (static_key)
-                       ret = iwl3945_remove_static_key(priv);
-               else
-                       ret = iwl3945_clear_sta_key_info(priv, sta_id);
-               IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
-               break;
-       default:
-               ret = -EINVAL;
-       }
-
-       mutex_unlock(&priv->mutex);
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-
-       return ret;
-}
-
-static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
-                              struct ieee80211_vif *vif,
-                              struct ieee80211_sta *sta)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv;
-       int ret;
-       bool is_ap = vif->type == NL80211_IFTYPE_STATION;
-       u8 sta_id;
-
-       IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
-                       sta->addr);
-       mutex_lock(&priv->mutex);
-       IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
-                       sta->addr);
-       sta_priv->common.sta_id = IWL_INVALID_STATION;
-
-
-       ret = iwl_add_station_common(priv, &priv->contexts[IWL_RXON_CTX_BSS],
-                                    sta->addr, is_ap, sta, &sta_id);
-       if (ret) {
-               IWL_ERR(priv, "Unable to add station %pM (%d)\n",
-                       sta->addr, ret);
-               /* Should we return success if return code is EEXIST ? */
-               mutex_unlock(&priv->mutex);
-               return ret;
-       }
-
-       sta_priv->common.sta_id = sta_id;
-
-       /* Initialize rate scaling */
-       IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
-                      sta->addr);
-       iwl3945_rs_rate_init(priv, sta, sta_id);
-       mutex_unlock(&priv->mutex);
-
-       return 0;
-}
-
-static void iwl3945_configure_filter(struct ieee80211_hw *hw,
-                                    unsigned int changed_flags,
-                                    unsigned int *total_flags,
-                                    u64 multicast)
-{
-       struct iwl_priv *priv = hw->priv;
-       __le32 filter_or = 0, filter_nand = 0;
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-#define CHK(test, flag)        do { \
-       if (*total_flags & (test))              \
-               filter_or |= (flag);            \
-       else                                    \
-               filter_nand |= (flag);          \
-       } while (0)
-
-       IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
-                       changed_flags, *total_flags);
-
-       CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
-       CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
-       CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
-       mutex_lock(&priv->mutex);
-
-       ctx->staging.filter_flags &= ~filter_nand;
-       ctx->staging.filter_flags |= filter_or;
-
-       /*
-        * Not committing directly because hardware can perform a scan,
-        * but even if hw is ready, committing here breaks for some reason,
-        * we'll eventually commit the filter flags change anyway.
-        */
-
-       mutex_unlock(&priv->mutex);
-
-       /*
-        * Receiving all multicast frames is always enabled by the
-        * default flags setup in iwl_connection_init_rx_config()
-        * since we currently do not support programming multicast
-        * filters into the device.
-        */
-       *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
-                       FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
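
The CHK macro above sorts each requested mac80211 filter flag into either a set mask (filter_or) or a clear mask (filter_nand), and the two masks are then applied to the staging filter flags in one step. Below is a small stand-alone sketch of that accumulate-then-apply idea; the bit values are made up for illustration and are not the real FIF_* or RXON_FILTER_* definitions.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the mac80211 FIF_* bits and the device's
 * RXON filter bits; the real values live in the mac80211 and iwlwifi
 * headers. */
#define FAKE_FIF_PROMISC          0x01
#define FAKE_FIF_CONTROL          0x02
#define FAKE_RXON_PROMISC_MSK     0x0004
#define FAKE_RXON_CTL2HOST_MSK    0x0008

int main(void)
{
	uint32_t total_flags = FAKE_FIF_CONTROL;  /* what mac80211 asked for */
	uint32_t filter_or = 0, filter_nand = 0;
	uint32_t staging = FAKE_RXON_PROMISC_MSK; /* current staging flags */

#define CHK(test, flag) do {			\
	if (total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
	} while (0)

	CHK(FAKE_FIF_PROMISC, FAKE_RXON_PROMISC_MSK);
	CHK(FAKE_FIF_CONTROL, FAKE_RXON_CTL2HOST_MSK);
#undef CHK

	/* Apply step, as in iwl3945_configure_filter() above */
	staging &= ~filter_nand;
	staging |= filter_or;

	printf("staging filter flags: 0x%04x\n", (unsigned)staging); /* 0x0008 */
	return 0;
}
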
-
-
-/*****************************************************************************
- *
- * sysfs attributes
- *
- *****************************************************************************/
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-
-/*
- * The following adds a new attribute to the sysfs representation
- * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
- * used for controlling the debug level.
- *
- * See the level definitions in iwl for details.
- *
- * The debug_level being managed using sysfs below is a per device debug
- * level that is used instead of the global debug level if it (the per
- * device debug level) is set.
- */
-static ssize_t show_debug_level(struct device *d,
-                               struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv));
-}
-static ssize_t store_debug_level(struct device *d,
-                               struct device_attribute *attr,
-                                const char *buf, size_t count)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       unsigned long val;
-       int ret;
-
-       ret = strict_strtoul(buf, 0, &val);
-       if (ret)
-               IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
-       else {
-               priv->debug_level = val;
-               if (iwl_alloc_traffic_mem(priv))
-                       IWL_ERR(priv,
-                               "Not enough memory to generate traffic log\n");
-       }
-       return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
-                       show_debug_level, store_debug_level);
-
-#endif /* CONFIG_IWLWIFI_DEBUG */
-
-static ssize_t show_temperature(struct device *d,
-                               struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-
-       if (!iwl_is_alive(priv))
-               return -EAGAIN;
-
-       return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
-}
-
-static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
-
-static ssize_t show_tx_power(struct device *d,
-                            struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
-}
-
-static ssize_t store_tx_power(struct device *d,
-                             struct device_attribute *attr,
-                             const char *buf, size_t count)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       char *p = (char *)buf;
-       u32 val;
-
-       val = simple_strtoul(p, &p, 10);
-       if (p == buf)
-               IWL_INFO(priv, ": %s is not in decimal form.\n", buf);
-       else
-               iwl3945_hw_reg_set_txpower(priv, val);
-
-       return count;
-}
-
-static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
-
-static ssize_t show_flags(struct device *d,
-                         struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       return sprintf(buf, "0x%04X\n", ctx->active.flags);
-}
-
-static ssize_t store_flags(struct device *d,
-                          struct device_attribute *attr,
-                          const char *buf, size_t count)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       u32 flags = simple_strtoul(buf, NULL, 0);
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       mutex_lock(&priv->mutex);
-       if (le32_to_cpu(ctx->staging.flags) != flags) {
-               /* Cancel any currently running scans... */
-               if (iwl_scan_cancel_timeout(priv, 100))
-                       IWL_WARN(priv, "Could not cancel scan.\n");
-               else {
-                       IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
-                                      flags);
-                       ctx->staging.flags = cpu_to_le32(flags);
-                       iwl3945_commit_rxon(priv, ctx);
-               }
-       }
-       mutex_unlock(&priv->mutex);
-
-       return count;
-}
-
-static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
-
-static ssize_t show_filter_flags(struct device *d,
-                                struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       return sprintf(buf, "0x%04X\n",
-               le32_to_cpu(ctx->active.filter_flags));
-}
-
-static ssize_t store_filter_flags(struct device *d,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t count)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       u32 filter_flags = simple_strtoul(buf, NULL, 0);
-
-       mutex_lock(&priv->mutex);
-       if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
-               /* Cancel any currently running scans... */
-               if (iwl_scan_cancel_timeout(priv, 100))
-                       IWL_WARN(priv, "Could not cancel scan.\n");
-               else {
-                       IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
-                                      "0x%04X\n", filter_flags);
-                       ctx->staging.filter_flags =
-                               cpu_to_le32(filter_flags);
-                       iwl3945_commit_rxon(priv, ctx);
-               }
-       }
-       mutex_unlock(&priv->mutex);
-
-       return count;
-}
-
-static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
-                  store_filter_flags);
-
-static ssize_t show_measurement(struct device *d,
-                               struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       struct iwl_spectrum_notification measure_report;
-       u32 size = sizeof(measure_report), len = 0, ofs = 0;
-       u8 *data = (u8 *)&measure_report;
-       unsigned long flags;
-
-       spin_lock_irqsave(&priv->lock, flags);
-       if (!(priv->measurement_status & MEASUREMENT_READY)) {
-               spin_unlock_irqrestore(&priv->lock, flags);
-               return 0;
-       }
-       memcpy(&measure_report, &priv->measure_report, size);
-       priv->measurement_status = 0;
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       while (size && (PAGE_SIZE - len)) {
-               hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
-                                  PAGE_SIZE - len, 1);
-               len = strlen(buf);
-               if (PAGE_SIZE - len)
-                       buf[len++] = '\n';
-
-               ofs += 16;
-               size -= min(size, 16U);
-       }
-
-       return len;
-}
-
-static ssize_t store_measurement(struct device *d,
-                                struct device_attribute *attr,
-                                const char *buf, size_t count)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       struct ieee80211_measurement_params params = {
-               .channel = le16_to_cpu(ctx->active.channel),
-               .start_time = cpu_to_le64(priv->_3945.last_tsf),
-               .duration = cpu_to_le16(1),
-       };
-       u8 type = IWL_MEASURE_BASIC;
-       u8 buffer[32];
-       u8 channel;
-
-       if (count) {
-               char *p = buffer;
-               strncpy(buffer, buf, min(sizeof(buffer), count));
-               channel = simple_strtoul(p, NULL, 0);
-               if (channel)
-                       params.channel = channel;
-
-               p = buffer;
-               while (*p && *p != ' ')
-                       p++;
-               if (*p)
-                       type = simple_strtoul(p + 1, NULL, 0);
-       }
-
-       IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on "
-                      "channel %d (for '%s')\n", type, params.channel, buf);
-       iwl3945_get_measurement(priv, &params, type);
-
-       return count;
-}
-
-static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
-                  show_measurement, store_measurement);
-
-static ssize_t store_retry_rate(struct device *d,
-                               struct device_attribute *attr,
-                               const char *buf, size_t count)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-
-       priv->retry_rate = simple_strtoul(buf, NULL, 0);
-       if (priv->retry_rate <= 0)
-               priv->retry_rate = 1;
-
-       return count;
-}
-
-static ssize_t show_retry_rate(struct device *d,
-                              struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       return sprintf(buf, "%d", priv->retry_rate);
-}
-
-static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
-                  store_retry_rate);
-
-
-static ssize_t show_channels(struct device *d,
-                            struct device_attribute *attr, char *buf)
-{
-       /* all this shit doesn't belong into sysfs anyway */
-       return 0;
-}
-
-static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
-
-static ssize_t show_antenna(struct device *d,
-                           struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-
-       if (!iwl_is_alive(priv))
-               return -EAGAIN;
-
-       return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
-}
-
-static ssize_t store_antenna(struct device *d,
-                            struct device_attribute *attr,
-                            const char *buf, size_t count)
-{
-       struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d);
-       int ant;
-
-       if (count == 0)
-               return 0;
-
-       if (sscanf(buf, "%1i", &ant) != 1) {
-               IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n");
-               return count;
-       }
-
-       if ((ant >= 0) && (ant <= 2)) {
-               IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant);
-               iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant;
-       } else
-               IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant);
-
-
-       return count;
-}
-
-static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
-
-static ssize_t show_status(struct device *d,
-                          struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       if (!iwl_is_alive(priv))
-               return -EAGAIN;
-       return sprintf(buf, "0x%08x\n", (int)priv->status);
-}
-
-static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
-
-static ssize_t dump_error_log(struct device *d,
-                             struct device_attribute *attr,
-                             const char *buf, size_t count)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       char *p = (char *)buf;
-
-       if (p[0] == '1')
-               iwl3945_dump_nic_error_log(priv);
-
-       return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
-
-/*****************************************************************************
- *
- * driver setup and tear down
- *
- *****************************************************************************/
-
-static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
-{
-       priv->workqueue = create_singlethread_workqueue(DRV_NAME);
-
-       init_waitqueue_head(&priv->wait_command_queue);
-
-       INIT_WORK(&priv->restart, iwl3945_bg_restart);
-       INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
-       INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
-       INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
-       INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
-       INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
-
-       iwl_setup_scan_deferred_work(priv);
-
-       iwl3945_hw_setup_deferred_work(priv);
-
-       init_timer(&priv->watchdog);
-       priv->watchdog.data = (unsigned long)priv;
-       priv->watchdog.function = iwl_bg_watchdog;
-
-       tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
-                    iwl3945_irq_tasklet, (unsigned long)priv);
-}
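
The _3945.rfkill_poll delayed work initialized here is queued for roughly every two seconds while the interface is down (see the queue_delayed_work(..., round_jiffies_relative(2 * HZ)) calls elsewhere in this file) and cancelled once the ucode takes over rfkill reporting. Below is a minimal sketch of such a self-rearming delayed work; it uses the system workqueue for brevity where the driver uses its private workqueue, and poll_work, poll_fn, poll_start and poll_stop are illustrative names rather than driver symbols.

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

static struct delayed_work poll_work;

static void poll_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* ... sample the hardware switch state here ... */

	/* re-arm roughly two seconds from now */
	schedule_delayed_work(dwork, round_jiffies_relative(2 * HZ));
}

static void poll_start(void)
{
	INIT_DELAYED_WORK(&poll_work, poll_fn);
	schedule_delayed_work(&poll_work, round_jiffies_relative(2 * HZ));
}

static void poll_stop(void)
{
	cancel_delayed_work_sync(&poll_work);	/* as in the remove path below */
}
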
-
-static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
-{
-       iwl3945_hw_cancel_deferred_work(priv);
-
-       cancel_delayed_work_sync(&priv->init_alive_start);
-       cancel_delayed_work(&priv->alive_start);
-       cancel_work_sync(&priv->beacon_update);
-
-       iwl_cancel_scan_deferred_work(priv);
-}
-
-static struct attribute *iwl3945_sysfs_entries[] = {
-       &dev_attr_antenna.attr,
-       &dev_attr_channels.attr,
-       &dev_attr_dump_errors.attr,
-       &dev_attr_flags.attr,
-       &dev_attr_filter_flags.attr,
-       &dev_attr_measurement.attr,
-       &dev_attr_retry_rate.attr,
-       &dev_attr_status.attr,
-       &dev_attr_temperature.attr,
-       &dev_attr_tx_power.attr,
-#ifdef CONFIG_IWLWIFI_DEBUG
-       &dev_attr_debug_level.attr,
-#endif
-       NULL
-};
-
-static struct attribute_group iwl3945_attribute_group = {
-       .name = NULL,           /* put in device directory */
-       .attrs = iwl3945_sysfs_entries,
-};
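
Once sysfs_create_group() in the probe path below registers this group against the PCI device, each attribute above becomes a file in the adapter's sysfs device directory. A small user-space sketch of reading one of them follows; the PCI address in the path is only an example and depends on where the adapter sits on the bus.

#include <stdio.h>

int main(void)
{
	char buf[64];
	/* example path; substitute the adapter's real PCI address */
	FILE *f = fopen("/sys/bus/pci/devices/0000:03:00.0/temperature", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("temperature: %s", buf);
	fclose(f);
	return 0;
}
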
-
-struct ieee80211_ops iwl3945_hw_ops = {
-       .tx = iwl3945_mac_tx,
-       .start = iwl3945_mac_start,
-       .stop = iwl3945_mac_stop,
-       .add_interface = iwl_mac_add_interface,
-       .remove_interface = iwl_mac_remove_interface,
-       .change_interface = iwl_mac_change_interface,
-       .config = iwl_legacy_mac_config,
-       .configure_filter = iwl3945_configure_filter,
-       .set_key = iwl3945_mac_set_key,
-       .conf_tx = iwl_mac_conf_tx,
-       .reset_tsf = iwl_legacy_mac_reset_tsf,
-       .bss_info_changed = iwl_legacy_mac_bss_info_changed,
-       .hw_scan = iwl_mac_hw_scan,
-       .sta_add = iwl3945_mac_sta_add,
-       .sta_remove = iwl_mac_sta_remove,
-       .tx_last_beacon = iwl_mac_tx_last_beacon,
-};
-
-static int iwl3945_init_drv(struct iwl_priv *priv)
-{
-       int ret;
-       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-
-       priv->retry_rate = 1;
-       priv->beacon_skb = NULL;
-
-       spin_lock_init(&priv->sta_lock);
-       spin_lock_init(&priv->hcmd_lock);
-
-       INIT_LIST_HEAD(&priv->free_frames);
-
-       mutex_init(&priv->mutex);
-       mutex_init(&priv->sync_cmd_mutex);
-
-       priv->ieee_channels = NULL;
-       priv->ieee_rates = NULL;
-       priv->band = IEEE80211_BAND_2GHZ;
-
-       priv->iw_mode = NL80211_IFTYPE_STATION;
-       priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
-
-       /* initialize force reset */
-       priv->force_reset[IWL_RF_RESET].reset_duration =
-               IWL_DELAY_NEXT_FORCE_RF_RESET;
-       priv->force_reset[IWL_FW_RESET].reset_duration =
-               IWL_DELAY_NEXT_FORCE_FW_RELOAD;
-
-
-       priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
-       priv->tx_power_next = IWL_DEFAULT_TX_POWER;
-
-       if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
-               IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
-                        eeprom->version);
-               ret = -EINVAL;
-               goto err;
-       }
-       ret = iwl_init_channel_map(priv);
-       if (ret) {
-               IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
-               goto err;
-       }
-
-       /* Set up txpower settings in driver for all channels */
-       if (iwl3945_txpower_set_from_eeprom(priv)) {
-               ret = -EIO;
-               goto err_free_channel_map;
-       }
-
-       ret = iwlcore_init_geos(priv);
-       if (ret) {
-               IWL_ERR(priv, "initializing geos failed: %d\n", ret);
-               goto err_free_channel_map;
-       }
-       iwl3945_init_hw_rates(priv, priv->ieee_rates);
-
-       return 0;
-
-err_free_channel_map:
-       iwl_free_channel_map(priv);
-err:
-       return ret;
-}
-
-#define IWL3945_MAX_PROBE_REQUEST      200
-
-static int iwl3945_setup_mac(struct iwl_priv *priv)
-{
-       int ret;
-       struct ieee80211_hw *hw = priv->hw;
-
-       hw->rate_control_algorithm = "iwl-3945-rs";
-       hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
-       hw->vif_data_size = sizeof(struct iwl_vif_priv);
-
-       /* Tell mac80211 our characteristics */
-       hw->flags = IEEE80211_HW_SIGNAL_DBM |
-                   IEEE80211_HW_SPECTRUM_MGMT;
-
-       if (!priv->cfg->base_params->broken_powersave)
-               hw->flags |= IEEE80211_HW_SUPPORTS_PS |
-                            IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
-
-       hw->wiphy->interface_modes =
-               priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
-
-       hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
-                           WIPHY_FLAG_DISABLE_BEACON_HINTS;
-
-       hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
-       /* we create the 802.11 header and a zero-length SSID element */
-       hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2;
-
-       /* Default value; 4 EDCA QOS priorities */
-       hw->queues = 4;
-
-       if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
-               priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-                       &priv->bands[IEEE80211_BAND_2GHZ];
-
-       if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
-               priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-                       &priv->bands[IEEE80211_BAND_5GHZ];
-
-       ret = ieee80211_register_hw(priv->hw);
-       if (ret) {
-               IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
-               return ret;
-       }
-       priv->mac80211_registered = 1;
-
-       return 0;
-}
-
-static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       int err = 0, i;
-       struct iwl_priv *priv;
-       struct ieee80211_hw *hw;
-       struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
-       struct iwl3945_eeprom *eeprom;
-       unsigned long flags;
-
-       /***********************
-        * 1. Allocating HW data
-        * ********************/
-
-       /* mac80211 allocates memory for this device instance, including
-        *   space for this driver's private structure */
-       hw = iwl_alloc_all(cfg);
-       if (hw == NULL) {
-               pr_err("Can not allocate network device\n");
-               err = -ENOMEM;
-               goto out;
-       }
-       priv = hw->priv;
-       SET_IEEE80211_DEV(hw, &pdev->dev);
-
-       priv->cmd_queue = IWL39_CMD_QUEUE_NUM;
-
-       /* 3945 has only one valid context */
-       priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
-
-       for (i = 0; i < NUM_IWL_RXON_CTX; i++)
-               priv->contexts[i].ctxid = i;
-
-       priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
-       priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
-       priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
-       priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
-       priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
-       priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
-       priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
-               BIT(NL80211_IFTYPE_STATION) |
-               BIT(NL80211_IFTYPE_ADHOC);
-       priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
-       priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
-       priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
-
-       /*
-        * Disabling hardware scan means that mac80211 will perform scans
-        * "the hard way", rather than using device's scan.
-        */
-       if (iwl3945_mod_params.disable_hw_scan) {
-               dev_printk(KERN_DEBUG, &(pdev->dev),
-                       "sw scan support is deprecated\n");
-               iwl3945_hw_ops.hw_scan = NULL;
-       }
-
-
-       IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
-       priv->cfg = cfg;
-       priv->pci_dev = pdev;
-       priv->inta_mask = CSR_INI_SET_MASK;
-
-       if (iwl_alloc_traffic_mem(priv))
-               IWL_ERR(priv, "Not enough memory to generate traffic log\n");
-
-       /***************************
-        * 2. Initializing PCI bus
-        * *************************/
-       pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
-                               PCIE_LINK_STATE_CLKPM);
-
-       if (pci_enable_device(pdev)) {
-               err = -ENODEV;
-               goto out_ieee80211_free_hw;
-       }
-
-       pci_set_master(pdev);
-
-       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-       if (!err)
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-       if (err) {
-               IWL_WARN(priv, "No suitable DMA available.\n");
-               goto out_pci_disable_device;
-       }
-
-       pci_set_drvdata(pdev, priv);
-       err = pci_request_regions(pdev, DRV_NAME);
-       if (err)
-               goto out_pci_disable_device;
-
-       /***********************
-        * 3. Read REV Register
-        * ********************/
-       priv->hw_base = pci_iomap(pdev, 0, 0);
-       if (!priv->hw_base) {
-               err = -ENODEV;
-               goto out_pci_release_regions;
-       }
-
-       IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
-                       (unsigned long long) pci_resource_len(pdev, 0));
-       IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
-
-       /* We disable the RETRY_TIMEOUT register (0x41) to keep
-        * PCI Tx retries from interfering with C3 CPU state */
-       pci_write_config_byte(pdev, 0x41, 0x00);
-
-       /* these spin locks will be used in apm_ops.init and EEPROM access
-        * we should init now
-        */
-       spin_lock_init(&priv->reg_lock);
-       spin_lock_init(&priv->lock);
-
-       /*
-        * stop and reset the on-board processor just in case it is in a
-        * strange state ... like being left stranded by a primary kernel
-        * and this is now the kdump kernel trying to start up
-        */
-       iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
-       /***********************
-        * 4. Read EEPROM
-        * ********************/
-
-       /* Read the EEPROM */
-       err = iwl_eeprom_init(priv);
-       if (err) {
-               IWL_ERR(priv, "Unable to init EEPROM\n");
-               goto out_iounmap;
-       }
-       /* MAC Address location in EEPROM same for 3945/4965 */
-       eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-       IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address);
-       SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address);
-
-       /***********************
-        * 5. Setup HW Constants
-        * ********************/
-       /* Device-specific setup */
-       if (iwl3945_hw_set_hw_params(priv)) {
-               IWL_ERR(priv, "failed to set hw settings\n");
-               goto out_eeprom_free;
-       }
-
-       /***********************
-        * 6. Setup priv
-        * ********************/
-
-       err = iwl3945_init_drv(priv);
-       if (err) {
-               IWL_ERR(priv, "initializing driver failed\n");
-               goto out_unset_hw_params;
-       }
-
-       IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
-               priv->cfg->name);
-
-       /***********************
-        * 7. Setup Services
-        * ********************/
-
-       spin_lock_irqsave(&priv->lock, flags);
-       iwl_disable_interrupts(priv);
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       pci_enable_msi(priv->pci_dev);
-
-       err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr_ops.isr,
-                         IRQF_SHARED, DRV_NAME, priv);
-       if (err) {
-               IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
-               goto out_disable_msi;
-       }
-
-       err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
-       if (err) {
-               IWL_ERR(priv, "failed to create sysfs device attributes\n");
-               goto out_release_irq;
-       }
-
-       iwl_set_rxon_channel(priv,
-                            &priv->bands[IEEE80211_BAND_2GHZ].channels[5],
-                            &priv->contexts[IWL_RXON_CTX_BSS]);
-       iwl3945_setup_deferred_work(priv);
-       iwl3945_setup_rx_handlers(priv);
-       iwl_power_initialize(priv);
-
-       /*********************************
-        * 8. Setup and Register mac80211
-        * *******************************/
-
-       iwl_enable_interrupts(priv);
-
-       err = iwl3945_setup_mac(priv);
-       if (err)
-               goto  out_remove_sysfs;
-
-       err = iwl_dbgfs_register(priv, DRV_NAME);
-       if (err)
-               IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
-
-       /* Start monitoring the killswitch */
-       queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
-                          2 * HZ);
-
-       return 0;
-
- out_remove_sysfs:
-       destroy_workqueue(priv->workqueue);
-       priv->workqueue = NULL;
-       sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
- out_release_irq:
-       free_irq(priv->pci_dev->irq, priv);
- out_disable_msi:
-       pci_disable_msi(priv->pci_dev);
-       iwlcore_free_geos(priv);
-       iwl_free_channel_map(priv);
- out_unset_hw_params:
-       iwl3945_unset_hw_params(priv);
- out_eeprom_free:
-       iwl_eeprom_free(priv);
- out_iounmap:
-       pci_iounmap(pdev, priv->hw_base);
- out_pci_release_regions:
-       pci_release_regions(pdev);
- out_pci_disable_device:
-       pci_set_drvdata(pdev, NULL);
-       pci_disable_device(pdev);
- out_ieee80211_free_hw:
-       iwl_free_traffic_mem(priv);
-       ieee80211_free_hw(priv->hw);
- out:
-       return err;
-}
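
iwl3945_pci_probe() above follows the usual kernel idiom of unwinding partial initialization through a ladder of goto labels in reverse order of acquisition, so every failure path releases exactly what was set up before it. For readers unfamiliar with the idiom, here is a minimal stand-alone sketch of the same pattern; step_a/step_b/step_c and their undo helpers are made-up stand-ins, not driver functions.

#include <errno.h>
#include <stdio.h>

static int step_a(void)  { puts("a: acquired"); return 0; }
static void undo_a(void) { puts("a: released"); }
static int step_b(void)  { puts("b: acquired"); return 0; }
static void undo_b(void) { puts("b: released"); }
static int step_c(void)  { puts("c: failed");   return -EIO; }

static int probe(void)
{
	int err;

	err = step_a();
	if (err)
		goto out;
	err = step_b();
	if (err)
		goto out_undo_a;
	err = step_c();
	if (err)
		goto out_undo_b;	/* unwind in reverse order */
	return 0;

out_undo_b:
	undo_b();
out_undo_a:
	undo_a();
out:
	return err;
}

int main(void)
{
	return probe() ? 1 : 0;
}
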
-
-static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
-{
-       struct iwl_priv *priv = pci_get_drvdata(pdev);
-       unsigned long flags;
-
-       if (!priv)
-               return;
-
-       IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
-
-       iwl_dbgfs_unregister(priv);
-
-       set_bit(STATUS_EXIT_PENDING, &priv->status);
-
-       if (priv->mac80211_registered) {
-               ieee80211_unregister_hw(priv->hw);
-               priv->mac80211_registered = 0;
-       } else {
-               iwl3945_down(priv);
-       }
-
-       /*
-        * Make sure device is reset to low power before unloading driver.
-        * This may be redundant with iwl_down(), but there are paths to
-        * run iwl_down() without calling apm_ops.stop(), and there are
-        * paths to avoid running iwl_down() at all before leaving driver.
-        * This (inexpensive) call *makes sure* device is reset.
-        */
-       iwl_apm_stop(priv);
-
-       /* make sure we flush any pending irq or
-        * tasklet for the driver
-        */
-       spin_lock_irqsave(&priv->lock, flags);
-       iwl_disable_interrupts(priv);
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       iwl_synchronize_irq(priv);
-
-       sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
-
-       cancel_delayed_work_sync(&priv->_3945.rfkill_poll);
-
-       iwl3945_dealloc_ucode_pci(priv);
-
-       if (priv->rxq.bd)
-               iwl3945_rx_queue_free(priv, &priv->rxq);
-       iwl3945_hw_txq_ctx_free(priv);
-
-       iwl3945_unset_hw_params(priv);
-
-       /*netif_stop_queue(dev); */
-       flush_workqueue(priv->workqueue);
-
-       /* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes
-        * priv->workqueue... so we can't take down the workqueue
-        * until now... */
-       destroy_workqueue(priv->workqueue);
-       priv->workqueue = NULL;
-       iwl_free_traffic_mem(priv);
-
-       free_irq(pdev->irq, priv);
-       pci_disable_msi(pdev);
-
-       pci_iounmap(pdev, priv->hw_base);
-       pci_release_regions(pdev);
-       pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
-
-       iwl_free_channel_map(priv);
-       iwlcore_free_geos(priv);
-       kfree(priv->scan_cmd);
-       if (priv->beacon_skb)
-               dev_kfree_skb(priv->beacon_skb);
-
-       ieee80211_free_hw(priv->hw);
-}
-
-
-/*****************************************************************************
- *
- * driver and module entry point
- *
- *****************************************************************************/
-
-static struct pci_driver iwl3945_driver = {
-       .name = DRV_NAME,
-       .id_table = iwl3945_hw_card_ids,
-       .probe = iwl3945_pci_probe,
-       .remove = __devexit_p(iwl3945_pci_remove),
-       .driver.pm = IWL_PM_OPS,
-};
-
-static int __init iwl3945_init(void)
-{
-
-       int ret;
-       pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
-       pr_info(DRV_COPYRIGHT "\n");
-
-       ret = iwl3945_rate_control_register();
-       if (ret) {
-               pr_err("Unable to register rate control algorithm: %d\n", ret);
-               return ret;
-       }
-
-       ret = pci_register_driver(&iwl3945_driver);
-       if (ret) {
-               pr_err("Unable to initialize PCI module\n");
-               goto error_register;
-       }
-
-       return ret;
-
-error_register:
-       iwl3945_rate_control_unregister();
-       return ret;
-}
-
-static void __exit iwl3945_exit(void)
-{
-       pci_unregister_driver(&iwl3945_driver);
-       iwl3945_rate_control_unregister();
-}
-
-MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
-
-module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
-MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
-module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
-MODULE_PARM_DESC(swcrypto,
-                "using software crypto (default 1 [software])\n");
-#ifdef CONFIG_IWLWIFI_DEBUG
-module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "debug output mask");
-#endif
-module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
-                  int, S_IRUGO);
-MODULE_PARM_DESC(disable_hw_scan,
-                "disable hardware scanning (default 0) (deprecated)");
-module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error");
-
-module_exit(iwl3945_exit);
-module_init(iwl3945_init);
index 5a4982271e96a8f9cb57fb6c22610549b1dbcb0b..ed57e44028009e0b6dd515169416caa4d213029b 100644 (file)
@@ -287,7 +287,8 @@ int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
                        return -EINVAL;
                }
 
-               freq = ieee80211_channel_to_frequency(umac_bss->channel);
+               freq = ieee80211_channel_to_frequency(umac_bss->channel,
+                                                     band->band);
                channel = ieee80211_get_channel(wiphy, freq);
                signal = umac_bss->rssi * 100;
 
index a944893ae3ca78d9b3371102ffc85ad9b5cea65e..9a57cf6a488fabba03d00c1f3d8d57970f1fd233 100644 (file)
@@ -543,7 +543,10 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
        switch (le32_to_cpu(complete->status)) {
        case UMAC_ASSOC_COMPLETE_SUCCESS:
                chan = ieee80211_get_channel(wiphy,
-                       ieee80211_channel_to_frequency(complete->channel));
+                       ieee80211_channel_to_frequency(complete->channel,
+                               complete->band == UMAC_BAND_2GHZ ?
+                                       IEEE80211_BAND_2GHZ :
+                                       IEEE80211_BAND_5GHZ));
                if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
                        /* Associated to a unallowed channel, disassociate. */
                        __iwm_invalidate_mlme_profile(iwm);
@@ -841,7 +844,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
                goto err;
        }
 
-       freq = ieee80211_channel_to_frequency(umac_bss->channel);
+       freq = ieee80211_channel_to_frequency(umac_bss->channel, band->band);
        channel = ieee80211_get_channel(wiphy, freq);
        signal = umac_bss->rssi * 100;
 
index 698a1f7694ed9eac019b580a9fe534a169661955..30ef0351bfc443fd889505d5c7421ce55b6ddadc 100644 (file)
@@ -607,7 +607,8 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
                /* No channel, no luck */
                if (chan_no != -1) {
                        struct wiphy *wiphy = priv->wdev->wiphy;
-                       int freq = ieee80211_channel_to_frequency(chan_no);
+                       int freq = ieee80211_channel_to_frequency(chan_no,
+                                                       IEEE80211_BAND_2GHZ);
                        struct ieee80211_channel *channel =
                                ieee80211_get_channel(wiphy, freq);
 
@@ -1597,7 +1598,8 @@ static int lbs_get_survey(struct wiphy *wiphy, struct net_device *dev,
        lbs_deb_enter(LBS_DEB_CFG80211);
 
        survey->channel = ieee80211_get_channel(wiphy,
-               ieee80211_channel_to_frequency(priv->channel));
+               ieee80211_channel_to_frequency(priv->channel,
+                                              IEEE80211_BAND_2GHZ));
 
        ret = lbs_get_rssi(priv, &signal, &noise);
        if (ret == 0) {
index 78c4da150a745d9cf6bf8db5de96054c9fb53b9b..7e8a658b7670ee580b5a8a84094beb9c6de86dd7 100644 (file)
@@ -145,9 +145,13 @@ int lbs_update_hw_spec(struct lbs_private *priv)
        if (priv->current_addr[0] == 0xff)
                memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN);
 
-       memcpy(priv->dev->dev_addr, priv->current_addr, ETH_ALEN);
-       if (priv->mesh_dev)
-               memcpy(priv->mesh_dev->dev_addr, priv->current_addr, ETH_ALEN);
+       if (!priv->copied_hwaddr) {
+               memcpy(priv->dev->dev_addr, priv->current_addr, ETH_ALEN);
+               if (priv->mesh_dev)
+                       memcpy(priv->mesh_dev->dev_addr,
+                               priv->current_addr, ETH_ALEN);
+               priv->copied_hwaddr = 1;
+       }
 
 out:
        lbs_deb_leave(LBS_DEB_CMD);
index 18dd9a02c459cea96c2158ac2bfc2764abed2087..bc461eb396604cb746b3259c4656ee693caa1dd2 100644 (file)
@@ -90,6 +90,7 @@ struct lbs_private {
        void *card;
        u8 fw_ready;
        u8 surpriseremoved;
+       u8 setup_fw_on_resume;
        int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
        void (*reset_card) (struct lbs_private *priv);
        int (*enter_deep_sleep) (struct lbs_private *priv);
@@ -101,6 +102,7 @@ struct lbs_private {
        u32 fwcapinfo;
        u16 regioncode;
        u8 current_addr[ETH_ALEN];
+       u8 copied_hwaddr;
 
        /* Command download */
        u8 dnld_sent;
index 00600239a053ff8e6e38383c015de42d30401f7f..f6c2cd665f4932c351f0ee915ba4a27c59a57147 100644 (file)
 #include <linux/moduleparam.h>
 #include <linux/firmware.h>
 #include <linux/jiffies.h>
-#include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/netdevice.h>
-#include <linux/semaphore.h>
 #include <linux/slab.h>
 #include <linux/spi/libertas_spi.h>
 #include <linux/spi/spi.h>
 #include "dev.h"
 #include "if_spi.h"
 
+struct if_spi_packet {
+       struct list_head                list;
+       u16                             blen;
+       u8                              buffer[0] __attribute__((aligned(4)));
+};
+
 struct if_spi_card {
        struct spi_device               *spi;
        struct lbs_private              *priv;
@@ -51,18 +55,36 @@ struct if_spi_card {
        unsigned long                   spu_reg_delay;
 
        /* Handles all SPI communication (except for FW load) */
-       struct task_struct              *spi_thread;
-       int                             run_thread;
-
-       /* Used to wake up the spi_thread */
-       struct semaphore                spi_ready;
-       struct semaphore                spi_thread_terminated;
+       struct workqueue_struct         *workqueue;
+       struct work_struct              packet_work;
 
        u8                              cmd_buffer[IF_SPI_CMD_BUF_SIZE];
+
+       /* A buffer of incoming packets from libertas core.
+        * Since we can't sleep in hw_host_to_card, we have to buffer
+        * them. */
+       struct list_head                cmd_packet_list;
+       struct list_head                data_packet_list;
+
+       /* Protects cmd_packet_list and data_packet_list */
+       spinlock_t                      buffer_lock;
 };
 
 static void free_if_spi_card(struct if_spi_card *card)
 {
+       struct list_head *cursor, *next;
+       struct if_spi_packet *packet;
+
+       list_for_each_safe(cursor, next, &card->cmd_packet_list) {
+               packet = container_of(cursor, struct if_spi_packet, list);
+               list_del(&packet->list);
+               kfree(packet);
+       }
+       list_for_each_safe(cursor, next, &card->data_packet_list) {
+               packet = container_of(cursor, struct if_spi_packet, list);
+               list_del(&packet->list);
+               kfree(packet);
+       }
        spi_set_drvdata(card->spi, NULL);
        kfree(card);
 }
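
free_if_spi_card() above drains the two packet lists with list_for_each_safe(), the iterator that caches the next node before the current one is removed and freed. A tiny user-space model of the same idea on a plain singly linked list follows; the names here are illustrative and unrelated to the kernel's list.h.

#include <stdlib.h>

struct node {
	struct node *next;
	/* payload would go here */
};

/* Free the whole list, saving the next pointer before free(), which is
 * exactly what the _safe list iterators do for you. */
static void free_all(struct node **head)
{
	struct node *cur = *head;

	while (cur) {
		struct node *next = cur->next;

		free(cur);
		cur = next;
	}
	*head = NULL;
}

int main(void)
{
	struct node *head = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->next = head;
		head = n;
	}
	free_all(&head);
	return 0;
}
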
@@ -622,7 +644,7 @@ out:
 /*
  * SPI Transfer Thread
  *
- * The SPI thread handles all SPI transfers, so there is no need for a lock.
+ * The SPI worker handles all SPI transfers, so there is no need for a lock.
  */
 
 /* Move a command from the card to the host */
@@ -742,6 +764,40 @@ out:
        return err;
 }
 
+/* Move data or a command from the host to the card. */
+static void if_spi_h2c(struct if_spi_card *card,
+                       struct if_spi_packet *packet, int type)
+{
+       int err = 0;
+       u16 int_type, port_reg;
+
+       switch (type) {
+       case MVMS_DAT:
+               int_type = IF_SPI_CIC_TX_DOWNLOAD_OVER;
+               port_reg = IF_SPI_DATA_RDWRPORT_REG;
+               break;
+       case MVMS_CMD:
+               int_type = IF_SPI_CIC_CMD_DOWNLOAD_OVER;
+               port_reg = IF_SPI_CMD_RDWRPORT_REG;
+               break;
+       default:
+               lbs_pr_err("can't transfer buffer of type %d\n", type);
+               err = -EINVAL;
+               goto out;
+       }
+
+       /* Write the data to the card */
+       err = spu_write(card, port_reg, packet->buffer, packet->blen);
+       if (err)
+               goto out;
+
+out:
+       kfree(packet);
+
+       if (err)
+               lbs_pr_err("%s: error %d\n", __func__, err);
+}
+
 /* Inform the host about a card event */
 static void if_spi_e2h(struct if_spi_card *card)
 {
@@ -766,71 +822,88 @@ out:
                lbs_pr_err("%s: error %d\n", __func__, err);
 }
 
-static int lbs_spi_thread(void *data)
+static void if_spi_host_to_card_worker(struct work_struct *work)
 {
        int err;
-       struct if_spi_card *card = data;
+       struct if_spi_card *card;
        u16 hiStatus;
+       unsigned long flags;
+       struct if_spi_packet *packet;
 
-       while (1) {
-               /* Wait to be woken up by one of two things.  First, our ISR
-                * could tell us that something happened on the WLAN.
-                * Secondly, libertas could call hw_host_to_card with more
-                * data, which we might be able to send.
-                */
-               do {
-                       err = down_interruptible(&card->spi_ready);
-                       if (!card->run_thread) {
-                               up(&card->spi_thread_terminated);
-                               do_exit(0);
-                       }
-               } while (err == -EINTR);
+       card = container_of(work, struct if_spi_card, packet_work);
 
-               /* Read the host interrupt status register to see what we
-                * can do. */
-               err = spu_read_u16(card, IF_SPI_HOST_INT_STATUS_REG,
-                                       &hiStatus);
-               if (err) {
-                       lbs_pr_err("I/O error\n");
+       lbs_deb_enter(LBS_DEB_SPI);
+
+       /* Read the host interrupt status register to see what we
+        * can do. */
+       err = spu_read_u16(card, IF_SPI_HOST_INT_STATUS_REG,
+                               &hiStatus);
+       if (err) {
+               lbs_pr_err("I/O error\n");
+               goto err;
+       }
+
+       if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) {
+               err = if_spi_c2h_cmd(card);
+               if (err)
                        goto err;
-               }
+       }
+       if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) {
+               err = if_spi_c2h_data(card);
+               if (err)
+                       goto err;
+       }
 
-               if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) {
-                       err = if_spi_c2h_cmd(card);
-                       if (err)
-                               goto err;
-               }
-               if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) {
-                       err = if_spi_c2h_data(card);
-                       if (err)
-                               goto err;
+       /* workaround: in PS mode, the card does not set the Command
+        * Download Ready bit, but it sets TX Download Ready. */
+       if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY ||
+          (card->priv->psstate != PS_STATE_FULL_POWER &&
+           (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) {
+               /* This means two things. First of all,
+                * if there was a previous command sent, the card has
+                * successfully received it.
+                * Secondly, it is now ready to download another
+                * command.
+                */
+               lbs_host_to_card_done(card->priv);
+
+               /* Do we have any command packets from the host to
+                * send? */
+               packet = NULL;
+               spin_lock_irqsave(&card->buffer_lock, flags);
+               if (!list_empty(&card->cmd_packet_list)) {
+                       packet = (struct if_spi_packet *)(card->
+                                       cmd_packet_list.next);
+                       list_del(&packet->list);
                }
+               spin_unlock_irqrestore(&card->buffer_lock, flags);
 
-               /* workaround: in PS mode, the card does not set the Command
-                * Download Ready bit, but it sets TX Download Ready. */
-               if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY ||
-                  (card->priv->psstate != PS_STATE_FULL_POWER &&
-                   (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) {
-                       lbs_host_to_card_done(card->priv);
+               if (packet)
+                       if_spi_h2c(card, packet, MVMS_CMD);
+       }
+       if (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY) {
+               /* Do we have any data packets from the host to
+                * send? */
+               packet = NULL;
+               spin_lock_irqsave(&card->buffer_lock, flags);
+               if (!list_empty(&card->data_packet_list)) {
+                       packet = (struct if_spi_packet *)(card->
+                                       data_packet_list.next);
+                       list_del(&packet->list);
                }
+               spin_unlock_irqrestore(&card->buffer_lock, flags);
 
-               if (hiStatus & IF_SPI_HIST_CARD_EVENT)
-                       if_spi_e2h(card);
+               if (packet)
+                       if_spi_h2c(card, packet, MVMS_DAT);
+       }
+       if (hiStatus & IF_SPI_HIST_CARD_EVENT)
+               if_spi_e2h(card);
 
 err:
-               if (err)
-                       lbs_pr_err("%s: got error %d\n", __func__, err);
-       }
-}
+       if (err)
+               lbs_pr_err("%s: got error %d\n", __func__, err);
 
-/* Block until lbs_spi_thread thread has terminated */
-static void if_spi_terminate_spi_thread(struct if_spi_card *card)
-{
-       /* It would be nice to use kthread_stop here, but that function
-        * can't wake threads waiting for a semaphore. */
-       card->run_thread = 0;
-       up(&card->spi_ready);
-       down(&card->spi_thread_terminated);
+       lbs_deb_leave(LBS_DEB_SPI);
 }
 
 /*
@@ -842,18 +915,40 @@ static int if_spi_host_to_card(struct lbs_private *priv,
                                u8 type, u8 *buf, u16 nb)
 {
        int err = 0;
+       unsigned long flags;
        struct if_spi_card *card = priv->card;
+       struct if_spi_packet *packet;
+       u16 blen;
 
        lbs_deb_enter_args(LBS_DEB_SPI, "type %d, bytes %d", type, nb);
 
-       nb = ALIGN(nb, 4);
+       if (nb == 0) {
+               lbs_pr_err("%s: invalid size requested: %d\n", __func__, nb);
+               err = -EINVAL;
+               goto out;
+       }
+       blen = ALIGN(nb, 4);
+       packet = kzalloc(sizeof(struct if_spi_packet) + blen, GFP_ATOMIC);
+       if (!packet) {
+               err = -ENOMEM;
+               goto out;
+       }
+       packet->blen = blen;
+       memcpy(packet->buffer, buf, nb);
+       memset(packet->buffer + nb, 0, blen - nb);
 
        switch (type) {
        case MVMS_CMD:
-               err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG, buf, nb);
+               priv->dnld_sent = DNLD_CMD_SENT;
+               spin_lock_irqsave(&card->buffer_lock, flags);
+               list_add_tail(&packet->list, &card->cmd_packet_list);
+               spin_unlock_irqrestore(&card->buffer_lock, flags);
                break;
        case MVMS_DAT:
-               err = spu_write(card, IF_SPI_DATA_RDWRPORT_REG, buf, nb);
+               priv->dnld_sent = DNLD_DATA_SENT;
+               spin_lock_irqsave(&card->buffer_lock, flags);
+               list_add_tail(&packet->list, &card->data_packet_list);
+               spin_unlock_irqrestore(&card->buffer_lock, flags);
                break;
        default:
                lbs_pr_err("can't transfer buffer of type %d", type);
@@ -861,6 +956,9 @@ static int if_spi_host_to_card(struct lbs_private *priv,
                break;
        }
 
+       /* Queue spi xfer work */
+       queue_work(card->workqueue, &card->packet_work);
+out:
        lbs_deb_leave_args(LBS_DEB_SPI, "err=%d", err);
        return err;
 }
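
The reworked if_spi_host_to_card() above runs in a context that must not sleep, so it only copies the buffer into a freshly allocated packet, appends it to a spinlock-protected list and kicks the workqueue; if_spi_host_to_card_worker() later pops packets in process context and performs the actual (sleeping) SPI writes. The sketch below models that producer/consumer shape in isolation; xfer_ctx, xfer_pkt, xfer_enqueue and xfer_worker are made-up names, and the list head, lock and work item are assumed to have been initialized (INIT_LIST_HEAD, spin_lock_init, INIT_WORK) as the probe hunk further down does.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct xfer_pkt {
	struct list_head list;
	u16 len;
	u8 data[];
};

struct xfer_ctx {
	struct list_head queue;		/* pending packets */
	spinlock_t lock;		/* protects queue */
	struct workqueue_struct *wq;
	struct work_struct work;
};

/* Producer: may be called from atomic context, so it only copies and
 * queues, then defers the transfer to the worker. */
static int xfer_enqueue(struct xfer_ctx *ctx, const u8 *buf, u16 nb)
{
	u16 blen = ALIGN(nb, 4);
	struct xfer_pkt *pkt;
	unsigned long flags;

	pkt = kzalloc(sizeof(*pkt) + blen, GFP_ATOMIC);
	if (!pkt)
		return -ENOMEM;
	pkt->len = blen;
	memcpy(pkt->data, buf, nb);

	spin_lock_irqsave(&ctx->lock, flags);
	list_add_tail(&pkt->list, &ctx->queue);
	spin_unlock_irqrestore(&ctx->lock, flags);

	queue_work(ctx->wq, &ctx->work);
	return 0;
}

/* Consumer: runs in process context and may sleep while transferring. */
static void xfer_worker(struct work_struct *work)
{
	struct xfer_ctx *ctx = container_of(work, struct xfer_ctx, work);
	struct xfer_pkt *pkt;
	unsigned long flags;

	for (;;) {
		pkt = NULL;
		spin_lock_irqsave(&ctx->lock, flags);
		if (!list_empty(&ctx->queue)) {
			pkt = list_first_entry(&ctx->queue,
					       struct xfer_pkt, list);
			list_del(&pkt->list);
		}
		spin_unlock_irqrestore(&ctx->lock, flags);
		if (!pkt)
			break;

		/* ... sleeping bus write with pkt->data goes here ... */

		kfree(pkt);
	}
}
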
@@ -869,13 +967,14 @@ static int if_spi_host_to_card(struct lbs_private *priv,
  * Host Interrupts
  *
  * Service incoming interrupts from the WLAN device. We can't sleep here, so
- * don't try to talk on the SPI bus, just wake up the SPI thread.
+ * don't try to talk on the SPI bus, just queue the SPI xfer work.
  */
 static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id)
 {
        struct if_spi_card *card = dev_id;
 
-       up(&card->spi_ready);
+       queue_work(card->workqueue, &card->packet_work);
+
        return IRQ_HANDLED;
 }
 
@@ -883,56 +982,26 @@ static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id)
  * SPI callbacks
  */
 
-static int __devinit if_spi_probe(struct spi_device *spi)
+static int if_spi_init_card(struct if_spi_card *card)
 {
-       struct if_spi_card *card;
-       struct lbs_private *priv = NULL;
-       struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
-       int err = 0, i;
+       struct spi_device *spi = card->spi;
+       int err, i;
        u32 scratch;
-       struct sched_param param = { .sched_priority = 1 };
        const struct firmware *helper = NULL;
        const struct firmware *mainfw = NULL;
 
        lbs_deb_enter(LBS_DEB_SPI);
 
-       if (!pdata) {
-               err = -EINVAL;
-               goto out;
-       }
-
-       if (pdata->setup) {
-               err = pdata->setup(spi);
-               if (err)
-                       goto out;
-       }
-
-       /* Allocate card structure to represent this specific device */
-       card = kzalloc(sizeof(struct if_spi_card), GFP_KERNEL);
-       if (!card) {
-               err = -ENOMEM;
-               goto out;
-       }
-       spi_set_drvdata(spi, card);
-       card->pdata = pdata;
-       card->spi = spi;
-       card->prev_xfer_time = jiffies;
-
-       sema_init(&card->spi_ready, 0);
-       sema_init(&card->spi_thread_terminated, 0);
-
-       /* Initialize the SPI Interface Unit */
-       err = spu_init(card, pdata->use_dummy_writes);
+       err = spu_init(card, card->pdata->use_dummy_writes);
        if (err)
-               goto free_card;
+               goto out;
        err = spu_get_chip_revision(card, &card->card_id, &card->card_rev);
        if (err)
-               goto free_card;
+               goto out;
 
-       /* Firmware load */
        err = spu_read_u32(card, IF_SPI_SCRATCH_4_REG, &scratch);
        if (err)
-               goto free_card;
+               goto out;
        if (scratch == SUCCESSFUL_FW_DOWNLOAD_MAGIC)
                lbs_deb_spi("Firmware is already loaded for "
                            "Marvell WLAN 802.11 adapter\n");
@@ -946,7 +1015,7 @@ static int __devinit if_spi_probe(struct spi_device *spi)
                        lbs_pr_err("Unsupported chip_id: 0x%02x\n",
                                        card->card_id);
                        err = -ENODEV;
-                       goto free_card;
+                       goto out;
                }
 
                err = lbs_get_firmware(&card->spi->dev, NULL, NULL,
@@ -954,7 +1023,7 @@ static int __devinit if_spi_probe(struct spi_device *spi)
                                        &mainfw);
                if (err) {
                        lbs_pr_err("failed to find firmware (%d)\n", err);
-                       goto free_card;
+                       goto out;
                }
 
                lbs_deb_spi("Initializing FW for Marvell WLAN 802.11 adapter "
@@ -966,14 +1035,67 @@ static int __devinit if_spi_probe(struct spi_device *spi)
                                spi->max_speed_hz);
                err = if_spi_prog_helper_firmware(card, helper);
                if (err)
-                       goto free_card;
+                       goto out;
                err = if_spi_prog_main_firmware(card, mainfw);
                if (err)
-                       goto free_card;
+                       goto out;
                lbs_deb_spi("loaded FW for Marvell WLAN 802.11 adapter\n");
        }
 
        err = spu_set_interrupt_mode(card, 0, 1);
+       if (err)
+               goto out;
+
+out:
+       if (helper)
+               release_firmware(helper);
+       if (mainfw)
+               release_firmware(mainfw);
+
+       lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
+
+       return err;
+}
+
+static int __devinit if_spi_probe(struct spi_device *spi)
+{
+       struct if_spi_card *card;
+       struct lbs_private *priv = NULL;
+       struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
+       int err = 0;
+
+       lbs_deb_enter(LBS_DEB_SPI);
+
+       if (!pdata) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       if (pdata->setup) {
+               err = pdata->setup(spi);
+               if (err)
+                       goto out;
+       }
+
+       /* Allocate card structure to represent this specific device */
+       card = kzalloc(sizeof(struct if_spi_card), GFP_KERNEL);
+       if (!card) {
+               err = -ENOMEM;
+               goto teardown;
+       }
+       spi_set_drvdata(spi, card);
+       card->pdata = pdata;
+       card->spi = spi;
+       card->prev_xfer_time = jiffies;
+
+       INIT_LIST_HEAD(&card->cmd_packet_list);
+       INIT_LIST_HEAD(&card->data_packet_list);
+       spin_lock_init(&card->buffer_lock);
+
+       /* Initialize the SPI Interface Unit and load the firmware */
+       err = if_spi_init_card(card);
        if (err)
                goto free_card;
 
@@ -993,27 +1115,16 @@ static int __devinit if_spi_probe(struct spi_device *spi)
        priv->fw_ready = 1;
 
        /* Initialize interrupt handling stuff. */
-       card->run_thread = 1;
-       card->spi_thread = kthread_run(lbs_spi_thread, card, "lbs_spi_thread");
-       if (IS_ERR(card->spi_thread)) {
-               card->run_thread = 0;
-               err = PTR_ERR(card->spi_thread);
-               lbs_pr_err("error creating SPI thread: err=%d\n", err);
-               goto remove_card;
-       }
-       if (sched_setscheduler(card->spi_thread, SCHED_FIFO, &param))
-               lbs_pr_err("Error setting scheduler, using default.\n");
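+       /* host-to-card transfers are handled by if_spi_host_to_card_worker */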
+       card->workqueue = create_workqueue("libertas_spi");
+       INIT_WORK(&card->packet_work, if_spi_host_to_card_worker);
 
        err = request_irq(spi->irq, if_spi_host_interrupt,
                        IRQF_TRIGGER_FALLING, "libertas_spi", card);
        if (err) {
                lbs_pr_err("can't get host irq line-- request_irq failed\n");
-               goto terminate_thread;
+               goto terminate_workqueue;
        }
 
-       /* poke the IRQ handler so that we don't miss the first interrupt */
-       up(&card->spi_ready);
-
        /* Start the card.
         * This will call register_netdev, and we'll start
         * getting interrupts... */
@@ -1028,18 +1139,16 @@ static int __devinit if_spi_probe(struct spi_device *spi)
 
 release_irq:
        free_irq(spi->irq, card);
-terminate_thread:
-       if_spi_terminate_spi_thread(card);
-remove_card:
+terminate_workqueue:
+       flush_workqueue(card->workqueue);
+       destroy_workqueue(card->workqueue);
        lbs_remove_card(priv); /* will call free_netdev */
 free_card:
        free_if_spi_card(card);
+teardown:
+       if (pdata->teardown)
+               pdata->teardown(spi);
 out:
-       if (helper)
-               release_firmware(helper);
-       if (mainfw)
-               release_firmware(mainfw);
-
        lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
        return err;
 }
@@ -1056,7 +1165,8 @@ static int __devexit libertas_spi_remove(struct spi_device *spi)
        lbs_remove_card(priv); /* will call free_netdev */
 
        free_irq(spi->irq, card);
-       if_spi_terminate_spi_thread(card);
+       flush_workqueue(card->workqueue);
+       destroy_workqueue(card->workqueue);
        if (card->pdata->teardown)
                card->pdata->teardown(spi);
        free_if_spi_card(card);
index 6836a6dd985331d7462034b808ffcf422c6e587d..ca8149cd5bd9803b2bd55c05f81266647ac28d20 100644 (file)
@@ -539,6 +539,43 @@ static int lbs_thread(void *data)
        return 0;
 }
 
+/**
+ * @brief This function gets the HW spec from the firmware and sets
+ *        some basic parameters.
+ *
+ *  @param priv    A pointer to struct lbs_private structure
+ *  @return        0 or -1
+ */
+static int lbs_setup_firmware(struct lbs_private *priv)
+{
+       int ret = -1;
+       s16 curlevel = 0, minlevel = 0, maxlevel = 0;
+
+       lbs_deb_enter(LBS_DEB_FW);
+
+       /* Read MAC address from firmware */
+       memset(priv->current_addr, 0xff, ETH_ALEN);
+       ret = lbs_update_hw_spec(priv);
+       if (ret)
+               goto done;
+
+       /* Read power levels if available */
+       ret = lbs_get_tx_power(priv, &curlevel, &minlevel, &maxlevel);
+       if (ret == 0) {
+               priv->txpower_cur = curlevel;
+               priv->txpower_min = minlevel;
+               priv->txpower_max = maxlevel;
+       }
+
+       /* Send cmd to FW to enable 11D function */
+       ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
+
+       lbs_set_mac_control(priv);
+done:
+       lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
+       return ret;
+}
+
 int lbs_suspend(struct lbs_private *priv)
 {
        int ret;
@@ -584,47 +621,13 @@ int lbs_resume(struct lbs_private *priv)
                        lbs_pr_err("deep sleep activation failed: %d\n", ret);
        }
 
-       lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(lbs_resume);
-
-/**
- * @brief This function gets the HW spec from the firmware and sets
- *        some basic parameters.
- *
- *  @param priv    A pointer to struct lbs_private structure
- *  @return       0 or -1
- */
-static int lbs_setup_firmware(struct lbs_private *priv)
-{
-       int ret = -1;
-       s16 curlevel = 0, minlevel = 0, maxlevel = 0;
-
-       lbs_deb_enter(LBS_DEB_FW);
-
-       /* Read MAC address from firmware */
-       memset(priv->current_addr, 0xff, ETH_ALEN);
-       ret = lbs_update_hw_spec(priv);
-       if (ret)
-               goto done;
-
-       /* Read power levels if available */
-       ret = lbs_get_tx_power(priv, &curlevel, &minlevel, &maxlevel);
-       if (ret == 0) {
-               priv->txpower_cur = curlevel;
-               priv->txpower_min = minlevel;
-               priv->txpower_max = maxlevel;
-       }
+       if (priv->setup_fw_on_resume)
+               ret = lbs_setup_firmware(priv);
 
-       /* Send cmd to FW to enable 11D function */
-       ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
-
-       lbs_set_mac_control(priv);
-done:
        lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
        return ret;
 }
+EXPORT_SYMBOL_GPL(lbs_resume);
 
 /**
  *  This function handles the timeout of command sending.
index 9278b3c8ee306c4b59c610c156257daf14d47179..d4005081f1dfde09bd2845f4cf85a9d712dd25e1 100644 (file)
@@ -225,7 +225,7 @@ static void lbtf_free_adapter(struct lbtf_private *priv)
        lbtf_deb_leave(LBTF_DEB_MAIN);
 }
 
-static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct lbtf_private *priv = hw->priv;
 
@@ -236,7 +236,6 @@ static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
         * there are no buffered multicast frames to send
         */
        ieee80211_stop_queues(priv->hw);
-       return NETDEV_TX_OK;
 }
 
 static void lbtf_tx_work(struct work_struct *work)
index 454f045ddff33118525f93142e53349622e015c3..56f439d58013650a840510278a3c87caa197bd47 100644 (file)
@@ -541,7 +541,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
 }
 
 
-static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        bool ack;
        struct ieee80211_tx_info *txi;
@@ -551,7 +551,7 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        if (skb->len < 10) {
                /* Should not happen; just a sanity check for addr1 use */
                dev_kfree_skb(skb);
-               return NETDEV_TX_OK;
+               return;
        }
 
        ack = mac80211_hwsim_tx_frame(hw, skb);
@@ -571,7 +571,6 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && ack)
                txi->flags |= IEEE80211_TX_STAT_ACK;
        ieee80211_tx_status_irqsafe(hw, skb);
-       return NETDEV_TX_OK;
 }
 
 
@@ -943,7 +942,8 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
 static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
                                       struct ieee80211_vif *vif,
                                       enum ieee80211_ampdu_mlme_action action,
-                                      struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+                                      struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                                      u8 buf_size)
 {
        switch (action) {
        case IEEE80211_AMPDU_TX_START:
index 9ecf8407cb1b070825d098869c7e5317d499d019..df5959f36d0b42f92d0367dc3350105d468078c7 100644 (file)
@@ -232,6 +232,9 @@ struct mwl8k_priv {
        struct completion firmware_loading_complete;
 };
 
+#define MAX_WEP_KEY_LEN         13
+#define NUM_WEP_KEYS            4
+
 /* Per interface specific private data */
 struct mwl8k_vif {
        struct list_head list;
@@ -242,8 +245,21 @@ struct mwl8k_vif {
 
        /* Non AMPDU sequence number assigned by driver.  */
        u16 seqno;
+
+       /* Saved WEP keys */
+       struct {
+               u8 enabled;
+               u8 key[sizeof(struct ieee80211_key_conf) + MAX_WEP_KEY_LEN];
+       } wep_key_conf[NUM_WEP_KEYS];
+
+       /* BSSID */
+       u8 bssid[ETH_ALEN];
+
+       /* A flag to indicate if HW crypto is enabled for this bssid */
+       bool is_hw_crypto_enabled;
 };
 #define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))
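+/* cast a saved WEP key buffer back to a struct ieee80211_key_conf pointer */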
+#define IEEE80211_KEY_CONF(_u8) ((struct ieee80211_key_conf *)(_u8))
 
 struct mwl8k_sta {
        /* Index into station database. Returned by UPDATE_STADB.  */
@@ -337,6 +353,7 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
 #define MWL8K_CMD_SET_RATEADAPT_MODE   0x0203
 #define MWL8K_CMD_BSS_START            0x1100          /* per-vif */
 #define MWL8K_CMD_SET_NEW_STN          0x1111          /* per-vif */
+#define MWL8K_CMD_UPDATE_ENCRYPTION    0x1122          /* per-vif */
 #define MWL8K_CMD_UPDATE_STADB         0x1123
 
 static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
@@ -375,6 +392,7 @@ static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
                MWL8K_CMDNAME(SET_RATEADAPT_MODE);
                MWL8K_CMDNAME(BSS_START);
                MWL8K_CMDNAME(SET_NEW_STN);
+               MWL8K_CMDNAME(UPDATE_ENCRYPTION);
                MWL8K_CMDNAME(UPDATE_STADB);
        default:
                snprintf(buf, bufsize, "0x%x", cmd);
@@ -715,10 +733,12 @@ static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos)
                skb_pull(skb, sizeof(*tr) - hdrlen);
 }
 
-static inline void mwl8k_add_dma_header(struct sk_buff *skb)
+static void
+mwl8k_add_dma_header(struct sk_buff *skb, int tail_pad)
 {
        struct ieee80211_hdr *wh;
        int hdrlen;
+       int reqd_hdrlen;
        struct mwl8k_dma_data *tr;
 
        /*
@@ -730,11 +750,13 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
        wh = (struct ieee80211_hdr *)skb->data;
 
        hdrlen = ieee80211_hdrlen(wh->frame_control);
-       if (hdrlen != sizeof(*tr))
-               skb_push(skb, sizeof(*tr) - hdrlen);
+       reqd_hdrlen = sizeof(*tr);
+
+       if (hdrlen != reqd_hdrlen)
+               skb_push(skb, reqd_hdrlen - hdrlen);
 
        if (ieee80211_is_data_qos(wh->frame_control))
-               hdrlen -= 2;
+               hdrlen -= IEEE80211_QOS_CTL_LEN;
 
        tr = (struct mwl8k_dma_data *)skb->data;
        if (wh != &tr->wh)
@@ -747,9 +769,52 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
         * payload".  That is, everything except for the 802.11 header.
         * This includes all crypto material including the MIC.
         */
-       tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr));
+       tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr) + tail_pad);
 }
 
+static void mwl8k_encapsulate_tx_frame(struct sk_buff *skb)
+{
+       struct ieee80211_hdr *wh;
+       struct ieee80211_tx_info *tx_info;
+       struct ieee80211_key_conf *key_conf;
+       int data_pad;
+
+       wh = (struct ieee80211_hdr *)skb->data;
+
+       tx_info = IEEE80211_SKB_CB(skb);
+
+       key_conf = NULL;
+       if (ieee80211_is_data(wh->frame_control))
+               key_conf = tx_info->control.hw_key;
+
+       /*
+        * Make sure the packet header is in the DMA header format (4-address
+        * without QoS). mac80211 has already added the necessary crypto
+        * padding between the header and the payload, but it does not add
+        * tail padding when HW crypto is enabled.
+        *
+        * We have the following trailer padding requirements:
+        * - WEP: 4 trailer bytes (ICV)
+        * - TKIP: 12 trailer bytes (8 MIC + 4 ICV)
+        * - CCMP: 8 trailer bytes (MIC)
+        */
+       data_pad = 0;
+       if (key_conf != NULL) {
+               switch (key_conf->cipher) {
+               case WLAN_CIPHER_SUITE_WEP40:
+               case WLAN_CIPHER_SUITE_WEP104:
+                       data_pad = 4;
+                       break;
+               case WLAN_CIPHER_SUITE_TKIP:
+                       data_pad = 12;
+                       break;
+               case WLAN_CIPHER_SUITE_CCMP:
+                       data_pad = 8;
+                       break;
+               }
+       }
+       mwl8k_add_dma_header(skb, data_pad);
+}
 
 /*
  * Packet reception for 88w8366 AP firmware.
@@ -778,6 +843,13 @@ struct mwl8k_rxd_8366_ap {
 
 #define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST    0x80
 
+/* 8366 AP rx_status bits */
+#define MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK          0x80
+#define MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR       0xFF
+#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR      0x02
+#define MWL8K_8366_AP_RXSTAT_WEP_DECRYPT_ICV_ERR       0x04
+#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_ICV_ERR      0x08
+
 static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr)
 {
        struct mwl8k_rxd_8366_ap *rxd = _rxd;
@@ -834,10 +906,16 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
        } else {
                status->band = IEEE80211_BAND_2GHZ;
        }
-       status->freq = ieee80211_channel_to_frequency(rxd->channel);
+       status->freq = ieee80211_channel_to_frequency(rxd->channel,
+                                                     status->band);
 
        *qos = rxd->qos_control;
 
+       if ((rxd->rx_status != MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR) &&
+           (rxd->rx_status & MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK) &&
+           (rxd->rx_status & MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR))
+               status->flag |= RX_FLAG_MMIC_ERROR;
+
        return le16_to_cpu(rxd->pkt_len);
 }
 
@@ -876,6 +954,11 @@ struct mwl8k_rxd_sta {
 #define MWL8K_STA_RATE_INFO_MCS_FORMAT         0x0001
 
 #define MWL8K_STA_RX_CTRL_OWNED_BY_HOST                0x02
+#define MWL8K_STA_RX_CTRL_DECRYPT_ERROR                0x04
+/* ICV=0 or MIC=1 */
+#define MWL8K_STA_RX_CTRL_DEC_ERR_TYPE         0x08
+/* Key is uploaded only in failure case */
+#define MWL8K_STA_RX_CTRL_KEY_INDEX                    0x30
 
 static void mwl8k_rxd_sta_init(void *_rxd, dma_addr_t next_dma_addr)
 {
@@ -931,9 +1014,13 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
        } else {
                status->band = IEEE80211_BAND_2GHZ;
        }
-       status->freq = ieee80211_channel_to_frequency(rxd->channel);
+       status->freq = ieee80211_channel_to_frequency(rxd->channel,
+                                                     status->band);
 
        *qos = rxd->qos_control;
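+       /* flag Michael MIC failures so mac80211 can start countermeasures */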
+       if ((rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DECRYPT_ERROR) &&
+           (rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DEC_ERR_TYPE))
+               status->flag |= RX_FLAG_MMIC_ERROR;
 
        return le16_to_cpu(rxd->pkt_len);
 }
@@ -1092,9 +1179,25 @@ static inline void mwl8k_save_beacon(struct ieee80211_hw *hw,
                ieee80211_queue_work(hw, &priv->finalize_join_worker);
 }
 
+static inline struct mwl8k_vif *mwl8k_find_vif_bss(struct list_head *vif_list,
+                                                  u8 *bssid)
+{
+       struct mwl8k_vif *mwl8k_vif;
+
+       list_for_each_entry(mwl8k_vif, vif_list, list) {
+               if (memcmp(bssid, mwl8k_vif->bssid, ETH_ALEN) == 0)
+                       return mwl8k_vif;
+       }
+
+       return NULL;
+}
+
 static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
 {
        struct mwl8k_priv *priv = hw->priv;
+       struct mwl8k_vif *mwl8k_vif = NULL;
        struct mwl8k_rx_queue *rxq = priv->rxq + index;
        int processed;
 
@@ -1104,6 +1207,7 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
                void *rxd;
                int pkt_len;
                struct ieee80211_rx_status status;
+               struct ieee80211_hdr *wh;
                __le16 qos;
 
                skb = rxq->buf[rxq->head].skb;
@@ -1130,8 +1234,7 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
 
                rxq->rxd_count--;
 
-               skb_put(skb, pkt_len);
-               mwl8k_remove_dma_header(skb, qos);
+               wh = &((struct mwl8k_dma_data *)skb->data)->wh;
 
                /*
                 * Check for a pending join operation.  Save a
@@ -1141,6 +1244,46 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
                if (mwl8k_capture_bssid(priv, (void *)skb->data))
                        mwl8k_save_beacon(hw, skb);
 
+               if (ieee80211_has_protected(wh->frame_control)) {
+
+                       /* Check if hw crypto has been enabled for
+                        * this bss. If yes, set the status flags
+                        * accordingly
+                        */
+                       mwl8k_vif = mwl8k_find_vif_bss(&priv->vif_list,
+                                                               wh->addr1);
+
+                       if (mwl8k_vif != NULL &&
+                           mwl8k_vif->is_hw_crypto_enabled == true) {
+                               /*
+                                * When the firmware encounters an
+                                * MMIC error, it drops the payload
+                                * and forwards only the 32-byte
+                                * mwl8k firmware header to the
+                                * host.
+                                *
+                                * Add four bytes of key
+                                * information; mac80211 expects
+                                * keyidx 0 in it to trigger the
+                                * TKIP countermeasures for the
+                                * MMIC failure.
+                                */
+                               if (status.flag & RX_FLAG_MMIC_ERROR) {
+                                       struct mwl8k_dma_data *tr;
+                                       tr = (struct mwl8k_dma_data *)skb->data;
+                                       memset((void *)&(tr->data), 0, 4);
+                                       pkt_len += 4;
+                               }
+
+                               if (!ieee80211_is_auth(wh->frame_control))
+                                       status.flag |= RX_FLAG_IV_STRIPPED |
+                                                      RX_FLAG_DECRYPTED |
+                                                      RX_FLAG_MMIC_STRIPPED;
+                       }
+               }
+
+               skb_put(skb, pkt_len);
+               mwl8k_remove_dma_header(skb, qos);
                memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
                ieee80211_rx_irqsafe(hw, skb);
 
@@ -1392,6 +1535,13 @@ mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
 
                info = IEEE80211_SKB_CB(skb);
                ieee80211_tx_info_clear_status(info);
+
+               /* Rate control is happening in the firmware.
+                * Ensure no tx rate is being reported.
+                */
+               info->status.rates[0].idx = -1;
+               info->status.rates[0].count = 1;
+
                if (MWL8K_TXD_SUCCESS(status))
                        info->flags |= IEEE80211_TX_STAT_ACK;
 
@@ -1423,7 +1573,7 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
        txq->txd = NULL;
 }
 
-static int
+static void
 mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
 {
        struct mwl8k_priv *priv = hw->priv;
@@ -1443,7 +1593,11 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
        else
                qos = 0;
 
-       mwl8k_add_dma_header(skb);
+       if (priv->ap_fw)
+               mwl8k_encapsulate_tx_frame(skb);
+       else
+               mwl8k_add_dma_header(skb, 0);
+
        wh = &((struct mwl8k_dma_data *)skb->data)->wh;
 
        tx_info = IEEE80211_SKB_CB(skb);
@@ -1481,7 +1635,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
                wiphy_debug(hw->wiphy,
                            "failed to dma map skb, dropping TX frame.\n");
                dev_kfree_skb(skb);
-               return NETDEV_TX_OK;
+               return;
        }
 
        spin_lock_bh(&priv->tx_lock);
@@ -1518,8 +1672,6 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
        mwl8k_tx_start(priv);
 
        spin_unlock_bh(&priv->tx_lock);
-
-       return NETDEV_TX_OK;
 }
 
 
@@ -1974,8 +2126,18 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
        cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
        cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
        cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
-       for (i = 0; i < MWL8K_TX_QUEUES; i++)
-               cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
+
+       /*
+        * The mac80211 stack treats Q0 as the highest-priority queue and Q3
+        * as the lowest, while the firmware treats Q3 as the highest and Q0
+        * as the lowest. Map mac80211's Q3 to the firmware's Q0 (and so on)
+        * so that priorities are interpreted correctly by the firmware.
+        */
+       for (i = 0; i < MWL8K_TX_QUEUES; i++) {
+               int j = MWL8K_TX_QUEUES - 1 - i;
+               cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[j].txd_dma);
+       }
+
        cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
                                 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
                                 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON);
@@ -3098,6 +3260,274 @@ static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw,
        return rc;
 }
 
+/*
+ * CMD_UPDATE_ENCRYPTION.
+ */
+
+#define MAX_ENCR_KEY_LENGTH    16
+#define MIC_KEY_LENGTH         8
+
+struct mwl8k_cmd_update_encryption {
+       struct mwl8k_cmd_pkt header;
+
+       __le32 action;
+       __le32 reserved;
+       __u8 mac_addr[6];
+       __u8 encr_type;
+
+} __attribute__((packed));
+
+struct mwl8k_cmd_set_key {
+       struct mwl8k_cmd_pkt header;
+
+       __le32 action;
+       __le32 reserved;
+       __le16 length;
+       __le16 key_type_id;
+       __le32 key_info;
+       __le32 key_id;
+       __le16 key_len;
+       __u8 key_material[MAX_ENCR_KEY_LENGTH];
+       __u8 tkip_tx_mic_key[MIC_KEY_LENGTH];
+       __u8 tkip_rx_mic_key[MIC_KEY_LENGTH];
+       __le16 tkip_rsc_low;
+       __le32 tkip_rsc_high;
+       __le16 tkip_tsc_low;
+       __le32 tkip_tsc_high;
+       __u8 mac_addr[6];
+} __attribute__((packed));
+
+enum {
+       MWL8K_ENCR_ENABLE,
+       MWL8K_ENCR_SET_KEY,
+       MWL8K_ENCR_REMOVE_KEY,
+       MWL8K_ENCR_SET_GROUP_KEY,
+};
+
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_WEP       0
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_DISABLE   1
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_TKIP      4
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED     7
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_AES       8
+
+enum {
+       MWL8K_ALG_WEP,
+       MWL8K_ALG_TKIP,
+       MWL8K_ALG_CCMP,
+};
+
+#define MWL8K_KEY_FLAG_TXGROUPKEY      0x00000004
+#define MWL8K_KEY_FLAG_PAIRWISE                0x00000008
+#define MWL8K_KEY_FLAG_TSC_VALID       0x00000040
+#define MWL8K_KEY_FLAG_WEP_TXKEY       0x01000000
+#define MWL8K_KEY_FLAG_MICKEY_VALID    0x02000000
+
+static int mwl8k_cmd_update_encryption_enable(struct ieee80211_hw *hw,
+                                             struct ieee80211_vif *vif,
+                                             u8 *addr,
+                                             u8 encr_type)
+{
+       struct mwl8k_cmd_update_encryption *cmd;
+       int rc;
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (cmd == NULL)
+               return -ENOMEM;
+
+       cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION);
+       cmd->header.length = cpu_to_le16(sizeof(*cmd));
+       cmd->action = cpu_to_le32(MWL8K_ENCR_ENABLE);
+       memcpy(cmd->mac_addr, addr, ETH_ALEN);
+       cmd->encr_type = encr_type;
+
+       rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+       kfree(cmd);
+
+       return rc;
+}
+
+static int mwl8k_encryption_set_cmd_info(struct mwl8k_cmd_set_key *cmd,
+                                               u8 *addr,
+                                               struct ieee80211_key_conf *key)
+{
+       cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION);
+       cmd->header.length = cpu_to_le16(sizeof(*cmd));
+       cmd->length = cpu_to_le16(sizeof(*cmd) -
+                               offsetof(struct mwl8k_cmd_set_key, length));
+       cmd->key_id = cpu_to_le32(key->keyidx);
+       cmd->key_len = cpu_to_le16(key->keylen);
+       memcpy(cmd->mac_addr, addr, ETH_ALEN);
+
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_WEP40:
+       case WLAN_CIPHER_SUITE_WEP104:
+               cmd->key_type_id = cpu_to_le16(MWL8K_ALG_WEP);
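+               /* only key index 0 is marked as the WEP TX key */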
+               if (key->keyidx == 0)
+                       cmd->key_info = cpu_to_le32(MWL8K_KEY_FLAG_WEP_TXKEY);
+
+               break;
+       case WLAN_CIPHER_SUITE_TKIP:
+               cmd->key_type_id = cpu_to_le16(MWL8K_ALG_TKIP);
+               cmd->key_info = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+                       ? cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE)
+                       : cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY);
+               cmd->key_info |= cpu_to_le32(MWL8K_KEY_FLAG_MICKEY_VALID
+                                               | MWL8K_KEY_FLAG_TSC_VALID);
+               break;
+       case WLAN_CIPHER_SUITE_CCMP:
+               cmd->key_type_id = cpu_to_le16(MWL8K_ALG_CCMP);
+               cmd->key_info = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+                       ? cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE)
+                       : cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY);
+               break;
+       default:
+               return -ENOTSUPP;
+       }
+
+       return 0;
+}
+
+static int mwl8k_cmd_encryption_set_key(struct ieee80211_hw *hw,
+                                               struct ieee80211_vif *vif,
+                                               u8 *addr,
+                                               struct ieee80211_key_conf *key)
+{
+       struct mwl8k_cmd_set_key *cmd;
+       int rc;
+       int keymlen;
+       u32 action;
+       u8 idx;
+       struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (cmd == NULL)
+               return -ENOMEM;
+
+       rc = mwl8k_encryption_set_cmd_info(cmd, addr, key);
+       if (rc < 0)
+               goto done;
+
+       idx = key->keyidx;
+
+       if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+               action = MWL8K_ENCR_SET_KEY;
+       else
+               action = MWL8K_ENCR_SET_GROUP_KEY;
+
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_WEP40:
+       case WLAN_CIPHER_SUITE_WEP104:
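+               /* save the key; it is re-installed when a station is added */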
+               if (!mwl8k_vif->wep_key_conf[idx].enabled) {
+                       memcpy(mwl8k_vif->wep_key_conf[idx].key, key,
+                                               sizeof(*key) + key->keylen);
+                       mwl8k_vif->wep_key_conf[idx].enabled = 1;
+               }
+
+               keymlen = 0;
+               action = MWL8K_ENCR_SET_KEY;
+               break;
+       case WLAN_CIPHER_SUITE_TKIP:
+               keymlen = MAX_ENCR_KEY_LENGTH + 2 * MIC_KEY_LENGTH;
+               break;
+       case WLAN_CIPHER_SUITE_CCMP:
+               keymlen = key->keylen;
+               break;
+       default:
+               rc = -ENOTSUPP;
+               goto done;
+       }
+
+       memcpy(cmd->key_material, key->key, keymlen);
+       cmd->action = cpu_to_le32(action);
+
+       rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+done:
+       kfree(cmd);
+
+       return rc;
+}
+
+static int mwl8k_cmd_encryption_remove_key(struct ieee80211_hw *hw,
+                                               struct ieee80211_vif *vif,
+                                               u8 *addr,
+                                               struct ieee80211_key_conf *key)
+{
+       struct mwl8k_cmd_set_key *cmd;
+       int rc;
+       struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (cmd == NULL)
+               return -ENOMEM;
+
+       rc = mwl8k_encryption_set_cmd_info(cmd, addr, key);
+       if (rc < 0)
+               goto done;
+
+       if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+                       key->cipher == WLAN_CIPHER_SUITE_WEP104)
+               mwl8k_vif->wep_key_conf[key->keyidx].enabled = 0;
+
+       cmd->action = cpu_to_le32(MWL8K_ENCR_REMOVE_KEY);
+
+       rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+done:
+       kfree(cmd);
+
+       return rc;
+}
+
+static int mwl8k_set_key(struct ieee80211_hw *hw,
+                        enum set_key_cmd cmd_param,
+                        struct ieee80211_vif *vif,
+                        struct ieee80211_sta *sta,
+                        struct ieee80211_key_conf *key)
+{
+       int rc = 0;
+       u8 encr_type;
+       u8 *addr;
+       struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+
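+       /* an error return makes mac80211 fall back to software crypto */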
+       if (vif->type == NL80211_IFTYPE_STATION)
+               return -EOPNOTSUPP;
+
+       if (sta == NULL)
+               addr = hw->wiphy->perm_addr;
+       else
+               addr = sta->addr;
+
+       if (cmd_param == SET_KEY) {
+               key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+               rc = mwl8k_cmd_encryption_set_key(hw, vif, addr, key);
+               if (rc)
+                       goto out;
+
+               if ((key->cipher == WLAN_CIPHER_SUITE_WEP40)
+                               || (key->cipher == WLAN_CIPHER_SUITE_WEP104))
+                       encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_WEP;
+               else
+                       encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED;
+
+               rc = mwl8k_cmd_update_encryption_enable(hw, vif, addr,
+                                                               encr_type);
+               if (rc)
+                       goto out;
+
+               mwl8k_vif->is_hw_crypto_enabled = true;
+
+       } else {
+               rc = mwl8k_cmd_encryption_remove_key(hw, vif, addr, key);
+
+               if (rc)
+                       goto out;
+
+               mwl8k_vif->is_hw_crypto_enabled = false;
+
+       }
+out:
+       return rc;
+}
+
 /*
  * CMD_UPDATE_STADB.
  */
@@ -3310,22 +3740,19 @@ static void mwl8k_rx_poll(unsigned long data)
 /*
  * Core driver operations.
  */
-static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct mwl8k_priv *priv = hw->priv;
        int index = skb_get_queue_mapping(skb);
-       int rc;
 
        if (!priv->radio_on) {
                wiphy_debug(hw->wiphy,
                            "dropped TX frame since radio disabled\n");
                dev_kfree_skb(skb);
-               return NETDEV_TX_OK;
+               return;
        }
 
-       rc = mwl8k_txq_xmit(hw, index, skb);
-
-       return rc;
+       mwl8k_txq_xmit(hw, index, skb);
 }
 
 static int mwl8k_start(struct ieee80211_hw *hw)
@@ -3469,6 +3896,8 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
        mwl8k_vif->vif = vif;
        mwl8k_vif->macid = macid;
        mwl8k_vif->seqno = 0;
+       memcpy(mwl8k_vif->bssid, vif->addr, ETH_ALEN);
+       mwl8k_vif->is_hw_crypto_enabled = false;
 
        /* Set the mac address.  */
        mwl8k_cmd_set_mac_addr(hw, vif, vif->addr);
@@ -3528,9 +3957,13 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
                if (rc)
                        goto out;
 
-               rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x7);
-               if (!rc)
-                       rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
+               rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x3);
+               if (rc)
+                       wiphy_warn(hw->wiphy, "failed to set # of RX antennas\n");
+               rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
+               if (rc)
+                       wiphy_warn(hw->wiphy, "failed to set # of TX antennas\n");
+
        } else {
                rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
                if (rc)
@@ -3866,18 +4299,27 @@ static int mwl8k_sta_add(struct ieee80211_hw *hw,
 {
        struct mwl8k_priv *priv = hw->priv;
        int ret;
+       int i;
+       struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+       struct ieee80211_key_conf *key;
 
        if (!priv->ap_fw) {
                ret = mwl8k_cmd_update_stadb_add(hw, vif, sta);
                if (ret >= 0) {
                        MWL8K_STA(sta)->peer_id = ret;
-                       return 0;
+                       ret = 0;
                }
 
-               return ret;
+       } else {
+               ret = mwl8k_cmd_set_new_stn_add(hw, vif, sta);
        }
 
-       return mwl8k_cmd_set_new_stn_add(hw, vif, sta);
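+       /* install any previously saved per-vif WEP keys for this station */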
+       for (i = 0; i < NUM_WEP_KEYS; i++) {
+               key = IEEE80211_KEY_CONF(mwl8k_vif->wep_key_conf[i].key);
+               if (mwl8k_vif->wep_key_conf[i].enabled)
+                       mwl8k_set_key(hw, SET_KEY, vif, sta, key);
+       }
+       return ret;
 }
 
 static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
@@ -3894,12 +4336,14 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
                if (!priv->wmm_enabled)
                        rc = mwl8k_cmd_set_wmm_mode(hw, 1);
 
-               if (!rc)
-                       rc = mwl8k_cmd_set_edca_params(hw, queue,
+               if (!rc) {
+                       int q = MWL8K_TX_QUEUES - 1 - queue;
+                       rc = mwl8k_cmd_set_edca_params(hw, q,
                                                       params->cw_min,
                                                       params->cw_max,
                                                       params->aifs,
                                                       params->txop);
+               }
 
                mwl8k_fw_unlock(hw);
        }
@@ -3932,7 +4376,8 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
 static int
 mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                   enum ieee80211_ampdu_mlme_action action,
-                  struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+                  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                  u8 buf_size)
 {
        switch (action) {
        case IEEE80211_AMPDU_RX_START:
@@ -3955,6 +4400,7 @@ static const struct ieee80211_ops mwl8k_ops = {
        .bss_info_changed       = mwl8k_bss_info_changed,
        .prepare_multicast      = mwl8k_prepare_multicast,
        .configure_filter       = mwl8k_configure_filter,
+       .set_key                = mwl8k_set_key,
        .set_rts_threshold      = mwl8k_set_rts_threshold,
        .sta_add                = mwl8k_sta_add,
        .sta_remove             = mwl8k_sta_remove,
@@ -4332,7 +4778,7 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
        hw->queues = MWL8K_TX_QUEUES;
 
        /* Set rssi values to dBm */
-       hw->flags |= IEEE80211_HW_SIGNAL_DBM;
+       hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_HAS_RATE_CONTROL;
        hw->vif_data_size = sizeof(struct mwl8k_vif);
        hw->sta_data_size = sizeof(struct mwl8k_sta);
 
index 86cb54c842e745ffd7863457f23f45185440f556..e99ca1c1e0d8053689d7cbcabb5d07733dda03b1 100644 (file)
@@ -111,6 +111,11 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
 
        freq = ieee80211_dsss_chan_to_freq(le16_to_cpu(bss->a.channel));
        channel = ieee80211_get_channel(wiphy, freq);
+       if (!channel) {
+               printk(KERN_DEBUG "Invalid channel designation %04X(%04X)\n",
+                       bss->a.channel, freq);
+               return; /* Ignore this scan result for now */
+       }
        timestamp = 0;
        capability = le16_to_cpu(bss->a.capabilities);
        beacon_interval = le16_to_cpu(bss->a.beacon_interv);
index 35b09aa0529bf6cb6f142680d045c0e894c44df9..13d750da930119bc7bdfc4faf4ad322c6cf467e2 100644 (file)
@@ -55,6 +55,17 @@ static struct ieee80211_rate p54_arates[] = {
        { .bitrate = 540, .hw_value = 11, },
 };
 
+static struct p54_rssi_db_entry p54_rssi_default = {
+       /*
+        * The defaults are taken from usb-logs of the
+        * vendor driver. So, they should be safe to
+        * use in case we can't get a match from the
+        * rssi <-> dBm conversion database.
+        */
+       .mul = 130,
+       .add = -398,
+};
+
 #define CHAN_HAS_CAL           BIT(0)
 #define CHAN_HAS_LIMIT         BIT(1)
 #define CHAN_HAS_CURVE         BIT(2)
@@ -87,13 +98,27 @@ static int p54_get_band_from_freq(u16 freq)
        return -1;
 }
 
+static int same_band(u16 freq, u16 freq2)
+{
+       return p54_get_band_from_freq(freq) == p54_get_band_from_freq(freq2);
+}
+
 static int p54_compare_channels(const void *_a,
                                const void *_b)
 {
        const struct p54_channel_entry *a = _a;
        const struct p54_channel_entry *b = _b;
 
-       return a->index - b->index;
+       return a->freq - b->freq;
+}
+
+static int p54_compare_rssichan(const void *_a,
+                               const void *_b)
+{
+       const struct p54_rssi_db_entry *a = _a;
+       const struct p54_rssi_db_entry *b = _b;
+
+       return a->freq - b->freq;
 }
 
 static int p54_fill_band_bitrates(struct ieee80211_hw *dev,
@@ -145,25 +170,26 @@ static int p54_generate_band(struct ieee80211_hw *dev,
 
        for (i = 0, j = 0; (j < list->band_channel_num[band]) &&
                           (i < list->entries); i++) {
+               struct p54_channel_entry *chan = &list->channels[i];
 
-               if (list->channels[i].band != band)
+               if (chan->band != band)
                        continue;
 
-               if (list->channels[i].data != CHAN_HAS_ALL) {
-                       wiphy_err(dev->wiphy,
-                                 "%s%s%s is/are missing for channel:%d [%d MHz].\n",
-                                 (list->channels[i].data & CHAN_HAS_CAL ? "" :
+               if (chan->data != CHAN_HAS_ALL) {
+                       wiphy_err(dev->wiphy, "%s%s%s is/are missing for "
+                                 "channel:%d [%d MHz].\n",
+                                 (chan->data & CHAN_HAS_CAL ? "" :
                                   " [iqauto calibration data]"),
-                                 (list->channels[i].data & CHAN_HAS_LIMIT ? "" :
+                                 (chan->data & CHAN_HAS_LIMIT ? "" :
                                   " [output power limits]"),
-                                 (list->channels[i].data & CHAN_HAS_CURVE ? "" :
+                                 (chan->data & CHAN_HAS_CURVE ? "" :
                                   " [curve data]"),
-                                 list->channels[i].index, list->channels[i].freq);
+                                 chan->index, chan->freq);
                        continue;
                }
 
-               tmp->channels[j].band = list->channels[i].band;
-               tmp->channels[j].center_freq = list->channels[i].freq;
+               tmp->channels[j].band = chan->band;
+               tmp->channels[j].center_freq = chan->freq;
                j++;
        }
 
@@ -291,7 +317,7 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
                }
        }
 
-       /* sort the list by the channel index */
+       /* sort the channel list by frequency */
        sort(list->channels, list->entries, sizeof(struct p54_channel_entry),
             p54_compare_channels, NULL);
 
@@ -410,33 +436,121 @@ static int p54_convert_rev1(struct ieee80211_hw *dev,
 static const char *p54_rf_chips[] = { "INVALID-0", "Duette3", "Duette2",
        "Frisbee", "Xbow", "Longbow", "INVALID-6", "INVALID-7" };
 
-static void p54_parse_rssical(struct ieee80211_hw *dev, void *data, int len,
-                            u16 type)
+static int p54_parse_rssical(struct ieee80211_hw *dev,
+                            u8 *data, int len, u16 type)
 {
        struct p54_common *priv = dev->priv;
-       int offset = (type == PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) ? 2 : 0;
-       int entry_size = sizeof(struct pda_rssi_cal_entry) + offset;
-       int num_entries = (type == PDR_RSSI_LINEAR_APPROXIMATION) ? 1 : 2;
-       int i;
+       struct p54_rssi_db_entry *entry;
+       size_t db_len, entries;
+       int offset = 0, i;
+
+       if (type != PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) {
+               entries = (type == PDR_RSSI_LINEAR_APPROXIMATION) ? 1 : 2;
+               if (len != sizeof(struct pda_rssi_cal_entry) * entries) {
+                       wiphy_err(dev->wiphy, "rssical size mismatch.\n");
+                       goto err_data;
+               }
+       } else {
+               /*
+                * Some devices (Dell 1450 USB, Xbow 5GHz card, etc...)
+                * have an empty two byte header.
+                */
+               if (*((__le16 *)&data[offset]) == cpu_to_le16(0))
+                       offset += 2;
 
-       if (len != (entry_size * num_entries)) {
-               wiphy_err(dev->wiphy,
-                         "unknown rssi calibration data packing type:(%x) len:%d.\n",
-                         type, len);
+               entries = (len - offset) /
+                       sizeof(struct pda_rssi_cal_ext_entry);
 
-               print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE,
-                                    data, len);
+               if ((len - offset) % sizeof(struct pda_rssi_cal_ext_entry) ||
+                   entries <= 0) {
+                       wiphy_err(dev->wiphy, "invalid rssi database.\n");
+                       goto err_data;
+               }
+       }
 
-               wiphy_err(dev->wiphy, "please report this issue.\n");
-               return;
+       db_len = sizeof(*entry) * entries;
+       priv->rssi_db = kzalloc(db_len + sizeof(*priv->rssi_db), GFP_KERNEL);
+       if (!priv->rssi_db)
+               return -ENOMEM;
+
+       priv->rssi_db->offset = 0;
+       priv->rssi_db->entries = entries;
+       priv->rssi_db->entry_size = sizeof(*entry);
+       priv->rssi_db->len = db_len;
+
+       entry = (void *)((unsigned long)priv->rssi_db->data + priv->rssi_db->offset);
+       if (type == PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) {
+               struct pda_rssi_cal_ext_entry *cal = (void *) &data[offset];
+
+               for (i = 0; i < entries; i++) {
+                       entry[i].freq = le16_to_cpu(cal[i].freq);
+                       entry[i].mul = (s16) le16_to_cpu(cal[i].mul);
+                       entry[i].add = (s16) le16_to_cpu(cal[i].add);
+               }
+       } else {
+               struct pda_rssi_cal_entry *cal = (void *) &data[offset];
+
+               for (i = 0; i < entries; i++) {
+                       u16 freq;
+                       switch (i) {
+                       case IEEE80211_BAND_2GHZ:
+                               freq = 2437;
+                               break;
+                       case IEEE80211_BAND_5GHZ:
+                               freq = 5240;
+                               break;
+                       }
+
+                       entry[i].freq = freq;
+                       entry[i].mul = (s16) le16_to_cpu(cal[i].mul);
+                       entry[i].add = (s16) le16_to_cpu(cal[i].add);
+               }
        }
 
-       for (i = 0; i < num_entries; i++) {
-               struct pda_rssi_cal_entry *cal = data +
-                                                (offset + i * entry_size);
-               priv->rssical_db[i].mul = (s16) le16_to_cpu(cal->mul);
-               priv->rssical_db[i].add = (s16) le16_to_cpu(cal->add);
+       /* sort the list by channel frequency */
+       sort(entry, entries, sizeof(*entry), p54_compare_rssichan, NULL);
+       return 0;
+
+err_data:
+       wiphy_err(dev->wiphy,
+                 "rssi calibration data packing type:(%x) len:%d.\n",
+                 type, len);
+
+       print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE, data, len);
+
+       wiphy_err(dev->wiphy, "please report this issue.\n");
+       return -EINVAL;
+}
+
+struct p54_rssi_db_entry *p54_rssi_find(struct p54_common *priv, const u16 freq)
+{
+       struct p54_rssi_db_entry *entry;
+       int i, found = -1;
+
+       if (!priv->rssi_db)
+               return &p54_rssi_default;
+
+       entry = (void *)(priv->rssi_db->data + priv->rssi_db->offset);
+       for (i = 0; i < priv->rssi_db->entries; i++) {
+               if (!same_band(freq, entry[i].freq))
+                       continue;
+
+               if (found == -1) {
+                       found = i;
+                       continue;
+               }
+
+               /* nearest match */
+               if (abs(freq - entry[i].freq) <
+                   abs(freq - entry[found].freq)) {
+                       found = i;
+                       continue;
+               } else {
+                       break;
+               }
        }
+
+       return found < 0 ? &p54_rssi_default : &entry[found];
 }
 
 static void p54_parse_default_country(struct ieee80211_hw *dev,
@@ -627,21 +741,30 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
                case PDR_RSSI_LINEAR_APPROXIMATION:
                case PDR_RSSI_LINEAR_APPROXIMATION_DUAL_BAND:
                case PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED:
-                       p54_parse_rssical(dev, entry->data, data_len,
-                                         le16_to_cpu(entry->code));
+                       err = p54_parse_rssical(dev, entry->data, data_len,
+                                               le16_to_cpu(entry->code));
+                       if (err)
+                               goto err;
                        break;
-               case PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM: {
-                       __le16 *src = (void *) entry->data;
-                       s16 *dst = (void *) &priv->rssical_db;
+               case PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2: {
+                       struct pda_custom_wrapper *pda = (void *) entry->data;
+                       __le16 *src;
+                       u16 *dst;
                        int i;
 
-                       if (data_len != sizeof(priv->rssical_db)) {
-                               err = -EINVAL;
-                               goto err;
-                       }
-                       for (i = 0; i < sizeof(priv->rssical_db) /
-                                       sizeof(*src); i++)
+                       if (priv->rssi_db || data_len < sizeof(*pda))
+                               break;
+
+                       priv->rssi_db = p54_convert_db(pda, data_len);
+                       if (!priv->rssi_db)
+                               break;
+
+                       src = (void *) priv->rssi_db->data;
+                       dst = (void *) priv->rssi_db->data;
+
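+                       /* convert __le16 entries to host order in place */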
+                       for (i = 0; i < priv->rssi_db->entries; i++)
                                *(dst++) = (s16) le16_to_cpu(*(src++));
+
                        }
                        break;
                case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM: {
@@ -717,6 +840,8 @@ good_eeprom:
                SET_IEEE80211_PERM_ADDR(dev, perm_addr);
        }
 
+       priv->cur_rssi = &p54_rssi_default;
+
        wiphy_info(dev->wiphy, "hwaddr %pM, MAC:isl38%02x RF:%s\n",
                   dev->wiphy->perm_addr, priv->version,
                   p54_rf_chips[priv->rxhw]);
@@ -727,9 +852,11 @@ err:
        kfree(priv->iq_autocal);
        kfree(priv->output_limit);
        kfree(priv->curve_data);
+       kfree(priv->rssi_db);
        priv->iq_autocal = NULL;
        priv->output_limit = NULL;
        priv->curve_data = NULL;
+       priv->rssi_db = NULL;
 
        wiphy_err(dev->wiphy, "eeprom parse failed!\n");
        return err;
index 9051aef112496fb8eb5caf2e14ed9fd2088a2d5c..afde72b8460652dfa1fa3a475b1d35d9f50a04c4 100644 (file)
@@ -81,6 +81,12 @@ struct pda_pa_curve_data {
        u8 data[0];
 } __packed;
 
+struct pda_rssi_cal_ext_entry {
+       __le16 freq;
+       __le16 mul;
+       __le16 add;
+} __packed;
+
 struct pda_rssi_cal_entry {
        __le16 mul;
        __le16 add;
@@ -179,6 +185,7 @@ struct pda_custom_wrapper {
 
 /* used by our modificated eeprom image */
 #define PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM           0xDEAD
+#define PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2         0xCAFF
 #define PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM    0xBEEF
 #define PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM             0xB05D
 
index 92b9b1f05fd536d4ed529a73b92d1ea198189923..2fab7d20ffc2621bed540373ca05468c8c0669af 100644 (file)
@@ -397,9 +397,9 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
        union p54_scan_body_union *body;
        struct p54_scan_tail_rate *rate;
        struct pda_rssi_cal_entry *rssi;
+       struct p54_rssi_db_entry *rssi_data;
        unsigned int i;
        void *entry;
-       int band = priv->hw->conf.channel->band;
        __le16 freq = cpu_to_le16(priv->hw->conf.channel->center_freq);
 
        skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*head) +
@@ -503,13 +503,14 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
        }
 
        rssi = (struct pda_rssi_cal_entry *) skb_put(skb, sizeof(*rssi));
-       rssi->mul = cpu_to_le16(priv->rssical_db[band].mul);
-       rssi->add = cpu_to_le16(priv->rssical_db[band].add);
+       rssi_data = p54_rssi_find(priv, le16_to_cpu(freq));
+       rssi->mul = cpu_to_le16(rssi_data->mul);
+       rssi->add = cpu_to_le16(rssi_data->add);
        if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
                /* Longbow frontend needs even more */
                rssi = (void *) skb_put(skb, sizeof(*rssi));
-               rssi->mul = cpu_to_le16(priv->rssical_db[band].longbow_unkn);
-               rssi->add = cpu_to_le16(priv->rssical_db[band].longbow_unk2);
+               rssi->mul = cpu_to_le16(rssi_data->longbow_unkn);
+               rssi->add = cpu_to_le16(rssi_data->longbow_unk2);
        }
 
        if (priv->fw_var >= 0x509) {
@@ -523,6 +524,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
        hdr->len = cpu_to_le16(skb->len - sizeof(*hdr));
 
        p54_tx(priv, skb);
+       priv->cur_rssi = rssi_data;
        return 0;
 
 err:
@@ -557,6 +559,7 @@ int p54_set_edcf(struct p54_common *priv)
 {
        struct sk_buff *skb;
        struct p54_edcf *edcf;
+       u8 rtd;
 
        skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*edcf),
                            P54_CONTROL_TYPE_DCFINIT, GFP_ATOMIC);
@@ -573,9 +576,15 @@ int p54_set_edcf(struct p54_common *priv)
                edcf->sifs = 0x0a;
                edcf->eofpad = 0x06;
        }
+       /*
+        * calculate the extra round trip delay according to the
+        * formula from 802.11-2007 17.3.8.6.
+        */
+       rtd = 3 * priv->coverage_class;
+       edcf->slottime += rtd;
+       edcf->round_trip_delay = cpu_to_le16(rtd);
        /* (see prism54/isl_oid.h for further details) */
        edcf->frameburst = cpu_to_le16(0);
-       edcf->round_trip_delay = cpu_to_le16(0);
        edcf->flags = 0;
        memset(edcf->mapping, 0, sizeof(edcf->mapping));
        memcpy(edcf->queue, priv->qos_params, sizeof(edcf->queue));
index 04b63ec80fa46c6b68f17c992278b490a1de7728..eb581abc107906d6b3b9a9cd2317cec158c10801 100644 (file)
@@ -526,7 +526,7 @@ int p54_init_leds(struct p54_common *priv);
 void p54_unregister_leds(struct p54_common *priv);
 
 /* xmit functions */
-int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb);
+void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb);
 int p54_tx_cancel(struct p54_common *priv, __le32 req_id);
 void p54_tx(struct p54_common *priv, struct sk_buff *skb);
 
@@ -551,6 +551,7 @@ int p54_upload_key(struct p54_common *priv, u8 algo, int slot,
 /* eeprom */
 int p54_download_eeprom(struct p54_common *priv, void *buf,
                        u16 offset, u16 len);
+struct p54_rssi_db_entry *p54_rssi_find(struct p54_common *p, const u16 freq);
 
 /* utility */
 u8 *p54_find_ie(struct sk_buff *skb, u8 ie);
index 622d27b6d8f214a1e69b92b93c4c570a7b22b8df..356e6bb443a63fe7ac4f4611b34025d13d6f2c4d 100644 (file)
@@ -157,7 +157,7 @@ static int p54_beacon_update(struct p54_common *priv,
         * to cancel the old beacon template by hand, instead the firmware
         * will release the previous one through the feedback mechanism.
         */
-       WARN_ON(p54_tx_80211(priv->hw, beacon));
+       p54_tx_80211(priv->hw, beacon);
        priv->tsf_high32 = 0;
        priv->tsf_low32 = 0;
 
@@ -524,6 +524,59 @@ static int p54_get_survey(struct ieee80211_hw *dev, int idx,
        return 0;
 }
 
+static unsigned int p54_flush_count(struct p54_common *priv)
+{
+       unsigned int total = 0, i;
+
+       BUILD_BUG_ON(P54_QUEUE_NUM > ARRAY_SIZE(priv->tx_stats));
+
+       /*
+        * Because the firmware has the sole control over any frames
+        * in the P54_QUEUE_BEACON or P54_QUEUE_SCAN queues, they
+        * don't really count as pending or active.
+        */
+       for (i = P54_QUEUE_MGMT; i < P54_QUEUE_NUM; i++)
+               total += priv->tx_stats[i].len;
+       return total;
+}
+
+static void p54_flush(struct ieee80211_hw *dev, bool drop)
+{
+       struct p54_common *priv = dev->priv;
+       unsigned int total, i;
+
+       /*
+        * Currently, it wouldn't really matter if we wait for one second
+        * or 15 minutes. But once someone gets around to completing the
+        * TODOs [cancel stuck frames / reset device] in p54_work, it will
+        * suddenly make sense to wait that long.
+        */
+       i = P54_STATISTICS_UPDATE * 2 / 20;
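+       /* i is the number of 20 ms poll intervals we are prepared to wait */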
+
+       /*
+        * In this case no locking is required because as we speak the
+        * queues have already been stopped and no new frames can sneak
+        * up from behind.
+        */
+       while ((total = p54_flush_count(priv)) && i--) {
+               /* waste time */
+               msleep(20);
+       }
+
+       WARN(total, "tx flush timeout, unresponsive firmware");
+}
+
+static void p54_set_coverage_class(struct ieee80211_hw *dev, u8 coverage_class)
+{
+       struct p54_common *priv = dev->priv;
+
+       mutex_lock(&priv->conf_mutex);
+       /* support all coverage class values as in 802.11-2007 Table 7-27 */
+       priv->coverage_class = clamp_t(u8, coverage_class, 0, 31);
+       p54_set_edcf(priv);
+       mutex_unlock(&priv->conf_mutex);
+}
+
 static const struct ieee80211_ops p54_ops = {
        .tx                     = p54_tx_80211,
        .start                  = p54_start,
@@ -536,11 +589,13 @@ static const struct ieee80211_ops p54_ops = {
        .sta_remove             = p54_sta_add_remove,
        .set_key                = p54_set_key,
        .config                 = p54_config,
+       .flush                  = p54_flush,
        .bss_info_changed       = p54_bss_info_changed,
        .configure_filter       = p54_configure_filter,
        .conf_tx                = p54_conf_tx,
        .get_stats              = p54_get_stats,
        .get_survey             = p54_get_survey,
+       .set_coverage_class     = p54_set_coverage_class,
 };
 
 struct ieee80211_hw *p54_init_common(size_t priv_data_len)
@@ -611,7 +666,7 @@ EXPORT_SYMBOL_GPL(p54_init_common);
 
 int p54_register_common(struct ieee80211_hw *dev, struct device *pdev)
 {
-       struct p54_common *priv = dev->priv;
+       struct p54_common __maybe_unused *priv = dev->priv;
        int err;
 
        err = ieee80211_register_hw(dev);
@@ -642,10 +697,12 @@ void p54_free_common(struct ieee80211_hw *dev)
        kfree(priv->iq_autocal);
        kfree(priv->output_limit);
        kfree(priv->curve_data);
+       kfree(priv->rssi_db);
        kfree(priv->used_rxkeys);
        priv->iq_autocal = NULL;
        priv->output_limit = NULL;
        priv->curve_data = NULL;
+       priv->rssi_db = NULL;
        priv->used_rxkeys = NULL;
        ieee80211_free_hw(dev);
 }
index 43a3b2ead81adf886fff780c600cce62de6fe5d2..50730fc23fe57ba8c0cb6e849690a66859ae3c44 100644 (file)
@@ -116,7 +116,8 @@ struct p54_edcf_queue_param {
        __le16 txop;
 } __packed;
 
-struct p54_rssi_linear_approximation {
+struct p54_rssi_db_entry {
+       u16 freq;
        s16 mul;
        s16 add;
        s16 longbow_unkn;
@@ -197,13 +198,14 @@ struct p54_common {
        u8 rx_diversity_mask;
        u8 tx_diversity_mask;
        unsigned int output_power;
+       struct p54_rssi_db_entry *cur_rssi;
        int noise;
        /* calibration, output power limit and rssi<->dBm conversation data */
        struct pda_iq_autocal_entry *iq_autocal;
        unsigned int iq_autocal_len;
        struct p54_cal_database *curve_data;
        struct p54_cal_database *output_limit;
-       struct p54_rssi_linear_approximation rssical_db[IEEE80211_NUM_BANDS];
+       struct p54_cal_database *rssi_db;
        struct ieee80211_supported_band *band_table[IEEE80211_NUM_BANDS];
 
        /* BBP/MAC state */
@@ -215,6 +217,7 @@ struct p54_common {
        u32 tsf_low32, tsf_high32;
        u32 basic_rate_mask;
        u16 aid;
+       u8 coverage_class;
        bool powersave_override;
        __le32 beacon_req_id;
        struct completion beacon_comp;
index d592cbd34d78069911d0fc9c12dca390b09e970a..0b7bfb0adcf2e5f1b9f4559cdf40577d93e960fd 100644 (file)
@@ -65,9 +65,10 @@ static unsigned char p54spi_eeprom[] = {
 0x03, 0x00, 0x00, 0x11,                /* PDR_ANTENNA_GAIN */
        0x08, 0x08, 0x08, 0x08,
 
-0x09, 0x00, 0xad, 0xde,                /* PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM */
-       0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x0a, 0x00, 0xff, 0xca,                /* PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2 */
+       0x01, 0x00, 0x0a, 0x00,
+       0x00, 0x00, 0x0a, 0x00,
+               0x85, 0x09, 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00,
 
 /* struct pda_custom_wrapper */
 0x10, 0x06, 0x5d, 0xb0,                /* PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM */
@@ -671,7 +672,7 @@ static unsigned char p54spi_eeprom[] = {
        0xa8, 0x09, 0x25, 0x00, 0xf5, 0xff, 0xf9, 0xff, 0x00, 0x01,
 
 0x02, 0x00, 0x00, 0x00,                /* PDR_END */
-       0x67, 0x99,
+       0xb6, 0x04,
 };
 
 #endif /* P54SPI_EEPROM_H */
index f618b9623e5a6d38753a8ee010fb61f4f8777820..7834c26c295438e39ab1c63d3e1b0444ca81ef2f 100644 (file)
@@ -273,11 +273,9 @@ void p54_tx(struct p54_common *priv, struct sk_buff *skb)
 
 static int p54_rssi_to_dbm(struct p54_common *priv, int rssi)
 {
-       int band = priv->hw->conf.channel->band;
-
        if (priv->rxhw != 5) {
-               return ((rssi * priv->rssical_db[band].mul) / 64 +
-                        priv->rssical_db[band].add) / 4;
+               return ((rssi * priv->cur_rssi->mul) / 64 +
+                        priv->cur_rssi->add) / 4;
        } else {
                /*
                 * TODO: find the correct formula
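
The conversion in the rxhw != 5 branch above is a plain linear approximation: scale the raw RSSI by the per-frequency multiplier (in 1/64 steps), add the offset (in quarter dBm), then divide by four. A standalone rendition with made-up calibration values (mul and add below are illustrative, not EEPROM data):

#include <stdio.h>

struct rssi_cal {
        short mul;                              /* multiplier, 1/64 steps */
        short add;                              /* offset, quarter dBm */
};

static int rssi_to_dbm(const struct rssi_cal *cal, int rssi)
{
        return ((rssi * cal->mul) / 64 + cal->add) / 4;
}

int main(void)
{
        struct rssi_cal cal = { .mul = -150, .add = -180 };     /* made up */
        int rssi;

        for (rssi = 10; rssi <= 60; rssi += 25)
                printf("raw rssi %2d -> %d dBm\n", rssi,
                       rssi_to_dbm(&cal, rssi));
        return 0;
}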
@@ -369,7 +367,7 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
        rx_status->mactime = ((u64)priv->tsf_high32) << 32 | tsf32;
        priv->tsf_low32 = tsf32;
 
-       rx_status->flag |= RX_FLAG_TSFT;
+       rx_status->flag |= RX_FLAG_MACTIME_MPDU;
 
        if (hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
                header_len += hdr->align[0];
@@ -698,7 +696,7 @@ static u8 p54_convert_algo(u32 cipher)
        }
 }
 
-int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
+void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
 {
        struct p54_common *priv = dev->priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -719,12 +717,8 @@ int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
                            &hdr_flags, &aid, &burst_allowed);
 
        if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
-               if (!IS_QOS_QUEUE(queue)) {
-                       dev_kfree_skb_any(skb);
-                       return NETDEV_TX_OK;
-               } else {
-                       return NETDEV_TX_BUSY;
-               }
+               dev_kfree_skb_any(skb);
+               return;
        }
 
        padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3;
@@ -867,5 +861,4 @@ int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
        p54info->extra_len = extra_len;
 
        p54_tx(priv, skb);
-       return NETDEV_TX_OK;
 }
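
With the void return type, the tx path always takes ownership of the frame: if QoS accounting cannot make room, the skb is freed and the function returns instead of reporting NETDEV_TX_BUSY. A stripped-down sketch of that ownership rule; queue_full() and the buffer handling are stand-ins, not the driver's logic:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for p54_tx_qos_accounting_alloc(): nonzero means "queue full". */
static int queue_full(unsigned int queued, unsigned int limit)
{
        return queued >= limit;
}

/*
 * The tx path never reports "busy" back to the caller; it either queues
 * the frame or frees it. Returning void makes that ownership rule explicit.
 */
static void tx_frame(unsigned char *frame, unsigned int *queued,
                     unsigned int limit)
{
        if (queue_full(*queued, limit)) {
                free(frame);            /* drop: the tx path owns the buffer */
                return;
        }
        (*queued)++;                    /* pretend hand-off to hardware */
        free(frame);                    /* "hardware" releases it when done */
}

int main(void)
{
        unsigned int queued = 0, i;

        for (i = 0; i < 4; i++) {
                unsigned char *frame = malloc(64);

                if (!frame)
                        break;
                tx_frame(frame, &queued, 2);
        }
        printf("%u of 4 frames accepted, the rest were dropped\n", queued);
        return 0;
}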
index 6f383cd684b009a1cee9663768759c32772a7cf1..f630552427b7ce70c36612402b5ff040c346cae0 100644 (file)
@@ -97,6 +97,18 @@ config RT2800PCI_RT35XX
          Support for these devices is non-functional at the moment and is
          intended for testers and developers.
 
+config RT2800PCI_RT53XX
+       bool "rt2800-pci - Include support for rt53xx devices (EXPERIMENTAL)"
+       depends on EXPERIMENTAL
+       default n
+       ---help---
+         This adds support for the rt53xx wireless chipset family to the
+         rt2800pci driver.
+         Supported chips: RT5390
+
+         Support for these devices is non-functional at the moment and is
+         intended for testers and developers.
+
 endif
 
 config RT2500USB
index 54ca49ad347200917152bf2aa013b2baa9d9951c..2725f3c4442eca8af535ff49123a3bb96eab7a71 100644 (file)
@@ -46,7 +46,7 @@
  * These indirect registers work with busy bits,
  * and we will try maximal REGISTER_BUSY_COUNT times to access
  * the register while taking a REGISTER_BUSY_DELAY us delay
- * between each attampt. When the busy bit is still set at that time,
+ * between each attempt. When the busy bit is still set at that time,
  * the access attempt is considered to have failed,
  * and we will print an error.
  */
@@ -305,9 +305,7 @@ static void rt2400pci_config_intf(struct rt2x00_dev *rt2x00dev,
                 * Enable synchronisation.
                 */
                rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
-               rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
                rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync);
-               rt2x00_set_field32(&reg, CSR14_TBCN, 1);
                rt2x00pci_register_write(rt2x00dev, CSR14, reg);
        }
 
@@ -647,6 +645,11 @@ static void rt2400pci_start_queue(struct data_queue *queue)
                rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
                break;
        case QID_BEACON:
+               /*
+                * Allow the tbtt tasklet to be scheduled.
+                */
+               tasklet_enable(&rt2x00dev->tbtt_tasklet);
+
                rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
                rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
                rt2x00_set_field32(&reg, CSR14_TBCN, 1);
@@ -708,6 +711,11 @@ static void rt2400pci_stop_queue(struct data_queue *queue)
                rt2x00_set_field32(&reg, CSR14_TBCN, 0);
                rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
                rt2x00pci_register_write(rt2x00dev, CSR14, reg);
+
+               /*
+                * Wait for possibly running tbtt tasklets.
+                */
+               tasklet_disable(&rt2x00dev->tbtt_tasklet);
                break;
        default:
                break;
@@ -963,9 +971,9 @@ static int rt2400pci_init_bbp(struct rt2x00_dev *rt2x00dev)
 static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
                                 enum dev_state state)
 {
-       int mask = (state == STATE_RADIO_IRQ_OFF) ||
-                  (state == STATE_RADIO_IRQ_OFF_ISR);
+       int mask = (state == STATE_RADIO_IRQ_OFF);
        u32 reg;
+       unsigned long flags;
 
        /*
         * When interrupts are being enabled, the interrupt registers
@@ -974,12 +982,20 @@ static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
        if (state == STATE_RADIO_IRQ_ON) {
                rt2x00pci_register_read(rt2x00dev, CSR7, &reg);
                rt2x00pci_register_write(rt2x00dev, CSR7, reg);
+
+               /*
+                * Enable tasklets.
+                */
+               tasklet_enable(&rt2x00dev->txstatus_tasklet);
+               tasklet_enable(&rt2x00dev->rxdone_tasklet);
        }
 
        /*
         * Only toggle the interrupts bits we are going to use.
         * Non-checked interrupt bits are disabled by default.
         */
+       spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
        rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
        rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask);
        rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask);
@@ -987,6 +1003,17 @@ static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
        rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask);
        rt2x00_set_field32(&reg, CSR8_RXDONE, mask);
        rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+       spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+       if (state == STATE_RADIO_IRQ_OFF) {
+               /*
+                * Ensure that all tasklets are finished before
+                * disabling the interrupts.
+                */
+               tasklet_disable(&rt2x00dev->txstatus_tasklet);
+               tasklet_disable(&rt2x00dev->rxdone_tasklet);
+       }
 }
 
 static int rt2400pci_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -1059,9 +1086,7 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt2400pci_disable_radio(rt2x00dev);
                break;
        case STATE_RADIO_IRQ_ON:
-       case STATE_RADIO_IRQ_ON_ISR:
        case STATE_RADIO_IRQ_OFF:
-       case STATE_RADIO_IRQ_OFF_ISR:
                rt2400pci_toggle_irq(rt2x00dev, state);
                break;
        case STATE_DEEP_SLEEP:
@@ -1183,8 +1208,6 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
        /*
         * Enable beaconing again.
         */
-       rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
-       rt2x00_set_field32(&reg, CSR14_TBCN, 1);
        rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
        rt2x00pci_register_write(rt2x00dev, CSR14, reg);
 }
@@ -1289,57 +1312,71 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
        }
 }
 
-static irqreturn_t rt2400pci_interrupt_thread(int irq, void *dev_instance)
+static void rt2400pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+                                      struct rt2x00_field32 irq_field)
 {
-       struct rt2x00_dev *rt2x00dev = dev_instance;
-       u32 reg = rt2x00dev->irqvalue[0];
+       unsigned long flags;
+       u32 reg;
 
        /*
-        * Handle interrupts, walk through all bits
-        * and run the tasks, the bits are checked in order of
-        * priority.
+        * Enable a single interrupt. The interrupt mask register
+        * access needs locking.
         */
+       spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
 
-       /*
-        * 1 - Beacon timer expired interrupt.
-        */
-       if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
-               rt2x00lib_beacondone(rt2x00dev);
+       rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+       rt2x00_set_field32(&reg, irq_field, 0);
+       rt2x00pci_register_write(rt2x00dev, CSR8, reg);
 
-       /*
-        * 2 - Rx ring done interrupt.
-        */
-       if (rt2x00_get_field32(reg, CSR7_RXDONE))
-               rt2x00pci_rxdone(rt2x00dev);
+       spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+}
 
-       /*
-        * 3 - Atim ring transmit done interrupt.
-        */
-       if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING))
-               rt2400pci_txdone(rt2x00dev, QID_ATIM);
+static void rt2400pci_txstatus_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       u32 reg;
+       unsigned long flags;
 
        /*
-        * 4 - Priority ring transmit done interrupt.
+        * Handle all tx queues.
         */
-       if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING))
-               rt2400pci_txdone(rt2x00dev, QID_AC_VO);
+       rt2400pci_txdone(rt2x00dev, QID_ATIM);
+       rt2400pci_txdone(rt2x00dev, QID_AC_VO);
+       rt2400pci_txdone(rt2x00dev, QID_AC_VI);
 
        /*
-        * 5 - Tx ring transmit done interrupt.
+        * Enable all TXDONE interrupts again.
         */
-       if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING))
-               rt2400pci_txdone(rt2x00dev, QID_AC_VI);
+       spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
 
-       /* Enable interrupts again. */
-       rt2x00dev->ops->lib->set_device_state(rt2x00dev,
-                                             STATE_RADIO_IRQ_ON_ISR);
-       return IRQ_HANDLED;
+       rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+       rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
+       rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
+       rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
+       rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+       spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+}
+
+static void rt2400pci_tbtt_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2x00lib_beacondone(rt2x00dev);
+       rt2400pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
+}
+
+static void rt2400pci_rxdone_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2x00pci_rxdone(rt2x00dev);
+       rt2400pci_enable_interrupt(rt2x00dev, CSR8_RXDONE);
 }
 
 static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
 {
        struct rt2x00_dev *rt2x00dev = dev_instance;
-       u32 reg;
+       u32 reg, mask;
+       unsigned long flags;
 
        /*
         * Get the interrupt sources & saved to local variable.
@@ -1354,14 +1391,44 @@ static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                return IRQ_HANDLED;
 
-       /* Store irqvalues for use in the interrupt thread. */
-       rt2x00dev->irqvalue[0] = reg;
+       mask = reg;
 
-       /* Disable interrupts, will be enabled again in the interrupt thread. */
-       rt2x00dev->ops->lib->set_device_state(rt2x00dev,
-                                             STATE_RADIO_IRQ_OFF_ISR);
+       /*
+        * Schedule tasklets for interrupt handling.
+        */
+       if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
+               tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
 
-       return IRQ_WAKE_THREAD;
+       if (rt2x00_get_field32(reg, CSR7_RXDONE))
+               tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+
+       if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING) ||
+           rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING) ||
+           rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) {
+               tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+               /*
+                * Mask out all txdone interrupts.
+                */
+               rt2x00_set_field32(&mask, CSR8_TXDONE_TXRING, 1);
+               rt2x00_set_field32(&mask, CSR8_TXDONE_ATIMRING, 1);
+               rt2x00_set_field32(&mask, CSR8_TXDONE_PRIORING, 1);
+       }
+
+       /*
+        * Disable all interrupts for which a tasklet was scheduled right now;
+        * the tasklet will re-enable the appropriate interrupts.
+        */
+       spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
+       rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+       reg |= mask;
+       rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+       spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+       return IRQ_HANDLED;
 }
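
The rework above splits interrupt handling in two: the hard handler only masks the sources it is about to service (under irqmask_lock) and schedules the matching tasklet, and each tasklet unmasks its source again once the work is done. A userspace model of that handshake, assuming illustrative IRQ_* bits and a mutex standing in for the spinlock:

#include <stdio.h>
#include <pthread.h>

#define IRQ_RXDONE      0x01            /* illustrative interrupt source bits */
#define IRQ_TXDONE      0x02

static unsigned int irq_mask;           /* models CSR8: set bit = source masked */
static pthread_mutex_t mask_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hard-IRQ side: mask the pending sources, then defer the real work. */
static void hardirq(unsigned int pending)
{
        pthread_mutex_lock(&mask_lock);
        irq_mask |= pending;            /* no further IRQs from these sources */
        pthread_mutex_unlock(&mask_lock);
        printf("hardirq: masked 0x%x, bottom half scheduled\n", pending);
}

/* Tasklet side: process, then unmask so the hardware may interrupt again. */
static void bottom_half(unsigned int source)
{
        printf("tasklet: handling 0x%x\n", source);
        pthread_mutex_lock(&mask_lock);
        irq_mask &= ~source;            /* re-enable just this source */
        pthread_mutex_unlock(&mask_lock);
}

int main(void)
{
        hardirq(IRQ_RXDONE | IRQ_TXDONE);
        bottom_half(IRQ_RXDONE);
        bottom_half(IRQ_TXDONE);
        printf("final mask: 0x%x\n", irq_mask);
        return 0;
}

Built with -pthread, the printf calls mark where rt2x00pci_rxdone() and rt2400pci_txdone() would run in the real driver.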
 
 /*
@@ -1655,7 +1722,9 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
 
 static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
        .irq_handler            = rt2400pci_interrupt,
-       .irq_handler_thread     = rt2400pci_interrupt_thread,
+       .txstatus_tasklet       = rt2400pci_txstatus_tasklet,
+       .tbtt_tasklet           = rt2400pci_tbtt_tasklet,
+       .rxdone_tasklet         = rt2400pci_rxdone_tasklet,
        .probe_hw               = rt2400pci_probe_hw,
        .initialize             = rt2x00pci_initialize,
        .uninitialize           = rt2x00pci_uninitialize,
index a9ff26a27724a9e95ec827d094ea01cf12a541cb..3ef1fb4185c0945a2da2b70656fe723ddf501a94 100644 (file)
@@ -311,9 +311,7 @@ static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev,
                 * Enable synchronisation.
                 */
                rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
-               rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
                rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync);
-               rt2x00_set_field32(&reg, CSR14_TBCN, 1);
                rt2x00pci_register_write(rt2x00dev, CSR14, reg);
        }
 
@@ -737,6 +735,11 @@ static void rt2500pci_start_queue(struct data_queue *queue)
                rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
                break;
        case QID_BEACON:
+               /*
+                * Allow the tbtt tasklet to be scheduled.
+                */
+               tasklet_enable(&rt2x00dev->tbtt_tasklet);
+
                rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
                rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
                rt2x00_set_field32(&reg, CSR14_TBCN, 1);
@@ -798,6 +801,11 @@ static void rt2500pci_stop_queue(struct data_queue *queue)
                rt2x00_set_field32(&reg, CSR14_TBCN, 0);
                rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
                rt2x00pci_register_write(rt2x00dev, CSR14, reg);
+
+               /*
+                * Wait for possibly running tbtt tasklets.
+                */
+               tasklet_disable(&rt2x00dev->tbtt_tasklet);
                break;
        default:
                break;
@@ -1118,9 +1126,9 @@ static int rt2500pci_init_bbp(struct rt2x00_dev *rt2x00dev)
 static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
                                 enum dev_state state)
 {
-       int mask = (state == STATE_RADIO_IRQ_OFF) ||
-                  (state == STATE_RADIO_IRQ_OFF_ISR);
+       int mask = (state == STATE_RADIO_IRQ_OFF);
        u32 reg;
+       unsigned long flags;
 
        /*
         * When interrupts are being enabled, the interrupt registers
@@ -1129,12 +1137,20 @@ static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
        if (state == STATE_RADIO_IRQ_ON) {
                rt2x00pci_register_read(rt2x00dev, CSR7, &reg);
                rt2x00pci_register_write(rt2x00dev, CSR7, reg);
+
+               /*
+                * Enable tasklets.
+                */
+               tasklet_enable(&rt2x00dev->txstatus_tasklet);
+               tasklet_enable(&rt2x00dev->rxdone_tasklet);
        }
 
        /*
         * Only toggle the interrupts bits we are going to use.
         * Non-checked interrupt bits are disabled by default.
         */
+       spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
        rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
        rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask);
        rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask);
@@ -1142,6 +1158,16 @@ static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
        rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask);
        rt2x00_set_field32(&reg, CSR8_RXDONE, mask);
        rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+       spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+       if (state == STATE_RADIO_IRQ_OFF) {
+               /*
+                * Ensure that all tasklets are finished.
+                */
+               tasklet_disable(&rt2x00dev->txstatus_tasklet);
+               tasklet_disable(&rt2x00dev->rxdone_tasklet);
+       }
 }
 
 static int rt2500pci_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -1214,9 +1240,7 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt2500pci_disable_radio(rt2x00dev);
                break;
        case STATE_RADIO_IRQ_ON:
-       case STATE_RADIO_IRQ_ON_ISR:
        case STATE_RADIO_IRQ_OFF:
-       case STATE_RADIO_IRQ_OFF_ISR:
                rt2500pci_toggle_irq(rt2x00dev, state);
                break;
        case STATE_DEEP_SLEEP:
@@ -1337,8 +1361,6 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
        /*
         * Enable beaconing again.
         */
-       rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
-       rt2x00_set_field32(&reg, CSR14_TBCN, 1);
        rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
        rt2x00pci_register_write(rt2x00dev, CSR14, reg);
 }
@@ -1422,58 +1444,71 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
        }
 }
 
-static irqreturn_t rt2500pci_interrupt_thread(int irq, void *dev_instance)
+static void rt2500pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+                                      struct rt2x00_field32 irq_field)
 {
-       struct rt2x00_dev *rt2x00dev = dev_instance;
-       u32 reg = rt2x00dev->irqvalue[0];
+       unsigned long flags;
+       u32 reg;
 
        /*
-        * Handle interrupts, walk through all bits
-        * and run the tasks, the bits are checked in order of
-        * priority.
+        * Enable a single interrupt. The interrupt mask register
+        * access needs locking.
         */
+       spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
 
-       /*
-        * 1 - Beacon timer expired interrupt.
-        */
-       if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
-               rt2x00lib_beacondone(rt2x00dev);
+       rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+       rt2x00_set_field32(&reg, irq_field, 0);
+       rt2x00pci_register_write(rt2x00dev, CSR8, reg);
 
-       /*
-        * 2 - Rx ring done interrupt.
-        */
-       if (rt2x00_get_field32(reg, CSR7_RXDONE))
-               rt2x00pci_rxdone(rt2x00dev);
+       spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+}
 
-       /*
-        * 3 - Atim ring transmit done interrupt.
-        */
-       if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING))
-               rt2500pci_txdone(rt2x00dev, QID_ATIM);
+static void rt2500pci_txstatus_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       u32 reg;
+       unsigned long flags;
 
        /*
-        * 4 - Priority ring transmit done interrupt.
+        * Handle all tx queues.
         */
-       if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING))
-               rt2500pci_txdone(rt2x00dev, QID_AC_VO);
+       rt2500pci_txdone(rt2x00dev, QID_ATIM);
+       rt2500pci_txdone(rt2x00dev, QID_AC_VO);
+       rt2500pci_txdone(rt2x00dev, QID_AC_VI);
 
        /*
-        * 5 - Tx ring transmit done interrupt.
+        * Enable all TXDONE interrupts again.
         */
-       if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING))
-               rt2500pci_txdone(rt2x00dev, QID_AC_VI);
+       spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
+       rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+       rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
+       rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
+       rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
+       rt2x00pci_register_write(rt2x00dev, CSR8, reg);
 
-       /* Enable interrupts again. */
-       rt2x00dev->ops->lib->set_device_state(rt2x00dev,
-                                             STATE_RADIO_IRQ_ON_ISR);
+       spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+}
 
-       return IRQ_HANDLED;
+static void rt2500pci_tbtt_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2x00lib_beacondone(rt2x00dev);
+       rt2500pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
+}
+
+static void rt2500pci_rxdone_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2x00pci_rxdone(rt2x00dev);
+       rt2500pci_enable_interrupt(rt2x00dev, CSR8_RXDONE);
 }
 
 static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
 {
        struct rt2x00_dev *rt2x00dev = dev_instance;
-       u32 reg;
+       u32 reg, mask;
+       unsigned long flags;
 
        /*
         * Get the interrupt sources & saved to local variable.
@@ -1488,14 +1523,42 @@ static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                return IRQ_HANDLED;
 
-       /* Store irqvalues for use in the interrupt thread. */
-       rt2x00dev->irqvalue[0] = reg;
+       mask = reg;
 
-       /* Disable interrupts, will be enabled again in the interrupt thread. */
-       rt2x00dev->ops->lib->set_device_state(rt2x00dev,
-                                             STATE_RADIO_IRQ_OFF_ISR);
+       /*
+        * Schedule tasklets for interrupt handling.
+        */
+       if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
+               tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
 
-       return IRQ_WAKE_THREAD;
+       if (rt2x00_get_field32(reg, CSR7_RXDONE))
+               tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+
+       if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING) ||
+           rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING) ||
+           rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) {
+               tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+               /*
+                * Mask out all txdone interrupts.
+                */
+               rt2x00_set_field32(&mask, CSR8_TXDONE_TXRING, 1);
+               rt2x00_set_field32(&mask, CSR8_TXDONE_ATIMRING, 1);
+               rt2x00_set_field32(&mask, CSR8_TXDONE_PRIORING, 1);
+       }
+
+       /*
+        * Disable all interrupts for which a tasklet was scheduled right now;
+        * the tasklet will re-enable the appropriate interrupts.
+        */
+       spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
+       rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+       reg |= mask;
+       rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+       spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+       return IRQ_HANDLED;
 }
 
 /*
@@ -1952,7 +2015,9 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
 
 static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
        .irq_handler            = rt2500pci_interrupt,
-       .irq_handler_thread     = rt2500pci_interrupt_thread,
+       .txstatus_tasklet       = rt2500pci_txstatus_tasklet,
+       .tbtt_tasklet           = rt2500pci_tbtt_tasklet,
+       .rxdone_tasklet         = rt2500pci_rxdone_tasklet,
        .probe_hw               = rt2500pci_probe_hw,
        .initialize             = rt2x00pci_initialize,
        .uninitialize           = rt2x00pci_uninitialize,
index 6b3b1de46792e96a6119b912eb749e9f43033f67..01f385d5846c6641eda7376e41ff911ebf69f691 100644 (file)
@@ -478,9 +478,7 @@ static void rt2500usb_config_intf(struct rt2x00_dev *rt2x00dev,
                rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg);
 
                rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
-               rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
                rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, conf->sync);
-               rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
                rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
        }
 
@@ -1056,9 +1054,7 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt2500usb_disable_radio(rt2x00dev);
                break;
        case STATE_RADIO_IRQ_ON:
-       case STATE_RADIO_IRQ_ON_ISR:
        case STATE_RADIO_IRQ_OFF:
-       case STATE_RADIO_IRQ_OFF_ISR:
                /* No support, but no error either */
                break;
        case STATE_DEEP_SLEEP:
index 4c55e8525cad107706b4dbb6061cde64aa7b67ab..6f4a2432c021d6837e7b32532bb6fd0fa714fe09 100644 (file)
@@ -51,6 +51,7 @@
  * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
  * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
  * RF3853 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
+ * RF5390 2.4G 1T1R
  */
 #define RF2820                         0x0001
 #define RF2850                         0x0002
@@ -65,6 +66,7 @@
 #define RF3320                         0x000b
 #define RF3322                         0x000c
 #define RF3853                         0x000d
+#define RF5390                         0x5390
 
 /*
  * Chipset revisions.
@@ -77,6 +79,7 @@
 #define REV_RT3071E                    0x0211
 #define REV_RT3090E                    0x0211
 #define REV_RT3390E                    0x0211
+#define REV_RT5390F                    0x0502
 
 /*
  * Signal information.
 #define E2PROM_CSR_LOAD_STATUS         FIELD32(0x00000040)
 #define E2PROM_CSR_RELOAD              FIELD32(0x00000080)
 
+/*
+ * AUX_CTRL: Aux/PCI-E related configuration
+ */
+#define AUX_CTRL               0x10c
+#define AUX_CTRL_WAKE_PCIE_EN          FIELD32(0x00000002)
+#define AUX_CTRL_FORCE_PCIE_CLK        FIELD32(0x00000400)
+
 /*
  * OPT_14: Unknown register used by rt3xxx devices.
  */
 
 /*
  * GPIO_CTRL_CFG:
+ * GPIOD: GPIO direction, 0: Output, 1: Input
  */
 #define GPIO_CTRL_CFG                  0x0228
 #define GPIO_CTRL_CFG_BIT0             FIELD32(0x00000001)
 #define GPIO_CTRL_CFG_BIT5             FIELD32(0x00000020)
 #define GPIO_CTRL_CFG_BIT6             FIELD32(0x00000040)
 #define GPIO_CTRL_CFG_BIT7             FIELD32(0x00000080)
-#define GPIO_CTRL_CFG_BIT8             FIELD32(0x00000100)
+#define GPIO_CTRL_CFG_GPIOD_BIT0       FIELD32(0x00000100)
+#define GPIO_CTRL_CFG_GPIOD_BIT1       FIELD32(0x00000200)
+#define GPIO_CTRL_CFG_GPIOD_BIT2       FIELD32(0x00000400)
+#define GPIO_CTRL_CFG_GPIOD_BIT3       FIELD32(0x00000800)
+#define GPIO_CTRL_CFG_GPIOD_BIT4       FIELD32(0x00001000)
+#define GPIO_CTRL_CFG_GPIOD_BIT5       FIELD32(0x00002000)
+#define GPIO_CTRL_CFG_GPIOD_BIT6       FIELD32(0x00004000)
+#define GPIO_CTRL_CFG_GPIOD_BIT7       FIELD32(0x00008000)
 
 /*
  * MCU_CMD_CFG
 
 /*
  * US_CYC_CNT
+ * BT_MODE_EN: Bluetooth mode enable
+ * CLOCK CYCLE: Clock cycle count in 1us.
+ * PCI:0x21, PCIE:0x7d, USB:0x1e
  */
 #define US_CYC_CNT                     0x02a4
+#define US_CYC_CNT_BT_MODE_EN          FIELD32(0x00000100)
 #define US_CYC_CNT_CLOCK_CYCLE         FIELD32(0x000000ff)
 
 /*
  */
 #define        RF_CSR_CFG                      0x0500
 #define RF_CSR_CFG_DATA                        FIELD32(0x000000ff)
-#define RF_CSR_CFG_REGNUM              FIELD32(0x00001f00)
+#define RF_CSR_CFG_REGNUM              FIELD32(0x00003f00)
 #define RF_CSR_CFG_WRITE               FIELD32(0x00010000)
 #define RF_CSR_CFG_BUSY                        FIELD32(0x00020000)
 
  * PROTECT_RATE: Protection control frame rate for CCK TX(RTS/CTS/CFEnd)
  * PROTECT_CTRL: Protection control frame type for CCK TX
  *               0:none, 1:RTS/CTS, 2:CTS-to-self
- * PROTECT_NAV: TXOP protection type for CCK TX
- *              0:none, 1:ShortNAVprotect, 2:LongNAVProtect
+ * PROTECT_NAV_SHORT: TXOP protection type for CCK TX with short NAV
+ * PROTECT_NAV_LONG: TXOP protection type for CCK TX with long NAV
  * TX_OP_ALLOW_CCK: CCK TXOP allowance, 0:disallow
  * TX_OP_ALLOW_OFDM: CCK TXOP allowance, 0:disallow
  * TX_OP_ALLOW_MM20: CCK TXOP allowance, 0:disallow
 #define CCK_PROT_CFG                   0x1364
 #define CCK_PROT_CFG_PROTECT_RATE      FIELD32(0x0000ffff)
 #define CCK_PROT_CFG_PROTECT_CTRL      FIELD32(0x00030000)
-#define CCK_PROT_CFG_PROTECT_NAV       FIELD32(0x000c0000)
+#define CCK_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
+#define CCK_PROT_CFG_PROTECT_NAV_LONG  FIELD32(0x00080000)
 #define CCK_PROT_CFG_TX_OP_ALLOW_CCK   FIELD32(0x00100000)
 #define CCK_PROT_CFG_TX_OP_ALLOW_OFDM  FIELD32(0x00200000)
 #define CCK_PROT_CFG_TX_OP_ALLOW_MM20  FIELD32(0x00400000)
 #define OFDM_PROT_CFG                  0x1368
 #define OFDM_PROT_CFG_PROTECT_RATE     FIELD32(0x0000ffff)
 #define OFDM_PROT_CFG_PROTECT_CTRL     FIELD32(0x00030000)
-#define OFDM_PROT_CFG_PROTECT_NAV      FIELD32(0x000c0000)
+#define OFDM_PROT_CFG_PROTECT_NAV_SHORT        FIELD32(0x00040000)
+#define OFDM_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
 #define OFDM_PROT_CFG_TX_OP_ALLOW_CCK  FIELD32(0x00100000)
 #define OFDM_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
 #define OFDM_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
 #define MM20_PROT_CFG                  0x136c
 #define MM20_PROT_CFG_PROTECT_RATE     FIELD32(0x0000ffff)
 #define MM20_PROT_CFG_PROTECT_CTRL     FIELD32(0x00030000)
-#define MM20_PROT_CFG_PROTECT_NAV      FIELD32(0x000c0000)
+#define MM20_PROT_CFG_PROTECT_NAV_SHORT        FIELD32(0x00040000)
+#define MM20_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
 #define MM20_PROT_CFG_TX_OP_ALLOW_CCK  FIELD32(0x00100000)
 #define MM20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
 #define MM20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
 #define MM40_PROT_CFG                  0x1370
 #define MM40_PROT_CFG_PROTECT_RATE     FIELD32(0x0000ffff)
 #define MM40_PROT_CFG_PROTECT_CTRL     FIELD32(0x00030000)
-#define MM40_PROT_CFG_PROTECT_NAV      FIELD32(0x000c0000)
+#define MM40_PROT_CFG_PROTECT_NAV_SHORT        FIELD32(0x00040000)
+#define MM40_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
 #define MM40_PROT_CFG_TX_OP_ALLOW_CCK  FIELD32(0x00100000)
 #define MM40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
 #define MM40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
 #define GF20_PROT_CFG                  0x1374
 #define GF20_PROT_CFG_PROTECT_RATE     FIELD32(0x0000ffff)
 #define GF20_PROT_CFG_PROTECT_CTRL     FIELD32(0x00030000)
-#define GF20_PROT_CFG_PROTECT_NAV      FIELD32(0x000c0000)
+#define GF20_PROT_CFG_PROTECT_NAV_SHORT        FIELD32(0x00040000)
+#define GF20_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
 #define GF20_PROT_CFG_TX_OP_ALLOW_CCK  FIELD32(0x00100000)
 #define GF20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
 #define GF20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
 #define GF40_PROT_CFG                  0x1378
 #define GF40_PROT_CFG_PROTECT_RATE     FIELD32(0x0000ffff)
 #define GF40_PROT_CFG_PROTECT_CTRL     FIELD32(0x00030000)
-#define GF40_PROT_CFG_PROTECT_NAV      FIELD32(0x000c0000)
+#define GF40_PROT_CFG_PROTECT_NAV_SHORT        FIELD32(0x00040000)
+#define GF40_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
 #define GF40_PROT_CFG_TX_OP_ALLOW_CCK  FIELD32(0x00100000)
 #define GF40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
 #define GF40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1697,11 +1725,14 @@ struct mac_iveiv_entry {
  */
 
 /*
- * BBP 1: TX Antenna & Power
- * POWER: 0 - normal, 1 - drop tx power by 6dBm, 2 - drop tx power by 12dBm,
- *     3 - increase tx power by 6dBm
+ * BBP 1: TX Antenna & Power Control
+ * POWER_CTRL:
+ * 0 - normal,
+ * 1 - drop tx power by 6dBm,
+ * 2 - drop tx power by 12dBm,
+ * 3 - increase tx power by 6dBm
  */
-#define BBP1_TX_POWER                  FIELD8(0x07)
+#define BBP1_TX_POWER_CTRL             FIELD8(0x07)
 #define BBP1_TX_ANTENNA                        FIELD8(0x18)
 
 /*
@@ -1715,6 +1746,13 @@ struct mac_iveiv_entry {
  */
 #define BBP4_TX_BF                     FIELD8(0x01)
 #define BBP4_BANDWIDTH                 FIELD8(0x18)
+#define BBP4_MAC_IF_CTRL               FIELD8(0x40)
+
+/*
+ * BBP 109
+ */
+#define BBP109_TX0_POWER       FIELD8(0x0f)
+#define BBP109_TX1_POWER       FIELD8(0xf0)
 
 /*
  * BBP 138: Unknown
@@ -1724,6 +1762,11 @@ struct mac_iveiv_entry {
 #define BBP138_TX_DAC1                 FIELD8(0x20)
 #define BBP138_TX_DAC2                 FIELD8(0x40)
 
+/*
+ * BBP 152: Rx Ant
+ */
+#define BBP152_RX_DEFAULT_ANT  FIELD8(0x80)
+
 /*
  * RFCSR registers
  * The wordsize of the RFCSR is 8 bits.
@@ -1733,11 +1776,17 @@ struct mac_iveiv_entry {
  * RFCSR 1:
  */
 #define RFCSR1_RF_BLOCK_EN             FIELD8(0x01)
+#define RFCSR1_PLL_PD                  FIELD8(0x02)
 #define RFCSR1_RX0_PD                  FIELD8(0x04)
 #define RFCSR1_TX0_PD                  FIELD8(0x08)
 #define RFCSR1_RX1_PD                  FIELD8(0x10)
 #define RFCSR1_TX1_PD                  FIELD8(0x20)
 
+/*
+ * RFCSR 2:
+ */
+#define RFCSR2_RESCAL_EN               FIELD8(0x80)
+
 /*
  * RFCSR 6:
  */
@@ -1749,6 +1798,11 @@ struct mac_iveiv_entry {
  */
 #define RFCSR7_RF_TUNING               FIELD8(0x01)
 
+/*
+ * RFCSR 11:
+ */
+#define RFCSR11_R                      FIELD8(0x03)
+
 /*
  * RFCSR 12:
  */
@@ -1770,6 +1824,7 @@ struct mac_iveiv_entry {
 #define RFCSR17_TXMIXER_GAIN           FIELD8(0x07)
 #define RFCSR17_TX_LO1_EN              FIELD8(0x08)
 #define RFCSR17_R                      FIELD8(0x20)
+#define RFCSR17_CODE                   FIELD8(0x7f)
 
 /*
  * RFCSR 20:
@@ -1802,8 +1857,32 @@ struct mac_iveiv_entry {
 /*
  * RFCSR 30:
  */
+#define RFCSR30_TX_H20M                FIELD8(0x02)
+#define RFCSR30_RX_H20M                FIELD8(0x04)
+#define RFCSR30_RX_VCM         FIELD8(0x18)
 #define RFCSR30_RF_CALIBRATION         FIELD8(0x80)
 
+/*
+ * RFCSR 31:
+ */
+#define RFCSR31_RX_AGC_FC              FIELD8(0x1f)
+#define RFCSR31_RX_H20M                        FIELD8(0x20)
+
+/*
+ * RFCSR 38:
+ */
+#define RFCSR38_RX_LO1_EN      FIELD8(0x20)
+
+/*
+ * RFCSR 39:
+ */
+#define RFCSR39_RX_LO2_EN      FIELD8(0x80)
+
+/*
+ * RFCSR 49:
+ */
+#define RFCSR49_TX                     FIELD8(0x3f)
+
 /*
  * RF registers
  */
@@ -1836,6 +1915,11 @@ struct mac_iveiv_entry {
  * The wordsize of the EEPROM is 16 bits.
  */
 
+/*
+ * Chip ID
+ */
+#define EEPROM_CHIP_ID         0x0000
+
 /*
  * EEPROM Version
  */
@@ -1989,23 +2073,26 @@ struct mac_iveiv_entry {
 #define EEPROM_RSSI_A2_LNA_A2          FIELD16(0xff00)
 
 /*
- * EEPROM Maximum TX power values
+ * EEPROM EIRP Maximum TX power values (unit: dBm)
  */
-#define EEPROM_MAX_TX_POWER            0x0027
-#define EEPROM_MAX_TX_POWER_24GHZ      FIELD16(0x00ff)
-#define EEPROM_MAX_TX_POWER_5GHZ       FIELD16(0xff00)
+#define EEPROM_EIRP_MAX_TX_POWER       0x0027
+#define EEPROM_EIRP_MAX_TX_POWER_2GHZ  FIELD16(0x00ff)
+#define EEPROM_EIRP_MAX_TX_POWER_5GHZ  FIELD16(0xff00)
 
 /*
  * EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power.
  * This is delta in 40MHZ.
- * VALUE: Tx Power dalta value (MAX=4)
+ * VALUE: Tx Power delta value, MAX=4 (unit: dBm)
  * TYPE: 1: Plus the delta value, 0: minus the delta value
- * TXPOWER: Enable:
+ * ENABLE: enable tx power compensation for 40 MHz bandwidth
  */
 #define EEPROM_TXPOWER_DELTA           0x0028
-#define EEPROM_TXPOWER_DELTA_VALUE     FIELD16(0x003f)
-#define EEPROM_TXPOWER_DELTA_TYPE      FIELD16(0x0040)
-#define EEPROM_TXPOWER_DELTA_TXPOWER   FIELD16(0x0080)
+#define EEPROM_TXPOWER_DELTA_VALUE_2G  FIELD16(0x003f)
+#define EEPROM_TXPOWER_DELTA_TYPE_2G   FIELD16(0x0040)
+#define EEPROM_TXPOWER_DELTA_ENABLE_2G FIELD16(0x0080)
+#define EEPROM_TXPOWER_DELTA_VALUE_5G  FIELD16(0x3f00)
+#define EEPROM_TXPOWER_DELTA_TYPE_5G   FIELD16(0x4000)
+#define EEPROM_TXPOWER_DELTA_ENABLE_5G FIELD16(0x8000)
 
 /*
  * EEPROM TXPOWER 802.11BG
@@ -2058,6 +2145,7 @@ struct mac_iveiv_entry {
 #define MCU_LED_LED_POLARITY           0x54
 #define MCU_RADAR                      0x60
 #define MCU_BOOT_SIGNAL                        0x72
+#define MCU_ANT_SELECT                 0x73
 #define MCU_BBP_SIGNAL                 0x80
 #define MCU_POWER_SAVE                 0x83
 
@@ -2202,4 +2290,9 @@ struct mac_iveiv_entry {
 #define TXPOWER_A_TO_DEV(__txpower) \
        clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER)
 
+/*
+ * Board's maximum TX power limitation
+ */
+#define EIRP_MAX_TX_POWER_LIMIT        0x50
+
 #endif /* RT2800_H */
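
The FIELD8/FIELD16/FIELD32 macros above are essentially bit masks; the rt2x00_set_field*/get_field* helpers shift values in and out based on the lowest set bit of the mask. A generic sketch of that mechanism (the helper names are local to the example):

#include <stdio.h>
#include <stdint.h>

/* Shift implied by the mask: position of its lowest set bit. */
static unsigned int field_shift(uint32_t mask)
{
        unsigned int shift = 0;

        while (!(mask & 1)) {
                mask >>= 1;
                shift++;
        }
        return shift;
}

static void set_field32(uint32_t *reg, uint32_t mask, uint32_t value)
{
        *reg &= ~mask;
        *reg |= (value << field_shift(mask)) & mask;
}

static uint32_t get_field32(uint32_t reg, uint32_t mask)
{
        return (reg & mask) >> field_shift(mask);
}

int main(void)
{
        /* e.g. RF_CSR_CFG_REGNUM was widened from 0x00001f00 to 0x00003f00
         * so register numbers up to 63 fit. */
        uint32_t reg = 0, regnum_mask = 0x00003f00;

        set_field32(&reg, regnum_mask, 49);
        printf("reg = 0x%08x, regnum = %u\n", reg,
               get_field32(reg, regnum_mask));
        return 0;
}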
index 54917a28139813713904cc03e9f77c67f40e7588..3da78bf0ca2629451e581669e4973a6b4f01a459 100644 (file)
@@ -400,8 +400,15 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
        if (rt2800_wait_csr_ready(rt2x00dev))
                return -EBUSY;
 
-       if (rt2x00_is_pci(rt2x00dev))
+       if (rt2x00_is_pci(rt2x00dev)) {
+               if (rt2x00_rt(rt2x00dev, RT5390)) {
+                       rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
+                       rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
+                       rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
+                       rt2800_register_write(rt2x00dev, AUX_CTRL, reg);
+               }
                rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002);
+       }
 
        /*
         * Disable DMA, will be reenabled later when enabling
@@ -773,13 +780,14 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
        unsigned int beacon_base;
        unsigned int padding_len;
-       u32 reg;
+       u32 orig_reg, reg;
 
        /*
         * Disable beaconing while we are reloading the beacon data,
         * otherwise we might be sending out invalid data.
         */
        rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+       orig_reg = reg;
        rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
        rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
 
@@ -810,7 +818,14 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
         * Write entire beacon with TXWI and padding to register.
         */
        padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
-       skb_pad(entry->skb, padding_len);
+       if (padding_len && skb_pad(entry->skb, padding_len)) {
+               ERROR(rt2x00dev, "Failure padding beacon, aborting\n");
+               /* skb freed by skb_pad() on failure */
+               entry->skb = NULL;
+               rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
+               return;
+       }
+
        beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
        rt2800_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
                                   entry->skb->len + padding_len);
@@ -818,8 +833,6 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
        /*
         * Enable beaconing again.
         */
-       rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
-       rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
        rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
        rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
 
@@ -831,8 +844,8 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
 }
 EXPORT_SYMBOL_GPL(rt2800_write_beacon);
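
rt2800_write_beacon() now saves BCN_TIME_CFG before disabling beacon generation, and restores it if padding the frame fails rather than uploading a partially prepared beacon. The same save / attempt / roll-back shape in a self-contained sketch; pad_to_4() is a stand-in for skb_pad() and, unlike it, leaves the buffer allocated on failure:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
        unsigned char *data;
        size_t len;
};

/* Stand-in for skb_pad(): grow to a multiple of 4, zero-filling the tail. */
static int pad_to_4(struct buf *b)
{
        size_t padded = (b->len + 3) & ~(size_t)3;
        unsigned char *p;

        if (padded == b->len)
                return 0;
        p = realloc(b->data, padded);
        if (!p)
                return -1;              /* caller must roll back */
        memset(p + b->len, 0, padded - b->len);
        b->data = p;
        b->len = padded;
        return 0;
}

int main(void)
{
        unsigned int beacon_gen = 1;    /* pretend "beaconing enabled" bit */
        unsigned int saved = beacon_gen;        /* save before touching it */
        struct buf beacon = { .len = 70 };

        beacon.data = calloc(1, beacon.len);
        beacon_gen = 0;                 /* disable while reloading */

        if (pad_to_4(&beacon)) {
                beacon_gen = saved;     /* roll back on failure */
                fprintf(stderr, "padding failed, beacon left untouched\n");
                free(beacon.data);
                return 1;
        }

        /* ... the padded frame would be written to the beacon offset here ... */
        beacon_gen = 1;                 /* re-enable beaconing */
        printf("beacon padded to %zu bytes\n", beacon.len);
        free(beacon.data);
        return 0;
}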
 
-static inline void rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev,
-                                      unsigned int beacon_base)
+static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
+                                               unsigned int beacon_base)
 {
        int i;
 
@@ -845,6 +858,33 @@ static inline void rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev,
                rt2800_register_write(rt2x00dev, beacon_base + i, 0);
 }
 
+void rt2800_clear_beacon(struct queue_entry *entry)
+{
+       struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+       u32 reg;
+
+       /*
+        * Disable beaconing while we are reloading the beacon data,
+        * otherwise we might be sending out invalid data.
+        */
+       rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+       rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
+       rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+       /*
+        * Clear beacon.
+        */
+       rt2800_clear_beacon_register(rt2x00dev,
+                                    HW_BEACON_OFFSET(entry->entry_idx));
+
+       /*
+        * Enable beaconing again.
+        */
+       rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+       rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+}
+EXPORT_SYMBOL_GPL(rt2800_clear_beacon);
+
 #ifdef CONFIG_RT2X00_LIB_DEBUGFS
 const struct rt2x00debug rt2800_rt2x00debug = {
        .owner  = THIS_MODULE,
@@ -1005,7 +1045,7 @@ static void rt2800_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
 
        memset(&wcid_entry, 0, sizeof(wcid_entry));
        if (crypto->cmd == SET_KEY)
-               memcpy(&wcid_entry, crypto->address, ETH_ALEN);
+               memcpy(wcid_entry.mac, crypto->address, ETH_ALEN);
        rt2800_register_multiwrite(rt2x00dev, offset,
                                      &wcid_entry, sizeof(wcid_entry));
 }
@@ -1154,30 +1194,12 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
        bool update_bssid = false;
 
        if (flags & CONFIG_UPDATE_TYPE) {
-               /*
-                * Clear current synchronisation setup.
-                */
-               rt2800_clear_beacon(rt2x00dev,
-                                   HW_BEACON_OFFSET(intf->beacon->entry_idx));
                /*
                 * Enable synchronisation.
                 */
                rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
-               rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync);
-               rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE,
-                                  (conf->sync == TSF_SYNC_ADHOC ||
-                                   conf->sync == TSF_SYNC_AP_NONE));
                rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
-               /*
-                * Enable pre tbtt interrupt for beaconing modes
-                */
-               rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
-               rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER,
-                                  (conf->sync == TSF_SYNC_AP_NONE));
-               rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
-
        }
 
        if (flags & CONFIG_UPDATE_MAC) {
@@ -1361,10 +1383,32 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp,
 }
 EXPORT_SYMBOL_GPL(rt2800_config_erp);
 
+static void rt2800_set_ant_diversity(struct rt2x00_dev *rt2x00dev,
+                                    enum antenna ant)
+{
+       u32 reg;
+       u8 eesk_pin = (ant == ANTENNA_A) ? 1 : 0;
+       u8 gpio_bit3 = (ant == ANTENNA_A) ? 0 : 1;
+
+       if (rt2x00_is_pci(rt2x00dev)) {
+               rt2800_register_read(rt2x00dev, E2PROM_CSR, &reg);
+               rt2x00_set_field32(&reg, E2PROM_CSR_DATA_CLOCK, eesk_pin);
+               rt2800_register_write(rt2x00dev, E2PROM_CSR, reg);
+       } else if (rt2x00_is_usb(rt2x00dev))
+               rt2800_mcu_request(rt2x00dev, MCU_ANT_SELECT, 0xff,
+                                  eesk_pin, 0);
+
+       rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
+       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0);
+       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, gpio_bit3);
+       rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+}
+
 void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
 {
        u8 r1;
        u8 r3;
+       u16 eeprom;
 
        rt2800_bbp_read(rt2x00dev, 1, &r1);
        rt2800_bbp_read(rt2x00dev, 3, &r3);
@@ -1372,7 +1416,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
        /*
         * Configure the TX antenna.
         */
-       switch ((int)ant->tx) {
+       switch (ant->tx_chain_num) {
        case 1:
                rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
                break;
@@ -1387,8 +1431,18 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
        /*
         * Configure the RX antenna.
         */
-       switch ((int)ant->rx) {
+       switch (ant->rx_chain_num) {
        case 1:
+               if (rt2x00_rt(rt2x00dev, RT3070) ||
+                   rt2x00_rt(rt2x00dev, RT3090) ||
+                   rt2x00_rt(rt2x00dev, RT3390)) {
+                       rt2x00_eeprom_read(rt2x00dev,
+                                          EEPROM_NIC_CONF1, &eeprom);
+                       if (rt2x00_get_field16(eeprom,
+                                               EEPROM_NIC_CONF1_ANT_DIVERSITY))
+                               rt2800_set_ant_diversity(rt2x00dev,
+                                               rt2x00dev->default_ant.rx);
+               }
                rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
                break;
        case 2:
@@ -1434,13 +1488,13 @@ static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev,
 {
        rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
 
-       if (rt2x00dev->default_ant.tx == 1)
+       if (rt2x00dev->default_ant.tx_chain_num == 1)
                rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_TX1, 1);
 
-       if (rt2x00dev->default_ant.rx == 1) {
+       if (rt2x00dev->default_ant.rx_chain_num == 1) {
                rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX1, 1);
                rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
-       } else if (rt2x00dev->default_ant.rx == 2)
+       } else if (rt2x00dev->default_ant.rx_chain_num == 2)
                rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
 
        if (rf->channel > 14) {
@@ -1526,6 +1580,99 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
        rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
 }
 
+
+#define RT5390_POWER_BOUND     0x27
+#define RT5390_FREQ_OFFSET_BOUND       0x5f
+
+static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
+                                        struct ieee80211_conf *conf,
+                                        struct rf_channel *rf,
+                                        struct channel_info *info)
+{
+       u8 rfcsr;
+       u16 eeprom;
+
+       rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
+       rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
+       rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf2);
+       rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
+       if (info->default_power1 > RT5390_POWER_BOUND)
+               rt2x00_set_field8(&rfcsr, RFCSR49_TX, RT5390_POWER_BOUND);
+       else
+               rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
+       rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
+       rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
+       rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
+       rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
+       rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+       if (rt2x00dev->freq_offset > RT5390_FREQ_OFFSET_BOUND)
+               rt2x00_set_field8(&rfcsr, RFCSR17_CODE, RT5390_FREQ_OFFSET_BOUND);
+       else
+               rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
+       rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+
+       rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+       if (rf->channel <= 14) {
+               int idx = rf->channel-1;
+
+               if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) {
+                       if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
+                               /* r55/r59 value array of channel 1~14 */
+                               static const char r55_bt_rev[] = {0x83, 0x83,
+                                       0x83, 0x73, 0x73, 0x63, 0x53, 0x53,
+                                       0x53, 0x43, 0x43, 0x43, 0x43, 0x43};
+                               static const char r59_bt_rev[] = {0x0e, 0x0e,
+                                       0x0e, 0x0e, 0x0e, 0x0b, 0x0a, 0x09,
+                                       0x07, 0x07, 0x07, 0x07, 0x07, 0x07};
+
+                               rt2800_rfcsr_write(rt2x00dev, 55, r55_bt_rev[idx]);
+                               rt2800_rfcsr_write(rt2x00dev, 59, r59_bt_rev[idx]);
+                       } else {
+                               static const char r59_bt[] = {0x8b, 0x8b, 0x8b,
+                                       0x8b, 0x8b, 0x8b, 0x8b, 0x8a, 0x89,
+                                       0x88, 0x88, 0x86, 0x85, 0x84};
+
+                               rt2800_rfcsr_write(rt2x00dev, 59, r59_bt[idx]);
+                       }
+               } else {
+                       if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
+                               static const char r55_nonbt_rev[] = {0x23, 0x23,
+                                       0x23, 0x23, 0x13, 0x13, 0x03, 0x03,
+                                       0x03, 0x03, 0x03, 0x03, 0x03, 0x03};
+                               static const char r59_nonbt_rev[] = {0x07, 0x07,
+                                       0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+                                       0x07, 0x07, 0x06, 0x05, 0x04, 0x04};
+
+                               rt2800_rfcsr_write(rt2x00dev, 55, r55_nonbt_rev[idx]);
+                               rt2800_rfcsr_write(rt2x00dev, 59, r59_nonbt_rev[idx]);
+                       } else if (rt2x00_rt(rt2x00dev, RT5390)) {
+                               static const char r59_non_bt[] = {0x8f, 0x8f,
+                                       0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d,
+                                       0x8a, 0x88, 0x88, 0x87, 0x87, 0x86};
+
+                               rt2800_rfcsr_write(rt2x00dev, 59, r59_non_bt[idx]);
+                       }
+               }
+       }
+
+       rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0);
+       rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0);
+       rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
+       rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
+}
+
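
The RF5390 channel programming above selects r55/r59 values from small per-channel tables indexed by channel - 1, with separate tables for the BT-coexistence and chip-revision cases. A compact sketch of that lookup with bounds checking; the table values are copied from the non-BT r59 table in the hunk above:

#include <stdio.h>

/* r59 values for 2.4 GHz channels 1..14, from the non-BT table above. */
static const unsigned char r59_tab[14] = {
        0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f,
        0x8d, 0x8a, 0x88, 0x88, 0x87, 0x87, 0x86
};

static int r59_for_channel(unsigned int channel, unsigned char *value)
{
        if (channel < 1 || channel > 14)
                return -1;              /* only 2.4 GHz channels here */
        *value = r59_tab[channel - 1];
        return 0;
}

int main(void)
{
        unsigned int chan;
        unsigned char val;

        for (chan = 1; chan <= 14; chan++)
                if (!r59_for_channel(chan, &val))
                        printf("channel %2u -> r59 = 0x%02x\n", chan, val);
        return 0;
}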
 static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
                                  struct ieee80211_conf *conf,
                                  struct rf_channel *rf,
@@ -1550,6 +1697,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
            rt2x00_rf(rt2x00dev, RF3052) ||
            rt2x00_rf(rt2x00dev, RF3320))
                rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
+       else if (rt2x00_rf(rt2x00dev, RF5390))
+               rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
        else
                rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
 
@@ -1562,12 +1711,14 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        rt2800_bbp_write(rt2x00dev, 86, 0);
 
        if (rf->channel <= 14) {
-               if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) {
-                       rt2800_bbp_write(rt2x00dev, 82, 0x62);
-                       rt2800_bbp_write(rt2x00dev, 75, 0x46);
-               } else {
-                       rt2800_bbp_write(rt2x00dev, 82, 0x84);
-                       rt2800_bbp_write(rt2x00dev, 75, 0x50);
+               if (!rt2x00_rt(rt2x00dev, RT5390)) {
+                       if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) {
+                               rt2800_bbp_write(rt2x00dev, 82, 0x62);
+                               rt2800_bbp_write(rt2x00dev, 75, 0x46);
+                       } else {
+                               rt2800_bbp_write(rt2x00dev, 82, 0x84);
+                               rt2800_bbp_write(rt2x00dev, 75, 0x50);
+                       }
                }
        } else {
                rt2800_bbp_write(rt2x00dev, 82, 0xf2);
@@ -1587,13 +1738,13 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        tx_pin = 0;
 
        /* Turn on unused PA or LNA when not using 1T or 1R */
-       if (rt2x00dev->default_ant.tx != 1) {
+       if (rt2x00dev->default_ant.tx_chain_num == 2) {
                rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1);
                rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1);
        }
 
        /* Turn on unused PA or LNA when not using 1T or 1R */
-       if (rt2x00dev->default_ant.rx != 1) {
+       if (rt2x00dev->default_ant.rx_chain_num == 2) {
                rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
                rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
        }
@@ -1637,30 +1788,116 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &reg);
 }
 
+static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
+                                     enum ieee80211_band band)
+{
+       u16 eeprom;
+       u8 comp_en;
+       u8 comp_type;
+       int comp_value;
+
+       rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA, &eeprom);
+
+       if (eeprom == 0xffff)
+               return 0;
+
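+       /*
+        * Illustrative reading of the fields used below (taken from this
+        * code, not from a datasheet): each band has an enable bit, a type
+        * bit and a delta magnitude; type 0 means the delta is subtracted,
+        * otherwise it is added.
+        */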
+       if (band == IEEE80211_BAND_2GHZ) {
+               comp_en = rt2x00_get_field16(eeprom,
+                                EEPROM_TXPOWER_DELTA_ENABLE_2G);
+               if (comp_en) {
+                       comp_type = rt2x00_get_field16(eeprom,
+                                          EEPROM_TXPOWER_DELTA_TYPE_2G);
+                       comp_value = rt2x00_get_field16(eeprom,
+                                           EEPROM_TXPOWER_DELTA_VALUE_2G);
+                       if (!comp_type)
+                               comp_value = -comp_value;
+               }
+       } else {
+               comp_en = rt2x00_get_field16(eeprom,
+                                EEPROM_TXPOWER_DELTA_ENABLE_5G);
+               if (comp_en) {
+                       comp_type = rt2x00_get_field16(eeprom,
+                                          EEPROM_TXPOWER_DELTA_TYPE_5G);
+                       comp_value = rt2x00_get_field16(eeprom,
+                                           EEPROM_TXPOWER_DELTA_VALUE_5G);
+                       if (!comp_type)
+                               comp_value = -comp_value;
+               }
+       }
+
+       return comp_value;
+}
+
+static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev,
+                                    int is_rate_b,
+                                    enum ieee80211_band band,
+                                    int power_level,
+                                    u8 txpower)
+{
+       u32 reg;
+       u16 eeprom;
+       u8 criterion;
+       u8 eirp_txpower;
+       u8 eirp_txpower_criterion;
+       u8 reg_limit;
+       int bw_comp = 0;
+
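+       /*
+        * 802.11b rates exist only in the 2.4GHz band, so a 5GHz b-rate
+        * combination has nothing to compensate.
+        */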
+       if ((band == IEEE80211_BAND_5GHZ) && is_rate_b)
+               return txpower;
+
+       if (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
+               bw_comp = rt2800_get_txpower_bw_comp(rt2x00dev, band);
+
+       if (test_bit(CONFIG_SUPPORT_POWER_LIMIT, &rt2x00dev->flags)) {
+               /*
+                * Check if the EIRP txpower exceeds txpower_limit.
+                * We use OFDM 6M as the criterion; its EIRP txpower
+                * is stored at EEPROM_EIRP_MAX_TX_POWER.
+                * 802.11b data rates need an additional 4dBm added
+                * when calculating the EIRP txpower.
+                */
+               rt2800_register_read(rt2x00dev, TX_PWR_CFG_0, &reg);
+               criterion = rt2x00_get_field32(reg, TX_PWR_CFG_0_6MBS);
+
+               rt2x00_eeprom_read(rt2x00dev,
+                                  EEPROM_EIRP_MAX_TX_POWER, &eeprom);
+
+               if (band == IEEE80211_BAND_2GHZ)
+                       eirp_txpower_criterion = rt2x00_get_field16(eeprom,
+                                                EEPROM_EIRP_MAX_TX_POWER_2GHZ);
+               else
+                       eirp_txpower_criterion = rt2x00_get_field16(eeprom,
+                                                EEPROM_EIRP_MAX_TX_POWER_5GHZ);
+
+               eirp_txpower = eirp_txpower_criterion + (txpower - criterion) +
+                                      (is_rate_b ? 4 : 0) + bw_comp;
+
+               reg_limit = (eirp_txpower > power_level) ?
+                                       (eirp_txpower - power_level) : 0;
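+
+               /*
+                * Worked example with made-up numbers: criterion = 6,
+                * eirp_txpower_criterion = 15, txpower = 8 for an OFDM rate
+                * and bw_comp = 0 give eirp_txpower = 15 + (8 - 6) = 17;
+                * with power_level = 14 this is clipped by reg_limit = 3,
+                * so the rate is programmed with 8 - 3 = 5.
+                */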
+       } else {
+               reg_limit = 0;
+       }
+
+       return txpower + bw_comp - reg_limit;
+}
+
 static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
-                                 const int max_txpower)
+                                 struct ieee80211_conf *conf)
 {
        u8 txpower;
-       u8 max_value = (u8)max_txpower;
        u16 eeprom;
-       int i;
+       int i, is_rate_b;
        u32 reg;
        u8 r1;
        u32 offset;
+       enum ieee80211_band band = conf->channel->band;
+       int power_level = conf->power_level;
 
        /*
-        * set to normal tx power mode: +/- 0dBm
+        * set to normal bbp tx power control mode: +/- 0dBm
         */
        rt2800_bbp_read(rt2x00dev, 1, &r1);
-       rt2x00_set_field8(&r1, BBP1_TX_POWER, 0);
+       rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, 0);
        rt2800_bbp_write(rt2x00dev, 1, r1);
-
-       /*
-        * The eeprom contains the tx power values for each rate. These
-        * values map to 100% tx power. Each 16bit word contains four tx
-        * power values and the order is the same as used in the TX_PWR_CFG
-        * registers.
-        */
        offset = TX_PWR_CFG_0;
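+       /*
+        * Each 16-bit EEPROM word holds four per-rate power values and two
+        * consecutive words fill the eight rate fields of one TX_PWR_CFG
+        * register.  Only the very first word (the 1, 2, 5.5 and 11 Mbps
+        * entries of TX_PWR_CFG_0) carries 802.11b rates, hence the
+        * is_rate_b handling below.
+        */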
 
        for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) {
@@ -1674,73 +1911,99 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
                rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i,
                                   &eeprom);
 
-               /* TX_PWR_CFG_0: 1MBS, TX_PWR_CFG_1: 24MBS,
+               is_rate_b = i ? 0 : 1;
+               /*
+                * TX_PWR_CFG_0: 1MBS, TX_PWR_CFG_1: 24MBS,
                 * TX_PWR_CFG_2: MCS4, TX_PWR_CFG_3: MCS12,
-                * TX_PWR_CFG_4: unknown */
+                * TX_PWR_CFG_4: unknown
+                */
                txpower = rt2x00_get_field16(eeprom,
                                             EEPROM_TXPOWER_BYRATE_RATE0);
-               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE0,
-                                  min(txpower, max_value));
+               txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
+                                            power_level, txpower);
+               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE0, txpower);
 
-               /* TX_PWR_CFG_0: 2MBS, TX_PWR_CFG_1: 36MBS,
+               /*
+                * TX_PWR_CFG_0: 2MBS, TX_PWR_CFG_1: 36MBS,
                 * TX_PWR_CFG_2: MCS5, TX_PWR_CFG_3: MCS13,
-                * TX_PWR_CFG_4: unknown */
+                * TX_PWR_CFG_4: unknown
+                */
                txpower = rt2x00_get_field16(eeprom,
                                             EEPROM_TXPOWER_BYRATE_RATE1);
-               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE1,
-                                  min(txpower, max_value));
+               txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
+                                            power_level, txpower);
+               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE1, txpower);
 
-               /* TX_PWR_CFG_0: 55MBS, TX_PWR_CFG_1: 48MBS,
+               /*
+                * TX_PWR_CFG_0: 5.5MBS, TX_PWR_CFG_1: 48MBS,
                 * TX_PWR_CFG_2: MCS6,  TX_PWR_CFG_3: MCS14,
-                * TX_PWR_CFG_4: unknown */
+                * TX_PWR_CFG_4: unknown
+                */
                txpower = rt2x00_get_field16(eeprom,
                                             EEPROM_TXPOWER_BYRATE_RATE2);
-               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE2,
-                                  min(txpower, max_value));
+               txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
+                                            power_level, txpower);
+               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE2, txpower);
 
-               /* TX_PWR_CFG_0: 11MBS, TX_PWR_CFG_1: 54MBS,
+               /*
+                * TX_PWR_CFG_0: 11MBS, TX_PWR_CFG_1: 54MBS,
                 * TX_PWR_CFG_2: MCS7,  TX_PWR_CFG_3: MCS15,
-                * TX_PWR_CFG_4: unknown */
+                * TX_PWR_CFG_4: unknown
+                */
                txpower = rt2x00_get_field16(eeprom,
                                             EEPROM_TXPOWER_BYRATE_RATE3);
-               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3,
-                                  min(txpower, max_value));
+               txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
+                                            power_level, txpower);
+               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3, txpower);
 
                /* read the next four txpower values */
                rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i + 1,
                                   &eeprom);
 
-               /* TX_PWR_CFG_0: 6MBS, TX_PWR_CFG_1: MCS0,
+               is_rate_b = 0;
+               /*
+                * TX_PWR_CFG_0: 6MBS, TX_PWR_CFG_1: MCS0,
                 * TX_PWR_CFG_2: MCS8, TX_PWR_CFG_3: unknown,
-                * TX_PWR_CFG_4: unknown */
+                * TX_PWR_CFG_4: unknown
+                */
                txpower = rt2x00_get_field16(eeprom,
                                             EEPROM_TXPOWER_BYRATE_RATE0);
-               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE4,
-                                  min(txpower, max_value));
+               txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
+                                            power_level, txpower);
+               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE4, txpower);
 
-               /* TX_PWR_CFG_0: 9MBS, TX_PWR_CFG_1: MCS1,
+               /*
+                * TX_PWR_CFG_0: 9MBS, TX_PWR_CFG_1: MCS1,
                 * TX_PWR_CFG_2: MCS9, TX_PWR_CFG_3: unknown,
-                * TX_PWR_CFG_4: unknown */
+                * TX_PWR_CFG_4: unknown
+                */
                txpower = rt2x00_get_field16(eeprom,
                                             EEPROM_TXPOWER_BYRATE_RATE1);
-               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE5,
-                                  min(txpower, max_value));
+               txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
+                                            power_level, txpower);
+               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE5, txpower);
 
-               /* TX_PWR_CFG_0: 12MBS, TX_PWR_CFG_1: MCS2,
+               /*
+                * TX_PWR_CFG_0: 12MBS, TX_PWR_CFG_1: MCS2,
                 * TX_PWR_CFG_2: MCS10, TX_PWR_CFG_3: unknown,
-                * TX_PWR_CFG_4: unknown */
+                * TX_PWR_CFG_4: unknown
+                */
                txpower = rt2x00_get_field16(eeprom,
                                             EEPROM_TXPOWER_BYRATE_RATE2);
-               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE6,
-                                  min(txpower, max_value));
+               txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
+                                            power_level, txpower);
+               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE6, txpower);
 
-               /* TX_PWR_CFG_0: 18MBS, TX_PWR_CFG_1: MCS3,
+               /*
+                * TX_PWR_CFG_0: 18MBS, TX_PWR_CFG_1: MCS3,
                 * TX_PWR_CFG_2: MCS11, TX_PWR_CFG_3: unknown,
-                * TX_PWR_CFG_4: unknown */
+                * TX_PWR_CFG_4: unknown
+                */
                txpower = rt2x00_get_field16(eeprom,
                                             EEPROM_TXPOWER_BYRATE_RATE3);
-               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE7,
-                                  min(txpower, max_value));
+               txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
+                                            power_level, txpower);
+               rt2x00_set_field32(&reg, TX_PWR_CFG_RATE7, txpower);
 
                rt2800_register_write(rt2x00dev, offset, reg);
 
@@ -1799,11 +2062,13 @@ void rt2800_config(struct rt2x00_dev *rt2x00dev,
        /* Always recalculate LNA gain before changing configuration */
        rt2800_config_lna_gain(rt2x00dev, libconf);
 
-       if (flags & IEEE80211_CONF_CHANGE_CHANNEL)
+       if (flags & IEEE80211_CONF_CHANGE_CHANNEL) {
                rt2800_config_channel(rt2x00dev, libconf->conf,
                                      &libconf->rf, &libconf->channel);
+               rt2800_config_txpower(rt2x00dev, libconf->conf);
+       }
        if (flags & IEEE80211_CONF_CHANGE_POWER)
-               rt2800_config_txpower(rt2x00dev, libconf->conf->power_level);
+               rt2800_config_txpower(rt2x00dev, libconf->conf);
        if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
                rt2800_config_retry_limit(rt2x00dev, libconf);
        if (flags & IEEE80211_CONF_CHANGE_PS)
@@ -1832,7 +2097,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
                if (rt2x00_rt(rt2x00dev, RT3070) ||
                    rt2x00_rt(rt2x00dev, RT3071) ||
                    rt2x00_rt(rt2x00dev, RT3090) ||
-                   rt2x00_rt(rt2x00dev, RT3390))
+                   rt2x00_rt(rt2x00dev, RT3390) ||
+                   rt2x00_rt(rt2x00dev, RT5390))
                        return 0x1c + (2 * rt2x00dev->lna_gain);
                else
                        return 0x2e + rt2x00dev->lna_gain;
@@ -1964,6 +2230,10 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
                rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
                rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
                rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000001f);
+       } else if (rt2x00_rt(rt2x00dev, RT5390)) {
+               rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
+               rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
+               rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
        } else {
                rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
                rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -2032,7 +2302,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
        rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
        rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 3);
        rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0);
-       rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1);
+       rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV_SHORT, 1);
        rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1);
        rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
        rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2045,7 +2315,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
        rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
        rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 3);
        rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0);
-       rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1);
+       rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV_SHORT, 1);
        rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1);
        rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
        rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2058,7 +2328,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
        rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
        rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, 0x4004);
        rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, 0);
-       rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV, 1);
+       rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV_SHORT, 1);
        rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
        rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
        rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2071,7 +2341,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
        rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
        rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
        rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0);
-       rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1);
+       rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV_SHORT, 1);
        rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
        rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
        rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2084,7 +2354,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
        rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
        rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, 0x4004);
        rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, 0);
-       rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV, 1);
+       rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV_SHORT, 1);
        rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
        rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
        rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2097,7 +2367,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
        rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
        rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, 0x4084);
        rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, 0);
-       rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV, 1);
+       rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV_SHORT, 1);
        rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
        rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
        rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2187,19 +2457,23 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
        /*
         * Clear all beacons
         */
-       rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE0);
-       rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE1);
-       rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE2);
-       rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE3);
-       rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE4);
-       rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE5);
-       rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE6);
-       rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE7);
+       rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE0);
+       rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE1);
+       rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE2);
+       rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE3);
+       rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE4);
+       rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE5);
+       rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE6);
+       rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE7);
 
        if (rt2x00_is_usb(rt2x00dev)) {
                rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
                rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 30);
                rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
+       } else if (rt2x00_is_pcie(rt2x00dev)) {
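+               /*
+                * Presumably the bus clock in MHz (125 for PCIe here versus
+                * 30 for USB above), so US_CYC_CNT counts microseconds.
+                */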
+               rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
+               rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 125);
+               rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
        }
 
        rt2800_register_read(rt2x00dev, HT_FBK_CFG0, &reg);
@@ -2335,15 +2609,31 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                     rt2800_wait_bbp_ready(rt2x00dev)))
                return -EACCES;
 
-       if (rt2800_is_305x_soc(rt2x00dev))
+       if (rt2x00_rt(rt2x00dev, RT5390)) {
+               rt2800_bbp_read(rt2x00dev, 4, &value);
+               rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
+               rt2800_bbp_write(rt2x00dev, 4, value);
+       }
+
+       if (rt2800_is_305x_soc(rt2x00dev) ||
+           rt2x00_rt(rt2x00dev, RT5390))
                rt2800_bbp_write(rt2x00dev, 31, 0x08);
 
        rt2800_bbp_write(rt2x00dev, 65, 0x2c);
        rt2800_bbp_write(rt2x00dev, 66, 0x38);
 
+       if (rt2x00_rt(rt2x00dev, RT5390))
+               rt2800_bbp_write(rt2x00dev, 68, 0x0b);
+
        if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
                rt2800_bbp_write(rt2x00dev, 69, 0x16);
                rt2800_bbp_write(rt2x00dev, 73, 0x12);
+       } else if (rt2x00_rt(rt2x00dev, RT5390)) {
+               rt2800_bbp_write(rt2x00dev, 69, 0x12);
+               rt2800_bbp_write(rt2x00dev, 73, 0x13);
+               rt2800_bbp_write(rt2x00dev, 75, 0x46);
+               rt2800_bbp_write(rt2x00dev, 76, 0x28);
+               rt2800_bbp_write(rt2x00dev, 77, 0x59);
        } else {
                rt2800_bbp_write(rt2x00dev, 69, 0x12);
                rt2800_bbp_write(rt2x00dev, 73, 0x10);
@@ -2354,7 +2644,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
        if (rt2x00_rt(rt2x00dev, RT3070) ||
            rt2x00_rt(rt2x00dev, RT3071) ||
            rt2x00_rt(rt2x00dev, RT3090) ||
-           rt2x00_rt(rt2x00dev, RT3390)) {
+           rt2x00_rt(rt2x00dev, RT3390) ||
+           rt2x00_rt(rt2x00dev, RT5390)) {
                rt2800_bbp_write(rt2x00dev, 79, 0x13);
                rt2800_bbp_write(rt2x00dev, 80, 0x05);
                rt2800_bbp_write(rt2x00dev, 81, 0x33);
@@ -2366,35 +2657,62 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
        }
 
        rt2800_bbp_write(rt2x00dev, 82, 0x62);
-       rt2800_bbp_write(rt2x00dev, 83, 0x6a);
+       if (rt2x00_rt(rt2x00dev, RT5390))
+               rt2800_bbp_write(rt2x00dev, 83, 0x7a);
+       else
+               rt2800_bbp_write(rt2x00dev, 83, 0x6a);
 
        if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
                rt2800_bbp_write(rt2x00dev, 84, 0x19);
+       else if (rt2x00_rt(rt2x00dev, RT5390))
+               rt2800_bbp_write(rt2x00dev, 84, 0x9a);
        else
                rt2800_bbp_write(rt2x00dev, 84, 0x99);
 
-       rt2800_bbp_write(rt2x00dev, 86, 0x00);
+       if (rt2x00_rt(rt2x00dev, RT5390))
+               rt2800_bbp_write(rt2x00dev, 86, 0x38);
+       else
+               rt2800_bbp_write(rt2x00dev, 86, 0x00);
+
        rt2800_bbp_write(rt2x00dev, 91, 0x04);
-       rt2800_bbp_write(rt2x00dev, 92, 0x00);
+
+       if (rt2x00_rt(rt2x00dev, RT5390))
+               rt2800_bbp_write(rt2x00dev, 92, 0x02);
+       else
+               rt2800_bbp_write(rt2x00dev, 92, 0x00);
 
        if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) ||
            rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
            rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
            rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
+           rt2x00_rt(rt2x00dev, RT5390) ||
            rt2800_is_305x_soc(rt2x00dev))
                rt2800_bbp_write(rt2x00dev, 103, 0xc0);
        else
                rt2800_bbp_write(rt2x00dev, 103, 0x00);
 
+       if (rt2x00_rt(rt2x00dev, RT5390))
+               rt2800_bbp_write(rt2x00dev, 104, 0x92);
+
        if (rt2800_is_305x_soc(rt2x00dev))
                rt2800_bbp_write(rt2x00dev, 105, 0x01);
+       else if (rt2x00_rt(rt2x00dev, RT5390))
+               rt2800_bbp_write(rt2x00dev, 105, 0x3c);
        else
                rt2800_bbp_write(rt2x00dev, 105, 0x05);
-       rt2800_bbp_write(rt2x00dev, 106, 0x35);
+
+       if (rt2x00_rt(rt2x00dev, RT5390))
+               rt2800_bbp_write(rt2x00dev, 106, 0x03);
+       else
+               rt2800_bbp_write(rt2x00dev, 106, 0x35);
+
+       if (rt2x00_rt(rt2x00dev, RT5390))
+               rt2800_bbp_write(rt2x00dev, 128, 0x12);
 
        if (rt2x00_rt(rt2x00dev, RT3071) ||
            rt2x00_rt(rt2x00dev, RT3090) ||
-           rt2x00_rt(rt2x00dev, RT3390)) {
+           rt2x00_rt(rt2x00dev, RT3390) ||
+           rt2x00_rt(rt2x00dev, RT5390)) {
                rt2800_bbp_read(rt2x00dev, 138, &value);
 
                rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
@@ -2406,6 +2724,41 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                rt2800_bbp_write(rt2x00dev, 138, value);
        }
 
+       if (rt2x00_rt(rt2x00dev, RT5390)) {
+               int ant, div_mode;
+
+               rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+               div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY);
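+               /*
+                * Diversity setting 3 selects the second antenna, matching
+                * the ANTENNA_B case handled in rt2800_init_eeprom().
+                */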
+               ant = (div_mode == 3) ? 1 : 0;
+
+               /* check if this is a Bluetooth combo card */
+               rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+               if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) {
+                       u32 reg;
+
+                       rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
+                       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0);
+                       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT6, 0);
+                       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 0);
+                       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 0);
+                       if (ant == 0)
+                               rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 1);
+                       else if (ant == 1)
+                               rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 1);
+                       rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+               }
+
+               rt2800_bbp_read(rt2x00dev, 152, &value);
+               if (ant == 0)
+                       rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
+               else
+                       rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
+               rt2800_bbp_write(rt2x00dev, 152, value);
+
+               /* Init frequency calibration */
+               rt2800_bbp_write(rt2x00dev, 142, 1);
+               rt2800_bbp_write(rt2x00dev, 143, 57);
+       }
 
        for (i = 0; i < EEPROM_BBP_SIZE; i++) {
                rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
@@ -2436,6 +2789,10 @@ static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev,
        rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40);
        rt2800_bbp_write(rt2x00dev, 4, bbp);
 
+       rt2800_rfcsr_read(rt2x00dev, 31, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR31_RX_H20M, bw40);
+       rt2800_rfcsr_write(rt2x00dev, 31, rfcsr);
+
        rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
        rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1);
        rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
@@ -2491,18 +2848,28 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
            !rt2x00_rt(rt2x00dev, RT3071) &&
            !rt2x00_rt(rt2x00dev, RT3090) &&
            !rt2x00_rt(rt2x00dev, RT3390) &&
+           !rt2x00_rt(rt2x00dev, RT5390) &&
            !rt2800_is_305x_soc(rt2x00dev))
                return 0;
 
        /*
         * Init RF calibration.
         */
-       rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
-       rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
-       rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
-       msleep(1);
-       rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
-       rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+       if (rt2x00_rt(rt2x00dev, RT5390)) {
+               rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
+               rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
+               rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
+               msleep(1);
+               rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 0);
+               rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
+       } else {
+               rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+               rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
+               rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+               msleep(1);
+               rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
+               rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+       }
 
        if (rt2x00_rt(rt2x00dev, RT3070) ||
            rt2x00_rt(rt2x00dev, RT3071) ||
@@ -2510,7 +2877,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
                rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
                rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
                rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
-               rt2800_rfcsr_write(rt2x00dev, 7, 0x70);
+               rt2800_rfcsr_write(rt2x00dev, 7, 0x60);
                rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
                rt2800_rfcsr_write(rt2x00dev, 10, 0x41);
                rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
@@ -2593,6 +2960,87 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
                rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
                rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
                return 0;
+       } else if (rt2x00_rt(rt2x00dev, RT5390)) {
+               rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
+               rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
+               rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
+               rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
+               if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+                       rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
+               else
+                       rt2800_rfcsr_write(rt2x00dev, 6, 0xa0);
+               rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
+               rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
+               rt2800_rfcsr_write(rt2x00dev, 12, 0xc6);
+               rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
+               rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
+               rt2800_rfcsr_write(rt2x00dev, 19, 0x00);
+
+               rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
+               rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
+               if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+                       rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
+               else
+                       rt2800_rfcsr_write(rt2x00dev, 25, 0xc0);
+               rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
+               rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
+
+               rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+               rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
+               rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
+               rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
+               rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
+               rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
+               rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
+
+               if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+                       rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
+               else
+                       rt2800_rfcsr_write(rt2x00dev, 40, 0x4b);
+               rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
+               rt2800_rfcsr_write(rt2x00dev, 42, 0xd2);
+               rt2800_rfcsr_write(rt2x00dev, 43, 0x9a);
+               rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
+               rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
+               if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+                       rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
+               else
+                       rt2800_rfcsr_write(rt2x00dev, 46, 0x7b);
+               rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
+               rt2800_rfcsr_write(rt2x00dev, 49, 0x94);
+
+               rt2800_rfcsr_write(rt2x00dev, 52, 0x38);
+               if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+                       rt2800_rfcsr_write(rt2x00dev, 53, 0x00);
+               else
+                       rt2800_rfcsr_write(rt2x00dev, 53, 0x84);
+               rt2800_rfcsr_write(rt2x00dev, 54, 0x78);
+               rt2800_rfcsr_write(rt2x00dev, 55, 0x44);
+               rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
+               rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
+               rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
+               rt2800_rfcsr_write(rt2x00dev, 59, 0x63);
+
+               rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
+               if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+                       rt2800_rfcsr_write(rt2x00dev, 61, 0xd1);
+               else
+                       rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
+               rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
        }
 
        if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
@@ -2602,12 +3050,12 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
                rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
        } else if (rt2x00_rt(rt2x00dev, RT3071) ||
                   rt2x00_rt(rt2x00dev, RT3090)) {
+               rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
+
                rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
                rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
                rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
 
-               rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
-
                rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
                rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
                if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
@@ -2619,6 +3067,10 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
                                rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
                }
                rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
+
+               rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
+               rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
+               rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
        } else if (rt2x00_rt(rt2x00dev, RT3390)) {
                rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
                rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
@@ -2642,21 +3094,23 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
                        rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x15);
        }
 
-       /*
-        * Set back to initial state
-        */
-       rt2800_bbp_write(rt2x00dev, 24, 0);
+       if (!rt2x00_rt(rt2x00dev, RT5390)) {
+               /*
+                * Set back to initial state
+                */
+               rt2800_bbp_write(rt2x00dev, 24, 0);
 
-       rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
-       rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
-       rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
+               rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
+               rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
+               rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
 
-       /*
-        * set BBP back to BW20
-        */
-       rt2800_bbp_read(rt2x00dev, 4, &bbp);
-       rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
-       rt2800_bbp_write(rt2x00dev, 4, bbp);
+               /*
+                * Set BBP back to BW20
+                */
+               rt2800_bbp_read(rt2x00dev, 4, &bbp);
+               rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
+               rt2800_bbp_write(rt2x00dev, 4, bbp);
+       }
 
        if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
            rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
@@ -2668,24 +3122,28 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
        rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1);
        rt2800_register_write(rt2x00dev, OPT_14_CSR, reg);
 
-       rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
-       rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
-       if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
-           rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
-           rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
-               if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
-                       rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
-       }
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
-       if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1)
-               rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
-                                 rt2x00_get_field16(eeprom,
-                                                  EEPROM_TXMIXER_GAIN_BG_VAL));
-       rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+       if (!rt2x00_rt(rt2x00dev, RT5390)) {
+               rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+               rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
+               if (rt2x00_rt(rt2x00dev, RT3070) ||
+                   rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
+                   rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
+                   rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
+                       if (!test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
+                               rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
+               }
+               rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
+               if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1)
+                       rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
+                                       rt2x00_get_field16(eeprom,
+                                               EEPROM_TXMIXER_GAIN_BG_VAL));
+               rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+       }
 
        if (rt2x00_rt(rt2x00dev, RT3090)) {
                rt2800_bbp_read(rt2x00dev, 138, &bbp);
 
+               /* Turn off unused DAC1 and ADC1 to reduce power consumption */
                rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
                if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
                        rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0);
@@ -2719,10 +3177,9 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
                rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
        }
 
-       if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3071)) {
+       if (rt2x00_rt(rt2x00dev, RT3070)) {
                rt2800_rfcsr_read(rt2x00dev, 27, &rfcsr);
-               if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
-                   rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E))
+               if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F))
                        rt2x00_set_field8(&rfcsr, RFCSR27_R1, 3);
                else
                        rt2x00_set_field8(&rfcsr, RFCSR27_R1, 0);
@@ -2732,6 +3189,20 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
                rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
        }
 
+       if (rt2x00_rt(rt2x00dev, RT5390)) {
+               rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
+               rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0);
+               rt2800_rfcsr_write(rt2x00dev, 38, rfcsr);
+
+               rt2800_rfcsr_read(rt2x00dev, 39, &rfcsr);
+               rt2x00_set_field8(&rfcsr, RFCSR39_RX_LO2_EN, 0);
+               rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
+
+               rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+               rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2);
+               rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+       }
+
        return 0;
 }
 
@@ -2810,10 +3281,7 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev)
 
        rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
        rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
-       rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
        rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
-       rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
-       rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
        rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
 
        /* Wait for DMA, ignore error */
@@ -2823,9 +3291,6 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev)
        rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 0);
        rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
        rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
-
-       rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
-       rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
 }
 EXPORT_SYMBOL_GPL(rt2800_disable_radio);
 
@@ -2986,13 +3451,6 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
                                   default_lna_gain);
        rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
 
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &word);
-       if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_24GHZ) == 0xff)
-               rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_24GHZ, MAX_G_TXPOWER);
-       if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_5GHZ) == 0xff)
-               rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_5GHZ, MAX_A_TXPOWER);
-       rt2x00_eeprom_write(rt2x00dev, EEPROM_MAX_TX_POWER, word);
-
        return 0;
 }
 EXPORT_SYMBOL_GPL(rt2800_validate_eeprom);
@@ -3009,10 +3467,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
        rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
 
        /*
-        * Identify RF chipset.
+        * Identify RF chipset by EEPROM value
+        * RT28xx/RT30xx: defined in "EEPROM_NIC_CONF0_RF_TYPE" field
+        * RT53xx: defined in "EEPROM_CHIP_ID" field
         */
-       value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
        rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
+       if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390)
+               rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &value);
+       else
+               value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
 
        rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
                        value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
@@ -3024,7 +3487,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
            !rt2x00_rt(rt2x00dev, RT3071) &&
            !rt2x00_rt(rt2x00dev, RT3090) &&
            !rt2x00_rt(rt2x00dev, RT3390) &&
-           !rt2x00_rt(rt2x00dev, RT3572)) {
+           !rt2x00_rt(rt2x00dev, RT3572) &&
+           !rt2x00_rt(rt2x00dev, RT5390)) {
                ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
                return -ENODEV;
        }
@@ -3038,7 +3502,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
            !rt2x00_rf(rt2x00dev, RF3021) &&
            !rt2x00_rf(rt2x00dev, RF3022) &&
            !rt2x00_rf(rt2x00dev, RF3052) &&
-           !rt2x00_rf(rt2x00dev, RF3320)) {
+           !rt2x00_rf(rt2x00dev, RF3320) &&
+           !rt2x00_rf(rt2x00dev, RF5390)) {
                ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
                return -ENODEV;
        }
@@ -3046,11 +3511,35 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
        /*
         * Identify default antenna configuration.
         */
-       rt2x00dev->default_ant.tx =
+       rt2x00dev->default_ant.tx_chain_num =
            rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH);
-       rt2x00dev->default_ant.rx =
+       rt2x00dev->default_ant.rx_chain_num =
            rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH);
 
+       rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+
+       if (rt2x00_rt(rt2x00dev, RT3070) ||
+           rt2x00_rt(rt2x00dev, RT3090) ||
+           rt2x00_rt(rt2x00dev, RT3390)) {
+               value = rt2x00_get_field16(eeprom,
+                               EEPROM_NIC_CONF1_ANT_DIVERSITY);
+               switch (value) {
+               case 0:
+               case 1:
+               case 2:
+                       rt2x00dev->default_ant.tx = ANTENNA_A;
+                       rt2x00dev->default_ant.rx = ANTENNA_A;
+                       break;
+               case 3:
+                       rt2x00dev->default_ant.tx = ANTENNA_A;
+                       rt2x00dev->default_ant.rx = ANTENNA_B;
+                       break;
+               }
+       } else {
+               rt2x00dev->default_ant.tx = ANTENNA_A;
+               rt2x00dev->default_ant.rx = ANTENNA_A;
+       }
+
        /*
         * Read frequency offset and RF programming sequence.
         */
@@ -3084,6 +3573,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
        rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &rt2x00dev->led_mcu_reg);
 #endif /* CONFIG_RT2X00_LIB_LEDS */
 
+       /*
+        * Check whether the EIRP tx power limit feature is supported.
+        */
+       rt2x00_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, &eeprom);
+
+       if (rt2x00_get_field16(eeprom, EEPROM_EIRP_MAX_TX_POWER_2GHZ) <
+                                       EIRP_MAX_TX_POWER_LIMIT)
+               __set_bit(CONFIG_SUPPORT_POWER_LIMIT, &rt2x00dev->flags);
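+       /*
+        * Note, inferred from the check above rather than from hardware
+        * documentation: an erased EEPROM reads back 0xff, which is above
+        * the limit, so the capability stays disabled on such boards.
+        */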
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(rt2800_init_eeprom);
@@ -3236,7 +3734,6 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
        char *default_power1;
        char *default_power2;
        unsigned int i;
-       unsigned short max_power;
        u16 eeprom;
 
        /*
@@ -3303,7 +3800,8 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
                   rt2x00_rf(rt2x00dev, RF2020) ||
                   rt2x00_rf(rt2x00dev, RF3021) ||
                   rt2x00_rf(rt2x00dev, RF3022) ||
-                  rt2x00_rf(rt2x00dev, RF3320)) {
+                  rt2x00_rf(rt2x00dev, RF3320) ||
+                  rt2x00_rf(rt2x00dev, RF5390)) {
                spec->num_channels = 14;
                spec->channels = rf_vals_3x;
        } else if (rt2x00_rf(rt2x00dev, RF3052)) {
@@ -3361,26 +3859,21 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 
        spec->channels_info = info;
 
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &eeprom);
-       max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_24GHZ);
        default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
        default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
 
        for (i = 0; i < 14; i++) {
-               info[i].max_power = max_power;
-               info[i].default_power1 = TXPOWER_G_FROM_DEV(default_power1[i]);
-               info[i].default_power2 = TXPOWER_G_FROM_DEV(default_power2[i]);
+               info[i].default_power1 = default_power1[i];
+               info[i].default_power2 = default_power2[i];
        }
 
        if (spec->num_channels > 14) {
-               max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_5GHZ);
                default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
                default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
 
                for (i = 14; i < spec->num_channels; i++) {
-                       info[i].max_power = max_power;
-                       info[i].default_power1 = TXPOWER_A_FROM_DEV(default_power1[i]);
-                       info[i].default_power2 = TXPOWER_A_FROM_DEV(default_power2[i]);
+                       info[i].default_power1 = default_power1[i];
+                       info[i].default_power2 = default_power2[i];
                }
        }
 
@@ -3530,7 +4023,8 @@ EXPORT_SYMBOL_GPL(rt2800_get_tsf);
 
 int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                        enum ieee80211_ampdu_mlme_action action,
-                       struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+                       struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                       u8 buf_size)
 {
        int ret = 0;
 
index e3c995a9dec4b7c7f1afcfaf011c1e568b811696..0c92d86a36f48f902745d9367d125d532f277907 100644 (file)
@@ -156,6 +156,7 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev);
 void rt2800_txdone_entry(struct queue_entry *entry, u32 status);
 
 void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
+void rt2800_clear_beacon(struct queue_entry *entry);
 
 extern const struct rt2x00debug rt2800_rt2x00debug;
 
@@ -198,7 +199,8 @@ int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
 u64 rt2800_get_tsf(struct ieee80211_hw *hw);
 int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                        enum ieee80211_ampdu_mlme_action action,
-                       struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+                       struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                       u8 buf_size);
 int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
                      struct survey_info *survey);
 
index 3b3f1e45ab3e58e7538a3ae02c5a4058caaad29b..38605e9fe427610e12dd4229e3788562dc0f7ab5 100644 (file)
@@ -200,11 +200,22 @@ static void rt2800pci_start_queue(struct data_queue *queue)
                rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
                break;
        case QID_BEACON:
+               /*
+                * Allow beacon tasklets to be scheduled for periodic
+                * beacon updates.
+                */
+               tasklet_enable(&rt2x00dev->tbtt_tasklet);
+               tasklet_enable(&rt2x00dev->pretbtt_tasklet);
+
                rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
                rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+               rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+               rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
+               rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
                break;
        default:
                break;
@@ -250,6 +261,16 @@ static void rt2800pci_stop_queue(struct data_queue *queue)
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
                rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+               rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+               rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
+               rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
+
+               /*
+                * Wait for tbtt tasklets to finish.
+                */
+               tasklet_disable(&rt2x00dev->tbtt_tasklet);
+               tasklet_disable(&rt2x00dev->pretbtt_tasklet);
                break;
        default:
                break;
@@ -397,9 +418,9 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
 static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
                                 enum dev_state state)
 {
-       int mask = (state == STATE_RADIO_IRQ_ON) ||
-                  (state == STATE_RADIO_IRQ_ON_ISR);
+       int mask = (state == STATE_RADIO_IRQ_ON);
        u32 reg;
+       unsigned long flags;
 
        /*
         * When interrupts are being enabled, the interrupt registers
@@ -408,8 +429,17 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
        if (state == STATE_RADIO_IRQ_ON) {
                rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
                rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
+
+               /*
+                * Enable tasklets. The beacon related tasklets are
+                * enabled when the beacon queue is started.
+                */
+               tasklet_enable(&rt2x00dev->txstatus_tasklet);
+               tasklet_enable(&rt2x00dev->rxdone_tasklet);
+               tasklet_enable(&rt2x00dev->autowake_tasklet);
        }
 
+       spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
        rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
        rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
        rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
@@ -430,6 +460,17 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
        rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
        rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
        rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
+       spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+       if (state == STATE_RADIO_IRQ_OFF) {
+               /*
+                * Ensure that all tasklets are finished before
+                * disabling the interrupts.
+                */
+               tasklet_disable(&rt2x00dev->txstatus_tasklet);
+               tasklet_disable(&rt2x00dev->rxdone_tasklet);
+               tasklet_disable(&rt2x00dev->autowake_tasklet);
+       }
 }
 
 static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
@@ -452,6 +493,13 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
        rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
        rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
 
+       if (rt2x00_rt(rt2x00dev, RT5390)) {
+               rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
+               rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
+               rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
+               rt2800_register_write(rt2x00dev, AUX_CTRL, reg);
+       }
+
        rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
 
        rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
@@ -475,39 +523,23 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
 
 static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
 {
-       u32 reg;
-
-       rt2800_disable_radio(rt2x00dev);
-
-       rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280);
-
-       rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
-       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
-       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
-       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
-       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
-       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
-       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
-       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
-       rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
-
-       rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
-       rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
+       if (rt2x00_is_soc(rt2x00dev)) {
+               rt2800_disable_radio(rt2x00dev);
+               rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
+               rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
+       }
 }
 
 static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
                               enum dev_state state)
 {
-       /*
-        * Always put the device to sleep (even when we intend to wakeup!)
-        * if the device is booting and wasn't asleep it will return
-        * failure when attempting to wakeup.
-        */
-       rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0xff, 2);
-
        if (state == STATE_AWAKE) {
-               rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0);
+               rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0x02);
                rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP);
+       } else if (state == STATE_SLEEP) {
+               rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, 0xffffffff);
+               rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, 0xffffffff);
+               rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0x01, 0xff, 0x01);
        }
 
        return 0;
@@ -538,9 +570,7 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt2800pci_set_state(rt2x00dev, STATE_SLEEP);
                break;
        case STATE_RADIO_IRQ_ON:
-       case STATE_RADIO_IRQ_ON_ISR:
        case STATE_RADIO_IRQ_OFF:
-       case STATE_RADIO_IRQ_OFF_ISR:
                rt2800pci_toggle_irq(rt2x00dev, state);
                break;
        case STATE_DEEP_SLEEP:
@@ -732,45 +762,60 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
        }
 }
 
-static void rt2800pci_txstatus_tasklet(unsigned long data)
+static void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+                                      struct rt2x00_field32 irq_field)
 {
-       rt2800pci_txdone((struct rt2x00_dev *)data);
-}
-
-static irqreturn_t rt2800pci_interrupt_thread(int irq, void *dev_instance)
-{
-       struct rt2x00_dev *rt2x00dev = dev_instance;
-       u32 reg = rt2x00dev->irqvalue[0];
+       unsigned long flags;
+       u32 reg;
 
        /*
-        * 1 - Pre TBTT interrupt.
+        * Enable a single interrupt. The interrupt mask register
+        * access needs locking.
         */
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
-               rt2x00lib_pretbtt(rt2x00dev);
+       spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+       rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+       rt2x00_set_field32(&reg, irq_field, 1);
+       rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
+       spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+}
 
-       /*
-        * 2 - Beacondone interrupt.
-        */
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
-               rt2x00lib_beacondone(rt2x00dev);
+static void rt2800pci_txstatus_tasklet(unsigned long data)
+{
+       rt2800pci_txdone((struct rt2x00_dev *)data);
 
        /*
-        * 3 - Rx ring done interrupt.
+        * No need to enable the tx status interrupt here as we always
+        * leave it enabled to minimize the possibility of a tx status
+        * register overflow. See comment in interrupt handler.
         */
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
-               rt2x00pci_rxdone(rt2x00dev);
+}
 
-       /*
-        * 4 - Auto wakeup interrupt.
-        */
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
-               rt2800pci_wakeup(rt2x00dev);
+static void rt2800pci_pretbtt_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2x00lib_pretbtt(rt2x00dev);
+       rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
+}
 
-       /* Enable interrupts again. */
-       rt2x00dev->ops->lib->set_device_state(rt2x00dev,
-                                             STATE_RADIO_IRQ_ON_ISR);
+static void rt2800pci_tbtt_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2x00lib_beacondone(rt2x00dev);
+       rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
+}
 
-       return IRQ_HANDLED;
+static void rt2800pci_rxdone_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2x00pci_rxdone(rt2x00dev);
+       rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
+}
+
+static void rt2800pci_autowake_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2800pci_wakeup(rt2x00dev);
+       rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_AUTO_WAKEUP);
 }
 
 static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
@@ -816,8 +861,8 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
 static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
 {
        struct rt2x00_dev *rt2x00dev = dev_instance;
-       u32 reg;
-       irqreturn_t ret = IRQ_HANDLED;
+       u32 reg, mask;
+       unsigned long flags;
 
        /* Read status and ACK all interrupts */
        rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
@@ -829,38 +874,44 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                return IRQ_HANDLED;
 
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
-               rt2800pci_txstatus_interrupt(rt2x00dev);
+       /*
+        * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
+        * for interrupts and interrupt masks, we can just use the value of
+        * INT_SOURCE_CSR to create the interrupt mask.
+        */
+       mask = ~reg;
 
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT) ||
-           rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT) ||
-           rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE) ||
-           rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP)) {
+       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
+               rt2800pci_txstatus_interrupt(rt2x00dev);
                /*
-                * All other interrupts are handled in the interrupt thread.
-                * Store irqvalue for use in the interrupt thread.
+                * Never disable the TX_FIFO_STATUS interrupt.
                 */
-               rt2x00dev->irqvalue[0] = reg;
+               rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+       }
 
-               /*
-                * Disable interrupts, will be enabled again in the
-                * interrupt thread.
-               */
-               rt2x00dev->ops->lib->set_device_state(rt2x00dev,
-                                                     STATE_RADIO_IRQ_OFF_ISR);
+       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
+               tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);
 
-               /*
-                * Leave the TX_FIFO_STATUS interrupt enabled to not lose any
-                * tx status reports.
-                */
-               rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
-               rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
-               rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
+       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
+               tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
 
-               ret = IRQ_WAKE_THREAD;
-       }
+       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
+               tasklet_schedule(&rt2x00dev->rxdone_tasklet);
 
-       return ret;
+       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
+               tasklet_schedule(&rt2x00dev->autowake_tasklet);
+
+       /*
+        * Disable all interrupts for which a tasklet was scheduled right now;
+        * the tasklets will re-enable the appropriate interrupts.
+        */
+       spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+       rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+       reg &= mask;
+       rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
+       spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+       return IRQ_HANDLED;
 }
 
 /*
@@ -975,8 +1026,11 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
 
 static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
        .irq_handler            = rt2800pci_interrupt,
-       .irq_handler_thread     = rt2800pci_interrupt_thread,
-       .txstatus_tasklet       = rt2800pci_txstatus_tasklet,
+       .txstatus_tasklet       = rt2800pci_txstatus_tasklet,
+       .pretbtt_tasklet        = rt2800pci_pretbtt_tasklet,
+       .tbtt_tasklet           = rt2800pci_tbtt_tasklet,
+       .rxdone_tasklet         = rt2800pci_rxdone_tasklet,
+       .autowake_tasklet       = rt2800pci_autowake_tasklet,
        .probe_hw               = rt2800pci_probe_hw,
        .get_firmware_name      = rt2800pci_get_firmware_name,
        .check_firmware         = rt2800_check_firmware,
@@ -996,6 +1050,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
        .write_tx_desc          = rt2800pci_write_tx_desc,
        .write_tx_data          = rt2800_write_tx_data,
        .write_beacon           = rt2800_write_beacon,
+       .clear_beacon           = rt2800_clear_beacon,
        .fill_rxdone            = rt2800pci_fill_rxdone,
        .config_shared_key      = rt2800_config_shared_key,
        .config_pairwise_key    = rt2800_config_pairwise_key,
@@ -1078,6 +1133,9 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
        { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
        { PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) },
        { PCI_DEVICE(0x1814, 0x3593), PCI_DEVICE_DATA(&rt2800pci_ops) },
+#endif
+#ifdef CONFIG_RT2800PCI_RT53XX
+       { PCI_DEVICE(0x1814, 0x5390), PCI_DEVICE_DATA(&rt2800pci_ops) },
 #endif
        { 0, }
 };
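
The rt2800pci hunks above replace the threaded IRQ handler with per-event tasklets: the hard handler acks INT_SOURCE_CSR, schedules a tasklet for each pending source, masks exactly those sources, and each tasklet re-enables only its own bit under irqmask_lock once the work is done. Below is a minimal, driver-agnostic sketch of that hand-off; the `mydev` context, register offsets, mask polarity and MMIO accessors are illustrative assumptions, not rt2x00 code.

```c
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical device context and register layout; only the fields needed
 * for the ISR/tasklet hand-off are shown. */
struct mydev {
	void __iomem *regs;
	spinlock_t irqmask_lock;	/* protects INT_MASK read-modify-write */
	struct tasklet_struct rxdone_tasklet;
};

#define INT_SOURCE	0x00	/* write-1-to-clear status register */
#define INT_MASK	0x04	/* bit set = interrupt enabled (assumed) */
#define RX_DONE_BIT	BIT(0)

static void mydev_rxdone_tasklet(unsigned long data)
{
	struct mydev *dev = (struct mydev *)data;
	unsigned long flags;
	u32 mask;

	/* ... process the RX ring here ... */

	/* Re-enable only the interrupt this tasklet is responsible for. */
	spin_lock_irqsave(&dev->irqmask_lock, flags);
	mask = readl(dev->regs + INT_MASK);
	writel(mask | RX_DONE_BIT, dev->regs + INT_MASK);
	spin_unlock_irqrestore(&dev->irqmask_lock, flags);
}

static irqreturn_t mydev_interrupt(int irq, void *dev_instance)
{
	struct mydev *dev = dev_instance;
	unsigned long flags;
	u32 reg, mask;

	/* Read and acknowledge all pending sources. */
	reg = readl(dev->regs + INT_SOURCE);
	writel(reg, dev->regs + INT_SOURCE);
	if (!reg)
		return IRQ_NONE;

	if (reg & RX_DONE_BIT)
		tasklet_schedule(&dev->rxdone_tasklet);

	/* Mask every source that was handed to a tasklet; the tasklet
	 * unmasks it again once it has drained the corresponding ring. */
	spin_lock_irqsave(&dev->irqmask_lock, flags);
	mask = readl(dev->regs + INT_MASK);
	writel(mask & ~reg, dev->regs + INT_MASK);
	spin_unlock_irqrestore(&dev->irqmask_lock, flags);

	return IRQ_HANDLED;
}
```

This keeps the hard handler short and lets the deferred work run with interrupts enabled, while the spinlock serializes the mask register against concurrent tasklets.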
index 197a36c05fdaf18a294000b9eae8b5dad2073eea..5d91561e0de7794330d73b7967a4116caba9c151 100644 (file)
@@ -253,9 +253,7 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt2800usb_set_state(rt2x00dev, STATE_SLEEP);
                break;
        case STATE_RADIO_IRQ_ON:
-       case STATE_RADIO_IRQ_ON_ISR:
        case STATE_RADIO_IRQ_OFF:
-       case STATE_RADIO_IRQ_OFF_ISR:
                /* No support, but no error either */
                break;
        case STATE_DEEP_SLEEP:
@@ -639,6 +637,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
        .write_tx_desc          = rt2800usb_write_tx_desc,
        .write_tx_data          = rt2800usb_write_tx_data,
        .write_beacon           = rt2800_write_beacon,
+       .clear_beacon           = rt2800_clear_beacon,
        .get_tx_data_len        = rt2800usb_get_tx_data_len,
        .fill_rxdone            = rt2800usb_fill_rxdone,
        .config_shared_key      = rt2800_config_shared_key,
index 84aaf393da43a06144b838d820e78f3cf866e068..19453d23e90d493d00c816025baef28b582f1e29 100644 (file)
@@ -189,6 +189,7 @@ struct rt2x00_chip {
 #define RT3572         0x3572
 #define RT3593         0x3593  /* PCIe */
 #define RT3883         0x3883  /* WSOC */
+#define RT5390         0x5390  /* 2.4GHz */
 
        u16 rf;
        u16 rev;
@@ -225,6 +226,8 @@ struct channel_info {
 struct antenna_setup {
        enum antenna rx;
        enum antenna tx;
+       u8 rx_chain_num;
+       u8 tx_chain_num;
 };
 
 /*
@@ -368,6 +371,7 @@ struct rt2x00_intf {
         * dedicated beacon entry.
         */
        struct queue_entry *beacon;
+       bool enable_beacon;
 
        /*
         * Actions that needed rescheduling.
@@ -510,15 +514,14 @@ struct rt2x00lib_ops {
         */
        irq_handler_t irq_handler;
 
-       /*
-        * Threaded Interrupt handlers.
-        */
-       irq_handler_t irq_handler_thread;
-
        /*
         * TX status tasklet handler.
         */
        void (*txstatus_tasklet) (unsigned long data);
+       void (*pretbtt_tasklet) (unsigned long data);
+       void (*tbtt_tasklet) (unsigned long data);
+       void (*rxdone_tasklet) (unsigned long data);
+       void (*autowake_tasklet) (unsigned long data);
 
        /*
         * Device init handlers.
@@ -573,6 +576,7 @@ struct rt2x00lib_ops {
                               struct txentry_desc *txdesc);
        void (*write_beacon) (struct queue_entry *entry,
                              struct txentry_desc *txdesc);
+       void (*clear_beacon) (struct queue_entry *entry);
        int (*get_tx_data_len) (struct queue_entry *entry);
 
        /*
@@ -664,6 +668,7 @@ enum rt2x00_flags {
         */
        CONFIG_SUPPORT_HW_BUTTON,
        CONFIG_SUPPORT_HW_CRYPTO,
+       CONFIG_SUPPORT_POWER_LIMIT,
        DRIVER_SUPPORT_CONTROL_FILTERS,
        DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL,
        DRIVER_SUPPORT_PRE_TBTT_INTERRUPT,
@@ -788,10 +793,12 @@ struct rt2x00_dev {
         *  - Open ap interface count.
         *  - Open sta interface count.
         *  - Association count.
+        *  - Beaconing enabled count.
         */
        unsigned int intf_ap_count;
        unsigned int intf_sta_count;
        unsigned int intf_associated;
+       unsigned int intf_beaconing;
 
        /*
         * Link quality
@@ -857,6 +864,13 @@ struct rt2x00_dev {
         */
        struct ieee80211_low_level_stats low_level_stats;
 
+       /**
+        * Work queue for all work which should not be placed
+        * on the mac80211 workqueue (because of dependencies
+        * between various work structures).
+        */
+       struct workqueue_struct *workqueue;
+
        /*
         * Scheduled work.
         * NOTE: intf_work will use ieee80211_iterate_active_interfaces()
@@ -886,12 +900,6 @@ struct rt2x00_dev {
         */
        const struct firmware *fw;
 
-       /*
-        * Interrupt values, stored between interrupt service routine
-        * and interrupt thread routine.
-        */
-       u32 irqvalue[2];
-
        /*
         * FIFO for storing tx status reports between isr and tasklet.
         */
@@ -901,6 +909,15 @@ struct rt2x00_dev {
         * Tasklet for processing tx status reports (rt2800pci).
         */
        struct tasklet_struct txstatus_tasklet;
+       struct tasklet_struct pretbtt_tasklet;
+       struct tasklet_struct tbtt_tasklet;
+       struct tasklet_struct rxdone_tasklet;
+       struct tasklet_struct autowake_tasklet;
+
+       /*
+        * Protect the interrupt mask register.
+        */
+       spinlock_t irqmask_lock;
 };
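
The new `workqueue` member gives each device a private ordered workqueue for work items that must not share mac80211's workqueue because they depend on one another. A rough sketch of the intended lifecycle follows, mirroring the two-argument `alloc_ordered_workqueue()` form used above; the `mydrv` names are hypothetical, only the workqueue calls are stock kernel API.

```c
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Hypothetical driver context; only the work-related fields are shown. */
struct mydrv {
	struct workqueue_struct *wq;
	struct work_struct rxdone_work;
};

static void mydrv_rxdone_work(struct work_struct *work)
{
	struct mydrv *drv = container_of(work, struct mydrv, rxdone_work);

	/* ... process completed RX transfers for 'drv' ... */
	(void)drv;
}

/* probe: one ordered (single-threaded) queue per device, so the driver's
 * work items never run concurrently with each other. */
static int mydrv_init_work(struct mydrv *drv, const char *name)
{
	drv->wq = alloc_ordered_workqueue(name, 0);
	if (!drv->wq)
		return -ENOMEM;

	INIT_WORK(&drv->rxdone_work, mydrv_rxdone_work);
	return 0;
}

/* completion path: queue on the private workqueue, not mac80211's */
static void mydrv_rx_complete(struct mydrv *drv)
{
	queue_work(drv->wq, &drv->rxdone_work);
}

/* remove: flush pending work, then tear the queue down */
static void mydrv_fini_work(struct mydrv *drv)
{
	cancel_work_sync(&drv->rxdone_work);
	destroy_workqueue(drv->wq);
}
```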
 
 /*
@@ -1168,7 +1185,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry);
 /*
  * mac80211 handlers.
  */
-int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
 int rt2x00mac_start(struct ieee80211_hw *hw);
 void rt2x00mac_stop(struct ieee80211_hw *hw);
 int rt2x00mac_add_interface(struct ieee80211_hw *hw,
index 9597a03242cceb7bbed7fd8b0d847741c2450097..9de9dbe943998e1790a8d514157eea8028410238 100644 (file)
@@ -121,7 +121,7 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
                return;
 
        if (test_and_clear_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags))
-               rt2x00queue_update_beacon(rt2x00dev, vif, true);
+               rt2x00queue_update_beacon(rt2x00dev, vif);
 }
 
 static void rt2x00lib_intf_scheduled(struct work_struct *work)
@@ -174,7 +174,13 @@ static void rt2x00lib_beaconupdate_iter(void *data, u8 *mac,
            vif->type != NL80211_IFTYPE_WDS)
                return;
 
-       rt2x00queue_update_beacon(rt2x00dev, vif, true);
+       /*
+        * Update the beacon without locking. This is safe on PCI devices
+        * as they only update the beacon periodically here. This should
+        * never be called for USB devices.
+        */
+       WARN_ON(rt2x00_is_usb(rt2x00dev));
+       rt2x00queue_update_beacon_locked(rt2x00dev, vif);
 }
 
 void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
@@ -183,9 +189,9 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
                return;
 
        /* send buffered bc/mc frames out for every bssid */
-       ieee80211_iterate_active_interfaces(rt2x00dev->hw,
-                                           rt2x00lib_bc_buffer_iter,
-                                           rt2x00dev);
+       ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
+                                                  rt2x00lib_bc_buffer_iter,
+                                                  rt2x00dev);
        /*
         * Devices with pre tbtt interrupt don't need to update the beacon
         * here as they will fetch the next beacon directly prior to
@@ -195,9 +201,9 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
                return;
 
        /* fetch next beacon */
-       ieee80211_iterate_active_interfaces(rt2x00dev->hw,
-                                           rt2x00lib_beaconupdate_iter,
-                                           rt2x00dev);
+       ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
+                                                  rt2x00lib_beaconupdate_iter,
+                                                  rt2x00dev);
 }
 EXPORT_SYMBOL_GPL(rt2x00lib_beacondone);
 
@@ -207,9 +213,9 @@ void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev)
                return;
 
        /* fetch next beacon */
-       ieee80211_iterate_active_interfaces(rt2x00dev->hw,
-                                           rt2x00lib_beaconupdate_iter,
-                                           rt2x00dev);
+       ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
+                                                  rt2x00lib_beaconupdate_iter,
+                                                  rt2x00dev);
 }
 EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt);
 
@@ -649,7 +655,10 @@ static void rt2x00lib_channel(struct ieee80211_channel *entry,
                              const int channel, const int tx_power,
                              const int value)
 {
-       entry->center_freq = ieee80211_channel_to_frequency(channel);
+       /* XXX: this assumption about the band is wrong for 802.11j */
+       entry->band = channel <= 14 ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+       entry->center_freq = ieee80211_channel_to_frequency(channel,
+                                                           entry->band);
        entry->hw_value = value;
        entry->max_power = tx_power;
        entry->max_antenna_gain = 0xff;
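
For reference, a rough sketch of the band-aware mapping that `ieee80211_channel_to_frequency()` performs for the two bands used here (802.11j channels are not handled, matching the XXX note above). The helper function below is an illustration of the conversion, not a substitute for the mac80211 call.

```c
/* Returns the centre frequency in MHz, or 0 for an invalid channel. */
static int chan_to_freq_mhz(int chan, int is_5ghz_band)
{
	if (!is_5ghz_band) {
		if (chan == 14)
			return 2484;
		if (chan >= 1 && chan < 14)
			return 2407 + chan * 5;
		return 0;
	}

	/* 4.9 GHz public-safety channels sit below the regular 5 GHz block */
	if (chan >= 182 && chan <= 196)
		return 4000 + chan * 5;

	return 5000 + chan * 5;
}
```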
@@ -812,15 +821,29 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
                                     GFP_KERNEL);
                if (status)
                        return status;
+       }
 
-               /* tasklet for processing the tx status reports. */
-               if (rt2x00dev->ops->lib->txstatus_tasklet)
-                       tasklet_init(&rt2x00dev->txstatus_tasklet,
-                                    rt2x00dev->ops->lib->txstatus_tasklet,
-                                    (unsigned long)rt2x00dev);
-
+       /*
+        * Initialize tasklets if used by the driver. Tasklets are
+        * disabled until the interrupts are turned on; the driver
+        * is responsible for enabling them again at that point.
+        */
+#define RT2X00_TASKLET_INIT(taskletname) \
+       if (rt2x00dev->ops->lib->taskletname) { \
+               tasklet_init(&rt2x00dev->taskletname, \
+                            rt2x00dev->ops->lib->taskletname, \
+                            (unsigned long)rt2x00dev); \
+               tasklet_disable(&rt2x00dev->taskletname); \
        }
 
+       RT2X00_TASKLET_INIT(txstatus_tasklet);
+       RT2X00_TASKLET_INIT(pretbtt_tasklet);
+       RT2X00_TASKLET_INIT(tbtt_tasklet);
+       RT2X00_TASKLET_INIT(rxdone_tasklet);
+       RT2X00_TASKLET_INIT(autowake_tasklet);
+
+#undef RT2X00_TASKLET_INIT
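
RT2X00_TASKLET_INIT above creates each tasklet in the disabled state; the driver's STATE_RADIO_IRQ_ON/OFF paths then enable and disable them, and rt2x00lib_remove_dev() kills them. A compressed sketch of that ordering with a hypothetical tasklet is shown below; the tasklet calls are the standard kernel API, the surrounding functions only illustrate when each call is expected to happen.

```c
#include <linux/interrupt.h>

/* Hypothetical bottom half; 'data' would carry the device pointer. */
static void rxdone_tasklet_fn(unsigned long data)
{
	/* process the RX ring for the device passed via 'data' */
}

/* probe: create the tasklet, but keep it from running until IRQs are on */
static void example_probe(struct tasklet_struct *t, unsigned long dev)
{
	tasklet_init(t, rxdone_tasklet_fn, dev);
	tasklet_disable(t);
}

/* STATE_RADIO_IRQ_ON: allow the tasklet to run again */
static void example_irq_on(struct tasklet_struct *t)
{
	tasklet_enable(t);
}

/* STATE_RADIO_IRQ_OFF: block the tasklet and wait for a running instance */
static void example_irq_off(struct tasklet_struct *t)
{
	tasklet_disable(t);
}

/* remove: make sure the tasklet is gone before the device memory is freed */
static void example_remove(struct tasklet_struct *t)
{
	tasklet_kill(t);
}
```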
+
        /*
         * Register HW.
         */
@@ -949,6 +972,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
 {
        int retval = -ENOMEM;
 
+       spin_lock_init(&rt2x00dev->irqmask_lock);
        mutex_init(&rt2x00dev->csr_mutex);
 
        set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
@@ -973,8 +997,15 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
                    BIT(NL80211_IFTYPE_WDS);
 
        /*
-        * Initialize configuration work.
+        * Initialize work.
         */
+       rt2x00dev->workqueue =
+           alloc_ordered_workqueue(wiphy_name(rt2x00dev->hw->wiphy), 0);
+       if (!rt2x00dev->workqueue) {
+               retval = -ENOMEM;
+               goto exit;
+       }
+
        INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
 
        /*
@@ -1033,6 +1064,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
        cancel_work_sync(&rt2x00dev->intf_work);
        cancel_work_sync(&rt2x00dev->rxdone_work);
        cancel_work_sync(&rt2x00dev->txdone_work);
+       destroy_workqueue(rt2x00dev->workqueue);
 
        /*
         * Free the tx status fifo.
@@ -1043,6 +1075,10 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
         * Kill the tx status tasklet.
         */
        tasklet_kill(&rt2x00dev->txstatus_tasklet);
+       tasklet_kill(&rt2x00dev->pretbtt_tasklet);
+       tasklet_kill(&rt2x00dev->tbtt_tasklet);
+       tasklet_kill(&rt2x00dev->rxdone_tasklet);
+       tasklet_kill(&rt2x00dev->autowake_tasklet);
 
        /*
         * Uninitialize device.
index b7ad46ecaa1dd0119d21246951cd57c6f7a3889c..03d9579da6815d209abba9a5dfaecd71e87f61d5 100644 (file)
@@ -69,7 +69,6 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
                        txdesc->mcs |= 0x08;
        }
 
-
        /*
         * This frame is eligible for an AMPDU, however, don't aggregate
         * frames that are intended to probe a specific tx rate.
index a105c500627bd746f04206c50f8614821ffe59a9..2d94cbaf5f4aac1c52f9049c70c6b5d06f88b899 100644 (file)
@@ -157,14 +157,30 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
                               bool local);
 
 /**
- * rt2x00queue_update_beacon - Send new beacon from mac80211 to hardware
+ * rt2x00queue_update_beacon - Send new beacon from mac80211
+ *     to hardware. Handles locking by itself (mutex).
  * @rt2x00dev: Pointer to &struct rt2x00_dev.
  * @vif: Interface for which the beacon should be updated.
- * @enable_beacon: Enable beaconing
  */
 int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
-                             struct ieee80211_vif *vif,
-                             const bool enable_beacon);
+                             struct ieee80211_vif *vif);
+
+/**
+ * rt2x00queue_update_beacon_locked - Send new beacon from mac80211
+ *     to hardware. Caller needs to ensure locking.
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ * @vif: Interface for which the beacon should be updated.
+ */
+int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
+                                    struct ieee80211_vif *vif);
+
+/**
+ * rt2x00queue_clear_beacon - Clear beacon in hardware
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ * @vif: Interface for which the beacon should be updated.
+ */
+int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
+                            struct ieee80211_vif *vif);
 
 /**
  * rt2x00queue_index_inc - Index incrementation function
index bfda60eaf4efc6b888091f7e7976cd7a2c4d0267..c975b0a12e950a5bfc3764ecec16fb6478580b18 100644 (file)
@@ -417,7 +417,8 @@ void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev)
            !test_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags))
                return;
 
-       schedule_delayed_work(&link->watchdog_work, WATCHDOG_INTERVAL);
+       ieee80211_queue_delayed_work(rt2x00dev->hw,
+                                    &link->watchdog_work, WATCHDOG_INTERVAL);
 }
 
 void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev)
@@ -441,7 +442,9 @@ static void rt2x00link_watchdog(struct work_struct *work)
        rt2x00dev->ops->lib->watchdog(rt2x00dev);
 
        if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
-               schedule_delayed_work(&link->watchdog_work, WATCHDOG_INTERVAL);
+               ieee80211_queue_delayed_work(rt2x00dev->hw,
+                                            &link->watchdog_work,
+                                            WATCHDOG_INTERVAL);
 }
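
The watchdog now rearms itself through ieee80211_queue_delayed_work() so it runs on mac80211's workqueue and is flushed together with the rest of the stack. A self-rearming sketch with hypothetical `mywatch` names; the mac80211 and timer calls are real API.

```c
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <net/mac80211.h>

/* Hypothetical watchdog context, for illustration only. */
struct mywatch {
	struct ieee80211_hw *hw;
	struct delayed_work work;
};

#define MYWATCH_INTERVAL	round_jiffies_relative(HZ)

static void mywatch_fn(struct work_struct *work)
{
	struct mywatch *w = container_of(work, struct mywatch, work.work);

	/* ... check queues, kick stuck hardware ... */

	/* Reschedule on mac80211's workqueue for the next interval. */
	ieee80211_queue_delayed_work(w->hw, &w->work, MYWATCH_INTERVAL);
}
```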
 
 void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
index f3da051df39e1a794ba376212e47f81974d733a3..c2c35838c2f390cddc9b5e73d35d251f2ce3369e 100644 (file)
@@ -99,7 +99,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
        return retval;
 }
 
-int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct rt2x00_dev *rt2x00dev = hw->priv;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -139,9 +139,9 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
         * either RTS or CTS-to-self frame and handles everything
         * inside the hardware.
         */
-       if ((tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
-                                               IEEE80211_TX_RC_USE_CTS_PROTECT)) &&
-           !rt2x00dev->ops->hw->set_rts_threshold) {
+       if (!rt2x00dev->ops->hw->set_rts_threshold &&
+           (tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
+                                               IEEE80211_TX_RC_USE_CTS_PROTECT))) {
                if (rt2x00queue_available(queue) <= 1)
                        goto exit_fail;
 
@@ -155,12 +155,11 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        if (rt2x00queue_threshold(queue))
                rt2x00queue_pause_queue(queue);
 
-       return NETDEV_TX_OK;
+       return;
 
  exit_fail:
        ieee80211_stop_queue(rt2x00dev->hw, qid);
        dev_kfree_skb_any(skb);
-       return NETDEV_TX_OK;
 }
 EXPORT_SYMBOL_GPL(rt2x00mac_tx);
 
@@ -617,11 +616,47 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
                                      bss_conf->bssid);
 
        /*
-        * Update the beacon.
+        * Update the beacon. This is only required on USB devices. PCI
+        * devices fetch beacons periodically.
         */
-       if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED))
-               rt2x00queue_update_beacon(rt2x00dev, vif,
-                                         bss_conf->enable_beacon);
+       if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev))
+               rt2x00queue_update_beacon(rt2x00dev, vif);
+
+       /*
+        * Start/stop beaconing.
+        */
+       if (changes & BSS_CHANGED_BEACON_ENABLED) {
+               if (!bss_conf->enable_beacon && intf->enable_beacon) {
+                       rt2x00queue_clear_beacon(rt2x00dev, vif);
+                       rt2x00dev->intf_beaconing--;
+                       intf->enable_beacon = false;
+
+                       if (rt2x00dev->intf_beaconing == 0) {
+                               /*
+                                * Last beaconing interface disabled
+                                * -> stop beacon queue.
+                                */
+                               mutex_lock(&intf->beacon_skb_mutex);
+                               rt2x00queue_stop_queue(rt2x00dev->bcn);
+                               mutex_unlock(&intf->beacon_skb_mutex);
+                       }
+
+               } else if (bss_conf->enable_beacon && !intf->enable_beacon) {
+                       rt2x00dev->intf_beaconing++;
+                       intf->enable_beacon = true;
+
+                       if (rt2x00dev->intf_beaconing == 1) {
+                               /*
+                                * First beaconing interface enabled
+                                * -> start beacon queue.
+                                */
+                               mutex_lock(&intf->beacon_skb_mutex);
+                               rt2x00queue_start_queue(rt2x00dev->bcn);
+                               mutex_unlock(&intf->beacon_skb_mutex);
+                       }
+               }
+       }
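
The block above keeps a per-device count of beaconing interfaces so the beacon queue is started exactly once, by the first interface that enables beaconing, and stopped by the last one that disables it. A stripped-down sketch of that counting scheme with hypothetical names; the real code protects the queue start/stop with beacon_skb_mutex as shown above.

```c
#include <linux/mutex.h>
#include <linux/types.h>

/* Hypothetical hardware hooks standing in for starting/stopping the
 * beacon queue. */
static void hw_start_beaconing(void) { }
static void hw_stop_beaconing(void) { }

struct beacon_state {
	struct mutex lock;		/* protects counter and hw state      */
	unsigned int beaconing_intfs;	/* interfaces with beaconing enabled */
};

static void set_beaconing(struct beacon_state *bs, bool enable)
{
	mutex_lock(&bs->lock);
	if (enable) {
		if (bs->beaconing_intfs++ == 0)
			hw_start_beaconing();	/* first user: start queue */
	} else if (bs->beaconing_intfs) {
		if (--bs->beaconing_intfs == 0)
			hw_stop_beaconing();	/* last user: stop queue   */
	}
	mutex_unlock(&bs->lock);
}
```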
 
        /*
         * When the association status has changed we must reset the link
index ace0b668c04ea01f42a5fdaa140c204fd5339667..4dd82b0b05201bf63888cbe4412f7fdcb3bbfc9b 100644 (file)
@@ -160,10 +160,9 @@ int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
        /*
         * Register interrupt handler.
         */
-       status = request_threaded_irq(rt2x00dev->irq,
-                                     rt2x00dev->ops->lib->irq_handler,
-                                     rt2x00dev->ops->lib->irq_handler_thread,
-                                     IRQF_SHARED, rt2x00dev->name, rt2x00dev);
+       status = request_irq(rt2x00dev->irq,
+                            rt2x00dev->ops->lib->irq_handler,
+                            IRQF_SHARED, rt2x00dev->name, rt2x00dev);
        if (status) {
                ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
                      rt2x00dev->irq, status);
index ca82b3a91697b439722d995b2d3d3384a672abb5..bf9bba3562806f8ed9b52cae9985f2d515afd77c 100644 (file)
@@ -365,13 +365,10 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
 
        /*
         * Beacons and probe responses require the tsf timestamp
-        * to be inserted into the frame, except for a frame that has been injected
-        * through a monitor interface. This latter is needed for testing a
-        * monitor interface.
+        * to be inserted into the frame.
         */
-       if ((ieee80211_is_beacon(hdr->frame_control) ||
-           ieee80211_is_probe_resp(hdr->frame_control)) &&
-           (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
+       if (ieee80211_is_beacon(hdr->frame_control) ||
+           ieee80211_is_probe_resp(hdr->frame_control))
                __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
 
        /*
@@ -566,13 +563,10 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
        return 0;
 }
 
-int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
-                             struct ieee80211_vif *vif,
-                             const bool enable_beacon)
+int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
+                            struct ieee80211_vif *vif)
 {
        struct rt2x00_intf *intf = vif_to_intf(vif);
-       struct skb_frame_desc *skbdesc;
-       struct txentry_desc txdesc;
 
        if (unlikely(!intf->beacon))
                return -ENOBUFS;
@@ -584,17 +578,36 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
         */
        rt2x00queue_free_skb(intf->beacon);
 
-       if (!enable_beacon) {
-               rt2x00queue_stop_queue(intf->beacon->queue);
-               mutex_unlock(&intf->beacon_skb_mutex);
-               return 0;
-       }
+       /*
+        * Clear beacon (single bssid devices don't need to clear the beacon
+        * since the beacon queue will get stopped anyway).
+        */
+       if (rt2x00dev->ops->lib->clear_beacon)
+               rt2x00dev->ops->lib->clear_beacon(intf->beacon);
+
+       mutex_unlock(&intf->beacon_skb_mutex);
+
+       return 0;
+}
+
+int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
+                                    struct ieee80211_vif *vif)
+{
+       struct rt2x00_intf *intf = vif_to_intf(vif);
+       struct skb_frame_desc *skbdesc;
+       struct txentry_desc txdesc;
+
+       if (unlikely(!intf->beacon))
+               return -ENOBUFS;
+
+       /*
+        * Clean up the beacon skb.
+        */
+       rt2x00queue_free_skb(intf->beacon);
 
        intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
-       if (!intf->beacon->skb) {
-               mutex_unlock(&intf->beacon_skb_mutex);
+       if (!intf->beacon->skb)
                return -ENOMEM;
-       }
 
        /*
         * Copy all TX descriptor information into txdesc,
@@ -611,13 +624,25 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
        skbdesc->entry = intf->beacon;
 
        /*
-        * Send beacon to hardware and enable beacon genaration..
+        * Send beacon to hardware.
         */
        rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);
 
+       return 0;
+}
+
+int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
+                             struct ieee80211_vif *vif)
+{
+       struct rt2x00_intf *intf = vif_to_intf(vif);
+       int ret;
+
+       mutex_lock(&intf->beacon_skb_mutex);
+       ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
        mutex_unlock(&intf->beacon_skb_mutex);
 
-       return 0;
+       return ret;
 }
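
The update path is now split into a `_locked` worker and a thin wrapper that takes beacon_skb_mutex itself, so callers that already hold the mutex (the pre-TBTT/tbtt paths) can reuse the worker directly. The generic shape of that split, with hypothetical names:

```c
#include <linux/mutex.h>

struct beacon_ctx {
	struct mutex lock;
};

static int beacon_update_locked(struct beacon_ctx *ctx)
{
	/* caller holds ctx->lock; rebuild and upload the beacon here */
	return 0;
}

static int beacon_update(struct beacon_ctx *ctx)
{
	int ret;

	mutex_lock(&ctx->lock);
	ret = beacon_update_locked(ctx);
	mutex_unlock(&ctx->lock);

	return ret;
}
```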
 
 void rt2x00queue_for_each_entry(struct data_queue *queue,
@@ -885,7 +910,7 @@ void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
         * The queue flush has failed...
         */
        if (unlikely(!rt2x00queue_empty(queue)))
-               WARNING(queue->rt2x00dev, "Queue %d failed to flush", queue->qid);
+               WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);
 
        /*
         * Restore the queue to the previous status
index e8259ae48ced96504e1157dfa08c0dd94772d82b..6f867eec49cce734c7c01ab149423bd8a3f77222 100644 (file)
@@ -85,8 +85,6 @@ enum dev_state {
        STATE_RADIO_OFF,
        STATE_RADIO_IRQ_ON,
        STATE_RADIO_IRQ_OFF,
-       STATE_RADIO_IRQ_ON_ISR,
-       STATE_RADIO_IRQ_OFF_ISR,
 };
 
 /*
index 1a9937d5aff65e923498cc31208fd1e8b42e3168..fbe735f5b352319198a3fae5be0560b9e8b5b429 100644 (file)
@@ -227,7 +227,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
         * Schedule the delayed work for reading the TX status
         * from the device.
         */
-       ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work);
+       queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
 }
 
 static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
@@ -320,7 +320,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
         * Schedule the delayed work for reading the RX status
         * from the device.
         */
-       ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work);
+       queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
 }
 
 static void rt2x00usb_kick_rx_entry(struct queue_entry *entry)
@@ -429,7 +429,7 @@ void rt2x00usb_flush_queue(struct data_queue *queue)
                 * Schedule the completion handler manually, when this
                 * worker function runs, it should cleanup the queue.
                 */
-               ieee80211_queue_work(queue->rt2x00dev->hw, completion);
+               queue_work(queue->rt2x00dev->workqueue, completion);
 
                /*
                 * Wait for a little while to give the driver
@@ -453,7 +453,7 @@ static void rt2x00usb_watchdog_tx_status(struct data_queue *queue)
        WARNING(queue->rt2x00dev, "TX queue %d status timed out,"
                " invoke forced tx handler\n", queue->qid);
 
-       ieee80211_queue_work(queue->rt2x00dev->hw, &queue->rt2x00dev->txdone_work);
+       queue_work(queue->rt2x00dev->workqueue, &queue->rt2x00dev->txdone_work);
 }
 
 void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
index 8de44dd401e0f80927dca2e992fed8952fd79caf..927a4a3e0eeb40b1aef2e8fd1afb154be2436ed1 100644 (file)
@@ -551,26 +551,14 @@ static void rt61pci_config_intf(struct rt2x00_dev *rt2x00dev,
                                struct rt2x00intf_conf *conf,
                                const unsigned int flags)
 {
-       unsigned int beacon_base;
        u32 reg;
 
        if (flags & CONFIG_UPDATE_TYPE) {
-               /*
-                * Clear current synchronisation setup.
-                * For the Beacon base registers, we only need to clear
-                * the first byte since that byte contains the VALID and OWNER
-                * bits which (when set to 0) will invalidate the entire beacon.
-                */
-               beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
-               rt2x00pci_register_write(rt2x00dev, beacon_base, 0);
-
                /*
                 * Enable synchronisation.
                 */
                rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
-               rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
                rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync);
-               rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
                rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
        }
 
@@ -1154,6 +1142,11 @@ static void rt61pci_start_queue(struct data_queue *queue)
                rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
                break;
        case QID_BEACON:
+               /*
+                * Allow the tbtt tasklet to be scheduled.
+                */
+               tasklet_enable(&rt2x00dev->tbtt_tasklet);
+
                rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
                rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
                rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
@@ -1233,6 +1226,11 @@ static void rt61pci_stop_queue(struct data_queue *queue)
                rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
                rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
                rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
+
+               /*
+                * Wait for a possibly running tbtt tasklet to finish.
+                */
+               tasklet_disable(&rt2x00dev->tbtt_tasklet);
                break;
        default:
                break;
@@ -1719,9 +1717,9 @@ static int rt61pci_init_bbp(struct rt2x00_dev *rt2x00dev)
 static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
                               enum dev_state state)
 {
-       int mask = (state == STATE_RADIO_IRQ_OFF) ||
-                  (state == STATE_RADIO_IRQ_OFF_ISR);
+       int mask = (state == STATE_RADIO_IRQ_OFF);
        u32 reg;
+       unsigned long flags;
 
        /*
         * When interrupts are being enabled, the interrupt registers
@@ -1733,12 +1731,21 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
 
                rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, &reg);
                rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg);
+
+               /*
+                * Enable tasklets.
+                */
+               tasklet_enable(&rt2x00dev->txstatus_tasklet);
+               tasklet_enable(&rt2x00dev->rxdone_tasklet);
+               tasklet_enable(&rt2x00dev->autowake_tasklet);
        }
 
        /*
         * Only toggle the interrupts bits we are going to use.
         * Non-checked interrupt bits are disabled by default.
         */
+       spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
        rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
        rt2x00_set_field32(&reg, INT_MASK_CSR_TXDONE, mask);
        rt2x00_set_field32(&reg, INT_MASK_CSR_RXDONE, mask);
@@ -1758,6 +1765,17 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
        rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_7, mask);
        rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_TWAKEUP, mask);
        rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
+
+       spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+       if (state == STATE_RADIO_IRQ_OFF) {
+               /*
+                * Ensure that all tasklets are finished.
+                */
+               tasklet_disable(&rt2x00dev->txstatus_tasklet);
+               tasklet_disable(&rt2x00dev->rxdone_tasklet);
+               tasklet_disable(&rt2x00dev->autowake_tasklet);
+       }
 }
 
 static int rt61pci_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -1833,9 +1851,7 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt61pci_disable_radio(rt2x00dev);
                break;
        case STATE_RADIO_IRQ_ON:
-       case STATE_RADIO_IRQ_ON_ISR:
        case STATE_RADIO_IRQ_OFF:
-       case STATE_RADIO_IRQ_OFF_ISR:
                rt61pci_toggle_irq(rt2x00dev, state);
                break;
        case STATE_DEEP_SLEEP:
@@ -1962,13 +1978,14 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
        struct queue_entry_priv_pci *entry_priv = entry->priv_data;
        unsigned int beacon_base;
        unsigned int padding_len;
-       u32 reg;
+       u32 orig_reg, reg;
 
        /*
         * Disable beaconing while we are reloading the beacon data,
         * otherwise we might be sending out invalid data.
         */
        rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
+       orig_reg = reg;
        rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
        rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
 
@@ -1986,7 +2003,14 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
         * Write entire beacon with descriptor and padding to register.
         */
        padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
-       skb_pad(entry->skb, padding_len);
+       if (padding_len && skb_pad(entry->skb, padding_len)) {
+               ERROR(rt2x00dev, "Failure padding beacon, aborting\n");
+               /* skb freed by skb_pad() on failure */
+               entry->skb = NULL;
+               rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
+               return;
+       }
+
        beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
        rt2x00pci_register_multiwrite(rt2x00dev, beacon_base,
                                      entry_priv->desc, TXINFO_SIZE);
@@ -2002,8 +2026,6 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
         */
        rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
 
-       rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
-       rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
        rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
        rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
 
@@ -2014,6 +2036,32 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
        entry->skb = NULL;
 }
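
The padding fix above relies on the skb_pad() contract: it returns 0 on success and, on failure, frees the skb itself before returning an error, so the caller must only clear its own reference and must not touch the skb again. A hedged helper sketch illustrating that contract; the function is hypothetical, not driver API.

```c
#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Pad a frame to a 4-byte boundary the way the beacon paths above do. */
static int pad_to_multiple_of_4(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	unsigned int padding = roundup(skb->len, 4) - skb->len;
	int ret;

	if (!padding)
		return 0;

	ret = skb_pad(skb, padding);
	if (ret) {
		/* skb_pad() has already freed the skb on failure */
		*pskb = NULL;
		return ret;
	}

	return 0;
}
```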
 
+static void rt61pci_clear_beacon(struct queue_entry *entry)
+{
+       struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+       u32 reg;
+
+       /*
+        * Disable beaconing while we are reloading the beacon data,
+        * otherwise we might be sending out invalid data.
+        */
+       rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
+       rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
+       rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
+
+       /*
+        * Clear beacon.
+        */
+       rt2x00pci_register_write(rt2x00dev,
+                                HW_BEACON_OFFSET(entry->entry_idx), 0);
+
+       /*
+        * Enable beaconing again.
+        */
+       rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
+       rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
+}
+
 /*
  * RX control handlers
  */
@@ -2078,9 +2126,8 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry,
                rxdesc->flags |= RX_FLAG_IV_STRIPPED;
 
                /*
-                * FIXME: Legacy driver indicates that the frame does
-                * contain the Michael Mic. Unfortunately, in rt2x00
-                * the MIC seems to be missing completely...
+                * The hardware has already checked the Michael Mic and has
+                * stripped it from the frame. Signal this to mac80211.
                 */
                rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
 
@@ -2211,61 +2258,80 @@ static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
        rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
 }
 
-static irqreturn_t rt61pci_interrupt_thread(int irq, void *dev_instance)
+static void rt61pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+                                    struct rt2x00_field32 irq_field)
 {
-       struct rt2x00_dev *rt2x00dev = dev_instance;
-       u32 reg = rt2x00dev->irqvalue[0];
-       u32 reg_mcu = rt2x00dev->irqvalue[1];
+       unsigned long flags;
+       u32 reg;
 
        /*
-        * Handle interrupts, walk through all bits
-        * and run the tasks, the bits are checked in order of
-        * priority.
+        * Enable a single interrupt. The interrupt mask register
+        * access needs locking.
         */
+       spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
 
-       /*
-        * 1 - Rx ring done interrupt.
-        */
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RXDONE))
-               rt2x00pci_rxdone(rt2x00dev);
+       rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+       rt2x00_set_field32(&reg, irq_field, 0);
+       rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
 
-       /*
-        * 2 - Tx ring done interrupt.
-        */
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TXDONE))
-               rt61pci_txdone(rt2x00dev);
+       spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+}
 
-       /*
-        * 3 - Handle MCU command done.
-        */
-       if (reg_mcu)
-               rt2x00pci_register_write(rt2x00dev,
-                                        M2H_CMD_DONE_CSR, 0xffffffff);
+static void rt61pci_enable_mcu_interrupt(struct rt2x00_dev *rt2x00dev,
+                                        struct rt2x00_field32 irq_field)
+{
+       unsigned long flags;
+       u32 reg;
 
        /*
-        * 4 - MCU Autowakeup interrupt.
+        * Enable a single MCU interrupt. The interrupt mask register
+        * access needs locking.
         */
-       if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP))
-               rt61pci_wakeup(rt2x00dev);
+       spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
 
-       /*
-        * 5 - Beacon done interrupt.
-        */
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_BEACON_DONE))
-               rt2x00lib_beacondone(rt2x00dev);
+       rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
+       rt2x00_set_field32(&reg, irq_field, 0);
+       rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
 
-       /* Enable interrupts again. */
-       rt2x00dev->ops->lib->set_device_state(rt2x00dev,
-                                             STATE_RADIO_IRQ_ON_ISR);
-       return IRQ_HANDLED;
+       spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+}
+
+static void rt61pci_txstatus_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt61pci_txdone(rt2x00dev);
+       rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TXDONE);
+}
+
+static void rt61pci_tbtt_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2x00lib_beacondone(rt2x00dev);
+       rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_BEACON_DONE);
+}
+
+static void rt61pci_rxdone_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2x00pci_rxdone(rt2x00dev);
+       rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RXDONE);
 }
 
+static void rt61pci_autowake_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt61pci_wakeup(rt2x00dev);
+       rt2x00pci_register_write(rt2x00dev,
+                                M2H_CMD_DONE_CSR, 0xffffffff);
+       rt61pci_enable_mcu_interrupt(rt2x00dev, MCU_INT_MASK_CSR_TWAKEUP);
+}
 
 static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
 {
        struct rt2x00_dev *rt2x00dev = dev_instance;
-       u32 reg_mcu;
-       u32 reg;
+       u32 reg_mcu, mask_mcu;
+       u32 reg, mask;
+       unsigned long flags;
 
        /*
         * Get the interrupt sources & saved to local variable.
@@ -2283,14 +2349,46 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                return IRQ_HANDLED;
 
-       /* Store irqvalues for use in the interrupt thread. */
-       rt2x00dev->irqvalue[0] = reg;
-       rt2x00dev->irqvalue[1] = reg_mcu;
+       /*
+        * Schedule tasklets for interrupt handling.
+        */
+       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RXDONE))
+               tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+
+       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TXDONE))
+               tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+
+       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_BEACON_DONE))
+               tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
+
+       if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP))
+               tasklet_schedule(&rt2x00dev->autowake_tasklet);
+
+       /*
+        * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
+        * for interrupts and interrupt masks, we can just use the value of
+        * INT_SOURCE_CSR to create the interrupt mask.
+        */
+       mask = reg;
+       mask_mcu = reg_mcu;
 
-       /* Disable interrupts, will be enabled again in the interrupt thread. */
-       rt2x00dev->ops->lib->set_device_state(rt2x00dev,
-                                             STATE_RADIO_IRQ_OFF_ISR);
-       return IRQ_WAKE_THREAD;
+       /*
+        * Disable all interrupts for which a tasklet was scheduled right now;
+        * the tasklets will re-enable the appropriate interrupts.
+        */
+       spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
+       rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+       reg |= mask;
+       rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
+
+       rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
+       reg |= mask_mcu;
+       rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
+
+       spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+       return IRQ_HANDLED;
 }
 
 /*
@@ -2884,7 +2982,10 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
 
 static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
        .irq_handler            = rt61pci_interrupt,
-       .irq_handler_thread     = rt61pci_interrupt_thread,
+       .txstatus_tasklet       = rt61pci_txstatus_tasklet,
+       .tbtt_tasklet           = rt61pci_tbtt_tasklet,
+       .rxdone_tasklet         = rt61pci_rxdone_tasklet,
+       .autowake_tasklet       = rt61pci_autowake_tasklet,
        .probe_hw               = rt61pci_probe_hw,
        .get_firmware_name      = rt61pci_get_firmware_name,
        .check_firmware         = rt61pci_check_firmware,
@@ -2903,6 +3004,7 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
        .stop_queue             = rt61pci_stop_queue,
        .write_tx_desc          = rt61pci_write_tx_desc,
        .write_beacon           = rt61pci_write_beacon,
+       .clear_beacon           = rt61pci_clear_beacon,
        .fill_rxdone            = rt61pci_fill_rxdone,
        .config_shared_key      = rt61pci_config_shared_key,
        .config_pairwise_key    = rt61pci_config_pairwise_key,
index 029be3c6c030971f6aec1d9326e43185a8b6479a..6e9981a1dd7f2fb0675d69d850418d57e73e2087 100644 (file)
@@ -502,26 +502,14 @@ static void rt73usb_config_intf(struct rt2x00_dev *rt2x00dev,
                                struct rt2x00intf_conf *conf,
                                const unsigned int flags)
 {
-       unsigned int beacon_base;
        u32 reg;
 
        if (flags & CONFIG_UPDATE_TYPE) {
-               /*
-                * Clear current synchronisation setup.
-                * For the Beacon base registers we only need to clear
-                * the first byte since that byte contains the VALID and OWNER
-                * bits which (when set to 0) will invalidate the entire beacon.
-                */
-               beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
-               rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
-
                /*
                 * Enable synchronisation.
                 */
                rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
-               rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
                rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync);
-               rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
                rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
        }
 
@@ -1440,9 +1428,7 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt73usb_disable_radio(rt2x00dev);
                break;
        case STATE_RADIO_IRQ_ON:
-       case STATE_RADIO_IRQ_ON_ISR:
        case STATE_RADIO_IRQ_OFF:
-       case STATE_RADIO_IRQ_OFF_ISR:
                /* No support, but no error either */
                break;
        case STATE_DEEP_SLEEP:
@@ -1547,13 +1533,14 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        unsigned int beacon_base;
        unsigned int padding_len;
-       u32 reg;
+       u32 orig_reg, reg;
 
        /*
         * Disable beaconing while we are reloading the beacon data,
         * otherwise we might be sending out invalid data.
         */
        rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+       orig_reg = reg;
        rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
        rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
 
@@ -1577,7 +1564,14 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
         * Write entire beacon with descriptor and padding to register.
         */
        padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
-       skb_pad(entry->skb, padding_len);
+       if (padding_len && skb_pad(entry->skb, padding_len)) {
+               ERROR(rt2x00dev, "Failure padding beacon, aborting\n");
+               /* skb freed by skb_pad() on failure */
+               entry->skb = NULL;
+               rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
+               return;
+       }
+
        beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
        rt2x00usb_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
                                      entry->skb->len + padding_len);
@@ -1590,8 +1584,6 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
         */
        rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
 
-       rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
-       rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
        rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
        rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
 
@@ -1602,6 +1594,33 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
        entry->skb = NULL;
 }
 
+static void rt73usb_clear_beacon(struct queue_entry *entry)
+{
+       struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+       unsigned int beacon_base;
+       u32 reg;
+
+       /*
+        * Disable beaconing while we are reloading the beacon data,
+        * otherwise we might be sending out invalid data.
+        */
+       rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+       rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
+       rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+
+       /*
+        * Clear beacon.
+        */
+       beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
+       rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
+
+       /*
+        * Enable beaconing again.
+        */
+       rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
+       rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+}
+
 static int rt73usb_get_tx_data_len(struct queue_entry *entry)
 {
        int length;
@@ -1698,9 +1717,8 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
                rxdesc->flags |= RX_FLAG_IV_STRIPPED;
 
                /*
-                * FIXME: Legacy driver indicates that the frame does
-                * contain the Michael Mic. Unfortunately, in rt2x00
-                * the MIC seems to be missing completely...
+                * The hardware has already checked the Michael Mic and has
+                * stripped it from the frame. Signal this to mac80211.
                 */
                rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
 
@@ -2313,6 +2331,7 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
        .flush_queue            = rt2x00usb_flush_queue,
        .write_tx_desc          = rt73usb_write_tx_desc,
        .write_beacon           = rt73usb_write_beacon,
+       .clear_beacon           = rt73usb_clear_beacon,
        .get_tx_data_len        = rt73usb_get_tx_data_len,
        .fill_rxdone            = rt73usb_fill_rxdone,
        .config_shared_key      = rt73usb_config_shared_key,
index 5851cbc1e9577a08758f880d388c69f5c8f1d694..80db5cabc9b917164d19d839d4a7e25e0a939692 100644 (file)
@@ -146,7 +146,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
                        rx_status.freq = dev->conf.channel->center_freq;
                        rx_status.band = dev->conf.channel->band;
                        rx_status.mactime = le64_to_cpu(entry->tsft);
-                       rx_status.flag |= RX_FLAG_TSFT;
+                       rx_status.flag |= RX_FLAG_MACTIME_MPDU;
                        if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
                                rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
 
@@ -240,7 +240,7 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
 {
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -321,8 +321,6 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
        spin_unlock_irqrestore(&priv->lock, flags);
 
        rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4)));
-
-       return 0;
 }
 
 void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam)
@@ -687,7 +685,6 @@ static void rtl8180_beacon_work(struct work_struct *work)
        struct ieee80211_hw *dev = vif_priv->dev;
        struct ieee80211_mgmt *mgmt;
        struct sk_buff *skb;
-       int err = 0;
 
        /* don't overflow the tx ring */
        if (ieee80211_queue_stopped(dev, 0))
@@ -708,8 +705,7 @@ static void rtl8180_beacon_work(struct work_struct *work)
        /* TODO: use actual beacon queue */
        skb_set_queue_mapping(skb, 0);
 
-       err = rtl8180_tx(dev, skb);
-       WARN_ON(err);
+       rtl8180_tx(dev, skb);
 
 resched:
        /*
index 6b82cac37ee3332c7e34e0430fac49a7732e3b62..c5a5e788f25fc77817df606e5bc660043f12f972 100644 (file)
@@ -227,7 +227,7 @@ static void rtl8187_tx_cb(struct urb *urb)
        }
 }
 
-static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
 {
        struct rtl8187_priv *priv = dev->priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -241,7 +241,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
        urb = usb_alloc_urb(0, GFP_ATOMIC);
        if (!urb) {
                kfree_skb(skb);
-               return NETDEV_TX_OK;
+               return;
        }
 
        flags = skb->len;
@@ -309,8 +309,6 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
                kfree_skb(skb);
        }
        usb_free_urb(urb);
-
-       return NETDEV_TX_OK;
 }
 
 static void rtl8187_rx_cb(struct urb *urb)
@@ -373,7 +371,7 @@ static void rtl8187_rx_cb(struct urb *urb)
        rx_status.rate_idx = rate;
        rx_status.freq = dev->conf.channel->center_freq;
        rx_status.band = dev->conf.channel->band;
-       rx_status.flag |= RX_FLAG_TSFT;
+       rx_status.flag |= RX_FLAG_MACTIME_MPDU;
        if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
                rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
        memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
index 7f6573f7f47021617cee956d10ff2532343166e7..ce49e0ce7cad347256a6fe6502be65afd3a521bd 100644 (file)
@@ -1,15 +1,33 @@
 config RTL8192CE
-       tristate "Realtek RTL8192CE/RTL8188SE Wireless Network Adapter"
-       depends on MAC80211 && EXPERIMENTAL
+       tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter"
+       depends on MAC80211 && PCI && EXPERIMENTAL
        select FW_LOADER
        select RTLWIFI
+       select RTL8192C_COMMON
        ---help---
        This is the driver for Realtek RTL8192CE/RTL8188CE 802.11n PCIe
        wireless network adapters.
 
        If you choose to build it as a module, it will be called rtl8192ce
 
+config RTL8192CU
+       tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
+       depends on MAC80211 && USB && EXPERIMENTAL
+       select FW_LOADER
+       select RTLWIFI
+       select RTL8192C_COMMON
+       ---help---
+       This is the driver for Realtek RTL8192CU/RTL8188CU 802.11n USB
+       wireless network adapters.
+
+       If you choose to build it as a module, it will be called rtl8192cu
+
 config RTLWIFI
        tristate
-       depends on RTL8192CE
+       depends on RTL8192CE || RTL8192CU
+       default m
+
+config RTL8192C_COMMON
+       tristate
+       depends on RTL8192CE || RTL8192CU
        default m
index 2a7a4384f8ee9725a871aa6d4ef47f152cbcf25a..9192fd583413b7a71ddfa245b4651f7809c2d7e9 100644 (file)
@@ -5,9 +5,19 @@ rtlwifi-objs   :=              \
                core.o          \
                debug.o         \
                efuse.o         \
-               pci.o           \
                ps.o            \
                rc.o            \
-               regd.o
+               regd.o          \
+               usb.o
 
+rtl8192c_common-objs +=                \
+
+ifeq ($(CONFIG_PCI),y)
+rtlwifi-objs   += pci.o
+endif
+
+obj-$(CONFIG_RTL8192C_COMMON)  += rtl8192c/
 obj-$(CONFIG_RTL8192CE)                += rtl8192ce/
+obj-$(CONFIG_RTL8192CU)                += rtl8192cu/
+
+ccflags-y += -D__CHECK_ENDIAN__
index cf0b73e51fc2875e87711be58ddf2ef1bf736908..bb0c781f4a1b2e27d5d310671d4e5f6cee6dd22b 100644 (file)
@@ -144,7 +144,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
                ht_cap->mcs.rx_mask[1] = 0xFF;
                ht_cap->mcs.rx_mask[4] = 0x01;
 
-               ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS15;
+               ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15);
        } else if (get_rf_type(rtlphy) == RF_1T1R) {
 
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("1T1R\n"));
@@ -153,7 +153,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
                ht_cap->mcs.rx_mask[1] = 0x00;
                ht_cap->mcs.rx_mask[4] = 0x01;
 
-               ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS7;
+               ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS7);
        }
 }
 
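The cpu_to_le16() additions above are needed because rx_highest in struct ieee80211_mcs_info is declared __le16, so assigning a host-order constant is wrong on big-endian machines and triggers sparse warnings. A small sketch of the general rule, with hypothetical helper names:

        #include <linux/types.h>
        #include <asm/byteorder.h>

        /* Sketch: values stored in __le16 fields are converted explicitly;
         * this is a no-op on little-endian CPUs and a byteswap on big-endian. */
        static __le16 example_to_wire16(u16 host_val)
        {
                return cpu_to_le16(host_val);
        }

        static u16 example_from_wire16(__le16 wire_val)
        {
                return le16_to_cpu(wire_val);
        }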
@@ -283,13 +283,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
        rtlmac->hw = hw;
 
        /* <2> rate control register */
-       if (rtl_rate_control_register()) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        ("rtl: Unable to register rtl_rc,"
-                         "use default RC !!\n"));
-       } else {
-               hw->rate_control_algorithm = "rtl_rc";
-       }
+       hw->rate_control_algorithm = "rtl_rc";
 
        /*
         * <3> init CRDA must come after init
@@ -325,8 +319,6 @@ int rtl_init_core(struct ieee80211_hw *hw)
 
 void rtl_deinit_core(struct ieee80211_hw *hw)
 {
-        /*RC*/
-       rtl_rate_control_unregister();
 }
 
 void rtl_init_rx_config(struct ieee80211_hw *hw)
@@ -399,21 +391,21 @@ static void _rtl_query_protection_mode(struct ieee80211_hw *hw,
        u8 rate_flag = info->control.rates[0].flags;
 
        /* Common Settings */
-       tcb_desc->b_rts_stbc = false;
-       tcb_desc->b_cts_enable = false;
+       tcb_desc->rts_stbc = false;
+       tcb_desc->cts_enable = false;
        tcb_desc->rts_sc = 0;
-       tcb_desc->b_rts_bw = false;
-       tcb_desc->b_rts_use_shortpreamble = false;
-       tcb_desc->b_rts_use_shortgi = false;
+       tcb_desc->rts_bw = false;
+       tcb_desc->rts_use_shortpreamble = false;
+       tcb_desc->rts_use_shortgi = false;
 
        if (rate_flag & IEEE80211_TX_RC_USE_CTS_PROTECT) {
                /* Use CTS-to-SELF in protection mode. */
-               tcb_desc->b_rts_enable = true;
-               tcb_desc->b_cts_enable = true;
+               tcb_desc->rts_enable = true;
+               tcb_desc->cts_enable = true;
                tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
        } else if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
                /* Use RTS-CTS in protection mode. */
-               tcb_desc->b_rts_enable = true;
+               tcb_desc->rts_enable = true;
                tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
        }
 
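The protection-mode hunk above mirrors mac80211's per-packet rate-control flags into the driver's TX control block: CTS-to-self when IEEE80211_TX_RC_USE_CTS_PROTECT is set, a full RTS/CTS exchange when IEEE80211_TX_RC_USE_RTS_CTS is set. A condensed sketch of that decision only (boolean outputs, not the real tcb_desc layout; "example_pick_protection" is a hypothetical name):

        #include <net/mac80211.h>

        /* Condensed sketch of the protection choice above; the real code also
         * selects the 24M OFDM rate for the protection frame. */
        static void example_pick_protection(u8 rate_flags, bool *cts_to_self,
                                            bool *rts_cts)
        {
                *cts_to_self = false;
                *rts_cts = false;

                if (rate_flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
                        *cts_to_self = true;
                else if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS)
                        *rts_cts = true;
        }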
@@ -429,7 +421,7 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
                if (mac->opmode == NL80211_IFTYPE_STATION)
                        tcb_desc->ratr_index = 0;
                else if (mac->opmode == NL80211_IFTYPE_ADHOC) {
-                       if (tcb_desc->b_multicast || tcb_desc->b_broadcast) {
+                       if (tcb_desc->multicast || tcb_desc->broadcast) {
                                tcb_desc->hw_rate =
                                    rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M];
                                tcb_desc->use_driver_rate = 1;
@@ -439,7 +431,7 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
                }
        }
 
-       if (rtlpriv->dm.b_useramask) {
+       if (rtlpriv->dm.useramask) {
                /* TODO we will differentiate adhoc and station in the future */
                /* TODO we will differentiate adhoc and station in the future */
                tcb_desc->mac_id = 0;
 
@@ -461,19 +453,19 @@ static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 
-       tcb_desc->b_packet_bw = false;
+       tcb_desc->packet_bw = false;
 
        if (!mac->bw_40 || !mac->ht_enable)
                return;
 
-       if (tcb_desc->b_multicast || tcb_desc->b_broadcast)
+       if (tcb_desc->multicast || tcb_desc->broadcast)
                return;
 
        /*use legacy rate, shall use 20MHz */
        if (tcb_desc->hw_rate <= rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M])
                return;
 
-       tcb_desc->b_packet_bw = true;
+       tcb_desc->packet_bw = true;
 }
 
 static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw)
@@ -498,7 +490,7 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
        struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
        struct ieee80211_rate *txrate;
-       u16 fc = le16_to_cpu(hdr->frame_control);
+       __le16 fc = hdr->frame_control;
 
        memset(tcb_desc, 0, sizeof(struct rtl_tcb_desc));
 
@@ -545,9 +537,9 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
                }
 
                if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
-                       tcb_desc->b_multicast = 1;
+                       tcb_desc->multicast = 1;
                else if (is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
-                       tcb_desc->b_broadcast = 1;
+                       tcb_desc->broadcast = 1;
 
                _rtl_txrate_selectmode(hw, tcb_desc);
                _rtl_query_bandwidth_mode(hw, tcb_desc);
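The switch from "u16 fc = le16_to_cpu(...)" to "__le16 fc = hdr->frame_control" in the hunks above works because the ieee80211_is_*() helpers take the frame-control word in its little-endian wire form, so converting it first was unnecessary and endian-unclean. A minimal sketch ("example_needs_tcb" is a hypothetical name):

        #include <linux/ieee80211.h>

        /* Sketch: the frame-control word stays __le16 end to end. */
        static bool example_needs_tcb(struct ieee80211_hdr *hdr)
        {
                __le16 fc = hdr->frame_control;

                return ieee80211_is_data(fc) || ieee80211_is_mgmt(fc);
        }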
@@ -570,7 +562,7 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
-       u16 fc = le16_to_cpu(hdr->frame_control);
+       __le16 fc = hdr->frame_control;
 
        if (ieee80211_is_auth(fc)) {
                RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
@@ -587,7 +579,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u16 fc = le16_to_cpu(hdr->frame_control);
+       __le16 fc = hdr->frame_control;
        u8 *act = (u8 *) (((u8 *) skb->data + MAC80211_3ADDR_LEN));
        u8 category;
 
@@ -632,7 +624,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-       u16 fc = le16_to_cpu(hdr->frame_control);
+       __le16 fc = hdr->frame_control;
        u16 ether_type;
        u8 mac_hdr_len = ieee80211_get_hdrlen_from_skb(skb);
        const struct iphdr *ip;
@@ -646,7 +638,6 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
        ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len +
                              SNAP_SIZE + PROTOC_TYPE_SIZE);
        ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE);
-       ether_type = ntohs(ether_type);
 
        if (ETH_P_IP == ether_type) {
                if (IPPROTO_UDP == ip->protocol) {
@@ -690,7 +681,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
                }
 
                return true;
-       } else if (0x86DD == ether_type) {
+       } else if (ETH_P_IPV6 == ether_type) {
+               /* IPv6 */
                return true;
        }
 
@@ -777,10 +769,10 @@ void rtl_watchdog_wq_callback(void *data)
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 
-       bool b_busytraffic = false;
-       bool b_higher_busytraffic = false;
-       bool b_higher_busyrxtraffic = false;
-       bool b_higher_busytxtraffic = false;
+       bool busytraffic = false;
+       bool higher_busytraffic = false;
+       bool higher_busyrxtraffic = false;
+       bool higher_busytxtraffic = false;
 
        u8 idx = 0;
        u32 rx_cnt_inp4eriod = 0;
@@ -788,7 +780,7 @@ void rtl_watchdog_wq_callback(void *data)
        u32 aver_rx_cnt_inperiod = 0;
        u32 aver_tx_cnt_inperiod = 0;
 
-       bool benter_ps = false;
+       bool enter_ps = false;
 
        if (is_hal_stop(rtlhal))
                return;
@@ -832,29 +824,29 @@ void rtl_watchdog_wq_callback(void *data)
 
                /* (2) check traffic busy */
                if (aver_rx_cnt_inperiod > 100 || aver_tx_cnt_inperiod > 100)
-                       b_busytraffic = true;
+                       busytraffic = true;
 
                /* Higher Tx/Rx data. */
                if (aver_rx_cnt_inperiod > 4000 ||
                    aver_tx_cnt_inperiod > 4000) {
-                       b_higher_busytraffic = true;
+                       higher_busytraffic = true;
 
                        /* Extremely high Rx data. */
                        if (aver_rx_cnt_inperiod > 5000)
-                               b_higher_busyrxtraffic = true;
+                               higher_busyrxtraffic = true;
                        else
-                               b_higher_busytxtraffic = false;
+                               higher_busytxtraffic = false;
                }
 
                if (((rtlpriv->link_info.num_rx_inperiod +
                      rtlpriv->link_info.num_tx_inperiod) > 8) ||
                    (rtlpriv->link_info.num_rx_inperiod > 2))
-                       benter_ps = false;
+                       enter_ps = false;
                else
-                       benter_ps = true;
+                       enter_ps = true;
 
                /* LeisurePS only work in infra mode. */
-               if (benter_ps)
+               if (enter_ps)
                        rtl_lps_enter(hw);
                else
                        rtl_lps_leave(hw);
@@ -863,9 +855,9 @@ void rtl_watchdog_wq_callback(void *data)
        rtlpriv->link_info.num_rx_inperiod = 0;
        rtlpriv->link_info.num_tx_inperiod = 0;
 
-       rtlpriv->link_info.b_busytraffic = b_busytraffic;
-       rtlpriv->link_info.b_higher_busytraffic = b_higher_busytraffic;
-       rtlpriv->link_info.b_higher_busyrxtraffic = b_higher_busyrxtraffic;
+       rtlpriv->link_info.busytraffic = busytraffic;
+       rtlpriv->link_info.higher_busytraffic = higher_busytraffic;
+       rtlpriv->link_info.higher_busyrxtraffic = higher_busyrxtraffic;
 
 }
 
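The watchdog above decides once per period whether leisure power save may kick in: more than 8 frames moved in total, or more than 2 frames received, keeps the radio awake. A minimal sketch of just that predicate, using the thresholds from the hunk ("example_should_enter_lps" is a hypothetical name):

        /* Sketch of the leisure-PS predicate above: enter LPS only when the
         * last watchdog period was essentially idle. */
        static bool example_should_enter_lps(u32 num_rx_inperiod, u32 num_tx_inperiod)
        {
                if ((num_rx_inperiod + num_tx_inperiod) > 8 || num_rx_inperiod > 2)
                        return false;
                return true;
        }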
@@ -945,11 +937,16 @@ MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
 
 static int __init rtl_core_module_init(void)
 {
+       if (rtl_rate_control_register())
+               printk(KERN_ERR "rtlwifi: Unable to register rtl_rc, "
+                      "use default RC !!\n");
        return 0;
 }
 
 static void __exit rtl_core_module_exit(void)
 {
+        /*RC*/
+       rtl_rate_control_unregister();
 }
 
 module_init(rtl_core_module_init);
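The hunk above moves rate-control registration from per-device init into module init, so the mac80211 rate-control algorithm is registered exactly once even when several adapters probe. A sketch of that register-once pattern with hypothetical names ("example_rate_control_register"/"example_rate_control_unregister" stand in for the driver's own helpers):

        #include <linux/init.h>
        #include <linux/module.h>
        #include <linux/printk.h>

        /* Sketch: the rate-control algorithm is module-global state, so it is
         * registered in module_init() and torn down in module_exit(), not per
         * ieee80211_hw instance. */
        static int __init example_module_init(void)
        {
                if (example_rate_control_register())        /* hypothetical */
                        pr_err("example: rate control registration failed\n");
                return 0;  /* keep loading; mac80211 falls back to a default RC */
        }

        static void __exit example_module_exit(void)
        {
                example_rate_control_unregister();          /* hypothetical */
        }

        module_init(example_module_init);
        module_exit(example_module_exit);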
index 3de5a14745f14248db81f39e647adc43682b2bbc..043045342bc7e91a5dd77b655f344e048bc85b95 100644 (file)
@@ -30,6 +30,7 @@
 #define __RTL_BASE_H__
 
 #define RTL_DUMMY_OFFSET       0
+#define RTL_RX_DESC_SIZE       24
 #define RTL_DUMMY_UNIT         8
 #define RTL_TX_DUMMY_SIZE      (RTL_DUMMY_OFFSET * RTL_DUMMY_UNIT)
 #define RTL_TX_DESC_SIZE       32
 #define FRAME_OFFSET_SEQUENCE          22
 #define FRAME_OFFSET_ADDRESS4          24
 
-#define SET_80211_HDR_FRAME_CONTROL(_hdr, _val)                \
-       WRITEEF2BYTE(_hdr, _val)
-#define SET_80211_HDR_TYPE_AND_SUBTYPE(_hdr, _val)     \
-       WRITEEF1BYTE(_hdr, _val)
-#define SET_80211_HDR_PWR_MGNT(_hdr, _val)             \
-       SET_BITS_TO_LE_2BYTE(_hdr, 12, 1, _val)
-#define SET_80211_HDR_TO_DS(_hdr, _val)                        \
-       SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val)
 
 #define SET_80211_PS_POLL_AID(_hdr, _val)              \
-       WRITEEF2BYTE(((u8 *)(_hdr)) + 2, _val)
+       (*(u16 *)((u8 *)(_hdr) + 2) = le16_to_cpu(_val))
 #define SET_80211_PS_POLL_BSSID(_hdr, _val)            \
-       CP_MACADDR(((u8 *)(_hdr)) + 4, (u8 *)(_val))
+       memcpy(((u8 *)(_hdr)) + 4, (u8 *)(_val), ETH_ALEN)
 #define SET_80211_PS_POLL_TA(_hdr, _val)               \
-       CP_MACADDR(((u8 *)(_hdr)) + 10, (u8 *)(_val))
+       memcpy(((u8 *)(_hdr)) + 10, (u8 *)(_val), ETH_ALEN)
 
 #define SET_80211_HDR_DURATION(_hdr, _val)     \
-       WRITEEF2BYTE((u8 *)(_hdr)+FRAME_OFFSET_DURATION, _val)
+       (*(u16 *)((u8 *)(_hdr) + FRAME_OFFSET_DURATION) = le16_to_cpu(_val))
 #define SET_80211_HDR_ADDRESS1(_hdr, _val)     \
-       CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS1, (u8*)(_val))
+       memcpy((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS1, (u8*)(_val), ETH_ALEN)
 #define SET_80211_HDR_ADDRESS2(_hdr, _val)     \
-       CP_MACADDR((u8 *)(_hdr) + FRAME_OFFSET_ADDRESS2, (u8 *)(_val))
+       memcpy((u8 *)(_hdr) + FRAME_OFFSET_ADDRESS2, (u8 *)(_val), ETH_ALEN)
 #define SET_80211_HDR_ADDRESS3(_hdr, _val)     \
-       CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8 *)(_val))
-#define SET_80211_HDR_FRAGMENT_SEQUENCE(_hdr, _val)  \
-       WRITEEF2BYTE((u8 *)(_hdr)+FRAME_OFFSET_SEQUENCE, _val)
-
-#define SET_BEACON_PROBE_RSP_TIME_STAMP_LOW(__phdr, __val)     \
-       WRITEEF4BYTE(((u8 *)(__phdr)) + 24, __val)
-#define SET_BEACON_PROBE_RSP_TIME_STAMP_HIGH(__phdr, __val) \
-       WRITEEF4BYTE(((u8 *)(__phdr)) + 28, __val)
-#define SET_BEACON_PROBE_RSP_BEACON_INTERVAL(__phdr, __val) \
-       WRITEEF2BYTE(((u8 *)(__phdr)) + 32, __val)
-#define GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr)   \
-       READEF2BYTE(((u8 *)(__phdr)) + 34)
-#define SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
-       WRITEEF2BYTE(((u8 *)(__phdr)) + 34, __val)
-#define MASK_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
-       SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, \
-       (GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) & (~(__val))))
+       memcpy((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8 *)(_val), ETH_ALEN)
 
 int rtl_init_core(struct ieee80211_hw *hw);
 void rtl_deinit_core(struct ieee80211_hw *hw);
index d6a924a056549a43dba381e19537de76c6e1daae..e4f4aee8f2988f7ce75d8bcbba1c32906217a4dc 100644 (file)
@@ -82,7 +82,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
        mutex_unlock(&rtlpriv->locks.conf_mutex);
 }
 
-static int rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -97,11 +97,10 @@ static int rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 
        rtlpriv->intf_ops->adapter_tx(hw, skb);
 
-       return NETDEV_TX_OK;
+       return;
 
 err_free:
        dev_kfree_skb_any(skb);
-       return NETDEV_TX_OK;
 }
 
 static int rtl_op_add_interface(struct ieee80211_hw *hw,
@@ -434,9 +433,9 @@ static int rtl_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
 
        aci = _rtl_get_hal_qnum(queue);
        mac->ac[aci].aifs = param->aifs;
-       mac->ac[aci].cw_min = param->cw_min;
-       mac->ac[aci].cw_max = param->cw_max;
-       mac->ac[aci].tx_op = param->txop;
+       mac->ac[aci].cw_min = cpu_to_le16(param->cw_min);
+       mac->ac[aci].cw_max = cpu_to_le16(param->cw_max);
+       mac->ac[aci].tx_op = cpu_to_le16(param->txop);
        memcpy(&mac->edca_param[aci], param, sizeof(*param));
        rtlpriv->cfg->ops->set_qos(hw, aci);
        return 0;
@@ -552,6 +551,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
                         ("BSS_CHANGED_HT\n"));
 
+               rcu_read_lock();
                sta = ieee80211_find_sta(mac->vif, mac->bssid);
 
                if (sta) {
@@ -564,6 +564,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                                mac->current_ampdu_factor =
                                    sta->ht_cap.ampdu_factor;
                }
+               rcu_read_unlock();
 
                rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY,
                                              (u8 *) (&mac->max_mss_density));
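The rcu_read_lock()/rcu_read_unlock() pairs added in these hunks are required because the station pointer returned by ieee80211_find_sta() is only valid inside an RCU read-side critical section; both the lookup and every dereference of the result must sit inside it. A minimal sketch ("example_peer_ampdu_density" is a hypothetical name):

        #include <linux/rcupdate.h>
        #include <net/mac80211.h>

        /* Sketch: keep the lookup and all uses of the RCU-protected sta
         * pointer inside one RCU read section. */
        static u8 example_peer_ampdu_density(struct ieee80211_vif *vif,
                                             const u8 *bssid)
        {
                struct ieee80211_sta *sta;
                u8 density = 0;

                rcu_read_lock();
                sta = ieee80211_find_sta(vif, bssid);
                if (sta)
                        density = sta->ht_cap.ampdu_density;
                rcu_read_unlock();

                return density;
        }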
@@ -615,6 +616,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                else
                        mac->mode = WIRELESS_MODE_G;
 
+               rcu_read_lock();
                sta = ieee80211_find_sta(mac->vif, mac->bssid);
 
                if (sta) {
@@ -649,6 +651,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                                 */
                        }
                }
+               rcu_read_unlock();
 
                /*mac80211 just give us CCK rates any time
                 *So we add G rate in basic rates when
@@ -666,7 +669,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                        rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
                                        (u8 *) (&basic_rates));
 
-                       if (rtlpriv->dm.b_useramask)
+                       if (rtlpriv->dm.useramask)
                                rtlpriv->cfg->ops->update_rate_mask(hw, 0);
                        else
                                rtlpriv->cfg->ops->update_rate_table(hw);
@@ -681,7 +684,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
         */
        if (changed & BSS_CHANGED_ASSOC) {
                if (bss_conf->assoc) {
-                       if (ppsc->b_fwctrl_lps) {
+                       if (ppsc->fwctrl_lps) {
                                u8 mstatus = RT_MEDIA_CONNECT;
                                rtlpriv->cfg->ops->set_hw_reg(hw,
                                                      HW_VAR_H2C_FW_JOINBSSRPT,
@@ -689,7 +692,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                                ppsc->report_linked = true;
                        }
                } else {
-                       if (ppsc->b_fwctrl_lps) {
+                       if (ppsc->fwctrl_lps) {
                                u8 mstatus = RT_MEDIA_DISCONNECT;
                                rtlpriv->cfg->ops->set_hw_reg(hw,
                                                      HW_VAR_H2C_FW_JOINBSSRPT,
@@ -748,7 +751,8 @@ static void rtl_op_sta_notify(struct ieee80211_hw *hw,
 static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif,
                               enum ieee80211_ampdu_mlme_action action,
-                              struct ieee80211_sta *sta, u16 tid, u16 * ssn)
+                              struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                              u8 buf_size)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
 
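The signature change above tracks the mac80211 extension that passes a buf_size argument (the peer's BlockAck reorder-buffer size) to the ampdu_action callback; a driver whose firmware handles aggregation sizing can accept the parameter and otherwise ignore it. A minimal stub showing only the extended signature, as a sketch with a hypothetical name:

        #include <linux/errno.h>
        #include <net/mac80211.h>

        /* Stub sketch: a real driver dispatches on 'action' and may use
         * buf_size to cap its TX A-MPDU length. */
        static int example_ampdu_action(struct ieee80211_hw *hw,
                                        struct ieee80211_vif *vif,
                                        enum ieee80211_ampdu_mlme_action action,
                                        struct ieee80211_sta *sta, u16 tid,
                                        u16 *ssn, u8 buf_size)
        {
                return -EOPNOTSUPP;
        }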
@@ -817,7 +821,7 @@ static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw)
                /* fix fwlps issue */
                rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
 
-               if (rtlpriv->dm.b_useramask)
+               if (rtlpriv->dm.useramask)
                        rtlpriv->cfg->ops->update_rate_mask(hw, 0);
                else
                        rtlpriv->cfg->ops->update_rate_table(hw);
index 08bdec2ceda456994b22106da2a1fbcc10fdddc9..e4aa8687408cfd5b4ca45c9c629e5c63d555a921 100644 (file)
 #define COMP_MAC80211          BIT(26)
 #define COMP_REGD                      BIT(27)
 #define COMP_CHAN                      BIT(28)
+#define COMP_USB                       BIT(29)
 
 /*--------------------------------------------------------------
                Define the rt_print components
index 62876cd5c41a43db48ddaaadb685a959988589c2..4f92cba6810ab193cf6b93000eb22e7a26679861 100644 (file)
@@ -1169,21 +1169,3 @@ static u8 efuse_calculate_word_cnts(u8 word_en)
        return word_cnts;
 }
 
-void efuse_reset_loader(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u16 tmp_u2b;
-
-       tmp_u2b = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN]);
-       rtl_write_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN],
-                      (tmp_u2b & ~(BIT(12))));
-       udelay(10000);
-       rtl_write_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN],
-                      (tmp_u2b | BIT(12)));
-       udelay(10000);
-}
-
-bool efuse_program_map(struct ieee80211_hw *hw, char *p_filename, u8 tabletype)
-{
-       return true;
-}
index 2d39a4df181b5f9858be119bc20507e983753396..47774dd4c2a6c945ebf5386ba04320557c492b29 100644 (file)
@@ -117,8 +117,5 @@ extern bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
 extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
 extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
 extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
-extern bool efuse_program_map(struct ieee80211_hw *hw,
-                             char *p_filename, u8 tabletype);
-extern void efuse_reset_loader(struct ieee80211_hw *hw);
 
 #endif
index 1758d4463247395c7e9d3d15a8e3be5010160744..9cd7703c2a3035d06f1cdc8e8c781185f2899422 100644 (file)
@@ -50,7 +50,7 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
        u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
 
        ppsc->reg_rfps_level = 0;
-       ppsc->b_support_aspm = 0;
+       ppsc->support_aspm = 0;
 
        /*Update PCI ASPM setting */
        ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
@@ -115,29 +115,29 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
        switch (rtlpci->const_support_pciaspm) {
        case 0:{
                        /*Not support ASPM. */
-                       bool b_support_aspm = false;
-                       ppsc->b_support_aspm = b_support_aspm;
+                       bool support_aspm = false;
+                       ppsc->support_aspm = support_aspm;
                        break;
                }
        case 1:{
                        /*Support ASPM. */
-                       bool b_support_aspm = true;
-                       bool b_support_backdoor = true;
-                       ppsc->b_support_aspm = b_support_aspm;
+                       bool support_aspm = true;
+                       bool support_backdoor = true;
+                       ppsc->support_aspm = support_aspm;
 
                        /*if(priv->oem_id == RT_CID_TOSHIBA &&
                           !priv->ndis_adapter.amd_l1_patch)
-                          b_support_backdoor = false; */
+                          support_backdoor = false; */
 
-                       ppsc->b_support_backdoor = b_support_backdoor;
+                       ppsc->support_backdoor = support_backdoor;
 
                        break;
                }
        case 2:
                /*ASPM value set by chipset. */
                if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
-                       bool b_support_aspm = true;
-                       ppsc->b_support_aspm = b_support_aspm;
+                       bool support_aspm = true;
+                       ppsc->support_aspm = support_aspm;
                }
                break;
        default:
@@ -476,9 +476,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
 
                skb = __skb_dequeue(&ring->queue);
                pci_unmap_single(rtlpci->pdev,
-                                le32_to_cpu(rtlpriv->cfg->ops->
+                                rtlpriv->cfg->ops->
                                             get_desc((u8 *) entry, true,
-                                                     HW_DESC_TXBUFF_ADDR)),
+                                                     HW_DESC_TXBUFF_ADDR),
                                 skb->len, PCI_DMA_TODEVICE);
 
                RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
@@ -557,7 +557,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                        return;
                } else {
                        struct ieee80211_hdr *hdr;
-                       u16 fc;
+                       __le16 fc;
                        struct sk_buff *new_skb = NULL;
 
                        rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
@@ -583,9 +583,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                         */
 
                        hdr = (struct ieee80211_hdr *)(skb->data);
-                       fc = le16_to_cpu(hdr->frame_control);
+                       fc = hdr->frame_control;
 
-                       if (!stats.b_crc) {
+                       if (!stats.crc) {
                                memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
                                       sizeof(rx_status));
 
@@ -666,7 +666,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
 
                }
 done:
-               bufferaddress = cpu_to_le32(*((dma_addr_t *) skb->cb));
+               bufferaddress = (u32)(*((dma_addr_t *) skb->cb));
                tmp_one = 1;
                rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
                                            HW_DESC_RXBUFF_ADDR,
@@ -690,75 +690,6 @@ done:
 
 }
 
-void _rtl_pci_tx_interrupt(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-       int prio;
-
-       for (prio = 0; prio < RTL_PCI_MAX_TX_QUEUE_COUNT; prio++) {
-               struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
-
-               while (skb_queue_len(&ring->queue)) {
-                       struct rtl_tx_desc *entry = &ring->desc[ring->idx];
-                       struct sk_buff *skb;
-                       struct ieee80211_tx_info *info;
-                       u8 own;
-
-                       /*
-                        *beacon packet will only use the first
-                        *descriptor defautly, and the own may not
-                        *be cleared by the hardware, and
-                        *beacon will free in prepare beacon
-                        */
-                       if (prio == BEACON_QUEUE || prio == TXCMD_QUEUE ||
-                           prio == HCCA_QUEUE)
-                               break;
-
-                       own = (u8)rtlpriv->cfg->ops->get_desc((u8 *)entry,
-                                                              true,
-                                                              HW_DESC_OWN);
-
-                       if (own)
-                               break;
-
-                       skb = __skb_dequeue(&ring->queue);
-                       pci_unmap_single(rtlpci->pdev,
-                                        le32_to_cpu(rtlpriv->cfg->ops->
-                                                    get_desc((u8 *) entry,
-                                                    true,
-                                                    HW_DESC_TXBUFF_ADDR)),
-                                        skb->len, PCI_DMA_TODEVICE);
-
-                       ring->idx = (ring->idx + 1) % ring->entries;
-
-                       info = IEEE80211_SKB_CB(skb);
-                       ieee80211_tx_info_clear_status(info);
-
-                       info->flags |= IEEE80211_TX_STAT_ACK;
-                       /*info->status.rates[0].count = 1; */
-
-                       ieee80211_tx_status_irqsafe(hw, skb);
-
-                       if ((ring->entries - skb_queue_len(&ring->queue))
-                           == 2 && prio != BEACON_QUEUE) {
-                               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                        ("more desc left, wake "
-                                         "skb_queue@%d,ring->idx = %d,"
-                                         "skb_queue_len = 0x%d\n",
-                                         prio, ring->idx,
-                                         skb_queue_len(&ring->queue)));
-
-                               ieee80211_wake_queue(hw,
-                                                    skb_get_queue_mapping
-                                                    (skb));
-                       }
-
-                       skb = NULL;
-               }
-       }
-}
-
 static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
 {
        struct ieee80211_hw *hw = dev_id;
@@ -959,17 +890,17 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
        rtlhal->hw = hw;
        rtlpci->pdev = pdev;
 
-       ppsc->b_inactiveps = false;
-       ppsc->b_leisure_ps = true;
-       ppsc->b_fwctrl_lps = true;
-       ppsc->b_reg_fwctrl_lps = 3;
+       ppsc->inactiveps = false;
+       ppsc->leisure_ps = true;
+       ppsc->fwctrl_lps = true;
+       ppsc->reg_fwctrl_lps = 3;
        ppsc->reg_max_lps_awakeintvl = 5;
 
-       if (ppsc->b_reg_fwctrl_lps == 1)
+       if (ppsc->reg_fwctrl_lps == 1)
                ppsc->fwctrl_psmode = FW_PS_MIN_MODE;
-       else if (ppsc->b_reg_fwctrl_lps == 2)
+       else if (ppsc->reg_fwctrl_lps == 2)
                ppsc->fwctrl_psmode = FW_PS_MAX_MODE;
-       else if (ppsc->b_reg_fwctrl_lps == 3)
+       else if (ppsc->reg_fwctrl_lps == 3)
                ppsc->fwctrl_psmode = FW_PS_DTIM_MODE;
 
        /*Tx/Rx related var */
@@ -1024,9 +955,8 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
                 ("queue:%d, ring_addr:%p\n", prio, ring));
 
        for (i = 0; i < entries; i++) {
-               nextdescaddress = cpu_to_le32((u32) dma +
-                                             ((i + 1) % entries) *
-                                             sizeof(*ring));
+               nextdescaddress = (u32) dma + ((i + 1) % entries) *
+                                             sizeof(*ring);
 
                rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]),
                                            true, HW_DESC_TX_NEXTDESC_ADDR,
@@ -1090,7 +1020,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
                                           rtlpci->rxbuffersize,
                                           PCI_DMA_FROMDEVICE);
 
-                       bufferaddress = cpu_to_le32(*((dma_addr_t *)skb->cb));
+                       bufferaddress = (u32)(*((dma_addr_t *)skb->cb));
                        rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
                                                    HW_DESC_RXBUFF_ADDR,
                                                    (u8 *)&bufferaddress);
@@ -1121,9 +1051,9 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
                struct sk_buff *skb = __skb_dequeue(&ring->queue);
 
                pci_unmap_single(rtlpci->pdev,
-                                le32_to_cpu(rtlpriv->cfg->
+                                rtlpriv->cfg->
                                             ops->get_desc((u8 *) entry, true,
-                                                  HW_DESC_TXBUFF_ADDR)),
+                                                  HW_DESC_TXBUFF_ADDR),
                                 skb->len, PCI_DMA_TODEVICE);
                kfree_skb(skb);
                ring->idx = (ring->idx + 1) % ring->entries;
@@ -1255,11 +1185,11 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
                                    __skb_dequeue(&ring->queue);
 
                                pci_unmap_single(rtlpci->pdev,
-                                                le32_to_cpu(rtlpriv->cfg->ops->
+                                                rtlpriv->cfg->ops->
                                                         get_desc((u8 *)
                                                         entry,
                                                         true,
-                                                        HW_DESC_TXBUFF_ADDR)),
+                                                        HW_DESC_TXBUFF_ADDR),
                                                 skb->len, PCI_DMA_TODEVICE);
                                kfree_skb(skb);
                                ring->idx = (ring->idx + 1) % ring->entries;
@@ -1273,7 +1203,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
        return 0;
 }
 
-unsigned int _rtl_mac_to_hwqueue(u16 fc,
+static unsigned int _rtl_mac_to_hwqueue(__le16 fc,
                unsigned int mac80211_queue_index)
 {
        unsigned int hw_queue_index;
@@ -1312,7 +1242,7 @@ out:
        return hw_queue_index;
 }
 
-int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -1323,7 +1253,7 @@ int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        unsigned int queue_index, hw_queue;
        unsigned long flags;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
-       u16 fc = le16_to_cpu(hdr->frame_control);
+       __le16 fc = hdr->frame_control;
        u8 *pda_addr = hdr->addr1;
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        /*ssn */
@@ -1429,7 +1359,7 @@ int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        return 0;
 }
 
-void rtl_pci_deinit(struct ieee80211_hw *hw)
+static void rtl_pci_deinit(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1444,7 +1374,7 @@ void rtl_pci_deinit(struct ieee80211_hw *hw)
 
 }
 
-int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
+static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        int err;
@@ -1461,7 +1391,7 @@ int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
        return 1;
 }
 
-int rtl_pci_start(struct ieee80211_hw *hw)
+static int rtl_pci_start(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -1496,7 +1426,7 @@ int rtl_pci_start(struct ieee80211_hw *hw)
        return 0;
 }
 
-void rtl_pci_stop(struct ieee80211_hw *hw)
+static void rtl_pci_stop(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1547,13 +1477,11 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
        struct pci_dev *bridge_pdev = pdev->bus->self;
        u16 venderid;
        u16 deviceid;
-       u8 revisionid;
        u16 irqline;
        u8 tmp;
 
        venderid = pdev->vendor;
        deviceid = pdev->device;
-       pci_read_config_byte(pdev, 0x8, &revisionid);
        pci_read_config_word(pdev, 0x3C, &irqline);
 
        if (deviceid == RTL_PCI_8192_DID ||
@@ -1564,7 +1492,7 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
            deviceid == RTL_PCI_8173_DID ||
            deviceid == RTL_PCI_8172_DID ||
            deviceid == RTL_PCI_8171_DID) {
-               switch (revisionid) {
+               switch (pdev->revision) {
                case RTL_PCI_REVISION_ID_8192PCIE:
                        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
                                 ("8192 PCI-E is found - "
@@ -1838,7 +1766,7 @@ fail3:
        ieee80211_free_hw(hw);
 
        if (rtlpriv->io.pci_mem_start != 0)
-               pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start);
+               pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
 
 fail2:
        pci_release_regions(pdev);
@@ -1888,7 +1816,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
        }
 
        if (rtlpriv->io.pci_mem_start != 0) {
-               pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start);
+               pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
                pci_release_regions(pdev);
        }
 
index d36a66939958bd8b5190c7b1b7dd0fcc855985d8..0caa81429726db273e46e00ac80653c4057f3125 100644 (file)
@@ -244,34 +244,34 @@ int rtl_pci_resume(struct pci_dev *pdev);
 
 static inline u8 pci_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
 {
-       return 0xff & readb((u8 *) rtlpriv->io.pci_mem_start + addr);
+       return readb((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
 }
 
 static inline u16 pci_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
 {
-       return readw((u8 *) rtlpriv->io.pci_mem_start + addr);
+       return readw((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
 }
 
 static inline u32 pci_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
 {
-       return readl((u8 *) rtlpriv->io.pci_mem_start + addr);
+       return readl((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
 }
 
 static inline void pci_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
 {
-       writeb(val, (u8 *) rtlpriv->io.pci_mem_start + addr);
+       writeb(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
 }
 
 static inline void pci_write16_async(struct rtl_priv *rtlpriv,
                                     u32 addr, u16 val)
 {
-       writew(val, (u8 *) rtlpriv->io.pci_mem_start + addr);
+       writew(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
 }
 
 static inline void pci_write32_async(struct rtl_priv *rtlpriv,
                                     u32 addr, u32 val)
 {
-       writel(val, (u8 *) rtlpriv->io.pci_mem_start + addr);
+       writel(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
 }
 
 static inline void rtl_pci_raw_write_port_ulong(u32 port, u32 val)
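The __iomem annotations added above matter because the MMIO accessors expect __iomem-qualified pointers: the cookie returned by pci_iomap()/ioremap() must never be dereferenced directly and sparse checks that only read*/write* helpers touch it. A minimal sketch with hypothetical wrapper names:

        #include <linux/io.h>
        #include <linux/types.h>

        /* Sketch: MMIO registers are accessed through readl()/writel() on an
         * __iomem-annotated base pointer, never via plain pointer dereference. */
        static u32 example_read_reg(void __iomem *base, u32 offset)
        {
                return readl(base + offset);
        }

        static void example_write_reg(void __iomem *base, u32 offset, u32 val)
        {
                writel(val, base + offset);
        }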
index d2326c13449e08b67945bac5a9662218e313a08d..6b7e217b6b89583b70d2d30a969f4d42dbba8b03 100644 (file)
@@ -86,7 +86,7 @@ bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
        enum rf_pwrstate rtstate;
-       bool b_actionallowed = false;
+       bool actionallowed = false;
        u16 rfwait_cnt = 0;
        unsigned long flag;
 
@@ -139,13 +139,13 @@ no_protect:
                ppsc->rfoff_reason &= (~changesource);
 
                if ((changesource == RF_CHANGE_BY_HW) &&
-                   (ppsc->b_hwradiooff == true)) {
-                       ppsc->b_hwradiooff = false;
+                   (ppsc->hwradiooff == true)) {
+                       ppsc->hwradiooff = false;
                }
 
                if (!ppsc->rfoff_reason) {
                        ppsc->rfoff_reason = 0;
-                       b_actionallowed = true;
+                       actionallowed = true;
                }
 
                break;
@@ -153,17 +153,17 @@ no_protect:
        case ERFOFF:
 
                if ((changesource == RF_CHANGE_BY_HW)
-                   && (ppsc->b_hwradiooff == false)) {
-                       ppsc->b_hwradiooff = true;
+                   && (ppsc->hwradiooff == false)) {
+                       ppsc->hwradiooff = true;
                }
 
                ppsc->rfoff_reason |= changesource;
-               b_actionallowed = true;
+               actionallowed = true;
                break;
 
        case ERFSLEEP:
                ppsc->rfoff_reason |= changesource;
-               b_actionallowed = true;
+               actionallowed = true;
                break;
 
        default:
@@ -172,7 +172,7 @@ no_protect:
                break;
        }
 
-       if (b_actionallowed)
+       if (actionallowed)
                rtlpriv->cfg->ops->set_rf_power_state(hw, state_toset);
 
        if (!protect_or_not) {
@@ -181,7 +181,7 @@ no_protect:
                spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
        }
 
-       return b_actionallowed;
+       return actionallowed;
 }
 EXPORT_SYMBOL(rtl_ps_set_rf_state);
 
@@ -191,7 +191,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
 
-       ppsc->b_swrf_processing = true;
+       ppsc->swrf_processing = true;
 
        if (ppsc->inactive_pwrstate == ERFON && rtlhal->interface == INTF_PCI) {
                if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
@@ -213,7 +213,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
                }
        }
 
-       ppsc->b_swrf_processing = false;
+       ppsc->swrf_processing = false;
 }
 
 void rtl_ips_nic_off_wq_callback(void *data)
@@ -239,13 +239,13 @@ void rtl_ips_nic_off_wq_callback(void *data)
        if (rtlpriv->sec.being_setkey)
                return;
 
-       if (ppsc->b_inactiveps) {
+       if (ppsc->inactiveps) {
                rtstate = ppsc->rfpwr_state;
 
                /*
                 *Do not enter IPS in the following conditions:
                 *(1) RF is already OFF or Sleep
-                *(2) b_swrf_processing (indicates the IPS is still under going)
+                *(2) swrf_processing (indicates the IPS is still under going)
                 *(3) Connected (only disconnected can trigger IPS)
                 *(4) IBSS (send Beacon)
                 *(5) AP mode (send Beacon)
@@ -253,14 +253,14 @@ void rtl_ips_nic_off_wq_callback(void *data)
                 */
 
                if (rtstate == ERFON &&
-                   !ppsc->b_swrf_processing &&
+                   !ppsc->swrf_processing &&
                    (mac->link_state == MAC80211_NOLINK) &&
                    !mac->act_scanning) {
                        RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
                                 ("IPSEnter(): Turn off RF.\n"));
 
                        ppsc->inactive_pwrstate = ERFOFF;
-                       ppsc->b_in_powersavemode = true;
+                       ppsc->in_powersavemode = true;
 
                        /*rtl_pci_reset_trx_ring(hw); */
                        _rtl_ps_inactive_ps(hw);
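A condensed sketch of the inactive-power-save gate above: the RF is only switched off when it is currently on, no RF transition is already in flight, and the interface is idle (no link, not scanning). The enum rf_pwrstate type and ERFON value are the rtlwifi driver's own; "example_can_enter_ips" is a hypothetical name.

        /* Condensed sketch of the IPS-enter check above. */
        static bool example_can_enter_ips(enum rf_pwrstate state,
                                          bool swrf_processing,
                                          bool linked, bool scanning)
        {
                return state == ERFON && !swrf_processing && !linked && !scanning;
        }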
@@ -290,15 +290,15 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
 
        spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags);
 
-       if (ppsc->b_inactiveps) {
+       if (ppsc->inactiveps) {
                rtstate = ppsc->rfpwr_state;
 
                if (rtstate != ERFON &&
-                   !ppsc->b_swrf_processing &&
+                   !ppsc->swrf_processing &&
                    ppsc->rfoff_reason <= RF_CHANGE_BY_IPS) {
 
                        ppsc->inactive_pwrstate = ERFON;
-                       ppsc->b_in_powersavemode = false;
+                       ppsc->in_powersavemode = false;
 
                        _rtl_ps_inactive_ps(hw);
                }
@@ -370,9 +370,9 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
         *   mode and set RPWM to turn RF on.
         */
 
-       if ((ppsc->b_fwctrl_lps) && (ppsc->b_leisure_ps) &&
+       if ((ppsc->fwctrl_lps) && (ppsc->leisure_ps) &&
             ppsc->report_linked) {
-               bool b_fw_current_inps;
+               bool fw_current_inps;
                if (ppsc->dot11_psmode == EACTIVE) {
                        RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
                                 ("FW LPS leave ps_mode:%x\n",
@@ -385,11 +385,11 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
                        rtlpriv->cfg->ops->set_hw_reg(hw,
                                        HW_VAR_H2C_FW_PWRMODE,
                                        (u8 *) (&fw_pwrmode));
-                       b_fw_current_inps = false;
+                       fw_current_inps = false;
 
                        rtlpriv->cfg->ops->set_hw_reg(hw,
                                        HW_VAR_FW_PSMODE_STATUS,
-                                       (u8 *) (&b_fw_current_inps));
+                                       (u8 *) (&fw_current_inps));
 
                } else {
                        if (rtl_get_fwlps_doze(hw)) {
@@ -398,10 +398,10 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
                                                 ppsc->fwctrl_psmode));
 
                                rpwm_val = 0x02;        /* RF off */
-                               b_fw_current_inps = true;
+                               fw_current_inps = true;
                                rtlpriv->cfg->ops->set_hw_reg(hw,
                                                HW_VAR_FW_PSMODE_STATUS,
-                                               (u8 *) (&b_fw_current_inps));
+                                               (u8 *) (&fw_current_inps));
                                rtlpriv->cfg->ops->set_hw_reg(hw,
                                                HW_VAR_H2C_FW_PWRMODE,
                                                (u8 *) (&ppsc->fwctrl_psmode));
@@ -425,13 +425,13 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        unsigned long flag;
 
-       if (!(ppsc->b_fwctrl_lps && ppsc->b_leisure_ps))
+       if (!(ppsc->fwctrl_lps && ppsc->leisure_ps))
                return;
 
        if (rtlpriv->sec.being_setkey)
                return;
 
-       if (rtlpriv->link_info.b_busytraffic)
+       if (rtlpriv->link_info.busytraffic)
                return;
 
        /*sleep after linked 10s, to let DHCP and 4-way handshake ok enough!! */
@@ -446,7 +446,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
 
        spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
 
-       if (ppsc->b_leisure_ps) {
+       if (ppsc->leisure_ps) {
                /* Idle for a while if we connect to AP a while ago. */
                if (mac->cnt_after_linked >= 2) {
                        if (ppsc->dot11_psmode == EACTIVE) {
@@ -470,7 +470,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
 
        spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
 
-       if (ppsc->b_fwctrl_lps && ppsc->b_leisure_ps) {
+       if (ppsc->fwctrl_lps && ppsc->leisure_ps) {
                if (ppsc->dot11_psmode != EACTIVE) {
 
                        /*FIX ME */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/Makefile b/drivers/net/wireless/rtlwifi/rtl8192c/Makefile
new file mode 100644 (file)
index 0000000..aee42d7
--- /dev/null
@@ -0,0 +1,9 @@
+rtl8192c-common-objs :=                \
+               main.o          \
+               dm_common.o     \
+               fw_common.o     \
+               phy_common.o
+
+obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c-common.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
new file mode 100644 (file)
index 0000000..bb02327
--- /dev/null
@@ -0,0 +1,1398 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "dm_common.h"
+
+struct dig_t dm_digtable;
+static struct ps_t dm_pstable;
+
+static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
+       0x7f8001fe,
+       0x788001e2,
+       0x71c001c7,
+       0x6b8001ae,
+       0x65400195,
+       0x5fc0017f,
+       0x5a400169,
+       0x55400155,
+       0x50800142,
+       0x4c000130,
+       0x47c0011f,
+       0x43c0010f,
+       0x40000100,
+       0x3c8000f2,
+       0x390000e4,
+       0x35c000d7,
+       0x32c000cb,
+       0x300000c0,
+       0x2d4000b5,
+       0x2ac000ab,
+       0x288000a2,
+       0x26000098,
+       0x24000090,
+       0x22000088,
+       0x20000080,
+       0x1e400079,
+       0x1c800072,
+       0x1b00006c,
+       0x19800066,
+       0x18000060,
+       0x16c0005b,
+       0x15800056,
+       0x14400051,
+       0x1300004c,
+       0x12000048,
+       0x11000044,
+       0x10000040,
+};
+
+static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
+       {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
+       {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},
+       {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
+       {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03},
+       {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
+       {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03},
+       {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
+       {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03},
+       {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
+       {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02},
+       {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
+       {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02},
+       {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
+       {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02},
+       {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
+       {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02},
+       {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
+       {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02},
+       {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
+       {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
+       {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
+       {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01},
+       {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01},
+       {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01},
+       {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01},
+       {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01},
+       {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01},
+       {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01},
+       {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01},
+       {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01},
+       {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01},
+       {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01},
+       {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}
+};
+
+static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
+       {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
+       {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},
+       {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
+       {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00},
+       {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
+       {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00},
+       {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
+       {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00},
+       {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
+       {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00},
+       {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
+       {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00},
+       {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
+       {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00},
+       {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
+       {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00},
+       {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
+       {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00},
+       {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
+       {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
+       {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
+       {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00},
+       {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00},
+       {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
+       {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
+       {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00},
+       {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
+       {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
+       {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
+       {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
+       {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
+       {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
+       {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
+};
+
+static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
+{
+       dm_digtable.dig_enable_flag = true;
+       dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+       dm_digtable.cur_igvalue = 0x20;
+       dm_digtable.pre_igvalue = 0x0;
+       dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
+       dm_digtable.presta_connectstate = DIG_STA_DISCONNECT;
+       dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
+       dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
+       dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
+       dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
+       dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
+       dm_digtable.rx_gain_range_max = DM_DIG_MAX;
+       dm_digtable.rx_gain_range_min = DM_DIG_MIN;
+       dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
+       dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
+       dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
+       dm_digtable.pre_cck_pd_state = CCK_PD_STAGE_MAX;
+       dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
+}
+
+static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       long rssi_val_min = 0;
+
+       if ((dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
+           (dm_digtable.cursta_connectctate == DIG_STA_CONNECT)) {
+               if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
+                       rssi_val_min =
+                           (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
+                            rtlpriv->dm.undecorated_smoothed_pwdb) ?
+                           rtlpriv->dm.undecorated_smoothed_pwdb :
+                           rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+               else
+                       rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
+       } else if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT ||
+                  dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT) {
+               rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
+       } else if (dm_digtable.curmultista_connectstate ==
+                  DIG_MULTISTA_CONNECT) {
+               rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+       }
+
+       return (u8) rssi_val_min;
+}
+
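+/*
+ * Read the OFDM and CCK false-alarm counters from the PHY, accumulate the
+ * totals, then reset the hardware counters for the next round.
+ */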
+static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
+{
+       u32 ret_value;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
+
+       ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
+       falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
+
+       ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
+       falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
+       falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
+
+       ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
+       falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
+       falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
+           falsealm_cnt->cnt_rate_illegal +
+           falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail;
+
+       rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1);
+       ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
+       falsealm_cnt->cnt_cck_fail = ret_value;
+
+       ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
+       falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
+       falsealm_cnt->cnt_all = (falsealm_cnt->cnt_parity_fail +
+                                falsealm_cnt->cnt_rate_illegal +
+                                falsealm_cnt->cnt_crc8_fail +
+                                falsealm_cnt->cnt_mcs_fail +
+                                falsealm_cnt->cnt_cck_fail);
+
+       rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
+       rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
+       rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
+       rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
+
+       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+                ("cnt_parity_fail = %d, cnt_rate_illegal = %d, "
+                 "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
+                 falsealm_cnt->cnt_parity_fail,
+                 falsealm_cnt->cnt_rate_illegal,
+                 falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail));
+
+       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+                ("cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
+                 falsealm_cnt->cnt_ofdm_fail,
+                 falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all));
+}
+
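+/* Step the initial gain index up or down based on the false-alarm total. */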
+static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 value_igi = dm_digtable.cur_igvalue;
+
+       if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
+               value_igi--;
+       else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)
+               value_igi += 0;
+       else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2)
+               value_igi++;
+       else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2)
+               value_igi += 2;
+       if (value_igi > DM_DIG_FA_UPPER)
+               value_igi = DM_DIG_FA_UPPER;
+       else if (value_igi < DM_DIG_FA_LOWER)
+               value_igi = DM_DIG_FA_LOWER;
+       if (rtlpriv->falsealm_cnt.cnt_all > 10000)
+               value_igi = 0x32;
+
+       dm_digtable.cur_igvalue = value_igi;
+       rtl92c_dm_write_dig(hw);
+}
+
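+/*
+ * Derive the initial gain from the minimum RSSI, applying a false-alarm
+ * driven backoff and clamping the result to the allowed RX gain range.
+ */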
+static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable.fa_highthresh) {
+               if ((dm_digtable.backoff_val - 2) <
+                   dm_digtable.backoff_val_range_min)
+                       dm_digtable.backoff_val =
+                           dm_digtable.backoff_val_range_min;
+               else
+                       dm_digtable.backoff_val -= 2;
+       } else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable.fa_lowthresh) {
+               if ((dm_digtable.backoff_val + 2) >
+                   dm_digtable.backoff_val_range_max)
+                       dm_digtable.backoff_val =
+                           dm_digtable.backoff_val_range_max;
+               else
+                       dm_digtable.backoff_val += 2;
+       }
+
+       if ((dm_digtable.rssi_val_min + 10 - dm_digtable.backoff_val) >
+           dm_digtable.rx_gain_range_max)
+               dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_max;
+       else if ((dm_digtable.rssi_val_min + 10 -
+                 dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
+               dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_min;
+       else
+               dm_digtable.cur_igvalue = dm_digtable.rssi_val_min + 10 -
+                   dm_digtable.backoff_val;
+
+       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+                ("rssi_val_min = %x backoff_val %x\n",
+                 dm_digtable.rssi_val_min, dm_digtable.backoff_val));
+
+       rtl92c_dm_write_dig(hw);
+}
+
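+/* Initial gain handling for the multi-STA (ad-hoc) case. */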
+static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
+{
+       static u8 binitialized; /* initialized to false */
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+       bool multi_sta = false;
+
+       if (mac->opmode == NL80211_IFTYPE_ADHOC)
+               multi_sta = true;
+
+       if (!multi_sta ||
+           (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT)) {
+               binitialized = false;
+               dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+               return;
+       } else if (!binitialized) {
+               binitialized = true;
+               dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
+               dm_digtable.cur_igvalue = 0x20;
+               rtl92c_dm_write_dig(hw);
+       }
+
+       if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) {
+               if ((rssi_strength < dm_digtable.rssi_lowthresh) &&
+                   (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
+
+                       if (dm_digtable.dig_ext_port_stage ==
+                           DIG_EXT_PORT_STAGE_2) {
+                               dm_digtable.cur_igvalue = 0x20;
+                               rtl92c_dm_write_dig(hw);
+                       }
+
+                       dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
+               } else if (rssi_strength > dm_digtable.rssi_highthresh) {
+                       dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
+                       rtl92c_dm_ctrl_initgain_by_fa(hw);
+               }
+       } else if (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
+               dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
+               dm_digtable.cur_igvalue = 0x20;
+               rtl92c_dm_write_dig(hw);
+       }
+
+       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+                ("curmultista_connectstate = "
+                 "%x dig_ext_port_stage %x\n",
+                 dm_digtable.curmultista_connectstate,
+                 dm_digtable.dig_ext_port_stage));
+}
+
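+/* Initial gain handling for the default port (station) case. */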
+static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+                ("presta_connectstate = %x,"
+                 " cursta_connectctate = %x\n",
+                 dm_digtable.presta_connectstate,
+                 dm_digtable.cursta_connectctate));
+
+       if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate
+           || dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT
+           || dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
+
+               if (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) {
+                       dm_digtable.rssi_val_min =
+                           rtl92c_dm_initial_gain_min_pwdb(hw);
+                       rtl92c_dm_ctrl_initgain_by_rssi(hw);
+               }
+       } else {
+               dm_digtable.rssi_val_min = 0;
+               dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+               dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
+               dm_digtable.cur_igvalue = 0x20;
+               dm_digtable.pre_igvalue = 0;
+               rtl92c_dm_write_dig(hw);
+       }
+}
+
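+/*
+ * Pick the CCK packet-detection stage from the current RSSI and CCK
+ * false-alarm level, and reprogram the CCK registers when it changes.
+ */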
+static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
+               dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
+
+               if (dm_digtable.pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
+                       if (dm_digtable.rssi_val_min <= 25)
+                               dm_digtable.cur_cck_pd_state =
+                                   CCK_PD_STAGE_LowRssi;
+                       else
+                               dm_digtable.cur_cck_pd_state =
+                                   CCK_PD_STAGE_HighRssi;
+               } else {
+                       if (dm_digtable.rssi_val_min <= 20)
+                               dm_digtable.cur_cck_pd_state =
+                                   CCK_PD_STAGE_LowRssi;
+                       else
+                               dm_digtable.cur_cck_pd_state =
+                                   CCK_PD_STAGE_HighRssi;
+               }
+       } else {
+               dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
+       }
+
+       if (dm_digtable.pre_cck_pd_state != dm_digtable.cur_cck_pd_state) {
+               if (dm_digtable.cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
+                       if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
+                               dm_digtable.cur_cck_fa_state =
+                                   CCK_FA_STAGE_High;
+                       else
+                               dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_Low;
+
+                       if (dm_digtable.pre_cck_fa_state !=
+                           dm_digtable.cur_cck_fa_state) {
+                               if (dm_digtable.cur_cck_fa_state ==
+                                   CCK_FA_STAGE_Low)
+                                       rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
+                                                     0x83);
+                               else
+                                       rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
+                                                     0xcd);
+
+                               dm_digtable.pre_cck_fa_state =
+                                   dm_digtable.cur_cck_fa_state;
+                       }
+
+                       rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);
+
+                       if (IS_92C_SERIAL(rtlhal->version))
+                               rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
+                                             MASKBYTE2, 0xd7);
+               } else {
+                       rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
+                       rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);
+
+                       if (IS_92C_SERIAL(rtlhal->version))
+                               rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
+                                             MASKBYTE2, 0xd3);
+               }
+               dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state;
+       }
+
+       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+                ("CCKPDStage=%x\n", dm_digtable.cur_cck_pd_state));
+
+       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+                ("is92C=%x\n", IS_92C_SERIAL(rtlhal->version)));
+}
+
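+/*
+ * Map the mac80211 link state onto the DIG connection state and run the
+ * single-STA, multi-STA and CCK-PD stages (skipped while scanning).
+ */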
+static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
+{
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+       if (mac->act_scanning)
+               return;
+
+       if ((mac->link_state > MAC80211_NOLINK) &&
+           (mac->link_state < MAC80211_LINKED))
+               dm_digtable.cursta_connectctate = DIG_STA_BEFORE_CONNECT;
+       else if (mac->link_state >= MAC80211_LINKED)
+               dm_digtable.cursta_connectctate = DIG_STA_CONNECT;
+       else
+               dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
+
+       rtl92c_dm_initial_gain_sta(hw);
+       rtl92c_dm_initial_gain_multi_sta(hw);
+       rtl92c_dm_cck_packet_detection_thresh(hw);
+
+       dm_digtable.presta_connectstate = dm_digtable.cursta_connectctate;
+
+}
+
+static void rtl92c_dm_dig(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       if (!rtlpriv->dm.dm_initialgain_enable)
+               return;
+       if (!dm_digtable.dig_enable_flag)
+               return;
+
+       rtl92c_dm_ctrl_initgain_by_twoport(hw);
+
+}
+
+static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtlpriv->dm.dynamic_txpower_enable = false;
+
+       rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
+       rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+}
+
+void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
+                ("cur_igvalue = 0x%x, "
+                 "pre_igvalue = 0x%x, backoff_val = %d\n",
+                 dm_digtable.cur_igvalue, dm_digtable.pre_igvalue,
+                 dm_digtable.backoff_val));
+
+       if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) {
+               rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
+                             dm_digtable.cur_igvalue);
+               rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
+                             dm_digtable.cur_igvalue);
+
+               dm_digtable.pre_igvalue = dm_digtable.cur_igvalue;
+       }
+}
+EXPORT_SYMBOL(rtl92c_dm_write_dig);
+
+static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
+
+       u8 h2c_parameter[3] = { 0 };
+
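+       /* No per-STA PWDB scan is done here; skip the reporting below. */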
+       return;
+
+       if (tmpentry_max_pwdb != 0) {
+               rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb =
+                   tmpentry_max_pwdb;
+       } else {
+               rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0;
+       }
+
+       if (tmpentry_min_pwdb != 0xff) {
+               rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb =
+                   tmpentry_min_pwdb;
+       } else {
+               rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0;
+       }
+
+       h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
+       h2c_parameter[0] = 0;
+
+       rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
+}
+
+void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       rtlpriv->dm.current_turbo_edca = false;
+       rtlpriv->dm.is_any_nonbepkts = false;
+       rtlpriv->dm.is_cur_rdlstate = false;
+}
+EXPORT_SYMBOL(rtl92c_dm_init_edca_turbo);
+
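+/*
+ * Turbo EDCA: when only best-effort traffic is flowing, load a BE parameter
+ * set tuned for the dominant traffic direction; otherwise restore the
+ * default BE parameters.
+ */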
+static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       static u64 last_txok_cnt;
+       static u64 last_rxok_cnt;
+       u64 cur_txok_cnt;
+       u64 cur_rxok_cnt;
+       u32 edca_be_ul = 0x5ea42b;
+       u32 edca_be_dl = 0x5ea42b;
+
+       if (mac->opmode == NL80211_IFTYPE_ADHOC)
+               goto dm_checkedcaturbo_exit;
+
+       if (mac->link_state != MAC80211_LINKED) {
+               rtlpriv->dm.current_turbo_edca = false;
+               return;
+       }
+
+       if (!mac->ht_enable) {  /*FIX MERGE */
+               if (!(edca_be_ul & 0xffff0000))
+                       edca_be_ul |= 0x005e0000;
+
+               if (!(edca_be_dl & 0xffff0000))
+                       edca_be_dl |= 0x005e0000;
+       }
+
+       if ((!rtlpriv->dm.is_any_nonbepkts) &&
+           (!rtlpriv->dm.disable_framebursting)) {
+               cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
+               cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
+               if (cur_rxok_cnt > 4 * cur_txok_cnt) {
+                       if (!rtlpriv->dm.is_cur_rdlstate ||
+                           !rtlpriv->dm.current_turbo_edca) {
+                               rtl_write_dword(rtlpriv,
+                                               REG_EDCA_BE_PARAM,
+                                               edca_be_dl);
+                               rtlpriv->dm.is_cur_rdlstate = true;
+                       }
+               } else {
+                       if (rtlpriv->dm.is_cur_rdlstate ||
+                           !rtlpriv->dm.current_turbo_edca) {
+                               rtl_write_dword(rtlpriv,
+                                               REG_EDCA_BE_PARAM,
+                                               edca_be_ul);
+                               rtlpriv->dm.is_cur_rdlstate = false;
+                       }
+               }
+               rtlpriv->dm.current_turbo_edca = true;
+       } else {
+               if (rtlpriv->dm.current_turbo_edca) {
+                       u8 tmp = AC0_BE;
+                       rtlpriv->cfg->ops->set_hw_reg(hw,
+                                                     HW_VAR_AC_PARAM,
+                                                     (u8 *) (&tmp));
+                       rtlpriv->dm.current_turbo_edca = false;
+               }
+       }
+
+dm_checkedcaturbo_exit:
+       rtlpriv->dm.is_any_nonbepkts = false;
+       last_txok_cnt = rtlpriv->stats.txbytesunicast;
+       last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
+}
+
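+/*
+ * Thermal-meter based TX power tracking: compare the current RF thermal
+ * reading against the calibration value and adjust the OFDM/CCK swing
+ * settings (plus LC/IQ calibration) to compensate for temperature drift.
+ */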
+static void rtl92c_dm_txpower_tracking_callback_thermalmeter(
+                                               struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u8 thermalvalue, delta, delta_lck, delta_iqk;
+       long ele_a, ele_d, temp_cck, val_x, value32;
+       long val_y, ele_c;
+       u8 ofdm_index[2], cck_index, ofdm_index_old[2], cck_index_old;
+       int i;
+       bool is2t = IS_92C_SERIAL(rtlhal->version);
+       u8 txpwr_level[2] = {0, 0};
+       u8 ofdm_min_index = 6, rf;
+
+       rtlpriv->dm.txpower_trackingInit = true;
+       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                ("rtl92c_dm_txpower_tracking_callback_thermalmeter\n"));
+
+       thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);
+
+       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
+                 "eeprom_thermalmeter 0x%x\n",
+                 thermalvalue, rtlpriv->dm.thermalvalue,
+                 rtlefuse->eeprom_thermalmeter));
+
+       rtl92c_phy_ap_calibrate(hw, (thermalvalue -
+                                    rtlefuse->eeprom_thermalmeter));
+       if (is2t)
+               rf = 2;
+       else
+               rf = 1;
+
+       if (thermalvalue) {
+               ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+                                     MASKDWORD) & MASKOFDM_D;
+
+               for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
+                       if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
+                               ofdm_index_old[0] = (u8) i;
+
+                               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                                       ("Initial pathA ele_d reg0x%x = 0x%lx, "
+                                        "ofdm_index=0x%x\n",
+                                        ROFDM0_XATXIQIMBALANCE,
+                                        ele_d, ofdm_index_old[0]));
+                               break;
+                       }
+               }
+
+               if (is2t) {
+                       ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
+                                             MASKDWORD) & MASKOFDM_D;
+
+                       for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
+                               if (ele_d == (ofdmswing_table[i] &
+                                   MASKOFDM_D)) {
+                                       ofdm_index_old[1] = (u8) i;
+
+                                       RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
+                                          DBG_LOUD,
+                                          ("Initial pathB ele_d reg0x%x = "
+                                          "0x%lx, ofdm_index=0x%x\n",
+                                          ROFDM0_XBTXIQIMBALANCE, ele_d,
+                                          ofdm_index_old[1]));
+                                       break;
+                               }
+                       }
+               }
+
+               temp_cck =
+                   rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & MASKCCK;
+
+               for (i = 0; i < CCK_TABLE_LENGTH; i++) {
+                       if (rtlpriv->dm.cck_inch14) {
+                               if (memcmp((void *)&temp_cck,
+                                          (void *)&cckswing_table_ch14[i][2],
+                                          4) == 0) {
+                                       cck_index_old = (u8) i;
+
+                                       RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
+                                                DBG_LOUD,
+                                                ("Initial reg0x%x = 0x%lx, "
+                                                 "cck_index=0x%x, ch 14 %d\n",
+                                                 RCCK0_TXFILTER2, temp_cck,
+                                                 cck_index_old,
+                                                 rtlpriv->dm.cck_inch14));
+                                       break;
+                               }
+                       } else {
+                               if (memcmp((void *)&temp_cck,
+                                          (void *)
+                                          &cckswing_table_ch1ch13[i][2],
+                                          4) == 0) {
+                                       cck_index_old = (u8) i;
+
+                                       RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
+                                                DBG_LOUD,
+                                                ("Initial reg0x%x = 0x%lx, "
+                                                 "cck_index=0x%x, ch14 %d\n",
+                                                 RCCK0_TXFILTER2, temp_cck,
+                                                 cck_index_old,
+                                                 rtlpriv->dm.cck_inch14));
+                                       break;
+                               }
+                       }
+               }
+
+               if (!rtlpriv->dm.thermalvalue) {
+                       rtlpriv->dm.thermalvalue =
+                           rtlefuse->eeprom_thermalmeter;
+                       rtlpriv->dm.thermalvalue_lck = thermalvalue;
+                       rtlpriv->dm.thermalvalue_iqk = thermalvalue;
+                       for (i = 0; i < rf; i++)
+                               rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
+                       rtlpriv->dm.cck_index = cck_index_old;
+               }
+
+               delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
+                   (thermalvalue - rtlpriv->dm.thermalvalue) :
+                   (rtlpriv->dm.thermalvalue - thermalvalue);
+
+               delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
+                   (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
+                   (rtlpriv->dm.thermalvalue_lck - thermalvalue);
+
+               delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
+                   (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
+                   (rtlpriv->dm.thermalvalue_iqk - thermalvalue);
+
+               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                       ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
+                        "eeprom_thermalmeter 0x%x delta 0x%x "
+                        "delta_lck 0x%x delta_iqk 0x%x\n",
+                        thermalvalue, rtlpriv->dm.thermalvalue,
+                        rtlefuse->eeprom_thermalmeter, delta, delta_lck,
+                        delta_iqk));
+
+               if (delta_lck > 1) {
+                       rtlpriv->dm.thermalvalue_lck = thermalvalue;
+                       rtl92c_phy_lc_calibrate(hw);
+               }
+
+               if (delta > 0 && rtlpriv->dm.txpower_track_control) {
+                       if (thermalvalue > rtlpriv->dm.thermalvalue) {
+                               for (i = 0; i < rf; i++)
+                                       rtlpriv->dm.ofdm_index[i] -= delta;
+                               rtlpriv->dm.cck_index -= delta;
+                       } else {
+                               for (i = 0; i < rf; i++)
+                                       rtlpriv->dm.ofdm_index[i] += delta;
+                               rtlpriv->dm.cck_index += delta;
+                       }
+
+                       if (is2t) {
+                               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                                        ("temp OFDM_A_index=0x%x, "
+                                         "OFDM_B_index=0x%x,"
+                                         "cck_index=0x%x\n",
+                                         rtlpriv->dm.ofdm_index[0],
+                                         rtlpriv->dm.ofdm_index[1],
+                                         rtlpriv->dm.cck_index));
+                       } else {
+                               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                                        ("temp OFDM_A_index=0x%x,"
+                                         "cck_index=0x%x\n",
+                                         rtlpriv->dm.ofdm_index[0],
+                                         rtlpriv->dm.cck_index));
+                       }
+
+                       if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
+                               for (i = 0; i < rf; i++)
+                                       ofdm_index[i] =
+                                           rtlpriv->dm.ofdm_index[i]
+                                           + 1;
+                               cck_index = rtlpriv->dm.cck_index + 1;
+                       } else {
+                               for (i = 0; i < rf; i++)
+                                       ofdm_index[i] =
+                                           rtlpriv->dm.ofdm_index[i];
+                               cck_index = rtlpriv->dm.cck_index;
+                       }
+
+                       for (i = 0; i < rf; i++) {
+                               if (txpwr_level[i] >= 0 &&
+                                   txpwr_level[i] <= 26) {
+                                       if (thermalvalue >
+                                           rtlefuse->eeprom_thermalmeter) {
+                                               if (delta < 5)
+                                                       ofdm_index[i] -= 1;
+
+                                               else
+                                                       ofdm_index[i] -= 2;
+                                       } else if (delta > 5 && thermalvalue <
+                                                  rtlefuse->
+                                                  eeprom_thermalmeter) {
+                                               ofdm_index[i] += 1;
+                                       }
+                               } else if (txpwr_level[i] >= 27 &&
+                                          txpwr_level[i] <= 32
+                                          && thermalvalue >
+                                          rtlefuse->eeprom_thermalmeter) {
+                                       if (delta < 5)
+                                               ofdm_index[i] -= 1;
+
+                                       else
+                                               ofdm_index[i] -= 2;
+                               } else if (txpwr_level[i] >= 32 &&
+                                          txpwr_level[i] <= 38 &&
+                                          thermalvalue >
+                                          rtlefuse->eeprom_thermalmeter
+                                          && delta > 5) {
+                                       ofdm_index[i] -= 1;
+                               }
+                       }
+
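+                       /* Same thermal adjustment for the CCK swing index. */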
+                       if (txpwr_level[i] >= 0 && txpwr_level[i] <= 26) {
+                               if (thermalvalue >
+                                   rtlefuse->eeprom_thermalmeter) {
+                                       if (delta < 5)
+                                               cck_index -= 1;
+
+                                       else
+                                               cck_index -= 2;
+                               } else if (delta > 5 && thermalvalue <
+                                          rtlefuse->eeprom_thermalmeter) {
+                                       cck_index += 1;
+                               }
+                       } else if (txpwr_level[i] >= 27 &&
+                                  txpwr_level[i] <= 32 &&
+                                  thermalvalue >
+                                  rtlefuse->eeprom_thermalmeter) {
+                               if (delta < 5)
+                                       cck_index -= 1;
+
+                               else
+                                       cck_index -= 2;
+                       } else if (txpwr_level[i] >= 32 &&
+                                  txpwr_level[i] <= 38 &&
+                                  thermalvalue > rtlefuse->eeprom_thermalmeter
+                                  && delta > 5) {
+                               cck_index -= 1;
+                       }
+
+                       for (i = 0; i < rf; i++) {
+                               if (ofdm_index[i] > OFDM_TABLE_SIZE - 1)
+                                       ofdm_index[i] = OFDM_TABLE_SIZE - 1;
+
+                               else if (ofdm_index[i] < ofdm_min_index)
+                                       ofdm_index[i] = ofdm_min_index;
+                       }
+
+                       if (cck_index > CCK_TABLE_SIZE - 1)
+                               cck_index = CCK_TABLE_SIZE - 1;
+                       else if (cck_index < 0)
+                               cck_index = 0;
+
+                       if (is2t) {
+                               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                                        ("new OFDM_A_index=0x%x, "
+                                         "OFDM_B_index=0x%x,"
+                                         "cck_index=0x%x\n",
+                                         ofdm_index[0], ofdm_index[1],
+                                         cck_index));
+                       } else {
+                               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                                        ("new OFDM_A_index=0x%x,"
+                                         "cck_index=0x%x\n",
+                                         ofdm_index[0], cck_index));
+                       }
+               }
+
+               if (rtlpriv->dm.txpower_track_control && delta != 0) {
+                       ele_d =
+                           (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22;
+                       val_x = rtlphy->reg_e94;
+                       val_y = rtlphy->reg_e9c;
+
+                       if (val_x != 0) {
+                               if ((val_x & 0x00000200) != 0)
+                                       val_x = val_x | 0xFFFFFC00;
+                               ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
+
+                               if ((val_y & 0x00000200) != 0)
+                                       val_y = val_y | 0xFFFFFC00;
+                               ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
+
+                               value32 = (ele_d << 22) |
+                                   ((ele_c & 0x3F) << 16) | ele_a;
+
+                               rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+                                             MASKDWORD, value32);
+
+                               value32 = (ele_c & 0x000003C0) >> 6;
+                               rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
+                                             value32);
+
+                               value32 = ((val_x * ele_d) >> 7) & 0x01;
+                               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+                                             BIT(31), value32);
+
+                               value32 = ((val_y * ele_d) >> 7) & 0x01;
+                               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+                                             BIT(29), value32);
+                       } else {
+                               rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+                                             MASKDWORD,
+                                             ofdmswing_table[ofdm_index[0]]);
+
+                               rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
+                                             0x00);
+                               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+                                             BIT(31) | BIT(29), 0x00);
+                       }
+
+                       if (!rtlpriv->dm.cck_inch14) {
+                               rtl_write_byte(rtlpriv, 0xa22,
+                                              cckswing_table_ch1ch13[cck_index]
+                                              [0]);
+                               rtl_write_byte(rtlpriv, 0xa23,
+                                              cckswing_table_ch1ch13[cck_index]
+                                              [1]);
+                               rtl_write_byte(rtlpriv, 0xa24,
+                                              cckswing_table_ch1ch13[cck_index]
+                                              [2]);
+                               rtl_write_byte(rtlpriv, 0xa25,
+                                              cckswing_table_ch1ch13[cck_index]
+                                              [3]);
+                               rtl_write_byte(rtlpriv, 0xa26,
+                                              cckswing_table_ch1ch13[cck_index]
+                                              [4]);
+                               rtl_write_byte(rtlpriv, 0xa27,
+                                              cckswing_table_ch1ch13[cck_index]
+                                              [5]);
+                               rtl_write_byte(rtlpriv, 0xa28,
+                                              cckswing_table_ch1ch13[cck_index]
+                                              [6]);
+                               rtl_write_byte(rtlpriv, 0xa29,
+                                              cckswing_table_ch1ch13[cck_index]
+                                              [7]);
+                       } else {
+                               rtl_write_byte(rtlpriv, 0xa22,
+                                              cckswing_table_ch14[cck_index]
+                                              [0]);
+                               rtl_write_byte(rtlpriv, 0xa23,
+                                              cckswing_table_ch14[cck_index]
+                                              [1]);
+                               rtl_write_byte(rtlpriv, 0xa24,
+                                              cckswing_table_ch14[cck_index]
+                                              [2]);
+                               rtl_write_byte(rtlpriv, 0xa25,
+                                              cckswing_table_ch14[cck_index]
+                                              [3]);
+                               rtl_write_byte(rtlpriv, 0xa26,
+                                              cckswing_table_ch14[cck_index]
+                                              [4]);
+                               rtl_write_byte(rtlpriv, 0xa27,
+                                              cckswing_table_ch14[cck_index]
+                                              [5]);
+                               rtl_write_byte(rtlpriv, 0xa28,
+                                              cckswing_table_ch14[cck_index]
+                                              [6]);
+                               rtl_write_byte(rtlpriv, 0xa29,
+                                              cckswing_table_ch14[cck_index]
+                                              [7]);
+                       }
+
+                       if (is2t) {
+                               ele_d = (ofdmswing_table[ofdm_index[1]] &
+                                        0xFFC00000) >> 22;
+
+                               val_x = rtlphy->reg_eb4;
+                               val_y = rtlphy->reg_ebc;
+
+                               if (val_x != 0) {
+                                       if ((val_x & 0x00000200) != 0)
+                                               val_x = val_x | 0xFFFFFC00;
+                                       ele_a = ((val_x * ele_d) >> 8) &
+                                           0x000003FF;
+
+                                       if ((val_y & 0x00000200) != 0)
+                                               val_y = val_y | 0xFFFFFC00;
+                                       ele_c = ((val_y * ele_d) >> 8) &
+                                           0x000003FF;
+
+                                       value32 = (ele_d << 22) |
+                                           ((ele_c & 0x3F) << 16) | ele_a;
+                                       rtl_set_bbreg(hw,
+                                                     ROFDM0_XBTXIQIMBALANCE,
+                                                     MASKDWORD, value32);
+
+                                       value32 = (ele_c & 0x000003C0) >> 6;
+                                       rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
+                                                     MASKH4BITS, value32);
+
+                                       value32 = ((val_x * ele_d) >> 7) & 0x01;
+                                       rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+                                                     BIT(27), value32);
+
+                                       value32 = ((val_y * ele_d) >> 7) & 0x01;
+                                       rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+                                                     BIT(25), value32);
+                               } else {
+                                       rtl_set_bbreg(hw,
+                                                     ROFDM0_XBTXIQIMBALANCE,
+                                                     MASKDWORD,
+                                                     ofdmswing_table[ofdm_index
+                                                                     [1]]);
+                                       rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
+                                                     MASKH4BITS, 0x00);
+                                       rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+                                                     BIT(27) | BIT(25), 0x00);
+                               }
+
+                       }
+               }
+
+               if (delta_iqk > 3) {
+                       rtlpriv->dm.thermalvalue_iqk = thermalvalue;
+                       rtl92c_phy_iq_calibrate(hw, false);
+               }
+
+               if (rtlpriv->dm.txpower_track_control)
+                       rtlpriv->dm.thermalvalue = thermalvalue;
+       }
+
+       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, ("<===\n"));
+
+}
+
+static void rtl92c_dm_initialize_txpower_tracking_thermalmeter(
+                                               struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtlpriv->dm.txpower_tracking = true;
+       rtlpriv->dm.txpower_trackingInit = false;
+
+       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                ("pMgntInfo->txpower_tracking = %d\n",
+                 rtlpriv->dm.txpower_tracking));
+}
+
+static void rtl92c_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
+{
+       rtl92c_dm_initialize_txpower_tracking_thermalmeter(hw);
+}
+
+static void rtl92c_dm_txpower_tracking_directcall(struct ieee80211_hw *hw)
+{
+       rtl92c_dm_txpower_tracking_callback_thermalmeter(hw);
+}
+
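+/*
+ * Alternate between arming the RF thermal meter and, on the next watchdog
+ * pass, running the tracking callback with the fresh reading.
+ */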
+static void rtl92c_dm_check_txpower_tracking_thermal_meter(
+                                               struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       static u8 tm_trigger;
+
+       if (!rtlpriv->dm.txpower_tracking)
+               return;
+
+       if (!tm_trigger) {
+               rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK,
+                             0x60);
+               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                        ("Trigger 92S Thermal Meter!!\n"));
+               tm_trigger = 1;
+               return;
+       } else {
+               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                        ("Schedule TxPowerTracking direct call!!\n"));
+               rtl92c_dm_txpower_tracking_directcall(hw);
+               tm_trigger = 0;
+       }
+}
+
+void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw)
+{
+       rtl92c_dm_check_txpower_tracking_thermal_meter(hw);
+}
+EXPORT_SYMBOL(rtl92c_dm_check_txpower_tracking);
+
+void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rate_adaptive *p_ra = &(rtlpriv->ra);
+
+       p_ra->ratr_state = DM_RATR_STA_INIT;
+       p_ra->pre_ratr_state = DM_RATR_STA_INIT;
+
+       if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
+               rtlpriv->dm.useramask = true;
+       else
+               rtlpriv->dm.useramask = false;
+
+}
+EXPORT_SYMBOL(rtl92c_dm_init_rate_adaptive_mask);
+
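+/*
+ * Map the smoothed PWDB onto a three-level RSSI state (with thresholds that
+ * depend on the previous state) and update the rate mask when it changes.
+ */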
+static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rate_adaptive *p_ra = &(rtlpriv->ra);
+       u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
+
+       if (is_hal_stop(rtlhal)) {
+               RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+                        ("<---- driver is going to unload\n"));
+               return;
+       }
+
+       if (!rtlpriv->dm.useramask) {
+               RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+                       ("<---- driver does not control rate adaptive mask\n"));
+               return;
+       }
+
+       if (mac->link_state == MAC80211_LINKED) {
+
+               switch (p_ra->pre_ratr_state) {
+               case DM_RATR_STA_HIGH:
+                       high_rssithresh_for_ra = 50;
+                       low_rssithresh_for_ra = 20;
+                       break;
+               case DM_RATR_STA_MIDDLE:
+                       high_rssithresh_for_ra = 55;
+                       low_rssithresh_for_ra = 20;
+                       break;
+               case DM_RATR_STA_LOW:
+                       high_rssithresh_for_ra = 50;
+                       low_rssithresh_for_ra = 25;
+                       break;
+               default:
+                       high_rssithresh_for_ra = 50;
+                       low_rssithresh_for_ra = 20;
+                       break;
+               }
+
+               if (rtlpriv->dm.undecorated_smoothed_pwdb >
+                   (long)high_rssithresh_for_ra)
+                       p_ra->ratr_state = DM_RATR_STA_HIGH;
+               else if (rtlpriv->dm.undecorated_smoothed_pwdb >
+                        (long)low_rssithresh_for_ra)
+                       p_ra->ratr_state = DM_RATR_STA_MIDDLE;
+               else
+                       p_ra->ratr_state = DM_RATR_STA_LOW;
+
+               if (p_ra->pre_ratr_state != p_ra->ratr_state) {
+                       RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+                                ("RSSI = %ld\n",
+                                 rtlpriv->dm.undecorated_smoothed_pwdb));
+                       RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+                                ("RSSI_LEVEL = %d\n", p_ra->ratr_state));
+                       RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+                                ("PreState = %d, CurState = %d\n",
+                                 p_ra->pre_ratr_state, p_ra->ratr_state));
+
+                       rtlpriv->cfg->ops->update_rate_mask(hw,
+                                       p_ra->ratr_state);
+
+                       p_ra->pre_ratr_state = p_ra->ratr_state;
+               }
+       }
+}
+
+static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
+{
+       dm_pstable.pre_ccastate = CCA_MAX;
+       dm_pstable.cur_ccasate = CCA_MAX;
+       dm_pstable.pre_rfstate = RF_MAX;
+       dm_pstable.cur_rfstate = RF_MAX;
+       dm_pstable.rssi_val_min = 0;
+}
+
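+/* Switch between 1R and 2R CCA based on the minimum RSSI. */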
+static void rtl92c_dm_1r_cca(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       if (dm_pstable.rssi_val_min != 0) {
+               if (dm_pstable.pre_ccastate == CCA_2R) {
+                       if (dm_pstable.rssi_val_min >= 35)
+                               dm_pstable.cur_ccasate = CCA_1R;
+                       else
+                               dm_pstable.cur_ccasate = CCA_2R;
+               } else {
+                       if (dm_pstable.rssi_val_min <= 30)
+                               dm_pstable.cur_ccasate = CCA_2R;
+                       else
+                               dm_pstable.cur_ccasate = CCA_1R;
+               }
+       } else {
+               dm_pstable.cur_ccasate = CCA_MAX;
+       }
+
+       if (dm_pstable.pre_ccastate != dm_pstable.cur_ccasate) {
+               if (dm_pstable.cur_ccasate == CCA_1R) {
+                       if (get_rf_type(rtlphy) == RF_2T2R) {
+                               rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
+                                             MASKBYTE0, 0x13);
+                               rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x20);
+                       } else {
+                               rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
+                                             MASKBYTE0, 0x23);
+                               rtl_set_bbreg(hw, 0xe70, 0x7fc00000, 0x10c);
+                       }
+               } else {
+                       rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0,
+                                     0x33);
+                       rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x63);
+               }
+               dm_pstable.pre_ccastate = dm_pstable.cur_ccasate;
+       }
+
+       RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+                ("CCAStage = %s\n",
+                 (dm_pstable.cur_ccasate == CCA_1R) ? "1RCCA" : "2RCCA"));
+}
+
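+/*
+ * RF saving: capture the default BB settings once, then switch between the
+ * power-save and normal settings based on RSSI, or force normal operation
+ * when bforce_in_normal is set.
+ */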
+void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
+{
+       static u8 initialize;
+       static u32 reg_874, reg_c70, reg_85c, reg_a74;
+
+       if (initialize == 0) {
+               reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+                                        MASKDWORD) & 0x1CC000) >> 14;
+
+               reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
+                                        MASKDWORD) & BIT(3)) >> 3;
+
+               reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
+                                        MASKDWORD) & 0xFF000000) >> 24;
+
+               reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12;
+
+               initialize = 1;
+       }
+
+       if (!bforce_in_normal) {
+               if (dm_pstable.rssi_val_min != 0) {
+                       if (dm_pstable.pre_rfstate == RF_NORMAL) {
+                               if (dm_pstable.rssi_val_min >= 30)
+                                       dm_pstable.cur_rfstate = RF_SAVE;
+                               else
+                                       dm_pstable.cur_rfstate = RF_NORMAL;
+                       } else {
+                               if (dm_pstable.rssi_val_min <= 25)
+                                       dm_pstable.cur_rfstate = RF_NORMAL;
+                               else
+                                       dm_pstable.cur_rfstate = RF_SAVE;
+                       }
+               } else {
+                       dm_pstable.cur_rfstate = RF_MAX;
+               }
+       } else {
+               dm_pstable.cur_rfstate = RF_NORMAL;
+       }
+
+       if (dm_pstable.pre_rfstate != dm_pstable.cur_rfstate) {
+               if (dm_pstable.cur_rfstate == RF_SAVE) {
+                       rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+                                     0x1C0000, 0x2);
+                       rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
+                       rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
+                                     0xFF000000, 0x63);
+                       rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+                                     0xC000, 0x2);
+                       rtl_set_bbreg(hw, 0xa74, 0xF000, 0x3);
+                       rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
+                       rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
+               } else {
+                       rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+                                     0x1CC000, reg_874);
+                       rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
+                                     reg_c70);
+                       rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000,
+                                     reg_85c);
+                       rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74);
+                       rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
+               }
+
+               dm_pstable.pre_rfstate = dm_pstable.cur_rfstate;
+       }
+}
+EXPORT_SYMBOL(rtl92c_dm_rf_saving);
+
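+/* Refresh the power-save RSSI floor and run 1R CCA on 92C parts. */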
+static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       if ((mac->link_state == MAC80211_NOLINK) &&
+           (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+               dm_pstable.rssi_val_min = 0;
+               RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+                        ("Not connected to any AP\n"));
+       }
+
+       if (mac->link_state == MAC80211_LINKED) {
+               if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+                       dm_pstable.rssi_val_min =
+                           rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+                       RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+                                ("AP Client PWDB = 0x%lx\n",
+                                 dm_pstable.rssi_val_min));
+               } else {
+                       dm_pstable.rssi_val_min =
+                           rtlpriv->dm.undecorated_smoothed_pwdb;
+                       RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+                                ("STA Default Port PWDB = 0x%lx\n",
+                                 dm_pstable.rssi_val_min));
+               }
+       } else {
+               dm_pstable.rssi_val_min =
+                   rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+
+               RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+                        ("AP Ext Port PWDB = 0x%lx\n",
+                         dm_pstable.rssi_val_min));
+       }
+
+       if (IS_92C_SERIAL(rtlhal->version))
+               rtl92c_dm_1r_cca(hw);
+}
+
+void rtl92c_dm_init(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
+       rtl92c_dm_diginit(hw);
+       rtl92c_dm_init_dynamic_txpower(hw);
+       rtl92c_dm_init_edca_turbo(hw);
+       rtl92c_dm_init_rate_adaptive_mask(hw);
+       rtl92c_dm_initialize_txpower_tracking(hw);
+       rtl92c_dm_init_dynamic_bb_powersaving(hw);
+}
+EXPORT_SYMBOL(rtl92c_dm_init);
+
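+/*
+ * Periodic dynamic-mechanism watchdog; only runs while the RF is on, the
+ * firmware is awake and no RF state change is in progress.
+ */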
+void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       bool fw_current_inpsmode = false;
+       bool fw_ps_awake = true;
+
+       rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+                                     (u8 *) (&fw_current_inpsmode));
+       rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
+                                     (u8 *) (&fw_ps_awake));
+
+       if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) &&
+                                            fw_ps_awake)
+           && (!ppsc->rfchange_inprogress)) {
+               rtl92c_dm_pwdb_monitor(hw);
+               rtl92c_dm_dig(hw);
+               rtl92c_dm_false_alarm_counter_statistics(hw);
+               rtl92c_dm_dynamic_bb_powersaving(hw);
+               rtlpriv->cfg->ops->dm_dynamic_txpower(hw);
+               rtl92c_dm_check_txpower_tracking(hw);
+               rtl92c_dm_refresh_rate_adaptive_mask(hw);
+               rtl92c_dm_check_edca_turbo(hw);
+
+       }
+}
+EXPORT_SYMBOL(rtl92c_dm_watchdog);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
new file mode 100644 (file)
index 0000000..b9cbb0a
--- /dev/null
@@ -0,0 +1,204 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef        __RTL92COMMON_DM_H__
+#define __RTL92COMMON_DM_H__
+
+#include "../wifi.h"
+#include "../rtl8192ce/def.h"
+#include "../rtl8192ce/reg.h"
+#include "fw_common.h"
+
+#define HAL_DM_DIG_DISABLE                     BIT(0)
+#define HAL_DM_HIPWR_DISABLE                   BIT(1)
+
+#define OFDM_TABLE_LENGTH                      37
+#define CCK_TABLE_LENGTH                       33
+
+#define OFDM_TABLE_SIZE                                37
+#define CCK_TABLE_SIZE                         33
+
+#define BW_AUTO_SWITCH_HIGH_LOW                        25
+#define BW_AUTO_SWITCH_LOW_HIGH                        30
+
+#define DM_DIG_THRESH_HIGH                     40
+#define DM_DIG_THRESH_LOW                      35
+
+#define DM_FALSEALARM_THRESH_LOW               400
+#define DM_FALSEALARM_THRESH_HIGH              1000
+
+#define DM_DIG_MAX                             0x3e
+#define DM_DIG_MIN                             0x1e
+
+#define DM_DIG_FA_UPPER                                0x32
+#define DM_DIG_FA_LOWER                                0x20
+#define DM_DIG_FA_TH0                          0x20
+#define DM_DIG_FA_TH1                          0x100
+#define DM_DIG_FA_TH2                          0x200
+
+#define DM_DIG_BACKOFF_MAX                     12
+#define DM_DIG_BACKOFF_MIN                     -4
+#define DM_DIG_BACKOFF_DEFAULT                 10
+
+#define RXPATHSELECTION_SS_TH_lOW              30
+#define RXPATHSELECTION_DIFF_TH                        18
+
+#define DM_RATR_STA_INIT                       0
+#define DM_RATR_STA_HIGH                       1
+#define DM_RATR_STA_MIDDLE                     2
+#define DM_RATR_STA_LOW                                3
+
+#define CTS2SELF_THVAL                         30
+#define REGC38_TH                              20
+
+#define WAIOTTHVal                             25
+
+#define TXHIGHPWRLEVEL_NORMAL                  0
+#define TXHIGHPWRLEVEL_LEVEL1                  1
+#define TXHIGHPWRLEVEL_LEVEL2                  2
+#define TXHIGHPWRLEVEL_BT1                     3
+#define TXHIGHPWRLEVEL_BT2                     4
+
+#define DM_TYPE_BYFW                           0
+#define DM_TYPE_BYDRIVER                       1
+
+#define TX_POWER_NEAR_FIELD_THRESH_LVL2                74
+#define TX_POWER_NEAR_FIELD_THRESH_LVL1                67
+
+struct ps_t {
+       u8 pre_ccastate;
+       u8 cur_ccasate;
+       u8 pre_rfstate;
+       u8 cur_rfstate;
+       long rssi_val_min;
+};
+
+struct dig_t {
+       u8 dig_enable_flag;
+       u8 dig_ext_port_stage;
+       u32 rssi_lowthresh;
+       u32 rssi_highthresh;
+       u32 fa_lowthresh;
+       u32 fa_highthresh;
+       u8 cursta_connectctate;
+       u8 presta_connectstate;
+       u8 curmultista_connectstate;
+       u8 pre_igvalue;
+       u8 cur_igvalue;
+       char backoff_val;
+       char backoff_val_range_max;
+       char backoff_val_range_min;
+       u8 rx_gain_range_max;
+       u8 rx_gain_range_min;
+       u8 rssi_val_min;
+       u8 pre_cck_pd_state;
+       u8 cur_cck_pd_state;
+       u8 pre_cck_fa_state;
+       u8 cur_cck_fa_state;
+       u8 pre_ccastate;
+       u8 cur_ccasate;
+};
+
+struct swat_t {
+       u8 failure_cnt;
+       u8 try_flag;
+       u8 stop_trying;
+       long pre_rssi;
+       long trying_threshold;
+       u8 cur_antenna;
+       u8 pre_antenna;
+};
+
+enum tag_dynamic_init_gain_operation_type_definition {
+       DIG_TYPE_THRESH_HIGH = 0,
+       DIG_TYPE_THRESH_LOW = 1,
+       DIG_TYPE_BACKOFF = 2,
+       DIG_TYPE_RX_GAIN_MIN = 3,
+       DIG_TYPE_RX_GAIN_MAX = 4,
+       DIG_TYPE_ENABLE = 5,
+       DIG_TYPE_DISABLE = 6,
+       DIG_OP_TYPE_MAX
+};
+
+enum tag_cck_packet_detection_threshold_type_definition {
+       CCK_PD_STAGE_LowRssi = 0,
+       CCK_PD_STAGE_HighRssi = 1,
+       CCK_FA_STAGE_Low = 2,
+       CCK_FA_STAGE_High = 3,
+       CCK_PD_STAGE_MAX = 4,
+};
+
+enum dm_1r_cca_e {
+       CCA_1R = 0,
+       CCA_2R = 1,
+       CCA_MAX = 2,
+};
+
+enum dm_rf_e {
+       RF_SAVE = 0,
+       RF_NORMAL = 1,
+       RF_MAX = 2,
+};
+
+enum dm_sw_ant_switch_e {
+       ANS_ANTENNA_B = 1,
+       ANS_ANTENNA_A = 2,
+       ANS_ANTENNA_MAX = 3,
+};
+
+enum dm_dig_ext_port_alg_e {
+       DIG_EXT_PORT_STAGE_0 = 0,
+       DIG_EXT_PORT_STAGE_1 = 1,
+       DIG_EXT_PORT_STAGE_2 = 2,
+       DIG_EXT_PORT_STAGE_3 = 3,
+       DIG_EXT_PORT_STAGE_MAX = 4,
+};
+
+enum dm_dig_connect_e {
+       DIG_STA_DISCONNECT = 0,
+       DIG_STA_CONNECT = 1,
+       DIG_STA_BEFORE_CONNECT = 2,
+       DIG_MULTISTA_DISCONNECT = 3,
+       DIG_MULTISTA_CONNECT = 4,
+       DIG_CONNECT_MAX
+};
+
+extern struct dig_t dm_digtable;
+void rtl92c_dm_init(struct ieee80211_hw *hw);
+void rtl92c_dm_watchdog(struct ieee80211_hw *hw);
+void rtl92c_dm_write_dig(struct ieee80211_hw *hw);
+void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw);
+void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw);
+void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
+void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
+void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
+void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
new file mode 100644 (file)
index 0000000..5ef9137
--- /dev/null
@@ -0,0 +1,774 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include <linux/firmware.h>
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "../rtl8192ce/reg.h"
+#include "../rtl8192ce/def.h"
+#include "fw_common.h"
+
+static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CU) {
+               u32 value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+               if (enable)
+                       value32 |= MCUFWDL_EN;
+               else
+                       value32 &= ~MCUFWDL_EN;
+               rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
+       } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE) {
+               u8 tmp;
+               if (enable) {
+
+                       tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+                       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1,
+                                      tmp | 0x04);
+
+                       tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+                       rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01);
+
+                       tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
+                       rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
+               } else {
+
+                       tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+                       rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
+
+                       rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);
+               }
+       }
+}
+
+static void _rtl92c_fw_block_write(struct ieee80211_hw *hw,
+                                  const u8 *buffer, u32 size)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 blockSize = sizeof(u32);
+       u8 *bufferPtr = (u8 *) buffer;
+       u32 *pu4BytePtr = (u32 *) buffer;
+       u32 i, offset, blockCount, remainSize;
+
+       blockCount = size / blockSize;
+       remainSize = size % blockSize;
+
+       for (i = 0; i < blockCount; i++) {
+               offset = i * blockSize;
+               rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
+                               *(pu4BytePtr + i));
+       }
+
+       if (remainSize) {
+               offset = blockCount * blockSize;
+               bufferPtr += offset;
+               for (i = 0; i < remainSize; i++) {
+                       rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
+                                                offset + i), *(bufferPtr + i));
+               }
+       }
+}
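+
+/*
+ * Worked example (illustrative only): for size = 13 the loop above does
+ * 13 / 4 = 3 dword writes at offsets 0, 4 and 8, and the 13 % 4 = 1
+ * remaining byte is written at offset 12.
+ */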
+
+static void _rtl92c_fw_page_write(struct ieee80211_hw *hw,
+                                 u32 page, const u8 *buffer, u32 size)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 value8;
+       u8 u8page = (u8) (page & 0x07);
+
+       value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
+
+       rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
+       _rtl92c_fw_block_write(hw, buffer, size);
+}
+
+static void _rtl92c_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
+{
+       u32 fwlen = *pfwlen;
+       u8 remain = (u8) (fwlen % 4);
+
+       remain = (remain == 0) ? 0 : (4 - remain);
+
+       while (remain > 0) {
+               pfwbuf[fwlen] = 0;
+               fwlen++;
+               remain--;
+       }
+
+       *pfwlen = fwlen;
+}
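+
+/*
+ * Worked example (illustrative only): for a 774-byte image, 774 % 4 = 2,
+ * so 4 - 2 = 2 zero bytes are appended and *pfwlen becomes 776, making
+ * the length a multiple of 4 for the dword-based block write.
+ */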
+
+static void _rtl92c_write_fw(struct ieee80211_hw *hw,
+                            enum version_8192c version, u8 *buffer, u32 size)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u8 *bufferPtr = (u8 *) buffer;
+
+       RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, ("FW size is %d bytes,\n", size));
+
+       if (IS_CHIP_VER_B(version)) {
+               u32 pageNums, remainSize;
+               u32 page, offset;
+
+               if (IS_HARDWARE_TYPE_8192CE(rtlhal))
+                       _rtl92c_fill_dummy(bufferPtr, &size);
+
+               pageNums = size / FW_8192C_PAGE_SIZE;
+               remainSize = size % FW_8192C_PAGE_SIZE;
+
+               if (pageNums > 4) {
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                ("Page numbers should not greater then 4\n"));
+               }
+
+               for (page = 0; page < pageNums; page++) {
+                       offset = page * FW_8192C_PAGE_SIZE;
+                       _rtl92c_fw_page_write(hw, page, (bufferPtr + offset),
+                                             FW_8192C_PAGE_SIZE);
+               }
+
+               if (remainSize) {
+                       offset = pageNums * FW_8192C_PAGE_SIZE;
+                       page = pageNums;
+                       _rtl92c_fw_page_write(hw, page, (bufferPtr + offset),
+                                             remainSize);
+               }
+       } else {
+               _rtl92c_fw_block_write(hw, buffer, size);
+       }
+}
+
+static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       int err = -EIO;
+       u32 counter = 0;
+       u32 value32;
+
+       do {
+               value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+       } while ((counter++ < FW_8192C_POLLING_TIMEOUT_COUNT) &&
+                (!(value32 & FWDL_ChkSum_rpt)));
+
+       if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("chksum report faill ! REG_MCUFWDL:0x%08x .\n",
+                         value32));
+               goto exit;
+       }
+
+       RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+                ("Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32));
+
+       value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+       value32 |= MCUFWDL_RDY;
+       value32 &= ~WINTINI_RDY;
+       rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
+
+       counter = 0;
+
+       do {
+               value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+               if (value32 & WINTINI_RDY) {
+                       RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+                                ("Polling FW ready success!!"
+                                " REG_MCUFWDL:0x%08x .\n",
+                                value32));
+                       err = 0;
+                       goto exit;
+               }
+
+               mdelay(FW_8192C_POLLING_DELAY);
+
+       } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);
+
+       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                ("Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32));
+
+exit:
+       return err;
+}
+
+int rtl92c_download_fw(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl92c_firmware_header *pfwheader;
+       u8 *pfwdata;
+       u32 fwsize;
+       int err;
+       enum version_8192c version = rtlhal->version;
+       const struct firmware *firmware;
+
+       printk(KERN_INFO "rtl8192cu: Loading firmware file %s\n",
+              rtlpriv->cfg->fw_name);
+       err = request_firmware(&firmware, rtlpriv->cfg->fw_name,
+                              rtlpriv->io.dev);
+       if (err) {
+               printk(KERN_ERR "rtl8192cu: Firmware loading failed\n");
+               return 1;
+       }
+
+       if (firmware->size > 0x4000) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("Firmware is too big!\n"));
+               release_firmware(firmware);
+               return 1;
+       }
+
+       memcpy(rtlhal->pfirmware, firmware->data, firmware->size);
+       fwsize = firmware->size;
+       release_firmware(firmware);
+
+       pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
+       pfwdata = (u8 *) rtlhal->pfirmware;
+
+       if (IS_FW_HEADER_EXIST(pfwheader)) {
+               RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
+                        ("Firmware Version(%d), Signature(%#x),Size(%d)\n",
+                         pfwheader->version, pfwheader->signature,
+                         (uint)sizeof(struct rtl92c_firmware_header)));
+
+               pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
+               fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+       }
+
+       _rtl92c_enable_fw_download(hw, true);
+       _rtl92c_write_fw(hw, version, pfwdata, fwsize);
+       _rtl92c_enable_fw_download(hw, false);
+
+       err = _rtl92c_fw_free_to_go(hw);
+       if (err) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("Firmware is not ready to run!\n"));
+       } else {
+               RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+                        ("Firmware is ready to run!\n"));
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(rtl92c_download_fw);
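+
+/*
+ * Note: rtl92c_download_fw() copies the image into rtlhal->pfirmware;
+ * that buffer is assumed to be allocated by the chip-specific init code
+ * before this function runs (the allocation is not part of this file).
+ */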
+
+static bool _rtl92c_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 val_hmetfr, val_mcutst_1;
+       bool result = false;
+
+       val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR);
+       val_mcutst_1 = rtl_read_byte(rtlpriv, (REG_MCUTST_1 + boxnum));
+
+       if (((val_hmetfr >> boxnum) & BIT(0)) == 0 && val_mcutst_1 == 0)
+               result = true;
+       return result;
+}
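+
+/*
+ * Worked example (illustrative only): each H2C box has a "FW has read it"
+ * flag, so for boxnum = 2 the check above tests bit 2 of REG_HMETFR and
+ * requires the byte at REG_MCUTST_1 + 2 to read back as zero.
+ */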
+
+static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
+                             u8 element_id, u32 cmd_len, u8 *p_cmdbuffer)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u8 boxnum;
+       u16 box_reg, box_extreg;
+       u8 u1b_tmp;
+       bool isfw_read = false;
+       u8 buf_index = 0;       /* offset into p_cmdbuffer */
+       bool bwrite_sucess = false;
+       u8 wait_h2c_limmit = 100;
+       u8 wait_writeh2c_limmit = 100;
+       u8 boxcontent[4], boxextcontent[2];
+       u32 h2c_waitcounter = 0;
+       unsigned long flag;
+       u8 idx;
+
+       RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("come in\n"));
+
+       while (true) {
+               spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+               if (rtlhal->h2c_setinprogress) {
+                       RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+                                ("H2C set in progress! Wait to set.."
+                                 "element_id(%d).\n", element_id));
+
+                       while (rtlhal->h2c_setinprogress) {
+                               spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
+                                                      flag);
+                               h2c_waitcounter++;
+                               RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+                                        ("Wait 100 us (%d times)...\n",
+                                         h2c_waitcounter));
+                               udelay(100);
+
+                               if (h2c_waitcounter > 1000)
+                                       return;
+                               spin_lock_irqsave(&rtlpriv->locks.h2c_lock,
+                                                 flag);
+                       }
+                       spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+               } else {
+                       rtlhal->h2c_setinprogress = true;
+                       spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+                       break;
+               }
+       }
+
+       while (!bwrite_sucess) {
+               wait_writeh2c_limmit--;
+               if (wait_writeh2c_limmit == 0) {
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                ("Write H2C fail because no trigger "
+                                 "for FW INT!\n"));
+                       break;
+               }
+
+               boxnum = rtlhal->last_hmeboxnum;
+               switch (boxnum) {
+               case 0:
+                       box_reg = REG_HMEBOX_0;
+                       box_extreg = REG_HMEBOX_EXT_0;
+                       break;
+               case 1:
+                       box_reg = REG_HMEBOX_1;
+                       box_extreg = REG_HMEBOX_EXT_1;
+                       break;
+               case 2:
+                       box_reg = REG_HMEBOX_2;
+                       box_extreg = REG_HMEBOX_EXT_2;
+                       break;
+               case 3:
+                       box_reg = REG_HMEBOX_3;
+                       box_extreg = REG_HMEBOX_EXT_3;
+                       break;
+               default:
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                ("switch case not process\n"));
+                       break;
+               }
+
+               isfw_read = _rtl92c_check_fw_read_last_h2c(hw, boxnum);
+               while (!isfw_read) {
+
+                       wait_h2c_limmit--;
+                       if (wait_h2c_limmit == 0) {
+                               RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+                                        ("Wating too long for FW read "
+                                         "clear HMEBox(%d)!\n", boxnum));
+                               break;
+                       }
+
+                       udelay(10);
+
+                       isfw_read = _rtl92c_check_fw_read_last_h2c(hw, boxnum);
+                       u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF);
+                       RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+                                ("Wating for FW read clear HMEBox(%d)!!! "
+                                 "0x1BF = %2x\n", boxnum, u1b_tmp));
+               }
+
+               if (!isfw_read) {
+                       RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+                                ("Write H2C register BOX[%d] fail!!!!! "
+                                 "Fw do not read.\n", boxnum));
+                       break;
+               }
+
+               memset(boxcontent, 0, sizeof(boxcontent));
+               memset(boxextcontent, 0, sizeof(boxextcontent));
+               boxcontent[0] = element_id;
+               RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+                        ("Write element_id box_reg(%4x) = %2x\n",
+                         box_reg, element_id));
+
+               switch (cmd_len) {
+               case 1:
+                       boxcontent[0] &= ~(BIT(7));
+                       memcpy((u8 *) (boxcontent) + 1,
+                              p_cmdbuffer + buf_index, 1);
+
+                       for (idx = 0; idx < 4; idx++) {
+                               rtl_write_byte(rtlpriv, box_reg + idx,
+                                              boxcontent[idx]);
+                       }
+                       break;
+               case 2:
+                       boxcontent[0] &= ~(BIT(7));
+                       memcpy((u8 *) (boxcontent) + 1,
+                              p_cmdbuffer + buf_index, 2);
+
+                       for (idx = 0; idx < 4; idx++) {
+                               rtl_write_byte(rtlpriv, box_reg + idx,
+                                              boxcontent[idx]);
+                       }
+                       break;
+               case 3:
+                       boxcontent[0] &= ~(BIT(7));
+                       memcpy((u8 *) (boxcontent) + 1,
+                              p_cmdbuffer + buf_index, 3);
+
+                       for (idx = 0; idx < 4; idx++) {
+                               rtl_write_byte(rtlpriv, box_reg + idx,
+                                              boxcontent[idx]);
+                       }
+                       break;
+               case 4:
+                       boxcontent[0] |= (BIT(7));
+                       memcpy((u8 *) (boxextcontent),
+                              p_cmdbuffer + buf_index, 2);
+                       memcpy((u8 *) (boxcontent) + 1,
+                              p_cmdbuffer + buf_index + 2, 2);
+
+                       for (idx = 0; idx < 2; idx++) {
+                               rtl_write_byte(rtlpriv, box_extreg + idx,
+                                              boxextcontent[idx]);
+                       }
+
+                       for (idx = 0; idx < 4; idx++) {
+                               rtl_write_byte(rtlpriv, box_reg + idx,
+                                              boxcontent[idx]);
+                       }
+                       break;
+               case 5:
+                       boxcontent[0] |= (BIT(7));
+                       memcpy((u8 *) (boxextcontent),
+                              p_cmdbuffer + buf_index, 2);
+                       memcpy((u8 *) (boxcontent) + 1,
+                              p_cmdbuffer + buf_index + 2, 3);
+
+                       for (idx = 0; idx < 2; idx++) {
+                               rtl_write_byte(rtlpriv, box_extreg + idx,
+                                              boxextcontent[idx]);
+                       }
+
+                       for (idx = 0; idx < 4; idx++) {
+                               rtl_write_byte(rtlpriv, box_reg + idx,
+                                              boxcontent[idx]);
+                       }
+                       break;
+               default:
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                ("switch case not process\n"));
+                       break;
+               }
+
+               bwrite_sucess = true;
+
+               rtlhal->last_hmeboxnum = boxnum + 1;
+               if (rtlhal->last_hmeboxnum == 4)
+                       rtlhal->last_hmeboxnum = 0;
+
+               RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+                        ("pHalData->last_hmeboxnum  = %d\n",
+                         rtlhal->last_hmeboxnum));
+       }
+
+       spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+       rtlhal->h2c_setinprogress = false;
+       spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+
+       RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("go out\n"));
+}
+
+void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
+                        u8 element_id, u32 cmd_len, u8 *p_cmdbuffer)
+{
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u32 tmp_cmdbuf[2];
+
+       if (!rtlhal->fw_ready) {
+               RT_ASSERT(false, ("Dropping H2C cmd because FW "
+                                 "download failed!\n"));
+               return;
+       }
+
+       memset(tmp_cmdbuf, 0, sizeof(tmp_cmdbuf));
+       memcpy(tmp_cmdbuf, p_cmdbuffer, cmd_len);
+       _rtl92c_fill_h2c_command(hw, element_id, cmd_len, (u8 *)tmp_cmdbuf);
+}
+EXPORT_SYMBOL(rtl92c_fill_h2c_cmd);
+
+void rtl92c_firmware_selfreset(struct ieee80211_hw *hw)
+{
+       u8 u1b_tmp;
+       u8 delay = 100;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
+       u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+
+       while (u1b_tmp & BIT(2)) {
+               delay--;
+               if (delay == 0) {
+                       RT_ASSERT(false, ("8051 reset fail.\n"));
+                       break;
+               }
+               udelay(50);
+               u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+       }
+}
+EXPORT_SYMBOL(rtl92c_firmware_selfreset);
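+
+/*
+ * Timing sketch (illustrative only): the loop above polls
+ * REG_SYS_FUNC_EN + 1 every 50 us for at most 100 iterations, i.e. it
+ * waits up to roughly 5 ms for BIT(2) to clear before giving up.
+ */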
+
+void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 u1_h2c_set_pwrmode[3] = {0};
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+       RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("FW LPS mode = %d\n", mode));
+
+       SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
+       SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, 1);
+       SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
+                                             ppsc->reg_max_lps_awakeintvl);
+
+       RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+                     "rtl92c_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode\n",
+                     u1_h2c_set_pwrmode, 3);
+       rtl92c_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
+}
+EXPORT_SYMBOL(rtl92c_set_fw_pwrmode_cmd);
+
+#define BEACON_PG              0 /*->1*/
+#define PSPOLL_PG              2
+#define NULL_PG                        3
+#define PROBERSP_PG            4 /*->5*/
+
+#define TOTAL_RESERVED_PKT_LEN 768
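+
+/*
+ * Layout sketch (illustrative only): the buffer below holds six 128-byte
+ * TX pages (6 * 128 = 768 bytes): pages 0-1 beacon, page 2 ps-poll,
+ * page 3 null data, pages 4-5 probe response, matching the *_PG indices
+ * defined above.
+ */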
+
+static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
+       /* page 0 beacon */
+       0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
+       0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
+       0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x50, 0x08,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69,
+       0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C,
+       0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96,
+       0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A,
+       0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C,
+       0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18,
+       0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02,
+       0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+       /* page 1 beacon */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x10, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x10, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+       /* page 2  ps-poll */
+       0xA4, 0x10, 0x01, 0xC0, 0x00, 0x40, 0x10, 0x10,
+       0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x18, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+       0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+       /* page 3  null */
+       0x48, 0x01, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10,
+       0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
+       0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x72, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+       0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+       /* page 4  probe_resp */
+       0x50, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10,
+       0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
+       0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00,
+       0x9E, 0x46, 0x15, 0x32, 0x27, 0xF2, 0x2D, 0x00,
+       0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69,
+       0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C,
+       0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96,
+       0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A,
+       0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C,
+       0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18,
+       0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02,
+       0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+       /* page 5  probe_resp */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct sk_buff *skb = NULL;
+
+       u32 totalpacketlen;
+       bool rtstatus;
+       u8 u1RsvdPageLoc[3] = {0};
+       bool b_dlok = false;
+
+       u8 *beacon;
+       u8 *p_pspoll;
+       u8 *nullfunc;
+       u8 *p_probersp;
+       /*---------------------------------------------------------
+                               (1) beacon
+       ---------------------------------------------------------*/
+       beacon = &reserved_page_packet[BEACON_PG * 128];
+       SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr);
+       SET_80211_HDR_ADDRESS3(beacon, mac->bssid);
+
+       /*-------------------------------------------------------
+                               (2) ps-poll
+       --------------------------------------------------------*/
+       p_pspoll = &reserved_page_packet[PSPOLL_PG * 128];
+       SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000));
+       SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
+       SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
+
+       SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG);
+
+       /*--------------------------------------------------------
+                               (3) null data
+       ---------------------------------------------------------*/
+       nullfunc = &reserved_page_packet[NULL_PG * 128];
+       SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
+       SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
+       SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
+
+       SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG);
+
+       /*---------------------------------------------------------
+                               (4) probe response
+       ----------------------------------------------------------*/
+       p_probersp = &reserved_page_packet[PROBERSP_PG * 128];
+       SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid);
+       SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr);
+       SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid);
+
+       SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG);
+
+       totalpacketlen = TOTAL_RESERVED_PKT_LEN;
+
+       RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
+                     "rtl92c_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
+                     &reserved_page_packet[0], totalpacketlen);
+       RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+                     "rtl92c_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
+                     u1RsvdPageLoc, 3);
+
+       skb = dev_alloc_skb(totalpacketlen);
+       if (!skb)
+               return;
+       memcpy((u8 *) skb_put(skb, totalpacketlen),
+              &reserved_page_packet, totalpacketlen);
+
+       rtstatus = rtlpriv->cfg->ops->cmd_send_packet(hw, skb);
+
+       if (rtstatus)
+               b_dlok = true;
+
+       if (b_dlok) {
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+                        ("Set RSVD page location to Fw.\n"));
+               RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+                               "H2C_RSVDPAGE:\n",
+                               u1RsvdPageLoc, 3);
+               rtl92c_fill_h2c_cmd(hw, H2C_RSVDPAGE,
+                                   sizeof(u1RsvdPageLoc), u1RsvdPageLoc);
+       } else {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                        ("Set RSVD page location to FW failed!\n"));
+       }
+}
+EXPORT_SYMBOL(rtl92c_set_fw_rsvdpagepkt);
+
+void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
+{
+       u8 u1_joinbssrpt_parm[1] = {0};
+
+       SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus);
+
+       rtl92c_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm);
+}
+EXPORT_SYMBOL(rtl92c_set_fw_joinbss_report_cmd);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
new file mode 100644 (file)
index 0000000..3db33bd
--- /dev/null
@@ -0,0 +1,98 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92C__FW__H__
+#define __RTL92C__FW__H__
+
+#define FW_8192C_SIZE                          0x3000
+#define FW_8192C_START_ADDRESS                 0x1000
+#define FW_8192C_END_ADDRESS                   0x3FFF
+#define FW_8192C_PAGE_SIZE                     4096
+#define FW_8192C_POLLING_DELAY                 5
+#define FW_8192C_POLLING_TIMEOUT_COUNT         100
+
+#define IS_FW_HEADER_EXIST(_pfwhdr)    \
+       ((_pfwhdr->signature&0xFFF0) == 0x92C0 ||\
+       (_pfwhdr->signature&0xFFF0) == 0x88C0)
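+
+/*
+ * Example (illustrative only): IS_FW_HEADER_EXIST() masks the low nibble
+ * of the signature, so 0x92C1 -> 0x92C0 and 0x88C3 -> 0x88C0 both count
+ * as "header present" and the header is skipped before download; any
+ * other signature means the image starts directly with code.
+ */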
+
+struct rtl92c_firmware_header {
+       u16 signature;
+       u8 category;
+       u8 function;
+       u16 version;
+       u8 subversion;
+       u8 rsvd1;
+       u8 month;
+       u8 date;
+       u8 hour;
+       u8 minute;
+       u16 ramcodeSize;
+       u16 rsvd2;
+       u32 svnindex;
+       u32 rsvd3;
+       u32 rsvd4;
+       u32 rsvd5;
+};
+
+enum rtl8192c_h2c_cmd {
+       H2C_AP_OFFLOAD = 0,
+       H2C_SETPWRMODE = 1,
+       H2C_JOINBSSRPT = 2,
+       H2C_RSVDPAGE = 3,
+       H2C_RSSI_REPORT = 5,
+       H2C_RA_MASK = 6,
+       MAX_H2CCMD
+};
+
+#define pagenum_128(_len)      (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
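+
+/*
+ * Worked example (illustrative only): pagenum_128() rounds a byte count
+ * up to 128-byte pages, e.g. pagenum_128(768) = 6 and pagenum_128(770) = 7.
+ */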
+
+#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val)                 \
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val)             \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val)        \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val)            \
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val)            \
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val)               \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val)            \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
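+
+/*
+ * Byte layout implied by the macros above (illustrative only):
+ *   set-power-mode cmd: [0] mode, [1] smart PS, [2] beacon pass time
+ *   rsvd-page cmd:      [0] probe-resp loc, [1] ps-poll loc, [2] null-data loc
+ */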
+
+int rtl92c_download_fw(struct ieee80211_hw *hw);
+void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
+                        u32 cmd_len, u8 *p_cmdbuffer);
+void rtl92c_firmware_selfreset(struct ieee80211_hw *hw);
+void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
+void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
+void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/main.c b/drivers/net/wireless/rtlwifi/rtl8192c/main.c
new file mode 100644 (file)
index 0000000..2f624fc
--- /dev/null
@@ -0,0 +1,39 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+
+MODULE_AUTHOR("lizhaoming      <chaoming_li@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Georgia         <georgia@realtek.com>");
+MODULE_AUTHOR("Ziv Huang       <ziv_huang@realtek.com>");
+MODULE_AUTHOR("Larry Finger    <Larry.Finger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n PCI wireless");
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
new file mode 100644 (file)
index 0000000..a702282
--- /dev/null
@@ -0,0 +1,2042 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../rtl8192ce/reg.h"
+#include "../rtl8192ce/def.h"
+#include "dm_common.h"
+#include "phy_common.h"
+
+/* Define macro to shorten lines */
+#define MCS_TXPWR      mcs_txpwrlevel_origoffset
+
+u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 returnvalue, originalvalue, bitshift;
+
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+                                              "bitmask(%#x)\n", regaddr,
+                                              bitmask));
+       originalvalue = rtl_read_dword(rtlpriv, regaddr);
+       bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+       returnvalue = (originalvalue & bitmask) >> bitshift;
+
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("BBR MASK=0x%x "
+                                              "Addr[0x%x]=0x%x\n", bitmask,
+                                              regaddr, originalvalue));
+
+       return returnvalue;
+}
+EXPORT_SYMBOL(rtl92c_phy_query_bb_reg);
+
+void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
+                          u32 regaddr, u32 bitmask, u32 data)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 originalvalue, bitshift;
+
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
+                                              " data(%#x)\n", regaddr, bitmask,
+                                              data));
+
+       if (bitmask != MASKDWORD) {
+               originalvalue = rtl_read_dword(rtlpriv, regaddr);
+               bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+               data = ((originalvalue & (~bitmask)) | (data << bitshift));
+       }
+
+       rtl_write_dword(rtlpriv, regaddr, data);
+
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
+                                              " data(%#x)\n", regaddr, bitmask,
+                                              data));
+}
+EXPORT_SYMBOL(rtl92c_phy_set_bb_reg);
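+
+/*
+ * Worked example (illustrative only): writing data 0x5 with bitmask
+ * 0x0000f000 reads the register, derives a shift of 12 from the lowest
+ * set bit of the mask, and writes (orig & ~0x0000f000) | (0x5 << 12);
+ * with bitmask == MASKDWORD the value is written back unmodified.
+ */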
+
+u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
+                                        enum radio_path rfpath, u32 offset)
+{
+       RT_ASSERT(false, ("deprecated!\n"));
+       return 0;
+}
+EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_read);
+
+void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
+                                          enum radio_path rfpath, u32 offset,
+                                          u32 data)
+{
+       RT_ASSERT(false, ("deprecated!\n"));
+}
+EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_write);
+
+u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
+                                     enum radio_path rfpath, u32 offset)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+       u32 newoffset;
+       u32 tmplong, tmplong2;
+       u8 rfpi_enable = 0;
+       u32 retvalue;
+
+       offset &= 0x3f;
+       newoffset = offset;
+       if (RT_CANNOT_IO(hw)) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("return all one\n"));
+               return 0xFFFFFFFF;
+       }
+       tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
+       if (rfpath == RF90_PATH_A)
+               tmplong2 = tmplong;
+       else
+               tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
+       tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
+           (newoffset << 23) | BLSSIREADEDGE;
+       rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+                     tmplong & (~BLSSIREADEDGE));
+       mdelay(1);
+       rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
+       mdelay(1);
+       rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+                     tmplong | BLSSIREADEDGE);
+       mdelay(1);
+       if (rfpath == RF90_PATH_A)
+               rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
+                                                BIT(8));
+       else if (rfpath == RF90_PATH_B)
+               rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
+                                                BIT(8));
+       if (rfpi_enable)
+               retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
+                                        BLSSIREADBACKDATA);
+       else
+               retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
+                                        BLSSIREADBACKDATA);
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x]=0x%x\n",
+                                              rfpath, pphyreg->rflssi_readback,
+                                              retvalue));
+       return retvalue;
+}
+EXPORT_SYMBOL(_rtl92c_phy_rf_serial_read);
+
+void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
+                                       enum radio_path rfpath, u32 offset,
+                                       u32 data)
+{
+       u32 data_and_addr;
+       u32 newoffset;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+
+       if (RT_CANNOT_IO(hw)) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("stop\n"));
+               return;
+       }
+       offset &= 0x3f;
+       newoffset = offset;
+       data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
+       rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n",
+                                              rfpath, pphyreg->rf3wire_offset,
+                                              data_and_addr));
+}
+EXPORT_SYMBOL(_rtl92c_phy_rf_serial_write);
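+
+/*
+ * Worked example (illustrative only): for offset 0x18 and data 0xabcde
+ * the 3-wire word built above is ((0x18 << 20) | 0xabcde) & 0x0fffffff
+ * = 0x018abcde, i.e. the register offset is placed at bit 20 and above
+ * while the 20-bit data occupies bits 19:0.
+ */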
+
+u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
+{
+       u32 i;
+
+       for (i = 0; i <= 31; i++) {
+               if (((bitmask >> i) & 0x1) == 1)
+                       break;
+       }
+       return i;
+}
+EXPORT_SYMBOL(_rtl92c_phy_calculate_bit_shift);
+
+static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw)
+{
+       rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
+       rtl_set_bbreg(hw, RFPGA1_TXINFO, 0x300033, 0x200022);
+       rtl_set_bbreg(hw, RCCK0_AFESETTING, MASKBYTE3, 0x45);
+       rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x23);
+       rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, 0x30, 0x1);
+       rtl_set_bbreg(hw, 0xe74, 0x0c000000, 0x2);
+       rtl_set_bbreg(hw, 0xe78, 0x0c000000, 0x2);
+       rtl_set_bbreg(hw, 0xe7c, 0x0c000000, 0x2);
+       rtl_set_bbreg(hw, 0xe80, 0x0c000000, 0x2);
+       rtl_set_bbreg(hw, 0xe88, 0x0c000000, 0x2);
+}
+
+bool rtl92c_phy_rf_config(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       return rtlpriv->cfg->ops->phy_rf6052_config(hw);
+}
+EXPORT_SYMBOL(rtl92c_phy_rf_config);
+
+bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       bool rtstatus;
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("==>\n"));
+       rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw,
+                                                BASEBAND_CONFIG_PHY_REG);
+       if (!rtstatus) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!"));
+               return false;
+       }
+       if (rtlphy->rf_type == RF_1T2R) {
+               _rtl92c_phy_bb_config_1t(hw);
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Config to 1T!!\n"));
+       }
+       if (!rtlefuse->autoload_failflag) {
+               rtlphy->pwrgroup_cnt = 0;
+               rtstatus = rtlpriv->cfg->ops->config_bb_with_pgheaderfile(hw,
+                                                  BASEBAND_CONFIG_PHY_REG);
+       }
+       if (!rtstatus) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!"));
+               return false;
+       }
+       rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw,
+                                                BASEBAND_CONFIG_AGC_TAB);
+       if (!rtstatus) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("AGC Table Fail\n"));
+               return false;
+       }
+       rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw,
+                                               RFPGA0_XA_HSSIPARAMETER2,
+                                               0x200));
+       return true;
+}
+EXPORT_SYMBOL(_rtl92c_phy_bb8192c_config_parafile);
+
+void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
+                                                  u32 regaddr, u32 bitmask,
+                                                  u32 data)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       if (regaddr == RTXAGC_A_RATE18_06) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][0] = data;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][0]));
+       }
+       if (regaddr == RTXAGC_A_RATE54_24) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][1] = data;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][1]));
+       }
+       if (regaddr == RTXAGC_A_CCK1_MCS32) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][6] = data;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][6]));
+       }
+       if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][7] = data;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][7]));
+       }
+       if (regaddr == RTXAGC_A_MCS03_MCS00) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][2] = data;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][2]));
+       }
+       if (regaddr == RTXAGC_A_MCS07_MCS04) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][3] = data;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][3]));
+       }
+       if (regaddr == RTXAGC_A_MCS11_MCS08) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][4] = data;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][4]));
+       }
+       if (regaddr == RTXAGC_A_MCS15_MCS12) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][5] = data;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][5]));
+       }
+       if (regaddr == RTXAGC_B_RATE18_06) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][8] = data;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][8]));
+       }
+       if (regaddr == RTXAGC_B_RATE54_24) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][9] = data;
+
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][9]));
+       }
+
+       if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][14] = data;
+
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][14]));
+       }
+
+       if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][15] = data;
+
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][15]));
+       }
+
+       if (regaddr == RTXAGC_B_MCS03_MCS00) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][10] = data;
+
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][10]));
+       }
+
+       if (regaddr == RTXAGC_B_MCS07_MCS04) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][11] = data;
+
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][11]));
+       }
+
+       if (regaddr == RTXAGC_B_MCS11_MCS08) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][12] = data;
+
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][12]));
+       }
+
+       if (regaddr == RTXAGC_B_MCS15_MCS12) {
+               rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][13] = data;
+
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
+                         rtlphy->pwrgroup_cnt,
+                         rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][13]));
+
+               rtlphy->pwrgroup_cnt++;
+       }
+}
+EXPORT_SYMBOL(_rtl92c_store_pwrIndex_diffrate_offset);
+
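+/*
+ * Read back the power-on values of the per-path AGC initial gain
+ * (0xc50/0xc58/0xc60/0xc68) and the frame-sync registers and cache
+ * them in rtl_phy for later reference.
+ */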
+void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       rtlphy->default_initialgain[0] =
+           (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
+       rtlphy->default_initialgain[1] =
+           (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
+       rtlphy->default_initialgain[2] =
+           (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
+       rtlphy->default_initialgain[3] =
+           (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                ("Default initial gain (c50=0x%x, "
+                 "c58=0x%x, c60=0x%x, c68=0x%x)\n",
+                 rtlphy->default_initialgain[0],
+                 rtlphy->default_initialgain[1],
+                 rtlphy->default_initialgain[2],
+                 rtlphy->default_initialgain[3]));
+
+       rtlphy->framesync = (u8) rtl_get_bbreg(hw,
+                                              ROFDM0_RXDETECTOR3, MASKBYTE0);
+       rtlphy->framesync_c34 = rtl_get_bbreg(hw,
+                                             ROFDM0_RXDETECTOR2, MASKDWORD);
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                ("Default framesync (0x%x) = 0x%x\n",
+                 ROFDM0_RXDETECTOR3, rtlphy->framesync));
+}
+
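+/*
+ * Fill the per-RF-path register map (interface switches, 3-wire/LSSI
+ * access, gain stage, AGC, IQ-imbalance, AFE and read-back addresses)
+ * so that common RF code can address paths A-D through phyreg_def[].
+ */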
+void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+       rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+       rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+       rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+       rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+       rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+       rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
+       rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
+       rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
+           RFPGA0_XA_LSSIPARAMETER;
+       rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
+           RFPGA0_XB_LSSIPARAMETER;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
+       rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
+       rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
+       rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+       rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+       rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+       rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
+       rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
+       rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control =
+           RFPGA0_XAB_SWITCHCONTROL;
+       rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control =
+           RFPGA0_XAB_SWITCHCONTROL;
+       rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
+           RFPGA0_XCD_SWITCHCONTROL;
+       rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
+           RFPGA0_XCD_SWITCHCONTROL;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
+       rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
+       rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
+       rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
+       rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
+       rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
+       rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance =
+           ROFDM0_XARXIQIMBALANCE;
+       rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance =
+           ROFDM0_XBRXIQIMBALANCE;
+       rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
+           ROFDM0_XCRXIQIMBANLANCE;
+       rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
+           ROFDM0_XDRXIQIMBALANCE;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
+       rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
+       rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
+       rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance =
+           ROFDM0_XATXIQIMBALANCE;
+       rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance =
+           ROFDM0_XBTXIQIMBALANCE;
+       rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
+           ROFDM0_XCTXIQIMBALANCE;
+       rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
+           ROFDM0_XDTXIQIMBALANCE;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
+       rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
+       rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
+       rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback =
+           RFPGA0_XA_LSSIREADBACK;
+       rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback =
+           RFPGA0_XB_LSSIREADBACK;
+       rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
+           RFPGA0_XC_LSSIREADBACK;
+       rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
+           RFPGA0_XD_LSSIREADBACK;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi =
+           TRANSCEIVEA_HSPI_READBACK;
+       rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
+           TRANSCEIVEB_HSPI_READBACK;
+}
+EXPORT_SYMBOL(_rtl92c_phy_init_bb_rf_register_definition);
+
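+/*
+ * Report the current transmit power in dBm: convert the programmed CCK,
+ * legacy-OFDM and HT indices and return the largest of the three.
+ */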
+void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u8 txpwr_level;
+       long txpwr_dbm;
+
+       txpwr_level = rtlphy->cur_cck_txpwridx;
+       txpwr_dbm = _rtl92c_phy_txpwr_idx_to_dbm(hw,
+                                                WIRELESS_MODE_B, txpwr_level);
+       txpwr_level = rtlphy->cur_ofdm24g_txpwridx +
+           rtlefuse->legacy_ht_txpowerdiff;
+       if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
+                                        WIRELESS_MODE_G,
+                                        txpwr_level) > txpwr_dbm)
+               txpwr_dbm =
+                   _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
+                                                txpwr_level);
+       txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
+       if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
+                                        WIRELESS_MODE_N_24G,
+                                        txpwr_level) > txpwr_dbm)
+               txpwr_dbm =
+                   _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
+                                                txpwr_level);
+       *powerlevel = txpwr_dbm;
+}
+
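+/* Look up the per-channel CCK and HT40 (1S/2S) power indices from efuse. */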
+static void _rtl92c_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
+                                     u8 *cckpowerlevel, u8 *ofdmpowerlevel)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u8 index = (channel - 1);
+
+       cckpowerlevel[RF90_PATH_A] =
+           rtlefuse->txpwrlevel_cck[RF90_PATH_A][index];
+       cckpowerlevel[RF90_PATH_B] =
+           rtlefuse->txpwrlevel_cck[RF90_PATH_B][index];
+       if (get_rf_type(rtlphy) == RF_1T2R || get_rf_type(rtlphy) == RF_1T1R) {
+               ofdmpowerlevel[RF90_PATH_A] =
+                   rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index];
+               ofdmpowerlevel[RF90_PATH_B] =
+                   rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index];
+       } else if (get_rf_type(rtlphy) == RF_2T2R) {
+               ofdmpowerlevel[RF90_PATH_A] =
+                   rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index];
+               ofdmpowerlevel[RF90_PATH_B] =
+                   rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index];
+       }
+}
+
+static void _rtl92c_ccxpower_index_check(struct ieee80211_hw *hw,
+                                        u8 channel, u8 *cckpowerlevel,
+                                        u8 *ofdmpowerlevel)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       rtlphy->cur_cck_txpwridx = cckpowerlevel[0];
+       rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0];
+}
+
+void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
+       u8 cckpowerlevel[2], ofdmpowerlevel[2];
+
+       if (!rtlefuse->txpwr_fromeprom)
+               return;
+       _rtl92c_get_txpower_index(hw, channel,
+                                 &cckpowerlevel[0], &ofdmpowerlevel[0]);
+       _rtl92c_ccxpower_index_check(hw,
+                                    channel, &cckpowerlevel[0],
+                                    &ofdmpowerlevel[0]);
+       rtlpriv->cfg->ops->phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]);
+       rtlpriv->cfg->ops->phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0],
+                                                      channel);
+}
+EXPORT_SYMBOL(rtl92c_phy_set_txpower_level);
+
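+/*
+ * Convert the requested power in dBm to CCK and OFDM indices, write the
+ * same index to every channel in the efuse tables and reprogram the
+ * current channel.
+ */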
+bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u8 idx;
+       u8 rf_path;
+
+       u8 ccktxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
+                                                     WIRELESS_MODE_B,
+                                                     power_indbm);
+       u8 ofdmtxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
+                                                      WIRELESS_MODE_N_24G,
+                                                      power_indbm);
+       if (ofdmtxpwridx - rtlefuse->legacy_ht_txpowerdiff > 0)
+               ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff;
+       else
+               ofdmtxpwridx = 0;
+       RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE,
+                ("%ld dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
+                 power_indbm, ccktxpwridx, ofdmtxpwridx));
+       for (idx = 0; idx < 14; idx++) {
+               for (rf_path = 0; rf_path < 2; rf_path++) {
+                       rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx;
+                       rtlefuse->txpwrlevel_ht40_1s[rf_path][idx] =
+                           ofdmtxpwridx;
+                       rtlefuse->txpwrlevel_ht40_2s[rf_path][idx] =
+                           ofdmtxpwridx;
+               }
+       }
+       rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
+       return true;
+}
+EXPORT_SYMBOL(rtl92c_phy_update_txpower_dbm);
+
+void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval)
+{
+}
+EXPORT_SYMBOL(rtl92c_phy_set_beacon_hw_reg);
+
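+/*
+ * dBm -> power index: index = (dBm - offset) * 2, where offset is -7 dBm
+ * for CCK and -8 dBm otherwise, clamped to MAX_TXPWR_IDX_NMODE_92S.
+ */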
+u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
+                               enum wireless_mode wirelessmode,
+                               long power_indbm)
+{
+       u8 txpwridx;
+       long offset;
+
+       switch (wirelessmode) {
+       case WIRELESS_MODE_B:
+               offset = -7;
+               break;
+       case WIRELESS_MODE_G:
+       case WIRELESS_MODE_N_24G:
+               offset = -8;
+               break;
+       default:
+               offset = -8;
+               break;
+       }
+
+       if ((power_indbm - offset) > 0)
+               txpwridx = (u8) ((power_indbm - offset) * 2);
+       else
+               txpwridx = 0;
+
+       if (txpwridx > MAX_TXPWR_IDX_NMODE_92S)
+               txpwridx = MAX_TXPWR_IDX_NMODE_92S;
+
+       return txpwridx;
+}
+EXPORT_SYMBOL(_rtl92c_phy_dbm_to_txpwr_Idx);
+
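+/* Inverse of the above: dBm = index / 2 + offset (-7 for CCK, -8 otherwise). */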
+long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+                                 enum wireless_mode wirelessmode,
+                                 u8 txpwridx)
+{
+       long offset;
+       long pwrout_dbm;
+
+       switch (wirelessmode) {
+       case WIRELESS_MODE_B:
+               offset = -7;
+               break;
+       case WIRELESS_MODE_G:
+       case WIRELESS_MODE_N_24G:
+               offset = -8;
+               break;
+       default:
+               offset = -8;
+               break;
+       }
+       pwrout_dbm = txpwridx / 2 + offset;
+       return pwrout_dbm;
+}
+EXPORT_SYMBOL(_rtl92c_phy_txpwr_idx_to_dbm);
+
+void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       enum io_type iotype;
+
+       if (!is_hal_stop(rtlhal)) {
+               switch (operation) {
+               case SCAN_OPT_BACKUP:
+                       iotype = IO_CMD_PAUSE_DM_BY_SCAN;
+                       rtlpriv->cfg->ops->set_hw_reg(hw,
+                                                     HW_VAR_IO_CMD,
+                                                     (u8 *)&iotype);
+
+                       break;
+               case SCAN_OPT_RESTORE:
+                       iotype = IO_CMD_RESUME_DM_BY_SCAN;
+                       rtlpriv->cfg->ops->set_hw_reg(hw,
+                                                     HW_VAR_IO_CMD,
+                                                     (u8 *)&iotype);
+                       break;
+               default:
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                ("Unknown Scan Backup operation.\n"));
+                       break;
+               }
+       }
+}
+EXPORT_SYMBOL(rtl92c_phy_scan_operation_backup);
+
+void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
+                           enum nl80211_channel_type ch_type)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u8 tmp_bw = rtlphy->current_chan_bw;
+
+       if (rtlphy->set_bwmode_inprogress)
+               return;
+       rtlphy->set_bwmode_inprogress = true;
+       if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw)))
+               rtlpriv->cfg->ops->phy_set_bw_mode_callback(hw);
+       else {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                        ("FALSE driver sleep or unload\n"));
+               rtlphy->set_bwmode_inprogress = false;
+               rtlphy->current_chan_bw = tmp_bw;
+       }
+}
+EXPORT_SYMBOL(rtl92c_phy_set_bw_mode);
+
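+/*
+ * Execute pending channel-switch command steps built by
+ * _rtl92c_phy_sw_chnl_step_by_step(), sleeping for any delay a step
+ * requests, and clear sw_chnl_inprogress once the final stage completes.
+ */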
+void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u32 delay;
+
+       RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
+                ("switch to channel%d\n", rtlphy->current_channel));
+       if (is_hal_stop(rtlhal))
+               return;
+       do {
+               if (!rtlphy->sw_chnl_inprogress)
+                       break;
+               if (!_rtl92c_phy_sw_chnl_step_by_step
+                   (hw, rtlphy->current_channel, &rtlphy->sw_chnl_stage,
+                    &rtlphy->sw_chnl_step, &delay)) {
+                       if (delay > 0)
+                               mdelay(delay);
+                       else
+                               continue;
+               } else
+                       rtlphy->sw_chnl_inprogress = false;
+               break;
+       } while (true);
+       RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
+}
+EXPORT_SYMBOL(rtl92c_phy_sw_chnl_callback);
+
+u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       if (rtlphy->sw_chnl_inprogress)
+               return 0;
+       if (rtlphy->set_bwmode_inprogress)
+               return 0;
+       RT_ASSERT((rtlphy->current_channel <= 14),
+                 ("WIRELESS_MODE_G but channel>14"));
+       rtlphy->sw_chnl_inprogress = true;
+       rtlphy->sw_chnl_stage = 0;
+       rtlphy->sw_chnl_step = 0;
+       if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
+               rtl92c_phy_sw_chnl_callback(hw);
+               RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
+                        ("sw_chnl_inprogress false, schedule workitem\n"));
+               rtlphy->sw_chnl_inprogress = false;
+       } else {
+               RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
+                        ("sw_chnl_inprogress false, driver sleep or"
+                         " unload\n"));
+               rtlphy->sw_chnl_inprogress = false;
+       }
+       return 1;
+}
+EXPORT_SYMBOL(rtl92c_phy_sw_chnl);
+
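+/*
+ * State machine for a channel switch: stage 0 sets the TX power, stage 1
+ * writes the new channel into RF_CHNLBW on every RF path, stage 2 is empty.
+ * Executes one command per call, reports the requested delay via *delay,
+ * advances *stage/*step and returns true once the last stage has finished.
+ */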
+static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
+                                            u8 channel, u8 *stage, u8 *step,
+                                            u32 *delay)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
+       u32 precommoncmdcnt;
+       struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
+       u32 postcommoncmdcnt;
+       struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
+       u32 rfdependcmdcnt;
+       struct swchnlcmd *currentcmd = NULL;
+       u8 rfpath;
+       u8 num_total_rfpath = rtlphy->num_total_rfpath;
+
+       precommoncmdcnt = 0;
+       _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+                                        MAX_PRECMD_CNT,
+                                        CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0);
+       _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+                                        MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
+
+       postcommoncmdcnt = 0;
+
+       _rtl92c_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
+                                        MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
+
+       rfdependcmdcnt = 0;
+
+       RT_ASSERT((channel >= 1 && channel <= 14),
+                 ("illegal channel for Zebra: %d\n", channel));
+
+       _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+                                        MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
+                                        RF_CHNLBW, channel, 10);
+
+       _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+                                        MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0,
+                                        0);
+
+       do {
+               switch (*stage) {
+               case 0:
+                       currentcmd = &precommoncmd[*step];
+                       break;
+               case 1:
+                       currentcmd = &rfdependcmd[*step];
+                       break;
+               case 2:
+                       currentcmd = &postcommoncmd[*step];
+                       break;
+               }
+
+               if (currentcmd->cmdid == CMDID_END) {
+                       if ((*stage) == 2) {
+                               return true;
+                       } else {
+                               (*stage)++;
+                               (*step) = 0;
+                               continue;
+                       }
+               }
+
+               switch (currentcmd->cmdid) {
+               case CMDID_SET_TXPOWEROWER_LEVEL:
+                       rtl92c_phy_set_txpower_level(hw, channel);
+                       break;
+               case CMDID_WRITEPORT_ULONG:
+                       rtl_write_dword(rtlpriv, currentcmd->para1,
+                                       currentcmd->para2);
+                       break;
+               case CMDID_WRITEPORT_USHORT:
+                       rtl_write_word(rtlpriv, currentcmd->para1,
+                                      (u16) currentcmd->para2);
+                       break;
+               case CMDID_WRITEPORT_UCHAR:
+                       rtl_write_byte(rtlpriv, currentcmd->para1,
+                                      (u8) currentcmd->para2);
+                       break;
+               case CMDID_RF_WRITEREG:
+                       for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
+                               rtlphy->rfreg_chnlval[rfpath] =
+                                   ((rtlphy->rfreg_chnlval[rfpath] &
+                                     0xfffffc00) | currentcmd->para2);
+
+                               rtl_set_rfreg(hw, (enum radio_path)rfpath,
+                                             currentcmd->para1,
+                                             RFREG_OFFSET_MASK,
+                                             rtlphy->rfreg_chnlval[rfpath]);
+                       }
+                       break;
+               default:
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                ("switch case not processed\n"));
+                       break;
+               }
+
+               break;
+       } while (true);
+
+       (*delay) = currentcmd->msdelay;
+       (*step)++;
+       return false;
+}
+
+static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
+                                            u32 cmdtableidx, u32 cmdtablesz,
+                                            enum swchnlcmd_id cmdid,
+                                            u32 para1, u32 para2, u32 msdelay)
+{
+       struct swchnlcmd *pcmd;
+
+       if (cmdtable == NULL) {
+               RT_ASSERT(false, ("cmdtable cannot be NULL.\n"));
+               return false;
+       }
+
+       if (cmdtableidx >= cmdtablesz)
+               return false;
+
+       pcmd = cmdtable + cmdtableidx;
+       pcmd->cmdid = cmdid;
+       pcmd->para1 = para1;
+       pcmd->para2 = para2;
+       pcmd->msdelay = msdelay;
+       return true;
+}
+
+bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath)
+{
+       return true;
+}
+EXPORT_SYMBOL(rtl8192_phy_check_is_legal_rfpath);
+
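+/*
+ * Run one TX/RX IQ calibration on path A (loading the path B tone settings
+ * too when config_pathb is set), trigger the one-shot IQK and check the
+ * result registers: bit 0 of the return value means TX OK, bit 1 means RX OK.
+ */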
+static u8 _rtl92c_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
+{
+       u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
+       u8 result = 0x00;
+
+       rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1f);
+       rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c1f);
+       rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140102);
+       rtl_set_bbreg(hw, 0xe3c, MASKDWORD,
+                     config_pathb ? 0x28160202 : 0x28160502);
+
+       if (config_pathb) {
+               rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x10008c22);
+               rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x10008c22);
+               rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140102);
+               rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x28160202);
+       }
+
+       rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x001028d1);
+       rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
+       rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
+
+       mdelay(IQK_DELAY_TIME);
+
+       reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
+       reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
+       reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
+       reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
+
+       if (!(reg_eac & BIT(28)) &&
+           (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
+           (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
+               result |= 0x01;
+       else
+               return result;
+
+       if (!(reg_eac & BIT(27)) &&
+           (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
+           (((reg_eac & 0x03FF0000) >> 16) != 0x36))
+               result |= 0x02;
+       return result;
+}
+
+static u8 _rtl92c_phy_path_b_iqk(struct ieee80211_hw *hw)
+{
+       u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+       u8 result = 0x00;
+
+       rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
+       rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
+       mdelay(IQK_DELAY_TIME);
+       reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
+       reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
+       reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
+       reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
+       reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
+       if (!(reg_eac & BIT(31)) &&
+           (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) &&
+           (((reg_ebc & 0x03FF0000) >> 16) != 0x42))
+               result |= 0x01;
+       else
+               return result;
+
+       if (!(reg_eac & BIT(30)) &&
+           (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) &&
+           (((reg_ecc & 0x03FF0000) >> 16) != 0x36))
+               result |= 0x02;
+       return result;
+}
+
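+/*
+ * Apply the winning path A IQK results: sign-extend the 10-bit X/Y terms,
+ * scale the existing TX IQ-imbalance coefficient and write the corrections
+ * back to the OFDM BB registers; RX terms are skipped when btxonly is set.
+ */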
+static void _rtl92c_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
+                                              bool iqk_ok, long result[][8],
+                                              u8 final_candidate, bool btxonly)
+{
+       u32 oldval_0, x, tx0_a, reg;
+       long y, tx0_c;
+
+       if (final_candidate == 0xFF)
+               return;
+       else if (iqk_ok) {
+               oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+                                         MASKDWORD) >> 22) & 0x3FF;
+               x = result[final_candidate][0];
+               if ((x & 0x00000200) != 0)
+                       x = x | 0xFFFFFC00;
+               tx0_a = (x * oldval_0) >> 8;
+               rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
+               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
+                             ((x * oldval_0 >> 7) & 0x1));
+               y = result[final_candidate][1];
+               if ((y & 0x00000200) != 0)
+                       y = y | 0xFFFFFC00;
+               tx0_c = (y * oldval_0) >> 8;
+               rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
+                             ((tx0_c & 0x3C0) >> 6));
+               rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
+                             (tx0_c & 0x3F));
+               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
+                             ((y * oldval_0 >> 7) & 0x1));
+               if (btxonly)
+                       return;
+               reg = result[final_candidate][2];
+               rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
+               reg = result[final_candidate][3] & 0x3F;
+               rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
+               reg = (result[final_candidate][3] >> 6) & 0xF;
+               rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
+       }
+}
+
+static void _rtl92c_phy_path_b_fill_iqk_matrix(struct ieee80211_hw *hw,
+                                              bool iqk_ok, long result[][8],
+                                              u8 final_candidate, bool btxonly)
+{
+       u32 oldval_1, x, tx1_a, reg;
+       long y, tx1_c;
+
+       if (final_candidate == 0xFF)
+               return;
+       else if (iqk_ok) {
+               oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
+                                         MASKDWORD) >> 22) & 0x3FF;
+               x = result[final_candidate][4];
+               if ((x & 0x00000200) != 0)
+                       x = x | 0xFFFFFC00;
+               tx1_a = (x * oldval_1) >> 8;
+               rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x3FF, tx1_a);
+               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(27),
+                             ((x * oldval_1 >> 7) & 0x1));
+               y = result[final_candidate][5];
+               if ((y & 0x00000200) != 0)
+                       y = y | 0xFFFFFC00;
+               tx1_c = (y * oldval_1) >> 8;
+               rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000,
+                             ((tx1_c & 0x3C0) >> 6));
+               rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x003F0000,
+                             (tx1_c & 0x3F));
+               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(25),
+                             ((y * oldval_1 >> 7) & 0x1));
+               if (btxonly)
+                       return;
+               reg = result[final_candidate][6];
+               rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg);
+               reg = result[final_candidate][7] & 0x3F;
+               rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg);
+               reg = (result[final_candidate][7] >> 6) & 0xF;
+               rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, reg);
+       }
+}
+
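+/*
+ * The following helpers snapshot and restore the ADDA (BB) and MAC
+ * registers touched during calibration; the last MAC entry is a 32-bit
+ * register, the others are accessed byte-wide.
+ */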
+static void _rtl92c_phy_save_adda_registers(struct ieee80211_hw *hw,
+                                           u32 *addareg, u32 *addabackup,
+                                           u32 registernum)
+{
+       u32 i;
+
+       for (i = 0; i < registernum; i++)
+               addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
+}
+
+static void _rtl92c_phy_save_mac_registers(struct ieee80211_hw *hw,
+                                          u32 *macreg, u32 *macbackup)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 i;
+
+       for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+               macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
+       macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
+}
+
+static void _rtl92c_phy_reload_adda_registers(struct ieee80211_hw *hw,
+                                             u32 *addareg, u32 *addabackup,
+                                             u32 registernum)
+{
+       u32 i;
+
+       for (i = 0; i < registernum; i++)
+               rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
+}
+
+static void _rtl92c_phy_reload_mac_registers(struct ieee80211_hw *hw,
+                                            u32 *macreg, u32 *macbackup)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 i;
+
+       for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+               rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
+       rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
+}
+
+static void _rtl92c_phy_path_adda_on(struct ieee80211_hw *hw,
+                                    u32 *addareg, bool is_patha_on, bool is2t)
+{
+       u32 path_on;
+       u32 i;
+
+       path_on = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
+       if (!is2t) {
+               path_on = 0x0bdb25a0;
+               rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
+       } else {
+               rtl_set_bbreg(hw, addareg[0], MASKDWORD, path_on);
+       }
+
+       for (i = 1; i < IQK_ADDA_REG_NUM; i++)
+               rtl_set_bbreg(hw, addareg[i], MASKDWORD, path_on);
+}
+
+static void _rtl92c_phy_mac_setting_calibration(struct ieee80211_hw *hw,
+                                               u32 *macreg, u32 *macbackup)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 i;
+
+       rtl_write_byte(rtlpriv, macreg[0], 0x3F);
+
+       for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
+               rtl_write_byte(rtlpriv, macreg[i],
+                              (u8) (macbackup[i] & (~BIT(3))));
+       rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
+}
+
+static void _rtl92c_phy_path_a_standby(struct ieee80211_hw *hw)
+{
+       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
+       rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
+       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+}
+
+static void _rtl92c_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
+{
+       u32 mode;
+
+       mode = pi_mode ? 0x01000100 : 0x01000000;
+       rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
+       rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
+}
+
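+/*
+ * Compare two IQK attempts element by element (8 values on 2T parts, 4 on
+ * 1T).  Entries differing by more than MAX_TOLERANCE are flagged; result[3]
+ * is filled with whichever values look consistent.  Returns true only when
+ * the two attempts fully agree.
+ */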
+static bool _rtl92c_phy_simularity_compare(struct ieee80211_hw *hw,
+                                          long result[][8], u8 c1, u8 c2)
+{
+       u32 i, j, diff, simularity_bitmap, bound;
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       u8 final_candidate[2] = { 0xFF, 0xFF };
+       bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version);
+
+       if (is2t)
+               bound = 8;
+       else
+               bound = 4;
+
+       simularity_bitmap = 0;
+
+       for (i = 0; i < bound; i++) {
+               diff = (result[c1][i] > result[c2][i]) ?
+                   (result[c1][i] - result[c2][i]) :
+                   (result[c2][i] - result[c1][i]);
+
+               if (diff > MAX_TOLERANCE) {
+                       if ((i == 2 || i == 6) && !simularity_bitmap) {
+                               if (result[c1][i] + result[c1][i + 1] == 0)
+                                       final_candidate[(i / 4)] = c2;
+                               else if (result[c2][i] + result[c2][i + 1] == 0)
+                                       final_candidate[(i / 4)] = c1;
+                               else
+                                       simularity_bitmap = simularity_bitmap |
+                                           (1 << i);
+                       } else
+                               simularity_bitmap =
+                                   simularity_bitmap | (1 << i);
+               }
+       }
+
+       if (simularity_bitmap == 0) {
+               for (i = 0; i < (bound / 4); i++) {
+                       if (final_candidate[i] != 0xFF) {
+                               for (j = i * 4; j < (i + 1) * 4 - 2; j++)
+                                       result[3][j] =
+                                           result[final_candidate[i]][j];
+                               bresult = false;
+                       }
+               }
+               return bresult;
+       } else if (!(simularity_bitmap & 0x0F)) {
+               for (i = 0; i < 4; i++)
+                       result[3][i] = result[c1][i];
+               return false;
+       } else if (!(simularity_bitmap & 0xF0) && is2t) {
+               for (i = 4; i < 8; i++)
+                       result[3][i] = result[c1][i];
+               return false;
+       } else {
+               return false;
+       }
+}
+
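+/*
+ * One IQ-calibration pass (t = pass number): back up the ADDA, MAC and BB
+ * state on the first pass, switch the PHY into PI mode and IQK settings,
+ * run the path A (and, on 2T parts, path B) calibrations with up to two
+ * retries each and store the raw 10-bit results in result[t][].
+ */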
+static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
+                                    long result[][8], u8 t, bool is2t)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u32 i;
+       u8 patha_ok, pathb_ok;
+       u32 adda_reg[IQK_ADDA_REG_NUM] = {
+               0x85c, 0xe6c, 0xe70, 0xe74,
+               0xe78, 0xe7c, 0xe80, 0xe84,
+               0xe88, 0xe8c, 0xed0, 0xed4,
+               0xed8, 0xedc, 0xee0, 0xeec
+       };
+
+       u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
+               0x522, 0x550, 0x551, 0x040
+       };
+
+       const u32 retrycount = 2;
+
+       u32 bbvalue;
+
+       if (t == 0) {
+               bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD);
+
+               _rtl92c_phy_save_adda_registers(hw, adda_reg,
+                                               rtlphy->adda_backup, 16);
+               _rtl92c_phy_save_mac_registers(hw, iqk_mac_reg,
+                                              rtlphy->iqk_mac_backup);
+       }
+       _rtl92c_phy_path_adda_on(hw, adda_reg, true, is2t);
+       if (t == 0) {
+               rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
+                                                  RFPGA0_XA_HSSIPARAMETER1,
+                                                  BIT(8));
+       }
+       if (!rtlphy->rfpi_enable)
+               _rtl92c_phy_pi_mode_switch(hw, true);
+       if (t == 0) {
+               rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD);
+               rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD);
+               rtlphy->reg_874 = rtl_get_bbreg(hw, 0x874, MASKDWORD);
+       }
+       rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
+       rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
+       rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);
+       if (is2t) {
+               rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
+               rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
+       }
+       _rtl92c_phy_mac_setting_calibration(hw, iqk_mac_reg,
+                                           rtlphy->iqk_mac_backup);
+       rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000);
+       if (is2t)
+               rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x00080000);
+       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+       rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
+       rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
+       for (i = 0; i < retrycount; i++) {
+               patha_ok = _rtl92c_phy_path_a_iqk(hw, is2t);
+               if (patha_ok == 0x03) {
+                       result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
+                                       0x3FF0000) >> 16;
+                       result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
+                                       0x3FF0000) >> 16;
+                       result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
+                                       0x3FF0000) >> 16;
+                       result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
+                                       0x3FF0000) >> 16;
+                       break;
+               } else if (i == (retrycount - 1) && patha_ok == 0x01)
+                       result[t][0] = (rtl_get_bbreg(hw, 0xe94,
+                                                     MASKDWORD) & 0x3FF0000) >>
+                                                     16;
+               result[t][1] =
+                   (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) & 0x3FF0000) >> 16;
+       }
+
+       if (is2t) {
+               _rtl92c_phy_path_a_standby(hw);
+               _rtl92c_phy_path_adda_on(hw, adda_reg, false, is2t);
+               for (i = 0; i < retrycount; i++) {
+                       pathb_ok = _rtl92c_phy_path_b_iqk(hw);
+                       if (pathb_ok == 0x03) {
+                               result[t][4] = (rtl_get_bbreg(hw,
+                                                     0xeb4,
+                                                     MASKDWORD) &
+                                               0x3FF0000) >> 16;
+                               result[t][5] =
+                                   (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
+                                    0x3FF0000) >> 16;
+                               result[t][6] =
+                                   (rtl_get_bbreg(hw, 0xec4, MASKDWORD) &
+                                    0x3FF0000) >> 16;
+                               result[t][7] =
+                                   (rtl_get_bbreg(hw, 0xecc, MASKDWORD) &
+                                    0x3FF0000) >> 16;
+                               break;
+                       } else if (i == (retrycount - 1) && pathb_ok == 0x01) {
+                               result[t][4] = (rtl_get_bbreg(hw,
+                                                     0xeb4,
+                                                     MASKDWORD) &
+                                               0x3FF0000) >> 16;
+                       }
+                       result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
+                                       0x3FF0000) >> 16;
+               }
+       }
+       rtl_set_bbreg(hw, 0xc04, MASKDWORD, rtlphy->reg_c04);
+       rtl_set_bbreg(hw, 0x874, MASKDWORD, rtlphy->reg_874);
+       rtl_set_bbreg(hw, 0xc08, MASKDWORD, rtlphy->reg_c08);
+       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
+       rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3);
+       if (is2t)
+               rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
+       if (t != 0) {
+               if (!rtlphy->rfpi_enable)
+                       _rtl92c_phy_pi_mode_switch(hw, false);
+               _rtl92c_phy_reload_adda_registers(hw, adda_reg,
+                                                 rtlphy->adda_backup, 16);
+               _rtl92c_phy_reload_mac_registers(hw, iqk_mac_reg,
+                                                rtlphy->iqk_mac_backup);
+       }
+}
+
+static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw,
+                                    char delta, bool is2t)
+{
+       /* This routine is deliberately dummied out for later fixes */
+#if 0
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+       u32 reg_d[PATH_NUM];
+       u32 tmpreg, index, offset, path, i, pathbound = PATH_NUM, apkbound;
+
+       u32 bb_backup[APK_BB_REG_NUM];
+       u32 bb_reg[APK_BB_REG_NUM] = {
+               0x904, 0xc04, 0x800, 0xc08, 0x874
+       };
+       u32 bb_ap_mode[APK_BB_REG_NUM] = {
+               0x00000020, 0x00a05430, 0x02040000,
+               0x000800e4, 0x00204000
+       };
+       u32 bb_normal_ap_mode[APK_BB_REG_NUM] = {
+               0x00000020, 0x00a05430, 0x02040000,
+               0x000800e4, 0x22204000
+       };
+
+       u32 afe_backup[APK_AFE_REG_NUM];
+       u32 afe_reg[APK_AFE_REG_NUM] = {
+               0x85c, 0xe6c, 0xe70, 0xe74, 0xe78,
+               0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c,
+               0xed0, 0xed4, 0xed8, 0xedc, 0xee0,
+               0xeec
+       };
+
+       u32 mac_backup[IQK_MAC_REG_NUM];
+       u32 mac_reg[IQK_MAC_REG_NUM] = {
+               0x522, 0x550, 0x551, 0x040
+       };
+
+       u32 apk_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
+               {0x0852c, 0x1852c, 0x5852c, 0x1852c, 0x5852c},
+               {0x2852e, 0x0852e, 0x3852e, 0x0852e, 0x0852e}
+       };
+
+       u32 apk_normal_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
+               {0x0852c, 0x0a52c, 0x3a52c, 0x5a52c, 0x5a52c},
+               {0x0852c, 0x0a52c, 0x5a52c, 0x5a52c, 0x5a52c}
+       };
+
+       u32 apk_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
+               {0x52019, 0x52014, 0x52013, 0x5200f, 0x5208d},
+               {0x5201a, 0x52019, 0x52016, 0x52033, 0x52050}
+       };
+
+       u32 apk_normal_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
+               {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a},
+               {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}
+       };
+
+       u32 afe_on_off[PATH_NUM] = {
+               0x04db25a4, 0x0b1b25a4
+       };
+
+       u32 apk_offset[PATH_NUM] = { 0xb68, 0xb6c };
+
+       u32 apk_normal_offset[PATH_NUM] = { 0xb28, 0xb98 };
+
+       u32 apk_value[PATH_NUM] = { 0x92fc0000, 0x12fc0000 };
+
+       u32 apk_normal_value[PATH_NUM] = { 0x92680000, 0x12680000 };
+
+       const char apk_delta_mapping[APK_BB_REG_NUM][13] = {
+               {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+               {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+               {-6, -4, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+               {-1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+               {-11, -9, -7, -5, -3, -1, 0, 0, 0, 0, 0, 0, 0}
+       };
+
+       const u32 apk_normal_setting_value_1[13] = {
+               0x01017018, 0xf7ed8f84, 0x1b1a1816, 0x2522201e, 0x322e2b28,
+               0x433f3a36, 0x5b544e49, 0x7b726a62, 0xa69a8f84, 0xdfcfc0b3,
+               0x12680000, 0x00880000, 0x00880000
+       };
+
+       const u32 apk_normal_setting_value_2[16] = {
+               0x01c7021d, 0x01670183, 0x01000123, 0x00bf00e2, 0x008d00a3,
+               0x0068007b, 0x004d0059, 0x003a0042, 0x002b0031, 0x001f0025,
+               0x0017001b, 0x00110014, 0x000c000f, 0x0009000b, 0x00070008,
+               0x00050006
+       };
+
+       u32 apk_result[PATH_NUM][APK_BB_REG_NUM];
+
+       long bb_offset, delta_v, delta_offset;
+
+       if (!is2t)
+               pathbound = 1;
+
+       for (index = 0; index < PATH_NUM; index++) {
+               apk_offset[index] = apk_normal_offset[index];
+               apk_value[index] = apk_normal_value[index];
+               afe_on_off[index] = 0x6fdb25a4;
+       }
+
+       for (index = 0; index < APK_BB_REG_NUM; index++) {
+               for (path = 0; path < pathbound; path++) {
+                       apk_rf_init_value[path][index] =
+                           apk_normal_rf_init_value[path][index];
+                       apk_rf_value_0[path][index] =
+                           apk_normal_rf_value_0[path][index];
+               }
+               bb_ap_mode[index] = bb_normal_ap_mode[index];
+
+               apkbound = 6;
+       }
+
+       for (index = 0; index < APK_BB_REG_NUM; index++) {
+               if (index == 0)
+                       continue;
+               bb_backup[index] = rtl_get_bbreg(hw, bb_reg[index], MASKDWORD);
+       }
+
+       _rtl92c_phy_save_mac_registers(hw, mac_reg, mac_backup);
+
+       _rtl92c_phy_save_adda_registers(hw, afe_reg, afe_backup, 16);
+
+       for (path = 0; path < pathbound; path++) {
+               if (path == RF90_PATH_A) {
+                       offset = 0xb00;
+                       for (index = 0; index < 11; index++) {
+                               rtl_set_bbreg(hw, offset, MASKDWORD,
+                                             apk_normal_setting_value_1
+                                             [index]);
+
+                               offset += 0x04;
+                       }
+
+                       rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
+
+                       offset = 0xb68;
+                       for (; index < 13; index++) {
+                               rtl_set_bbreg(hw, offset, MASKDWORD,
+                                             apk_normal_setting_value_1
+                                             [index]);
+
+                               offset += 0x04;
+                       }
+
+                       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
+
+                       offset = 0xb00;
+                       for (index = 0; index < 16; index++) {
+                               rtl_set_bbreg(hw, offset, MASKDWORD,
+                                             apk_normal_setting_value_2
+                                             [index]);
+
+                               offset += 0x04;
+                       }
+                       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
+               } else if (path == RF90_PATH_B) {
+                       offset = 0xb70;
+                       for (index = 0; index < 10; index++) {
+                               rtl_set_bbreg(hw, offset, MASKDWORD,
+                                             apk_normal_setting_value_1
+                                             [index]);
+
+                               offset += 0x04;
+                       }
+                       rtl_set_bbreg(hw, 0xb28, MASKDWORD, 0x12680000);
+                       rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
+
+                       offset = 0xb68;
+                       index = 11;
+                       for (; index < 13; index++) {
+                               rtl_set_bbreg(hw, offset, MASKDWORD,
+                                             apk_normal_setting_value_1
+                                             [index]);
+
+                               offset += 0x04;
+                       }
+
+                       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
+
+                       offset = 0xb60;
+                       for (index = 0; index < 16; index++) {
+                               rtl_set_bbreg(hw, offset, MASKDWORD,
+                                             apk_normal_setting_value_2
+                                             [index]);
+
+                               offset += 0x04;
+                       }
+                       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
+               }
+
+               reg_d[path] = rtl_get_rfreg(hw, (enum radio_path)path,
+                                           0xd, MASKDWORD);
+
+               for (index = 0; index < APK_AFE_REG_NUM; index++)
+                       rtl_set_bbreg(hw, afe_reg[index], MASKDWORD,
+                                     afe_on_off[path]);
+
+               if (path == RF90_PATH_A) {
+                       for (index = 0; index < APK_BB_REG_NUM; index++) {
+                               if (index == 0)
+                                       continue;
+                               rtl_set_bbreg(hw, bb_reg[index], MASKDWORD,
+                                             bb_ap_mode[index]);
+                       }
+               }
+
+               _rtl92c_phy_mac_setting_calibration(hw, mac_reg, mac_backup);
+
+               if (path == 0) {
+                       rtl_set_rfreg(hw, RF90_PATH_B, 0x0, MASKDWORD, 0x10000);
+               } else {
+                       rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASKDWORD,
+                                     0x10000);
+                       rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
+                                     0x1000f);
+                       rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
+                                     0x20103);
+               }
+
+               delta_offset = ((delta + 14) / 2);
+               if (delta_offset < 0)
+                       delta_offset = 0;
+               else if (delta_offset > 12)
+                       delta_offset = 12;
+
+               for (index = 0; index < APK_BB_REG_NUM; index++) {
+                       if (index != 1)
+                               continue;
+
+                       tmpreg = apk_rf_init_value[path][index];
+
+                       if (!rtlefuse->apk_thermalmeterignore) {
+                               bb_offset = (tmpreg & 0xF0000) >> 16;
+
+                               if (!(tmpreg & BIT(15)))
+                                       bb_offset = -bb_offset;
+
+                               delta_v =
+                                   apk_delta_mapping[index][delta_offset];
+
+                               bb_offset += delta_v;
+
+                               if (bb_offset < 0) {
+                                       tmpreg = tmpreg & (~BIT(15));
+                                       bb_offset = -bb_offset;
+                               } else {
+                                       tmpreg = tmpreg | BIT(15);
+                               }
+
+                               tmpreg =
+                                   (tmpreg & 0xFFF0FFFF) | (bb_offset << 16);
+                       }
+
+                       rtl_set_rfreg(hw, (enum radio_path)path, 0xc,
+                                     MASKDWORD, 0x8992e);
+                       rtl_set_rfreg(hw, (enum radio_path)path, 0x0,
+                                     MASKDWORD, apk_rf_value_0[path][index]);
+                       rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
+                                     MASKDWORD, tmpreg);
+
+                       i = 0;
+                       do {
+                               rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80000000);
+                               rtl_set_bbreg(hw, apk_offset[path],
+                                             MASKDWORD, apk_value[0]);
+                               RTPRINT(rtlpriv, FINIT, INIT_IQK,
+                                       ("PHY_APCalibrate() offset 0x%x "
+                                        "value 0x%x\n",
+                                        apk_offset[path],
+                                        rtl_get_bbreg(hw, apk_offset[path],
+                                                      MASKDWORD)));
+
+                               mdelay(3);
+
+                               rtl_set_bbreg(hw, apk_offset[path],
+                                             MASKDWORD, apk_value[1]);
+                               RTPRINT(rtlpriv, FINIT, INIT_IQK,
+                                       ("PHY_APCalibrate() offset 0x%x "
+                                        "value 0x%x\n",
+                                        apk_offset[path],
+                                        rtl_get_bbreg(hw, apk_offset[path],
+                                                      MASKDWORD)));
+
+                               mdelay(20);
+
+                               rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
+
+                               if (path == RF90_PATH_A)
+                                       tmpreg = rtl_get_bbreg(hw, 0xbd8,
+                                                              0x03E00000);
+                               else
+                                       tmpreg = rtl_get_bbreg(hw, 0xbd8,
+                                                              0xF8000000);
+
+                               RTPRINT(rtlpriv, FINIT, INIT_IQK,
+                                       ("PHY_APCalibrate() offset "
+                                        "0xbd8[25:21] %x\n", tmpreg));
+
+                               i++;
+
+                       } while (tmpreg > apkbound && i < 4);
+
+                       apk_result[path][index] = tmpreg;
+               }
+       }
+
+       _rtl92c_phy_reload_mac_registers(hw, mac_reg, mac_backup);
+
+       for (index = 0; index < APK_BB_REG_NUM; index++) {
+               if (index == 0)
+                       continue;
+               rtl_set_bbreg(hw, bb_reg[index], MASKDWORD, bb_backup[index]);
+       }
+
+       _rtl92c_phy_reload_adda_registers(hw, afe_reg, afe_backup, 16);
+
+       for (path = 0; path < pathbound; path++) {
+               rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
+                             MASKDWORD, reg_d[path]);
+
+               if (path == RF90_PATH_B) {
+                       rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
+                                     0x1000f);
+                       rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
+                                     0x20101);
+               }
+
+               if (apk_result[path][1] > 6)
+                       apk_result[path][1] = 6;
+       }
+
+       for (path = 0; path < pathbound; path++) {
+               rtl_set_rfreg(hw, (enum radio_path)path, 0x3, MASKDWORD,
+                             ((apk_result[path][1] << 15) |
+                              (apk_result[path][1] << 10) |
+                              (apk_result[path][1] << 5) |
+                              apk_result[path][1]));
+
+               if (path == RF90_PATH_A)
+                       rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
+                                     ((apk_result[path][1] << 15) |
+                                      (apk_result[path][1] << 10) |
+                                      (0x00 << 5) | 0x05));
+               else
+                       rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
+                                     ((apk_result[path][1] << 15) |
+                                      (apk_result[path][1] << 10) |
+                                      (0x02 << 5) | 0x05));
+
+               rtl_set_rfreg(hw, (enum radio_path)path, 0xe, MASKDWORD,
+                             ((0x08 << 15) | (0x08 << 10) | (0x08 << 5) |
+                              0x08));
+
+       }
+
+       rtlphy->apk_done = true;
+#endif
+}
+
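For reference, the final loop in _rtl92c_phy_ap_calibrate() above clamps apk_result[path][1] to 6 and replicates it into four 5-bit fields when writing RF register 0x3. A minimal user-space sketch of that packing; pack_apk_fields() is a hypothetical helper, not a driver symbol:

    #include <stdio.h>
    #include <stdint.h>

    /* Replicate a clamped APK result into four 5-bit fields, mirroring the
     * write to RF register 0x3 in the driver code above. */
    static uint32_t pack_apk_fields(uint32_t apk)
    {
            if (apk > 6)            /* same clamp as apk_result[path][1] */
                    apk = 6;
            return (apk << 15) | (apk << 10) | (apk << 5) | apk;
    }

    int main(void)
    {
            printf("0x%x\n", pack_apk_fields(6));   /* prints 0x318c6 */
            return 0;
    }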
+static void _rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw,
+                                         bool bmain, bool is2t)
+{
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       if (is_hal_stop(rtlhal)) {
+               rtl_set_bbreg(hw, REG_LEDCFG0, BIT(23), 0x01);
+               rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
+       }
+       if (is2t) {
+               if (bmain)
+                       rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+                                     BIT(5) | BIT(6), 0x1);
+               else
+                       rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+                                     BIT(5) | BIT(6), 0x2);
+       } else {
+               if (bmain)
+                       rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x2);
+               else
+                       rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x1);
+
+       }
+}
+
+#undef IQK_ADDA_REG_NUM
+#undef IQK_DELAY_TIME
+
+void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       long result[4][8];
+       u8 i, final_candidate;
+       bool patha_ok, pathb_ok;
+       long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
+           reg_ecc, reg_tmp = 0;
+       bool is12simular, is13simular, is23simular;
+       bool start_conttx = false, singletone = false;
+       u32 iqk_bb_reg[10] = {
+               ROFDM0_XARXIQIMBALANCE,
+               ROFDM0_XBRXIQIMBALANCE,
+               ROFDM0_ECCATHRESHOLD,
+               ROFDM0_AGCRSSITABLE,
+               ROFDM0_XATXIQIMBALANCE,
+               ROFDM0_XBTXIQIMBALANCE,
+               ROFDM0_XCTXIQIMBALANCE,
+               ROFDM0_XCTXAFE,
+               ROFDM0_XDTXAFE,
+               ROFDM0_RXIQEXTANTA
+       };
+
+       if (recovery) {
+               _rtl92c_phy_reload_adda_registers(hw,
+                                                 iqk_bb_reg,
+                                                 rtlphy->iqk_bb_backup, 10);
+               return;
+       }
+       if (start_conttx || singletone)
+               return;
+       for (i = 0; i < 8; i++) {
+               result[0][i] = 0;
+               result[1][i] = 0;
+               result[2][i] = 0;
+               result[3][i] = 0;
+       }
+       final_candidate = 0xff;
+       patha_ok = false;
+       pathb_ok = false;
+       is12simular = false;
+       is23simular = false;
+       is13simular = false;
+       for (i = 0; i < 3; i++) {
+               if (IS_92C_SERIAL(rtlhal->version))
+                       _rtl92c_phy_iq_calibrate(hw, result, i, true);
+               else
+                       _rtl92c_phy_iq_calibrate(hw, result, i, false);
+               if (i == 1) {
+                       is12simular = _rtl92c_phy_simularity_compare(hw,
+                                                                    result, 0,
+                                                                    1);
+                       if (is12simular) {
+                               final_candidate = 0;
+                               break;
+                       }
+               }
+               if (i == 2) {
+                       is13simular = _rtl92c_phy_simularity_compare(hw,
+                                                                    result, 0,
+                                                                    2);
+                       if (is13simular) {
+                               final_candidate = 0;
+                               break;
+                       }
+                       is23simular = _rtl92c_phy_simularity_compare(hw,
+                                                                    result, 1,
+                                                                    2);
+                       if (is23simular)
+                               final_candidate = 1;
+                       else {
+                               for (i = 0; i < 8; i++)
+                                       reg_tmp += result[3][i];
+
+                               if (reg_tmp != 0)
+                                       final_candidate = 3;
+                               else
+                                       final_candidate = 0xFF;
+                       }
+               }
+       }
+       for (i = 0; i < 4; i++) {
+               reg_e94 = result[i][0];
+               reg_e9c = result[i][1];
+               reg_ea4 = result[i][2];
+               reg_eac = result[i][3];
+               reg_eb4 = result[i][4];
+               reg_ebc = result[i][5];
+               reg_ec4 = result[i][6];
+               reg_ecc = result[i][7];
+       }
+       if (final_candidate != 0xff) {
+               rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
+               rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
+               reg_ea4 = result[final_candidate][2];
+               reg_eac = result[final_candidate][3];
+               rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
+               rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
+               reg_ec4 = result[final_candidate][6];
+               reg_ecc = result[final_candidate][7];
+               patha_ok = pathb_ok = true;
+       } else {
+               rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
+               rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0;
+       }
+       if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
+               _rtl92c_phy_path_a_fill_iqk_matrix(hw, patha_ok, result,
+                                                  final_candidate,
+                                                  (reg_ea4 == 0));
+       if (IS_92C_SERIAL(rtlhal->version)) {
+               if (reg_eb4 != 0) /*&&(reg_ec4 != 0) */
+                       _rtl92c_phy_path_b_fill_iqk_matrix(hw, pathb_ok,
+                                                          result,
+                                                          final_candidate,
+                                                          (reg_ec4 == 0));
+       }
+       _rtl92c_phy_save_adda_registers(hw, iqk_bb_reg,
+                                       rtlphy->iqk_bb_backup, 10);
+}
+EXPORT_SYMBOL(rtl92c_phy_iq_calibrate);
+
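The candidate selection in rtl92c_phy_iq_calibrate() above boils down to: if the first calibration run agrees with either later run, use run 0; if only the two later runs agree, use run 1; otherwise fall back to result[3] when it is non-zero, or report no candidate (0xff). A stand-alone sketch of that decision; pick_candidate() is hypothetical and -1 stands in for the driver's 0xff:

    #include <stdio.h>
    #include <stdbool.h>

    static int pick_candidate(bool sim01, bool sim02, bool sim12,
                              bool have_result3)
    {
            if (sim01 || sim02)
                    return 0;       /* run 0 confirmed by a later run */
            if (sim12)
                    return 1;       /* only runs 1 and 2 agree */
            return have_result3 ? 3 : -1;
    }

    int main(void)
    {
            printf("%d\n", pick_candidate(false, false, true, false));  /* 1 */
            printf("%d\n", pick_candidate(false, false, false, true));  /* 3 */
            return 0;
    }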
+void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       bool start_conttx = false, singletone = false;
+
+       if (start_conttx || singletone)
+               return;
+       if (IS_92C_SERIAL(rtlhal->version))
+               rtlpriv->cfg->ops->phy_lc_calibrate(hw, true);
+       else
+               rtlpriv->cfg->ops->phy_lc_calibrate(hw, false);
+}
+EXPORT_SYMBOL(rtl92c_phy_lc_calibrate);
+
+void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       if (rtlphy->apk_done)
+               return;
+       if (IS_92C_SERIAL(rtlhal->version))
+               _rtl92c_phy_ap_calibrate(hw, delta, true);
+       else
+               _rtl92c_phy_ap_calibrate(hw, delta, false);
+}
+EXPORT_SYMBOL(rtl92c_phy_ap_calibrate);
+
+void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
+{
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       if (IS_92C_SERIAL(rtlhal->version))
+               _rtl92c_phy_set_rfpath_switch(hw, bmain, true);
+       else
+               _rtl92c_phy_set_rfpath_switch(hw, bmain, false);
+}
+EXPORT_SYMBOL(rtl92c_phy_set_rfpath_switch);
+
+bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       bool postprocessing = false;
+
+       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+                ("-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+                 iotype, rtlphy->set_io_inprogress));
+       do {
+               switch (iotype) {
+               case IO_CMD_RESUME_DM_BY_SCAN:
+                       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+                                ("[IO CMD] Resume DM after scan.\n"));
+                       postprocessing = true;
+                       break;
+               case IO_CMD_PAUSE_DM_BY_SCAN:
+                       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+                                ("[IO CMD] Pause DM before scan.\n"));
+                       postprocessing = true;
+                       break;
+               default:
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                ("switch case not processed\n"));

+                       break;
+               }
+       } while (false);
+       if (postprocessing && !rtlphy->set_io_inprogress) {
+               rtlphy->set_io_inprogress = true;
+               rtlphy->current_io_type = iotype;
+       } else {
+               return false;
+       }
+       rtl92c_phy_set_io(hw);
+       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, ("<--IO Type(%#x)\n", iotype));
+       return true;
+}
+EXPORT_SYMBOL(rtl92c_phy_set_io_cmd);
+
+void rtl92c_phy_set_io(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+                ("--->Cmd(%#x), set_io_inprogress(%d)\n",
+                 rtlphy->current_io_type, rtlphy->set_io_inprogress));
+       switch (rtlphy->current_io_type) {
+       case IO_CMD_RESUME_DM_BY_SCAN:
+               dm_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1;
+               rtl92c_dm_write_dig(hw);
+               rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
+               break;
+       case IO_CMD_PAUSE_DM_BY_SCAN:
+               rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue;
+               dm_digtable.cur_igvalue = 0x17;
+               rtl92c_dm_write_dig(hw);
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("switch case not processed\n"));
+               break;
+       }
+       rtlphy->set_io_inprogress = false;
+       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+                ("<---(%#x)\n", rtlphy->current_io_type));
+}
+EXPORT_SYMBOL(rtl92c_phy_set_io);
+
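rtl92c_phy_set_io() above backs up the current initial gain and forces it to 0x17 when DM is paused for a scan, then restores it (together with the TX power level) on resume. A minimal user-space sketch of just that backup/restore pairing; cur_igvalue and the two helpers are stand-ins, not driver symbols:

    #include <stdio.h>

    static unsigned int cur_igvalue = 0x20; /* stands in for dm_digtable.cur_igvalue */
    static unsigned int igvalue_backup;

    static void io_pause_dm_by_scan(void)
    {
            igvalue_backup = cur_igvalue;   /* IO_CMD_PAUSE_DM_BY_SCAN */
            cur_igvalue = 0x17;
    }

    static void io_resume_dm_by_scan(void)
    {
            cur_igvalue = igvalue_backup;   /* IO_CMD_RESUME_DM_BY_SCAN */
    }

    int main(void)
    {
            io_pause_dm_by_scan();
            printf("during scan: 0x%x\n", cur_igvalue);     /* 0x17 */
            io_resume_dm_by_scan();
            printf("after scan:  0x%x\n", cur_igvalue);     /* 0x20 */
            return 0;
    }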
+void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+       rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
+       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+       rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+}
+EXPORT_SYMBOL(rtl92ce_phy_set_rf_on);
+
+void _rtl92c_phy_set_rf_sleep(struct ieee80211_hw *hw)
+{
+       u32 u4b_tmp;
+       u8 delay = 5;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+       rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+       rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+       u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
+       while (u4b_tmp != 0 && delay > 0) {
+               rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
+               rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+               rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+               u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
+               delay--;
+       }
+       if (delay == 0) {
+               rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
+               rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+               rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+               rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
+                        ("Switch RF timeout!\n"));
+               return;
+       }
+       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+       rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
+}
+EXPORT_SYMBOL(_rtl92c_phy_set_rf_sleep);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
new file mode 100644
index 0000000..53ffb09
--- /dev/null
@@ -0,0 +1,246 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92C_PHY_H__
+#define __RTL92C_PHY_H__
+
+#define MAX_PRECMD_CNT                 16
+#define MAX_RFDEPENDCMD_CNT            16
+#define MAX_POSTCMD_CNT                        16
+
+#define MAX_DOZE_WAITING_TIMES_9x      64
+
+#define RT_CANNOT_IO(hw)               false
+#define HIGHPOWER_RADIOA_ARRAYLEN      22
+
+#define MAX_TOLERANCE                  5
+#define        IQK_DELAY_TIME                  1
+
+#define        APK_BB_REG_NUM                  5
+#define        APK_AFE_REG_NUM                 16
+#define        APK_CURVE_REG_NUM               4
+#define        PATH_NUM                        2
+
+#define LOOP_LIMIT                     5
+#define MAX_STALL_TIME                 50
+#define AntennaDiversityValue          0x80
+#define MAX_TXPWR_IDX_NMODE_92S                63
+#define Reset_Cnt_Limit                        3
+
+#define IQK_ADDA_REG_NUM               16
+#define IQK_MAC_REG_NUM                        4
+
+#define RF90_PATH_MAX                  2
+
+#define CT_OFFSET_MAC_ADDR             0x16
+
+#define CT_OFFSET_CCK_TX_PWR_IDX       0x5A
+#define CT_OFFSET_HT401S_TX_PWR_IDX    0x60
+#define CT_OFFSET_HT402S_TX_PWR_IDX_DIF        0x66
+#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69
+#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C
+
+#define CT_OFFSET_HT40_MAX_PWR_OFFSET  0x6F
+#define CT_OFFSET_HT20_MAX_PWR_OFFSET  0x72
+
+#define CT_OFFSET_CHANNEL_PLAH         0x75
+#define CT_OFFSET_THERMAL_METER                0x78
+#define CT_OFFSET_RF_OPTION            0x79
+#define CT_OFFSET_VERSION              0x7E
+#define CT_OFFSET_CUSTOMER_ID          0x7F
+
+#define RTL92C_MAX_PATH_NUM            2
+#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER        255
+enum swchnlcmd_id {
+       CMDID_END,
+       CMDID_SET_TXPOWEROWER_LEVEL,
+       CMDID_BBREGWRITE10,
+       CMDID_WRITEPORT_ULONG,
+       CMDID_WRITEPORT_USHORT,
+       CMDID_WRITEPORT_UCHAR,
+       CMDID_RF_WRITEREG,
+};
+
+struct swchnlcmd {
+       enum swchnlcmd_id cmdid;
+       u32 para1;
+       u32 para2;
+       u32 msdelay;
+};
+
+enum hw90_block_e {
+       HW90_BLOCK_MAC = 0,
+       HW90_BLOCK_PHY0 = 1,
+       HW90_BLOCK_PHY1 = 2,
+       HW90_BLOCK_RF = 3,
+       HW90_BLOCK_MAXIMUM = 4,
+};
+
+enum baseband_config_type {
+       BASEBAND_CONFIG_PHY_REG = 0,
+       BASEBAND_CONFIG_AGC_TAB = 1,
+};
+
+enum ra_offset_area {
+       RA_OFFSET_LEGACY_OFDM1,
+       RA_OFFSET_LEGACY_OFDM2,
+       RA_OFFSET_HT_OFDM1,
+       RA_OFFSET_HT_OFDM2,
+       RA_OFFSET_HT_OFDM3,
+       RA_OFFSET_HT_OFDM4,
+       RA_OFFSET_HT_CCK,
+};
+
+enum antenna_path {
+       ANTENNA_NONE,
+       ANTENNA_D,
+       ANTENNA_C,
+       ANTENNA_CD,
+       ANTENNA_B,
+       ANTENNA_BD,
+       ANTENNA_BC,
+       ANTENNA_BCD,
+       ANTENNA_A,
+       ANTENNA_AD,
+       ANTENNA_AC,
+       ANTENNA_ACD,
+       ANTENNA_AB,
+       ANTENNA_ABD,
+       ANTENNA_ABC,
+       ANTENNA_ABCD
+};
+
+struct r_antenna_select_ofdm {
+       u32 r_tx_antenna:4;
+       u32 r_ant_l:4;
+       u32 r_ant_non_ht:4;
+       u32 r_ant_ht1:4;
+       u32 r_ant_ht2:4;
+       u32 r_ant_ht_s1:4;
+       u32 r_ant_non_ht_s1:4;
+       u32 ofdm_txsc:2;
+       u32 reserved:2;
+};
+
+struct r_antenna_select_cck {
+       u8 r_cckrx_enable_2:2;
+       u8 r_cckrx_enable:2;
+       u8 r_ccktx_enable:4;
+};
+
+struct efuse_contents {
+       u8 mac_addr[ETH_ALEN];
+       u8 cck_tx_power_idx[6];
+       u8 ht40_1s_tx_power_idx[6];
+       u8 ht40_2s_tx_power_idx_diff[3];
+       u8 ht20_tx_power_idx_diff[3];
+       u8 ofdm_tx_power_idx_diff[3];
+       u8 ht40_max_power_offset[3];
+       u8 ht20_max_power_offset[3];
+       u8 channel_plan;
+       u8 thermal_meter;
+       u8 rf_option[5];
+       u8 version;
+       u8 oem_id;
+       u8 regulatory;
+};
+
+struct tx_power_struct {
+       u8 cck[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+       u8 ht40_1s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+       u8 ht40_2s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+       u8 ht20_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+       u8 legacy_ht_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+       u8 legacy_ht_txpowerdiff;
+       u8 groupht20[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+       u8 groupht40[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+       u8 pwrgroup_cnt;
+       u32 mcs_original_offset[4][16];
+};
+
+extern u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw,
+                                  u32 regaddr, u32 bitmask);
+extern void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
+                                 u32 regaddr, u32 bitmask, u32 data);
+extern u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
+                                  enum radio_path rfpath, u32 regaddr,
+                                  u32 bitmask);
+extern void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
+                                 enum radio_path rfpath, u32 regaddr,
+                                 u32 bitmask, u32 data);
+extern bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
+extern bool rtl92c_phy_bb_config(struct ieee80211_hw *hw);
+extern bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
+extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
+                                                enum radio_path rfpath);
+extern void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+extern void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
+                                        long *powerlevel);
+extern void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+extern bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
+                                         long power_indbm);
+extern void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
+                                            u8 operation);
+extern void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+extern void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
+                                  enum nl80211_channel_type ch_type);
+extern void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+extern u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw);
+extern void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
+extern void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw,
+                                        u16 beaconinterval);
+void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
+void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
+bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+                                         enum radio_path rfpath);
+extern bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
+                                             u32 rfpath);
+extern bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
+                                         enum rf_pwrstate rfpwr_state);
+void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
+void rtl92c_phy_set_io(struct ieee80211_hw *hw);
+void rtl92c_bb_block_on(struct ieee80211_hw *hw);
+u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
+long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+                                 enum wireless_mode wirelessmode,
+                                 u8 txpwridx);
+u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
+                               enum wireless_mode wirelessmode,
+                               long power_indbm);
+void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
+static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
+                                            u32 cmdtableidx, u32 cmdtablesz,
+                                            enum swchnlcmd_id cmdid, u32 para1,
+                                            u32 para2, u32 msdelay);
+static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
+                                            u8 channel, u8 *stage, u8 *step,
+                                            u32 *delay);
+
+#endif
index 0f0be7c763b8274ec2a00cd4e8704932fbfe3ca9..c0cb0cfe7d370ec0a65abb65d521b5ff03e8f424 100644
@@ -1,6 +1,5 @@
 rtl8192ce-objs :=              \
                dm.o            \
-               fw.o            \
                hw.o            \
                led.o           \
                phy.o           \
@@ -10,3 +9,5 @@ rtl8192ce-objs :=              \
                trx.o
 
 obj-$(CONFIG_RTL8192CE) += rtl8192ce.o
+
+ccflags-y += -D__CHECK_ENDIAN__
index 83cd64895292c913b1b5347c8c363c444727d17f..2f577c8828fc0fd2c45f9bfb28a34c04b142d806 100644
 #define CHIP_92C                       0x01
 #define CHIP_88C                       0x00
 
+/* Vendor information is encoded in the chip version definition,
+ * including the UMC B-cut and RTL8723 chip info added here.
+ *
+ * BIT 7       Reserved
+ * BIT 6       UMC B-cut
+ * BIT 5       Manufacturer (TSMC/UMC)
+ * BIT 4       TEST/NORMAL
+ * BIT 3       8723 version
+ * BIT 2       8723?
+ * BIT 1       1T2R?
+ * BIT 0       88C/92C
+ */
+
 enum version_8192c {
        VERSION_A_CHIP_92C = 0x01,
        VERSION_A_CHIP_88C = 0x00,
        VERSION_B_CHIP_92C = 0x11,
        VERSION_B_CHIP_88C = 0x10,
+       VERSION_TEST_CHIP_88C = 0x00,
+       VERSION_TEST_CHIP_92C = 0x01,
+       VERSION_NORMAL_TSMC_CHIP_88C = 0x10,
+       VERSION_NORMAL_TSMC_CHIP_92C = 0x11,
+       VERSION_NORMAL_TSMC_CHIP_92C_1T2R = 0x13,
+       VERSION_NORMAL_UMC_CHIP_88C_A_CUT = 0x30,
+       VERSION_NORMAL_UMC_CHIP_92C_A_CUT = 0x31,
+       VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT = 0x33,
+       VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT = 0x34,
+       VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT = 0x3c,
+       VERSION_NORMAL_UMC_CHIP_88C_B_CUT = 0x70,
+       VERSION_NORMAL_UMC_CHIP_92C_B_CUT = 0x71,
+       VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT = 0x73,
        VERSION_UNKNOWN = 0x88,
 };
 
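The comment above documents how vendor, cut and chip type are packed into enum version_8192c. A small decode sketch of that bit layout; the VER_BIT_* macro names are illustrative and are not the driver's own definitions:

    #include <stdio.h>

    #define VER_BIT_92C       0x01  /* BIT 0: 0 = 88C, 1 = 92C          */
    #define VER_BIT_NORMAL    0x10  /* BIT 4: 0 = test chip, 1 = normal */
    #define VER_BIT_UMC       0x20  /* BIT 5: 0 = TSMC, 1 = UMC         */
    #define VER_BIT_UMC_BCUT  0x40  /* BIT 6: UMC B-cut                 */

    int main(void)
    {
            unsigned int ver = 0x71;        /* VERSION_NORMAL_UMC_CHIP_92C_B_CUT */

            printf("%s %s chip, %s, %s\n",
                   (ver & VER_BIT_UMC) ? "UMC" : "TSMC",
                   (ver & VER_BIT_92C) ? "92C" : "88C",
                   (ver & VER_BIT_NORMAL) ? "normal" : "test",
                   (ver & VER_BIT_UMC_BCUT) ? "B-cut" : "A-cut/other");
            return 0;
    }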
@@ -254,4 +280,122 @@ struct h2c_cmd_8192c {
        u8 *p_cmdbuffer;
 };
 
+static inline u8 _rtl92c_get_chnl_group(u8 chnl)
+{
+       u8 group = 0;
+
+       if (chnl < 3)
+               group = 0;
+       else if (chnl < 9)
+               group = 1;
+       else
+               group = 2;
+
+       return group;
+}
+
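The grouping above maps channels 1-2 to group 0, 3-8 to group 1 and 9 through 14 to group 2. A quick boundary check, using a local copy of the helper rather than the driver symbol:

    #include <assert.h>

    /* Local copy of _rtl92c_get_chnl_group() for illustration only. */
    static unsigned char chnl_group(unsigned char chnl)
    {
            if (chnl < 3)
                    return 0;
            if (chnl < 9)
                    return 1;
            return 2;
    }

    int main(void)
    {
            assert(chnl_group(1) == 0 && chnl_group(2) == 0);
            assert(chnl_group(3) == 1 && chnl_group(8) == 1);
            assert(chnl_group(9) == 2 && chnl_group(14) == 2);
            return 0;
    }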
+/* NOTE: reference to rtl8192c_rates struct */
+static inline int _rtl92c_rate_mapping(struct ieee80211_hw *hw, bool isHT,
+                                      u8 desc_rate, bool first_ampdu)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       int rate_idx = 0;
+
+       if (first_ampdu) {
+               if (false == isHT) {
+                       switch (desc_rate) {
+                       case DESC92C_RATE1M:
+                               rate_idx = 0;
+                               break;
+                       case DESC92C_RATE2M:
+                               rate_idx = 1;
+                               break;
+                       case DESC92C_RATE5_5M:
+                               rate_idx = 2;
+                               break;
+                       case DESC92C_RATE11M:
+                               rate_idx = 3;
+                               break;
+                       case DESC92C_RATE6M:
+                               rate_idx = 4;
+                               break;
+                       case DESC92C_RATE9M:
+                               rate_idx = 5;
+                               break;
+                       case DESC92C_RATE12M:
+                               rate_idx = 6;
+                               break;
+                       case DESC92C_RATE18M:
+                               rate_idx = 7;
+                               break;
+                       case DESC92C_RATE24M:
+                               rate_idx = 8;
+                               break;
+                       case DESC92C_RATE36M:
+                               rate_idx = 9;
+                               break;
+                       case DESC92C_RATE48M:
+                               rate_idx = 10;
+                               break;
+                       case DESC92C_RATE54M:
+                               rate_idx = 11;
+                               break;
+                       default:
+                               RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
+                                        ("Rate %d is not supported, set to "
+                                         "1M rate.\n", desc_rate));
+                               rate_idx = 0;
+                               break;
+                       }
+               } else {
+                       rate_idx = 11;
+               }
+               return rate_idx;
+       }
+       switch (desc_rate) {
+       case DESC92C_RATE1M:
+               rate_idx = 0;
+               break;
+       case DESC92C_RATE2M:
+               rate_idx = 1;
+               break;
+       case DESC92C_RATE5_5M:
+               rate_idx = 2;
+               break;
+       case DESC92C_RATE11M:
+               rate_idx = 3;
+               break;
+       case DESC92C_RATE6M:
+               rate_idx = 4;
+               break;
+       case DESC92C_RATE9M:
+               rate_idx = 5;
+               break;
+       case DESC92C_RATE12M:
+               rate_idx = 6;
+               break;
+       case DESC92C_RATE18M:
+               rate_idx = 7;
+               break;
+       case DESC92C_RATE24M:
+               rate_idx = 8;
+               break;
+       case DESC92C_RATE36M:
+               rate_idx = 9;
+               break;
+       case DESC92C_RATE48M:
+               rate_idx = 10;
+               break;
+       case DESC92C_RATE54M:
+               rate_idx = 11;
+               break;
+       /* TODO: How to map MCS rates? */
+       /* NOTE: reference to __ieee80211_rx */
+       default:
+               rate_idx = 11;
+               break;
+       }
+       return rate_idx;
+}
+
 #endif
index 62e7c64e087b52ad9a7b756a1bff3e57afda6001..7d76504df4d18ea4173ef8196d74a81e69ee961b 100644
 #include "def.h"
 #include "phy.h"
 #include "dm.h"
-#include "fw.h"
 
-struct dig_t dm_digtable;
-static struct ps_t dm_pstable;
-
-static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
-       0x7f8001fe,
-       0x788001e2,
-       0x71c001c7,
-       0x6b8001ae,
-       0x65400195,
-       0x5fc0017f,
-       0x5a400169,
-       0x55400155,
-       0x50800142,
-       0x4c000130,
-       0x47c0011f,
-       0x43c0010f,
-       0x40000100,
-       0x3c8000f2,
-       0x390000e4,
-       0x35c000d7,
-       0x32c000cb,
-       0x300000c0,
-       0x2d4000b5,
-       0x2ac000ab,
-       0x288000a2,
-       0x26000098,
-       0x24000090,
-       0x22000088,
-       0x20000080,
-       0x1e400079,
-       0x1c800072,
-       0x1b00006c,
-       0x19800066,
-       0x18000060,
-       0x16c0005b,
-       0x15800056,
-       0x14400051,
-       0x1300004c,
-       0x12000048,
-       0x11000044,
-       0x10000040,
-};
-
-static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
-       {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
-       {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},
-       {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
-       {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03},
-       {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
-       {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03},
-       {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
-       {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03},
-       {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
-       {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02},
-       {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
-       {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02},
-       {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
-       {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02},
-       {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
-       {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02},
-       {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
-       {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02},
-       {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
-       {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
-       {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
-       {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01},
-       {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01},
-       {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01},
-       {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01},
-       {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01},
-       {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01},
-       {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01},
-       {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01},
-       {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01},
-       {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01},
-       {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01},
-       {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}
-};
-
-static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
-       {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
-       {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},
-       {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
-       {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00},
-       {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
-       {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00},
-       {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
-       {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00},
-       {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
-       {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00},
-       {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
-       {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00},
-       {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
-       {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00},
-       {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
-       {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00},
-       {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
-       {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00},
-       {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
-       {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
-       {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
-       {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00},
-       {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00},
-       {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
-       {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
-       {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00},
-       {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
-       {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
-       {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
-       {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
-       {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
-       {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
-       {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
-};
-
-static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
-{
-       dm_digtable.dig_enable_flag = true;
-       dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
-       dm_digtable.cur_igvalue = 0x20;
-       dm_digtable.pre_igvalue = 0x0;
-       dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
-       dm_digtable.presta_connectstate = DIG_STA_DISCONNECT;
-       dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
-       dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
-       dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
-       dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
-       dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
-       dm_digtable.rx_gain_range_max = DM_DIG_MAX;
-       dm_digtable.rx_gain_range_min = DM_DIG_MIN;
-       dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
-       dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
-       dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
-       dm_digtable.pre_cck_pd_state = CCK_PD_STAGE_MAX;
-       dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
-}
-
-static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       long rssi_val_min = 0;
-
-       if ((dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
-           (dm_digtable.cursta_connectctate == DIG_STA_CONNECT)) {
-               if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
-                       rssi_val_min =
-                           (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
-                            rtlpriv->dm.undecorated_smoothed_pwdb) ?
-                           rtlpriv->dm.undecorated_smoothed_pwdb :
-                           rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
-               else
-                       rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
-       } else if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT ||
-                  dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT) {
-               rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
-       } else if (dm_digtable.curmultista_connectstate ==
-                  DIG_MULTISTA_CONNECT) {
-               rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
-       }
-
-       return (u8) rssi_val_min;
-}
-
-static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
-{
-       u32 ret_value;
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
-
-       ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
-       falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
-
-       ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
-       falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
-       falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
-
-       ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
-       falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
-       falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
-           falsealm_cnt->cnt_rate_illegal +
-           falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail;
-
-       rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1);
-       ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
-       falsealm_cnt->cnt_cck_fail = ret_value;
-
-       ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
-       falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
-       falsealm_cnt->cnt_all = (falsealm_cnt->cnt_parity_fail +
-                                falsealm_cnt->cnt_rate_illegal +
-                                falsealm_cnt->cnt_crc8_fail +
-                                falsealm_cnt->cnt_mcs_fail +
-                                falsealm_cnt->cnt_cck_fail);
-
-       rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
-       rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
-       rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
-       rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
-
-       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
-                ("cnt_parity_fail = %d, cnt_rate_illegal = %d, "
-                 "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
-                 falsealm_cnt->cnt_parity_fail,
-                 falsealm_cnt->cnt_rate_illegal,
-                 falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail));
-
-       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
-                ("cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
-                 falsealm_cnt->cnt_ofdm_fail,
-                 falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all));
-}
-
-static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u8 value_igi = dm_digtable.cur_igvalue;
-
-       if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
-               value_igi--;
-       else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)
-               value_igi += 0;
-       else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2)
-               value_igi++;
-       else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2)
-               value_igi += 2;
-       if (value_igi > DM_DIG_FA_UPPER)
-               value_igi = DM_DIG_FA_UPPER;
-       else if (value_igi < DM_DIG_FA_LOWER)
-               value_igi = DM_DIG_FA_LOWER;
-       if (rtlpriv->falsealm_cnt.cnt_all > 10000)
-               value_igi = 0x32;
-
-       dm_digtable.cur_igvalue = value_igi;
-       rtl92c_dm_write_dig(hw);
-}
-
-static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable.fa_highthresh) {
-               if ((dm_digtable.backoff_val - 2) <
-                   dm_digtable.backoff_val_range_min)
-                       dm_digtable.backoff_val =
-                           dm_digtable.backoff_val_range_min;
-               else
-                       dm_digtable.backoff_val -= 2;
-       } else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable.fa_lowthresh) {
-               if ((dm_digtable.backoff_val + 2) >
-                   dm_digtable.backoff_val_range_max)
-                       dm_digtable.backoff_val =
-                           dm_digtable.backoff_val_range_max;
-               else
-                       dm_digtable.backoff_val += 2;
-       }
-
-       if ((dm_digtable.rssi_val_min + 10 - dm_digtable.backoff_val) >
-           dm_digtable.rx_gain_range_max)
-               dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_max;
-       else if ((dm_digtable.rssi_val_min + 10 -
-                 dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
-               dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_min;
-       else
-               dm_digtable.cur_igvalue = dm_digtable.rssi_val_min + 10 -
-                   dm_digtable.backoff_val;
-
-       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
-                ("rssi_val_min = %x backoff_val %x\n",
-                 dm_digtable.rssi_val_min, dm_digtable.backoff_val));
-
-       rtl92c_dm_write_dig(hw);
-}
-
-static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
-{
-       static u8 binitialized; /* initialized to false */
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-       long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
-       bool b_multi_sta = false;
-
-       if (mac->opmode == NL80211_IFTYPE_ADHOC)
-               b_multi_sta = true;
-
-       if ((b_multi_sta == false) || (dm_digtable.cursta_connectctate !=
-                                      DIG_STA_DISCONNECT)) {
-               binitialized = false;
-               dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
-               return;
-       } else if (binitialized == false) {
-               binitialized = true;
-               dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
-               dm_digtable.cur_igvalue = 0x20;
-               rtl92c_dm_write_dig(hw);
-       }
-
-       if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) {
-               if ((rssi_strength < dm_digtable.rssi_lowthresh) &&
-                   (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
-
-                       if (dm_digtable.dig_ext_port_stage ==
-                           DIG_EXT_PORT_STAGE_2) {
-                               dm_digtable.cur_igvalue = 0x20;
-                               rtl92c_dm_write_dig(hw);
-                       }
-
-                       dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
-               } else if (rssi_strength > dm_digtable.rssi_highthresh) {
-                       dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
-                       rtl92c_dm_ctrl_initgain_by_fa(hw);
-               }
-       } else if (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
-               dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
-               dm_digtable.cur_igvalue = 0x20;
-               rtl92c_dm_write_dig(hw);
-       }
-
-       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
-                ("curmultista_connectstate = "
-                 "%x dig_ext_port_stage %x\n",
-                 dm_digtable.curmultista_connectstate,
-                 dm_digtable.dig_ext_port_stage));
-}
-
-static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
-                ("presta_connectstate = %x,"
-                 " cursta_connectctate = %x\n",
-                 dm_digtable.presta_connectstate,
-                 dm_digtable.cursta_connectctate));
-
-       if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate
-           || dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT
-           || dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
-
-               if (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) {
-                       dm_digtable.rssi_val_min =
-                           rtl92c_dm_initial_gain_min_pwdb(hw);
-                       rtl92c_dm_ctrl_initgain_by_rssi(hw);
-               }
-       } else {
-               dm_digtable.rssi_val_min = 0;
-               dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
-               dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
-               dm_digtable.cur_igvalue = 0x20;
-               dm_digtable.pre_igvalue = 0;
-               rtl92c_dm_write_dig(hw);
-       }
-}
-
-static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
-       if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
-               dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
-
-               if (dm_digtable.pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
-                       if (dm_digtable.rssi_val_min <= 25)
-                               dm_digtable.cur_cck_pd_state =
-                                   CCK_PD_STAGE_LowRssi;
-                       else
-                               dm_digtable.cur_cck_pd_state =
-                                   CCK_PD_STAGE_HighRssi;
-               } else {
-                       if (dm_digtable.rssi_val_min <= 20)
-                               dm_digtable.cur_cck_pd_state =
-                                   CCK_PD_STAGE_LowRssi;
-                       else
-                               dm_digtable.cur_cck_pd_state =
-                                   CCK_PD_STAGE_HighRssi;
-               }
-       } else {
-               dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
-       }
-
-       if (dm_digtable.pre_cck_pd_state != dm_digtable.cur_cck_pd_state) {
-               if (dm_digtable.cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
-                       if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
-                               dm_digtable.cur_cck_fa_state =
-                                   CCK_FA_STAGE_High;
-                       else
-                               dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_Low;
-
-                       if (dm_digtable.pre_cck_fa_state !=
-                           dm_digtable.cur_cck_fa_state) {
-                               if (dm_digtable.cur_cck_fa_state ==
-                                   CCK_FA_STAGE_Low)
-                                       rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
-                                                     0x83);
-                               else
-                                       rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
-                                                     0xcd);
-
-                               dm_digtable.pre_cck_fa_state =
-                                   dm_digtable.cur_cck_fa_state;
-                       }
-
-                       rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);
-
-                       if (IS_92C_SERIAL(rtlhal->version))
-                               rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
-                                             MASKBYTE2, 0xd7);
-               } else {
-                       rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
-                       rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);
-
-                       if (IS_92C_SERIAL(rtlhal->version))
-                               rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
-                                             MASKBYTE2, 0xd3);
-               }
-               dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state;
-       }
-
-       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
-                ("CCKPDStage=%x\n", dm_digtable.cur_cck_pd_state));
-
-       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
-                ("is92C=%x\n", IS_92C_SERIAL(rtlhal->version)));
-}
-
-static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
-{
-       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-
-       if (mac->act_scanning == true)
-               return;
-
-       if ((mac->link_state > MAC80211_NOLINK) &&
-           (mac->link_state < MAC80211_LINKED))
-               dm_digtable.cursta_connectctate = DIG_STA_BEFORE_CONNECT;
-       else if (mac->link_state >= MAC80211_LINKED)
-               dm_digtable.cursta_connectctate = DIG_STA_CONNECT;
-       else
-               dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
-
-       rtl92c_dm_initial_gain_sta(hw);
-       rtl92c_dm_initial_gain_multi_sta(hw);
-       rtl92c_dm_cck_packet_detection_thresh(hw);
-
-       dm_digtable.presta_connectstate = dm_digtable.cursta_connectctate;
-
-}
-
-static void rtl92c_dm_dig(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       if (rtlpriv->dm.b_dm_initialgain_enable == false)
-               return;
-       if (dm_digtable.dig_enable_flag == false)
-               return;
-
-       rtl92c_dm_ctrl_initgain_by_twoport(hw);
-
-}
-
-static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtlpriv->dm.bdynamic_txpower_enable = false;
-
-       rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
-       rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
-}
-
-static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
+void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_phy *rtlphy = &(rtlpriv->phy);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        long undecorated_smoothed_pwdb;
 
-       if (!rtlpriv->dm.bdynamic_txpower_enable)
+       if (!rtlpriv->dm.dynamic_txpower_enable)
                return;
 
        if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
@@ -583,891 +111,3 @@ static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
 
        rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
 }
-
-void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
-                ("cur_igvalue = 0x%x, "
-                 "pre_igvalue = 0x%x, backoff_val = %d\n",
-                 dm_digtable.cur_igvalue, dm_digtable.pre_igvalue,
-                 dm_digtable.backoff_val));
-
-       if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) {
-               rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
-                             dm_digtable.cur_igvalue);
-               rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
-                             dm_digtable.cur_igvalue);
-
-               dm_digtable.pre_igvalue = dm_digtable.cur_igvalue;
-       }
-}
-
-static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
-
-       u8 h2c_parameter[3] = { 0 };
-
-       return;
-
-       if (tmpentry_max_pwdb != 0) {
-               rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb =
-                   tmpentry_max_pwdb;
-       } else {
-               rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0;
-       }
-
-       if (tmpentry_min_pwdb != 0xff) {
-               rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb =
-                   tmpentry_min_pwdb;
-       } else {
-               rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0;
-       }
-
-       h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
-       h2c_parameter[0] = 0;
-
-       rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
-}
-
-void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       rtlpriv->dm.bcurrent_turbo_edca = false;
-       rtlpriv->dm.bis_any_nonbepkts = false;
-       rtlpriv->dm.bis_cur_rdlstate = false;
-}
-
-static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-       static u64 last_txok_cnt;
-       static u64 last_rxok_cnt;
-       u64 cur_txok_cnt;
-       u64 cur_rxok_cnt;
-       u32 edca_be_ul = 0x5ea42b;
-       u32 edca_be_dl = 0x5ea42b;
-
-       if (mac->opmode == NL80211_IFTYPE_ADHOC)
-               goto dm_checkedcaturbo_exit;
-
-       if (mac->link_state != MAC80211_LINKED) {
-               rtlpriv->dm.bcurrent_turbo_edca = false;
-               return;
-       }
-
-       if (!mac->ht_enable) {  /*FIX MERGE */
-               if (!(edca_be_ul & 0xffff0000))
-                       edca_be_ul |= 0x005e0000;
-
-               if (!(edca_be_dl & 0xffff0000))
-                       edca_be_dl |= 0x005e0000;
-       }
-
-       if ((!rtlpriv->dm.bis_any_nonbepkts) &&
-           (!rtlpriv->dm.b_disable_framebursting)) {
-               cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
-               cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
-               if (cur_rxok_cnt > 4 * cur_txok_cnt) {
-                       if (!rtlpriv->dm.bis_cur_rdlstate ||
-                           !rtlpriv->dm.bcurrent_turbo_edca) {
-                               rtl_write_dword(rtlpriv,
-                                               REG_EDCA_BE_PARAM,
-                                               edca_be_dl);
-                               rtlpriv->dm.bis_cur_rdlstate = true;
-                       }
-               } else {
-                       if (rtlpriv->dm.bis_cur_rdlstate ||
-                           !rtlpriv->dm.bcurrent_turbo_edca) {
-                               rtl_write_dword(rtlpriv,
-                                               REG_EDCA_BE_PARAM,
-                                               edca_be_ul);
-                               rtlpriv->dm.bis_cur_rdlstate = false;
-                       }
-               }
-               rtlpriv->dm.bcurrent_turbo_edca = true;
-       } else {
-               if (rtlpriv->dm.bcurrent_turbo_edca) {
-                       u8 tmp = AC0_BE;
-                       rtlpriv->cfg->ops->set_hw_reg(hw,
-                                                     HW_VAR_AC_PARAM,
-                                                     (u8 *) (&tmp));
-                       rtlpriv->dm.bcurrent_turbo_edca = false;
-               }
-       }
-
-dm_checkedcaturbo_exit:
-       rtlpriv->dm.bis_any_nonbepkts = false;
-       last_txok_cnt = rtlpriv->stats.txbytesunicast;
-       last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
-}
-
-static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
-                                                            *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-       u8 thermalvalue, delta, delta_lck, delta_iqk;
-       long ele_a, ele_d, temp_cck, val_x, value32;
-       long val_y, ele_c;
-       u8 ofdm_index[2], cck_index, ofdm_index_old[2], cck_index_old;
-       int i;
-       bool is2t = IS_92C_SERIAL(rtlhal->version);
-       u8 txpwr_level[2] = {0, 0};
-       u8 ofdm_min_index = 6, rf;
-
-       rtlpriv->dm.btxpower_trackingInit = true;
-       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                ("rtl92c_dm_txpower_tracking_callback_thermalmeter\n"));
-
-       thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);
-
-       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
-                 "eeprom_thermalmeter 0x%x\n",
-                 thermalvalue, rtlpriv->dm.thermalvalue,
-                 rtlefuse->eeprom_thermalmeter));
-
-       rtl92c_phy_ap_calibrate(hw, (thermalvalue -
-                                    rtlefuse->eeprom_thermalmeter));
-       if (is2t)
-               rf = 2;
-       else
-               rf = 1;
-
-       if (thermalvalue) {
-               ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
-                                     MASKDWORD) & MASKOFDM_D;
-
-               for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
-                       if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
-                               ofdm_index_old[0] = (u8) i;
-
-                               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                                       ("Initial pathA ele_d reg0x%x = 0x%lx, "
-                                        "ofdm_index=0x%x\n",
-                                        ROFDM0_XATXIQIMBALANCE,
-                                        ele_d, ofdm_index_old[0]));
-                               break;
-                       }
-               }
-
-               if (is2t) {
-                       ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
-                                             MASKDWORD) & MASKOFDM_D;
-
-                       for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
-                               if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
-                                       ofdm_index_old[1] = (u8) i;
-
-                                       RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
-                                          DBG_LOUD,
-                                          ("Initial pathB ele_d reg0x%x = "
-                                          "0x%lx, ofdm_index=0x%x\n",
-                                          ROFDM0_XBTXIQIMBALANCE, ele_d,
-                                          ofdm_index_old[1]));
-                                       break;
-                               }
-                       }
-               }
-
-               temp_cck =
-                   rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & MASKCCK;
-
-               for (i = 0; i < CCK_TABLE_LENGTH; i++) {
-                       if (rtlpriv->dm.b_cck_inch14) {
-                               if (memcmp((void *)&temp_cck,
-                                          (void *)&cckswing_table_ch14[i][2],
-                                          4) == 0) {
-                                       cck_index_old = (u8) i;
-
-                                       RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
-                                                DBG_LOUD,
-                                                ("Initial reg0x%x = 0x%lx, "
-                                                 "cck_index=0x%x, ch 14 %d\n",
-                                                 RCCK0_TXFILTER2, temp_cck,
-                                                 cck_index_old,
-                                                 rtlpriv->dm.b_cck_inch14));
-                                       break;
-                               }
-                       } else {
-                               if (memcmp((void *)&temp_cck,
-                                          (void *)
-                                          &cckswing_table_ch1ch13[i][2],
-                                          4) == 0) {
-                                       cck_index_old = (u8) i;
-
-                                       RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
-                                                DBG_LOUD,
-                                                ("Initial reg0x%x = 0x%lx, "
-                                                 "cck_index=0x%x, ch14 %d\n",
-                                                 RCCK0_TXFILTER2, temp_cck,
-                                                 cck_index_old,
-                                                 rtlpriv->dm.b_cck_inch14));
-                                       break;
-                               }
-                       }
-               }
-
-               if (!rtlpriv->dm.thermalvalue) {
-                       rtlpriv->dm.thermalvalue =
-                           rtlefuse->eeprom_thermalmeter;
-                       rtlpriv->dm.thermalvalue_lck = thermalvalue;
-                       rtlpriv->dm.thermalvalue_iqk = thermalvalue;
-                       for (i = 0; i < rf; i++)
-                               rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
-                       rtlpriv->dm.cck_index = cck_index_old;
-               }
-
-               delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
-                   (thermalvalue - rtlpriv->dm.thermalvalue) :
-                   (rtlpriv->dm.thermalvalue - thermalvalue);
-
-               delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
-                   (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
-                   (rtlpriv->dm.thermalvalue_lck - thermalvalue);
-
-               delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
-                   (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
-                   (rtlpriv->dm.thermalvalue_iqk - thermalvalue);
-
-               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                       ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
-                        "eeprom_thermalmeter 0x%x delta 0x%x "
-                        "delta_lck 0x%x delta_iqk 0x%x\n",
-                        thermalvalue, rtlpriv->dm.thermalvalue,
-                        rtlefuse->eeprom_thermalmeter, delta, delta_lck,
-                        delta_iqk));
-
-               if (delta_lck > 1) {
-                       rtlpriv->dm.thermalvalue_lck = thermalvalue;
-                       rtl92c_phy_lc_calibrate(hw);
-               }
-
-               if (delta > 0 && rtlpriv->dm.txpower_track_control) {
-                       if (thermalvalue > rtlpriv->dm.thermalvalue) {
-                               for (i = 0; i < rf; i++)
-                                       rtlpriv->dm.ofdm_index[i] -= delta;
-                               rtlpriv->dm.cck_index -= delta;
-                       } else {
-                               for (i = 0; i < rf; i++)
-                                       rtlpriv->dm.ofdm_index[i] += delta;
-                               rtlpriv->dm.cck_index += delta;
-                       }
-
-                       if (is2t) {
-                               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                                        ("temp OFDM_A_index=0x%x, "
-                                         "OFDM_B_index=0x%x,"
-                                         "cck_index=0x%x\n",
-                                         rtlpriv->dm.ofdm_index[0],
-                                         rtlpriv->dm.ofdm_index[1],
-                                         rtlpriv->dm.cck_index));
-                       } else {
-                               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                                        ("temp OFDM_A_index=0x%x,"
-                                         "cck_index=0x%x\n",
-                                         rtlpriv->dm.ofdm_index[0],
-                                         rtlpriv->dm.cck_index));
-                       }
-
-                       if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
-                               for (i = 0; i < rf; i++)
-                                       ofdm_index[i] =
-                                           rtlpriv->dm.ofdm_index[i]
-                                           + 1;
-                               cck_index = rtlpriv->dm.cck_index + 1;
-                       } else {
-                               for (i = 0; i < rf; i++)
-                                       ofdm_index[i] =
-                                           rtlpriv->dm.ofdm_index[i];
-                               cck_index = rtlpriv->dm.cck_index;
-                       }
-
-                       for (i = 0; i < rf; i++) {
-                               if (txpwr_level[i] >= 0 &&
-                                   txpwr_level[i] <= 26) {
-                                       if (thermalvalue >
-                                           rtlefuse->eeprom_thermalmeter) {
-                                               if (delta < 5)
-                                                       ofdm_index[i] -= 1;
-
-                                               else
-                                                       ofdm_index[i] -= 2;
-                                       } else if (delta > 5 && thermalvalue <
-                                                  rtlefuse->
-                                                  eeprom_thermalmeter) {
-                                               ofdm_index[i] += 1;
-                                       }
-                               } else if (txpwr_level[i] >= 27 &&
-                                          txpwr_level[i] <= 32
-                                          && thermalvalue >
-                                          rtlefuse->eeprom_thermalmeter) {
-                                       if (delta < 5)
-                                               ofdm_index[i] -= 1;
-
-                                       else
-                                               ofdm_index[i] -= 2;
-                               } else if (txpwr_level[i] >= 32 &&
-                                          txpwr_level[i] <= 38 &&
-                                          thermalvalue >
-                                          rtlefuse->eeprom_thermalmeter
-                                          && delta > 5) {
-                                       ofdm_index[i] -= 1;
-                               }
-                       }
-
-                       if (txpwr_level[i] >= 0 && txpwr_level[i] <= 26) {
-                               if (thermalvalue >
-                                   rtlefuse->eeprom_thermalmeter) {
-                                       if (delta < 5)
-                                               cck_index -= 1;
-
-                                       else
-                                               cck_index -= 2;
-                               } else if (delta > 5 && thermalvalue <
-                                          rtlefuse->eeprom_thermalmeter) {
-                                       cck_index += 1;
-                               }
-                       } else if (txpwr_level[i] >= 27 &&
-                                  txpwr_level[i] <= 32 &&
-                                  thermalvalue >
-                                  rtlefuse->eeprom_thermalmeter) {
-                               if (delta < 5)
-                                       cck_index -= 1;
-
-                               else
-                                       cck_index -= 2;
-                       } else if (txpwr_level[i] >= 32 &&
-                                  txpwr_level[i] <= 38 &&
-                                  thermalvalue > rtlefuse->eeprom_thermalmeter
-                                  && delta > 5) {
-                               cck_index -= 1;
-                       }
-
-                       for (i = 0; i < rf; i++) {
-                               if (ofdm_index[i] > OFDM_TABLE_SIZE - 1)
-                                       ofdm_index[i] = OFDM_TABLE_SIZE - 1;
-
-                               else if (ofdm_index[i] < ofdm_min_index)
-                                       ofdm_index[i] = ofdm_min_index;
-                       }
-
-                       if (cck_index > CCK_TABLE_SIZE - 1)
-                               cck_index = CCK_TABLE_SIZE - 1;
-                       else if (cck_index < 0)
-                               cck_index = 0;
-
-                       if (is2t) {
-                               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                                        ("new OFDM_A_index=0x%x, "
-                                         "OFDM_B_index=0x%x,"
-                                         "cck_index=0x%x\n",
-                                         ofdm_index[0], ofdm_index[1],
-                                         cck_index));
-                       } else {
-                               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                                        ("new OFDM_A_index=0x%x,"
-                                         "cck_index=0x%x\n",
-                                         ofdm_index[0], cck_index));
-                       }
-               }
-
-               if (rtlpriv->dm.txpower_track_control && delta != 0) {
-                       ele_d =
-                           (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22;
-                       val_x = rtlphy->reg_e94;
-                       val_y = rtlphy->reg_e9c;
-
-                       if (val_x != 0) {
-                               if ((val_x & 0x00000200) != 0)
-                                       val_x = val_x | 0xFFFFFC00;
-                               ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
-
-                               if ((val_y & 0x00000200) != 0)
-                                       val_y = val_y | 0xFFFFFC00;
-                               ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
-
-                               value32 = (ele_d << 22) |
-                                   ((ele_c & 0x3F) << 16) | ele_a;
-
-                               rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
-                                             MASKDWORD, value32);
-
-                               value32 = (ele_c & 0x000003C0) >> 6;
-                               rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
-                                             value32);
-
-                               value32 = ((val_x * ele_d) >> 7) & 0x01;
-                               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
-                                             BIT(31), value32);
-
-                               value32 = ((val_y * ele_d) >> 7) & 0x01;
-                               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
-                                             BIT(29), value32);
-                       } else {
-                               rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
-                                             MASKDWORD,
-                                             ofdmswing_table[ofdm_index[0]]);
-
-                               rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
-                                             0x00);
-                               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
-                                             BIT(31) | BIT(29), 0x00);
-                       }
-
-                       if (!rtlpriv->dm.b_cck_inch14) {
-                               rtl_write_byte(rtlpriv, 0xa22,
-                                              cckswing_table_ch1ch13[cck_index]
-                                              [0]);
-                               rtl_write_byte(rtlpriv, 0xa23,
-                                              cckswing_table_ch1ch13[cck_index]
-                                              [1]);
-                               rtl_write_byte(rtlpriv, 0xa24,
-                                              cckswing_table_ch1ch13[cck_index]
-                                              [2]);
-                               rtl_write_byte(rtlpriv, 0xa25,
-                                              cckswing_table_ch1ch13[cck_index]
-                                              [3]);
-                               rtl_write_byte(rtlpriv, 0xa26,
-                                              cckswing_table_ch1ch13[cck_index]
-                                              [4]);
-                               rtl_write_byte(rtlpriv, 0xa27,
-                                              cckswing_table_ch1ch13[cck_index]
-                                              [5]);
-                               rtl_write_byte(rtlpriv, 0xa28,
-                                              cckswing_table_ch1ch13[cck_index]
-                                              [6]);
-                               rtl_write_byte(rtlpriv, 0xa29,
-                                              cckswing_table_ch1ch13[cck_index]
-                                              [7]);
-                       } else {
-                               rtl_write_byte(rtlpriv, 0xa22,
-                                              cckswing_table_ch14[cck_index]
-                                              [0]);
-                               rtl_write_byte(rtlpriv, 0xa23,
-                                              cckswing_table_ch14[cck_index]
-                                              [1]);
-                               rtl_write_byte(rtlpriv, 0xa24,
-                                              cckswing_table_ch14[cck_index]
-                                              [2]);
-                               rtl_write_byte(rtlpriv, 0xa25,
-                                              cckswing_table_ch14[cck_index]
-                                              [3]);
-                               rtl_write_byte(rtlpriv, 0xa26,
-                                              cckswing_table_ch14[cck_index]
-                                              [4]);
-                               rtl_write_byte(rtlpriv, 0xa27,
-                                              cckswing_table_ch14[cck_index]
-                                              [5]);
-                               rtl_write_byte(rtlpriv, 0xa28,
-                                              cckswing_table_ch14[cck_index]
-                                              [6]);
-                               rtl_write_byte(rtlpriv, 0xa29,
-                                              cckswing_table_ch14[cck_index]
-                                              [7]);
-                       }
-
-                       if (is2t) {
-                               ele_d = (ofdmswing_table[ofdm_index[1]] &
-                                        0xFFC00000) >> 22;
-
-                               val_x = rtlphy->reg_eb4;
-                               val_y = rtlphy->reg_ebc;
-
-                               if (val_x != 0) {
-                                       if ((val_x & 0x00000200) != 0)
-                                               val_x = val_x | 0xFFFFFC00;
-                                       ele_a = ((val_x * ele_d) >> 8) &
-                                           0x000003FF;
-
-                                       if ((val_y & 0x00000200) != 0)
-                                               val_y = val_y | 0xFFFFFC00;
-                                       ele_c = ((val_y * ele_d) >> 8) &
-                                           0x00003FF;
-
-                                       value32 = (ele_d << 22) |
-                                           ((ele_c & 0x3F) << 16) | ele_a;
-                                       rtl_set_bbreg(hw,
-                                                     ROFDM0_XBTXIQIMBALANCE,
-                                                     MASKDWORD, value32);
-
-                                       value32 = (ele_c & 0x000003C0) >> 6;
-                                       rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
-                                                     MASKH4BITS, value32);
-
-                                       value32 = ((val_x * ele_d) >> 7) & 0x01;
-                                       rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
-                                                     BIT(27), value32);
-
-                                       value32 = ((val_y * ele_d) >> 7) & 0x01;
-                                       rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
-                                                     BIT(25), value32);
-                               } else {
-                                       rtl_set_bbreg(hw,
-                                                     ROFDM0_XBTXIQIMBALANCE,
-                                                     MASKDWORD,
-                                                     ofdmswing_table[ofdm_index
-                                                                     [1]]);
-                                       rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
-                                                     MASKH4BITS, 0x00);
-                                       rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
-                                                     BIT(27) | BIT(25), 0x00);
-                               }
-
-                       }
-               }
-
-               if (delta_iqk > 3) {
-                       rtlpriv->dm.thermalvalue_iqk = thermalvalue;
-                       rtl92c_phy_iq_calibrate(hw, false);
-               }
-
-               if (rtlpriv->dm.txpower_track_control)
-                       rtlpriv->dm.thermalvalue = thermalvalue;
-       }
-
-       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, ("<===\n"));
-
-}
-
-static void rtl92c_dm_initialize_txpower_tracking_thermalmeter(
-                                               struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtlpriv->dm.btxpower_tracking = true;
-       rtlpriv->dm.btxpower_trackingInit = false;
-
-       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                ("pMgntInfo->btxpower_tracking = %d\n",
-                 rtlpriv->dm.btxpower_tracking));
-}
-
-static void rtl92c_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
-{
-       rtl92c_dm_initialize_txpower_tracking_thermalmeter(hw);
-}
-
-static void rtl92c_dm_txpower_tracking_directcall(struct ieee80211_hw *hw)
-{
-       rtl92c_dm_txpower_tracking_callback_thermalmeter(hw);
-}
-
-static void rtl92c_dm_check_txpower_tracking_thermal_meter(
-                                               struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       static u8 tm_trigger;
-
-       if (!rtlpriv->dm.btxpower_tracking)
-               return;
-
-       if (!tm_trigger) {
-               rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK,
-                             0x60);
-               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                        ("Trigger 92S Thermal Meter!!\n"));
-               tm_trigger = 1;
-               return;
-       } else {
-               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                        ("Schedule TxPowerTracking direct call!!\n"));
-               rtl92c_dm_txpower_tracking_directcall(hw);
-               tm_trigger = 0;
-       }
-}
-
-void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw)
-{
-       rtl92c_dm_check_txpower_tracking_thermal_meter(hw);
-}
-
-void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rate_adaptive *p_ra = &(rtlpriv->ra);
-
-       p_ra->ratr_state = DM_RATR_STA_INIT;
-       p_ra->pre_ratr_state = DM_RATR_STA_INIT;
-
-       if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
-               rtlpriv->dm.b_useramask = true;
-       else
-               rtlpriv->dm.b_useramask = false;
-
-}
-
-static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-       struct rate_adaptive *p_ra = &(rtlpriv->ra);
-       u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
-
-       if (is_hal_stop(rtlhal)) {
-               RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
-                        ("<---- driver is going to unload\n"));
-               return;
-       }
-
-       if (!rtlpriv->dm.b_useramask) {
-               RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
-                       ("<---- driver does not control rate adaptive mask\n"));
-               return;
-       }
-
-       if (mac->link_state == MAC80211_LINKED) {
-
-               switch (p_ra->pre_ratr_state) {
-               case DM_RATR_STA_HIGH:
-                       high_rssithresh_for_ra = 50;
-                       low_rssithresh_for_ra = 20;
-                       break;
-               case DM_RATR_STA_MIDDLE:
-                       high_rssithresh_for_ra = 55;
-                       low_rssithresh_for_ra = 20;
-                       break;
-               case DM_RATR_STA_LOW:
-                       high_rssithresh_for_ra = 50;
-                       low_rssithresh_for_ra = 25;
-                       break;
-               default:
-                       high_rssithresh_for_ra = 50;
-                       low_rssithresh_for_ra = 20;
-                       break;
-               }
-
-               if (rtlpriv->dm.undecorated_smoothed_pwdb >
-                   (long)high_rssithresh_for_ra)
-                       p_ra->ratr_state = DM_RATR_STA_HIGH;
-               else if (rtlpriv->dm.undecorated_smoothed_pwdb >
-                        (long)low_rssithresh_for_ra)
-                       p_ra->ratr_state = DM_RATR_STA_MIDDLE;
-               else
-                       p_ra->ratr_state = DM_RATR_STA_LOW;
-
-               if (p_ra->pre_ratr_state != p_ra->ratr_state) {
-                       RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
-                                ("RSSI = %ld\n",
-                                 rtlpriv->dm.undecorated_smoothed_pwdb));
-                       RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
-                                ("RSSI_LEVEL = %d\n", p_ra->ratr_state));
-                       RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
-                                ("PreState = %d, CurState = %d\n",
-                                 p_ra->pre_ratr_state, p_ra->ratr_state));
-
-                       rtlpriv->cfg->ops->update_rate_mask(hw,
-                                       p_ra->ratr_state);
-
-                       p_ra->pre_ratr_state = p_ra->ratr_state;
-               }
-       }
-}
-
-static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
-{
-       dm_pstable.pre_ccastate = CCA_MAX;
-       dm_pstable.cur_ccasate = CCA_MAX;
-       dm_pstable.pre_rfstate = RF_MAX;
-       dm_pstable.cur_rfstate = RF_MAX;
-       dm_pstable.rssi_val_min = 0;
-}
-
-static void rtl92c_dm_1r_cca(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
-       if (dm_pstable.rssi_val_min != 0) {
-               if (dm_pstable.pre_ccastate == CCA_2R) {
-                       if (dm_pstable.rssi_val_min >= 35)
-                               dm_pstable.cur_ccasate = CCA_1R;
-                       else
-                               dm_pstable.cur_ccasate = CCA_2R;
-               } else {
-                       if (dm_pstable.rssi_val_min <= 30)
-                               dm_pstable.cur_ccasate = CCA_2R;
-                       else
-                               dm_pstable.cur_ccasate = CCA_1R;
-               }
-       } else {
-               dm_pstable.cur_ccasate = CCA_MAX;
-       }
-
-       if (dm_pstable.pre_ccastate != dm_pstable.cur_ccasate) {
-               if (dm_pstable.cur_ccasate == CCA_1R) {
-                       if (get_rf_type(rtlphy) == RF_2T2R) {
-                               rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
-                                             MASKBYTE0, 0x13);
-                               rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x20);
-                       } else {
-                               rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
-                                             MASKBYTE0, 0x23);
-                               rtl_set_bbreg(hw, 0xe70, 0x7fc00000, 0x10c);
-                       }
-               } else {
-                       rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0,
-                                     0x33);
-                       rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x63);
-               }
-               dm_pstable.pre_ccastate = dm_pstable.cur_ccasate;
-       }
-
-       RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, ("CCAStage = %s\n",
-                                              (dm_pstable.cur_ccasate ==
-                                               0) ? "1RCCA" : "2RCCA"));
-}
-
-void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
-{
-       static u8 initialize;
-       static u32 reg_874, reg_c70, reg_85c, reg_a74;
-
-       if (initialize == 0) {
-               reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
-                                        MASKDWORD) & 0x1CC000) >> 14;
-
-               reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
-                                        MASKDWORD) & BIT(3)) >> 3;
-
-               reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
-                                        MASKDWORD) & 0xFF000000) >> 24;
-
-               reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12;
-
-               initialize = 1;
-       }
-
-       if (!bforce_in_normal) {
-               if (dm_pstable.rssi_val_min != 0) {
-                       if (dm_pstable.pre_rfstate == RF_NORMAL) {
-                               if (dm_pstable.rssi_val_min >= 30)
-                                       dm_pstable.cur_rfstate = RF_SAVE;
-                               else
-                                       dm_pstable.cur_rfstate = RF_NORMAL;
-                       } else {
-                               if (dm_pstable.rssi_val_min <= 25)
-                                       dm_pstable.cur_rfstate = RF_NORMAL;
-                               else
-                                       dm_pstable.cur_rfstate = RF_SAVE;
-                       }
-               } else {
-                       dm_pstable.cur_rfstate = RF_MAX;
-               }
-       } else {
-               dm_pstable.cur_rfstate = RF_NORMAL;
-       }
-
-       if (dm_pstable.pre_rfstate != dm_pstable.cur_rfstate) {
-               if (dm_pstable.cur_rfstate == RF_SAVE) {
-                       rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
-                                     0x1C0000, 0x2);
-                       rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
-                       rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
-                                     0xFF000000, 0x63);
-                       rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
-                                     0xC000, 0x2);
-                       rtl_set_bbreg(hw, 0xa74, 0xF000, 0x3);
-                       rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
-                       rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
-               } else {
-                       rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
-                                     0x1CC000, reg_874);
-                       rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
-                                     reg_c70);
-                       rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000,
-                                     reg_85c);
-                       rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74);
-                       rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
-               }
-
-               dm_pstable.pre_rfstate = dm_pstable.cur_rfstate;
-       }
-}
-
-static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
-       if (((mac->link_state == MAC80211_NOLINK)) &&
-           (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
-               dm_pstable.rssi_val_min = 0;
-               RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
-                        ("Not connected to any\n"));
-       }
-
-       if (mac->link_state == MAC80211_LINKED) {
-               if (mac->opmode == NL80211_IFTYPE_ADHOC) {
-                       dm_pstable.rssi_val_min =
-                           rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
-                       RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
-                                ("AP Client PWDB = 0x%lx\n",
-                                 dm_pstable.rssi_val_min));
-               } else {
-                       dm_pstable.rssi_val_min =
-                           rtlpriv->dm.undecorated_smoothed_pwdb;
-                       RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
-                                ("STA Default Port PWDB = 0x%lx\n",
-                                 dm_pstable.rssi_val_min));
-               }
-       } else {
-               dm_pstable.rssi_val_min =
-                   rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
-
-               RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
-                        ("AP Ext Port PWDB = 0x%lx\n",
-                         dm_pstable.rssi_val_min));
-       }
-
-       if (IS_92C_SERIAL(rtlhal->version))
-               rtl92c_dm_1r_cca(hw);
-}
-
-void rtl92c_dm_init(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
-       rtl92c_dm_diginit(hw);
-       rtl92c_dm_init_dynamic_txpower(hw);
-       rtl92c_dm_init_edca_turbo(hw);
-       rtl92c_dm_init_rate_adaptive_mask(hw);
-       rtl92c_dm_initialize_txpower_tracking(hw);
-       rtl92c_dm_init_dynamic_bb_powersaving(hw);
-}
-
-void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-       bool b_fw_current_inpsmode = false;
-       bool b_fw_ps_awake = true;
-
-       rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
-                                     (u8 *) (&b_fw_current_inpsmode));
-       rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
-                                     (u8 *) (&b_fw_ps_awake));
-
-       if ((ppsc->rfpwr_state == ERFON) && ((!b_fw_current_inpsmode) &&
-                                            b_fw_ps_awake)
-           && (!ppsc->rfchange_inprogress)) {
-               rtl92c_dm_pwdb_monitor(hw);
-               rtl92c_dm_dig(hw);
-               rtl92c_dm_false_alarm_counter_statistics(hw);
-               rtl92c_dm_dynamic_bb_powersaving(hw);
-               rtl92c_dm_dynamic_txpower(hw);
-               rtl92c_dm_check_txpower_tracking(hw);
-               rtl92c_dm_refresh_rate_adaptive_mask(hw);
-               rtl92c_dm_check_edca_turbo(hw);
-       }
-}
index 463439e4074cf7f56df12e3064628e2f69560556..36302ebae4a37544d67d83f47d0199dac2ec58f2 100644
@@ -192,5 +192,6 @@ void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw);
 void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw);
 void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
 void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
+void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw);
 
 #endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/fw.c
deleted file mode 100644
index 11dd22b..0000000
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.c
+++ /dev/null
@@ -1,804 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010  Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#include <linux/firmware.h>
-#include "../wifi.h"
-#include "../pci.h"
-#include "../base.h"
-#include "reg.h"
-#include "def.h"
-#include "fw.h"
-#include "table.h"
-
-static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
-       if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CU) {
-               u32 value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
-               if (enable)
-                       value32 |= MCUFWDL_EN;
-               else
-                       value32 &= ~MCUFWDL_EN;
-               rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
-       } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE) {
-               u8 tmp;
-               if (enable) {
-
-                       tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
-                       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1,
-                                      tmp | 0x04);
-
-                       tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
-                       rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01);
-
-                       tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
-                       rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
-               } else {
-
-                       tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
-                       rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
-
-                       rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);
-               }
-       }
-}
-
-static void _rtl92c_fw_block_write(struct ieee80211_hw *hw,
-                                  const u8 *buffer, u32 size)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 blockSize = sizeof(u32);
-       u8 *bufferPtr = (u8 *) buffer;
-       u32 *pu4BytePtr = (u32 *) buffer;
-       u32 i, offset, blockCount, remainSize;
-
-       blockCount = size / blockSize;
-       remainSize = size % blockSize;
-
-       for (i = 0; i < blockCount; i++) {
-               offset = i * blockSize;
-               rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
-                               *(pu4BytePtr + i));
-       }
-
-       if (remainSize) {
-               offset = blockCount * blockSize;
-               bufferPtr += offset;
-               for (i = 0; i < remainSize; i++) {
-                       rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
-                                                offset + i), *(bufferPtr + i));
-               }
-       }
-}
-
-static void _rtl92c_fw_page_write(struct ieee80211_hw *hw,
-                                 u32 page, const u8 *buffer, u32 size)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u8 value8;
-       u8 u8page = (u8) (page & 0x07);
-
-       value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
-
-       rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
-       _rtl92c_fw_block_write(hw, buffer, size);
-}
-
-static void _rtl92c_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
-{
-       u32 fwlen = *pfwlen;
-       u8 remain = (u8) (fwlen % 4);
-
-       remain = (remain == 0) ? 0 : (4 - remain);
-
-       while (remain > 0) {
-               pfwbuf[fwlen] = 0;
-               fwlen++;
-               remain--;
-       }
-
-       *pfwlen = fwlen;
-}
-
-static void _rtl92c_write_fw(struct ieee80211_hw *hw,
-                            enum version_8192c version, u8 *buffer, u32 size)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       bool is_version_b;
-       u8 *bufferPtr = (u8 *) buffer;
-
-       RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, ("FW size is %d bytes,\n", size));
-
-       is_version_b = IS_CHIP_VER_B(version);
-       if (is_version_b) {
-               u32 pageNums, remainSize;
-               u32 page, offset;
-
-               if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE)
-                       _rtl92c_fill_dummy(bufferPtr, &size);
-
-               pageNums = size / FW_8192C_PAGE_SIZE;
-               remainSize = size % FW_8192C_PAGE_SIZE;
-
-               if (pageNums > 4) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                ("Page numbers should not be greater than 4\n"));
-               }
-
-               for (page = 0; page < pageNums; page++) {
-                       offset = page * FW_8192C_PAGE_SIZE;
-                       _rtl92c_fw_page_write(hw, page, (bufferPtr + offset),
-                                             FW_8192C_PAGE_SIZE);
-               }
-
-               if (remainSize) {
-                       offset = pageNums * FW_8192C_PAGE_SIZE;
-                       page = pageNums;
-                       _rtl92c_fw_page_write(hw, page, (bufferPtr + offset),
-                                             remainSize);
-               }
-       } else {
-               _rtl92c_fw_block_write(hw, buffer, size);
-       }
-}
-
-static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       int err = -EIO;
-       u32 counter = 0;
-       u32 value32;
-
-       do {
-               value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
-       } while ((counter++ < FW_8192C_POLLING_TIMEOUT_COUNT) &&
-                (!(value32 & FWDL_ChkSum_rpt)));
-
-       if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                ("chksum report fail! REG_MCUFWDL:0x%08x .\n",
-                         value32));
-               goto exit;
-       }
-
-       RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
-                ("Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32));
-
-       value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
-       value32 |= MCUFWDL_RDY;
-       value32 &= ~WINTINI_RDY;
-       rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
-
-       counter = 0;
-
-       do {
-               value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
-               if (value32 & WINTINI_RDY) {
-                       RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
-                                ("Polling FW ready success!!"
-                                " REG_MCUFWDL:0x%08x .\n",
-                                value32));
-                       err = 0;
-                       goto exit;
-               }
-
-               mdelay(FW_8192C_POLLING_DELAY);
-
-       } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);
-
-       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                ("Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32));
-
-exit:
-       return err;
-}
-
-int rtl92c_download_fw(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct rtl92c_firmware_header *pfwheader;
-       u8 *pfwdata;
-       u32 fwsize;
-       int err;
-       enum version_8192c version = rtlhal->version;
-
-       const struct firmware *firmware = NULL;
-
-       err = request_firmware(&firmware, rtlpriv->cfg->fw_name,
-                              rtlpriv->io.dev);
-       if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        ("Failed to request firmware!\n"));
-               return 1;
-       }
-
-       if (firmware->size > 0x4000) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        ("Firmware is too big!\n"));
-               release_firmware(firmware);
-               return 1;
-       }
-
-       memcpy(rtlhal->pfirmware, firmware->data, firmware->size);
-       fwsize = firmware->size;
-       release_firmware(firmware);
-
-       pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
-       pfwdata = (u8 *) rtlhal->pfirmware;
-
-       if (IS_FW_HEADER_EXIST(pfwheader)) {
-               RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
-                        ("Firmware Version(%d), Signature(%#x),Size(%d)\n",
-                         pfwheader->version, pfwheader->signature,
-                         (uint)sizeof(struct rtl92c_firmware_header)));
-
-               pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
-               fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
-       }
-
-       _rtl92c_enable_fw_download(hw, true);
-       _rtl92c_write_fw(hw, version, pfwdata, fwsize);
-       _rtl92c_enable_fw_download(hw, false);
-
-       err = _rtl92c_fw_free_to_go(hw);
-       if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        ("Firmware is not ready to run!\n"));
-       } else {
-               RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
-                        ("Firmware is ready to run!\n"));
-       }
-
-       return 0;
-}
-
-static bool _rtl92c_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u8 val_hmetfr, val_mcutst_1;
-       bool result = false;
-
-       val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR);
-       val_mcutst_1 = rtl_read_byte(rtlpriv, (REG_MCUTST_1 + boxnum));
-
-       if (((val_hmetfr >> boxnum) & BIT(0)) == 0 && val_mcutst_1 == 0)
-               result = true;
-       return result;
-}
-
-static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
-                             u8 element_id, u32 cmd_len, u8 *p_cmdbuffer)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       u8 boxnum;
-       u16 box_reg, box_extreg;
-       u8 u1b_tmp;
-       bool isfw_read = false;
-       u8 buf_index;
-       bool bwrite_sucess = false;
-       u8 wait_h2c_limmit = 100;
-       u8 wait_writeh2c_limmit = 100;
-       u8 boxcontent[4], boxextcontent[2];
-       u32 h2c_waitcounter = 0;
-       unsigned long flag;
-       u8 idx;
-
-       RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("come in\n"));
-
-       while (true) {
-               spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
-               if (rtlhal->b_h2c_setinprogress) {
-                       RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
-                                ("H2C set in progress! Wait to set.."
-                                 "element_id(%d).\n", element_id));
-
-                       while (rtlhal->b_h2c_setinprogress) {
-                               spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
-                                                      flag);
-                               h2c_waitcounter++;
-                               RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
-                                        ("Wait 100 us (%d times)...\n",
-                                         h2c_waitcounter));
-                               udelay(100);
-
-                               if (h2c_waitcounter > 1000)
-                                       return;
-                               spin_lock_irqsave(&rtlpriv->locks.h2c_lock,
-                                                 flag);
-                       }
-                       spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
-               } else {
-                       rtlhal->b_h2c_setinprogress = true;
-                       spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
-                       break;
-               }
-       }
-
-       while (!bwrite_sucess) {
-               wait_writeh2c_limmit--;
-               if (wait_writeh2c_limmit == 0) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                ("Write H2C fail because no trigger "
-                                 "for FW INT!\n"));
-                       break;
-               }
-
-               boxnum = rtlhal->last_hmeboxnum;
-               switch (boxnum) {
-               case 0:
-                       box_reg = REG_HMEBOX_0;
-                       box_extreg = REG_HMEBOX_EXT_0;
-                       break;
-               case 1:
-                       box_reg = REG_HMEBOX_1;
-                       box_extreg = REG_HMEBOX_EXT_1;
-                       break;
-               case 2:
-                       box_reg = REG_HMEBOX_2;
-                       box_extreg = REG_HMEBOX_EXT_2;
-                       break;
-               case 3:
-                       box_reg = REG_HMEBOX_3;
-                       box_extreg = REG_HMEBOX_EXT_3;
-                       break;
-               default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                ("switch case not process\n"));
-                       break;
-               }
-
-               isfw_read = _rtl92c_check_fw_read_last_h2c(hw, boxnum);
-               while (!isfw_read) {
-
-                       wait_h2c_limmit--;
-                       if (wait_h2c_limmit == 0) {
-                               RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
-                                        ("Waiting too long for FW to read "
-                                         "and clear HMEBox(%d)!\n", boxnum));
-                               break;
-                       }
-
-                       udelay(10);
-
-                       isfw_read = _rtl92c_check_fw_read_last_h2c(hw, boxnum);
-                       u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF);
-                       RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
-                                ("Waiting for FW to read and clear HMEBox(%d)! "
-                                 "0x1BF = %2x\n", boxnum, u1b_tmp));
-               }
-
-               if (!isfw_read) {
-                       RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
-                                ("Write H2C register BOX[%d] failed; "
-                                 "FW did not read it.\n", boxnum));
-                       break;
-               }
-
-               memset(boxcontent, 0, sizeof(boxcontent));
-               memset(boxextcontent, 0, sizeof(boxextcontent));
-               boxcontent[0] = element_id;
-               RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
-                        ("Write element_id box_reg(%4x) = %2x\n",
-                         box_reg, element_id));
-
-               switch (cmd_len) {
-               case 1:
-                       boxcontent[0] &= ~(BIT(7));
-                       memcpy((u8 *) (boxcontent) + 1,
-                              p_cmdbuffer + buf_index, 1);
-
-                       for (idx = 0; idx < 4; idx++) {
-                               rtl_write_byte(rtlpriv, box_reg + idx,
-                                              boxcontent[idx]);
-                       }
-                       break;
-               case 2:
-                       boxcontent[0] &= ~(BIT(7));
-                       memcpy((u8 *) (boxcontent) + 1,
-                              p_cmdbuffer + buf_index, 2);
-
-                       for (idx = 0; idx < 4; idx++) {
-                               rtl_write_byte(rtlpriv, box_reg + idx,
-                                              boxcontent[idx]);
-                       }
-                       break;
-               case 3:
-                       boxcontent[0] &= ~(BIT(7));
-                       memcpy((u8 *) (boxcontent) + 1,
-                              p_cmdbuffer + buf_index, 3);
-
-                       for (idx = 0; idx < 4; idx++) {
-                               rtl_write_byte(rtlpriv, box_reg + idx,
-                                              boxcontent[idx]);
-                       }
-                       break;
-               case 4:
-                       boxcontent[0] |= (BIT(7));
-                       memcpy((u8 *) (boxextcontent),
-                              p_cmdbuffer + buf_index, 2);
-                       memcpy((u8 *) (boxcontent) + 1,
-                              p_cmdbuffer + buf_index + 2, 2);
-
-                       for (idx = 0; idx < 2; idx++) {
-                               rtl_write_byte(rtlpriv, box_extreg + idx,
-                                              boxextcontent[idx]);
-                       }
-
-                       for (idx = 0; idx < 4; idx++) {
-                               rtl_write_byte(rtlpriv, box_reg + idx,
-                                              boxcontent[idx]);
-                       }
-                       break;
-               case 5:
-                       boxcontent[0] |= (BIT(7));
-                       memcpy((u8 *) (boxextcontent),
-                              p_cmdbuffer + buf_index, 2);
-                       memcpy((u8 *) (boxcontent) + 1,
-                              p_cmdbuffer + buf_index + 2, 3);
-
-                       for (idx = 0; idx < 2; idx++) {
-                               rtl_write_byte(rtlpriv, box_extreg + idx,
-                                              boxextcontent[idx]);
-                       }
-
-                       for (idx = 0; idx < 4; idx++) {
-                               rtl_write_byte(rtlpriv, box_reg + idx,
-                                              boxcontent[idx]);
-                       }
-                       break;
-               default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                ("switch case not process\n"));
-                       break;
-               }
-
-               bwrite_sucess = true;
-
-               rtlhal->last_hmeboxnum = boxnum + 1;
-               if (rtlhal->last_hmeboxnum == 4)
-                       rtlhal->last_hmeboxnum = 0;
-
-               RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
-                        ("pHalData->last_hmeboxnum  = %d\n",
-                         rtlhal->last_hmeboxnum));
-       }
-
-       spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
-       rtlhal->b_h2c_setinprogress = false;
-       spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
-
-       RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("go out\n"));
-}
-
-void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
-                        u8 element_id, u32 cmd_len, u8 *p_cmdbuffer)
-{
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       u32 tmp_cmdbuf[2];
-
-       if (rtlhal->bfw_ready == false) {
-               RT_ASSERT(false, ("return H2C cmd because of Fw "
-                                 "download fail!!!\n"));
-               return;
-       }
-
-       memset(tmp_cmdbuf, 0, 8);
-       memcpy(tmp_cmdbuf, p_cmdbuffer, cmd_len);
-       _rtl92c_fill_h2c_command(hw, element_id, cmd_len, (u8 *)&tmp_cmdbuf);
-
-       return;
-}
-
-void rtl92c_firmware_selfreset(struct ieee80211_hw *hw)
-{
-       u8 u1b_tmp;
-       u8 delay = 100;
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
-       u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
-
-       while (u1b_tmp & BIT(2)) {
-               delay--;
-               if (delay == 0) {
-                       RT_ASSERT(false, ("8051 reset fail.\n"));
-                       break;
-               }
-               udelay(50);
-               u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
-       }
-}
-
-void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u8 u1_h2c_set_pwrmode[3] = {0};
-       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-
-       RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("FW LPS mode = %d\n", mode));
-
-       SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
-       SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, 1);
-       SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
-                                             ppsc->reg_max_lps_awakeintvl);
-
-       RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
-                     "rtl92c_set_fw_pwrmode_cmd(): u1_h2c_set_pwrmode\n",
-                     u1_h2c_set_pwrmode, 3);
-       rtl92c_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
-
-}
-
-static bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
-                                   struct sk_buff *skb)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-       struct rtl8192_tx_ring *ring;
-       struct rtl_tx_desc *pdesc;
-       u8 own;
-       unsigned long flags;
-       struct sk_buff *pskb = NULL;
-
-       ring = &rtlpci->tx_ring[BEACON_QUEUE];
-
-       pskb = __skb_dequeue(&ring->queue);
-       if (pskb)
-               kfree_skb(pskb);
-
-       spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
-
-       pdesc = &ring->desc[0];
-       own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
-
-       rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
-
-       __skb_queue_tail(&ring->queue, skb);
-
-       spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
-
-       rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
-
-       return true;
-}
-
-#define BEACON_PG              0 /*->1*/
-#define PSPOLL_PG              2
-#define NULL_PG                        3
-#define PROBERSP_PG            4 /*->5*/
-
-#define TOTAL_RESERVED_PKT_LEN 768
-
-static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
-       /* page 0 beacon */
-       0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
-       0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
-       0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x50, 0x08,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69,
-       0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C,
-       0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96,
-       0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A,
-       0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C,
-       0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18,
-       0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02,
-       0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-
-       /* page 1 beacon */
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x10, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x10, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-
-       /* page 2  ps-poll */
-       0xA4, 0x10, 0x01, 0xC0, 0x00, 0x40, 0x10, 0x10,
-       0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x18, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
-       0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-
-       /* page 3  null */
-       0x48, 0x01, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10,
-       0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
-       0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x72, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
-       0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-
-       /* page 4  probe_resp */
-       0x50, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10,
-       0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
-       0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00,
-       0x9E, 0x46, 0x15, 0x32, 0x27, 0xF2, 0x2D, 0x00,
-       0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69,
-       0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C,
-       0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96,
-       0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A,
-       0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C,
-       0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18,
-       0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02,
-       0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-
-       /* page 5  probe_resp */
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-       struct sk_buff *skb = NULL;
-
-       u32 totalpacketlen;
-       bool rtstatus;
-       u8 u1RsvdPageLoc[3] = {0};
-       bool b_dlok = false;
-
-       u8 *beacon;
-       u8 *p_pspoll;
-       u8 *nullfunc;
-       u8 *p_probersp;
-       /*---------------------------------------------------------
-                               (1) beacon
-       ---------------------------------------------------------*/
-       beacon = &reserved_page_packet[BEACON_PG * 128];
-       SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr);
-       SET_80211_HDR_ADDRESS3(beacon, mac->bssid);
-
-       /*-------------------------------------------------------
-                               (2) ps-poll
-       --------------------------------------------------------*/
-       p_pspoll = &reserved_page_packet[PSPOLL_PG * 128];
-       SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000));
-       SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
-       SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
-
-       SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG);
-
-       /*--------------------------------------------------------
-                               (3) null data
-       ---------------------------------------------------------*/
-       nullfunc = &reserved_page_packet[NULL_PG * 128];
-       SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
-       SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
-       SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
-
-       SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG);
-
-       /*---------------------------------------------------------
-                               (4) probe response
-       ----------------------------------------------------------*/
-       p_probersp = &reserved_page_packet[PROBERSP_PG * 128];
-       SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid);
-       SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr);
-       SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid);
-
-       SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG);
-
-       totalpacketlen = TOTAL_RESERVED_PKT_LEN;
-
-       RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
-                     "rtl92c_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
-                     &reserved_page_packet[0], totalpacketlen);
-       RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
-                     "rtl92c_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
-                     u1RsvdPageLoc, 3);
-
-
-       skb = dev_alloc_skb(totalpacketlen);
-       memcpy((u8 *) skb_put(skb, totalpacketlen),
-              &reserved_page_packet, totalpacketlen);
-
-       rtstatus = _rtl92c_cmd_send_packet(hw, skb);
-
-       if (rtstatus)
-               b_dlok = true;
-
-       if (b_dlok) {
-               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
-                        ("Set RSVD page location to Fw.\n"));
-               RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
-                               "H2C_RSVDPAGE:\n",
-                               u1RsvdPageLoc, 3);
-               rtl92c_fill_h2c_cmd(hw, H2C_RSVDPAGE,
-                                   sizeof(u1RsvdPageLoc), u1RsvdPageLoc);
-       } else
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
-                        ("Set RSVD page location to Fw FAIL!!!!!!.\n"));
-}
-
-void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
-{
-       u8 u1_joinbssrpt_parm[1] = {0};
-
-       SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus);
-
-       rtl92c_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm);
-}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/fw.h
deleted file mode 100644
index 3db33bd..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010  Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-
-#ifndef __RTL92C__FW__H__
-#define __RTL92C__FW__H__
-
-#define FW_8192C_SIZE                          0x3000
-#define FW_8192C_START_ADDRESS                 0x1000
-#define FW_8192C_END_ADDRESS                   0x3FFF
-#define FW_8192C_PAGE_SIZE                     4096
-#define FW_8192C_POLLING_DELAY                 5
-#define FW_8192C_POLLING_TIMEOUT_COUNT         100
-
-#define IS_FW_HEADER_EXIST(_pfwhdr)    \
-       ((_pfwhdr->signature&0xFFF0) == 0x92C0 ||\
-       (_pfwhdr->signature&0xFFF0) == 0x88C0)
-
-struct rtl92c_firmware_header {
-       u16 signature;
-       u8 category;
-       u8 function;
-       u16 version;
-       u8 subversion;
-       u8 rsvd1;
-       u8 month;
-       u8 date;
-       u8 hour;
-       u8 minute;
-       u16 ramcodeSize;
-       u16 rsvd2;
-       u32 svnindex;
-       u32 rsvd3;
-       u32 rsvd4;
-       u32 rsvd5;
-};
-
-enum rtl8192c_h2c_cmd {
-       H2C_AP_OFFLOAD = 0,
-       H2C_SETPWRMODE = 1,
-       H2C_JOINBSSRPT = 2,
-       H2C_RSVDPAGE = 3,
-       H2C_RSSI_REPORT = 5,
-       H2C_RA_MASK = 6,
-       MAX_H2CCMD
-};
-
-#define pagenum_128(_len)      (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
-
-#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val)                 \
-       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
-#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val)             \
-       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
-#define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val)        \
-       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
-#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val)            \
-       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
-#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val)            \
-       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
-#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val)               \
-       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
-#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val)            \
-       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
-
-int rtl92c_download_fw(struct ieee80211_hw *hw);
-void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
-                        u32 cmd_len, u8 *p_cmdbuffer);
-void rtl92c_firmware_selfreset(struct ieee80211_hw *hw);
-void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
-void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
-void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
-
-#endif
index 1c41a0c93506b733e6f457ce698b66b37a76789c..05477f465a7531b6b499f1679c0c63a3edcfd9a6 100644
@@ -37,7 +37,6 @@
 #include "def.h"
 #include "phy.h"
 #include "dm.h"
-#include "fw.h"
 #include "led.h"
 #include "hw.h"
 
@@ -124,7 +123,7 @@ void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                        break;
                }
        case HW_VAR_FW_PSMODE_STATUS:
-               *((bool *) (val)) = ppsc->b_fw_current_inpsmode;
+               *((bool *) (val)) = ppsc->fw_current_inpsmode;
                break;
        case HW_VAR_CORRECT_TSF:{
                u64 tsf;
@@ -173,15 +172,15 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                        break;
                }
        case HW_VAR_BASIC_RATE:{
-                       u16 b_rate_cfg = ((u16 *) val)[0];
+                       u16 rate_cfg = ((u16 *) val)[0];
                        u8 rate_index = 0;
-                       b_rate_cfg = b_rate_cfg & 0x15f;
-                       b_rate_cfg |= 0x01;
-                       rtl_write_byte(rtlpriv, REG_RRSR, b_rate_cfg & 0xff);
+                       rate_cfg &= 0x15f;
+                       rate_cfg |= 0x01;
+                       rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
                        rtl_write_byte(rtlpriv, REG_RRSR + 1,
-                                      (b_rate_cfg >> 8)&0xff);
-                       while (b_rate_cfg > 0x1) {
-                               b_rate_cfg = (b_rate_cfg >> 1);
+                                      (rate_cfg >> 8)&0xff);
+                       while (rate_cfg > 0x1) {
+                               rate_cfg = (rate_cfg >> 1);
                                rate_index++;
                        }
                        rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
@@ -318,15 +317,17 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                }
        case HW_VAR_AC_PARAM:{
                        u8 e_aci = *((u8 *) val);
-                       u32 u4b_ac_param = 0;
+                       u32 u4b_ac_param;
+                       u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min);
+                       u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max);
+                       u16 tx_op = le16_to_cpu(mac->ac[e_aci].tx_op);
 
-                       u4b_ac_param |= (u32) mac->ac[e_aci].aifs;
-                       u4b_ac_param |= ((u32) mac->ac[e_aci].cw_min
+                       u4b_ac_param = (u32) mac->ac[e_aci].aifs;
+                       u4b_ac_param |= ((u32)cw_min
                                         & 0xF) << AC_PARAM_ECW_MIN_OFFSET;
-                       u4b_ac_param |= ((u32) mac->ac[e_aci].cw_max &
+                       u4b_ac_param |= ((u32)cw_max &
                                         0xF) << AC_PARAM_ECW_MAX_OFFSET;
-                       u4b_ac_param |= (u32) mac->ac[e_aci].tx_op
-                           << AC_PARAM_TXOP_LIMIT_OFFSET;
+                       u4b_ac_param |= (u32)tx_op << AC_PARAM_TXOP_OFFSET;
 
                        RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
                                 ("queue:%x, ac_param:%x\n", e_aci,
@@ -469,12 +470,12 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                        break;
                }
        case HW_VAR_FW_PSMODE_STATUS:
-               ppsc->b_fw_current_inpsmode = *((bool *) val);
+               ppsc->fw_current_inpsmode = *((bool *) val);
                break;
        case HW_VAR_H2C_FW_JOINBSSRPT:{
                        u8 mstatus = (*(u8 *) val);
                        u8 tmp_regcr, tmp_reg422;
-                       bool b_recover = false;
+                       bool recover = false;
 
                        if (mstatus == RT_MEDIA_CONNECT) {
                                rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID,
@@ -491,7 +492,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                    rtl_read_byte(rtlpriv,
                                                  REG_FWHW_TXQ_CTRL + 2);
                                if (tmp_reg422 & BIT(6))
-                                       b_recover = true;
+                                       recover = true;
                                rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
                                               tmp_reg422 & (~BIT(6)));
 
@@ -500,7 +501,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0);
                                _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4));
 
-                               if (b_recover) {
+                               if (recover) {
                                        rtl_write_byte(rtlpriv,
                                                       REG_FWHW_TXQ_CTRL + 2,
                                                       tmp_reg422);
@@ -868,7 +869,7 @@ static void _rtl92ce_enable_aspm_back_door(struct ieee80211_hw *hw)
        rtl_write_word(rtlpriv, 0x350, 0x870c);
        rtl_write_byte(rtlpriv, 0x352, 0x1);
 
-       if (ppsc->b_support_backdoor)
+       if (ppsc->support_backdoor)
                rtl_write_byte(rtlpriv, 0x349, 0x1b);
        else
                rtl_write_byte(rtlpriv, 0x349, 0x03);
@@ -940,15 +941,15 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
                         ("Failed to download FW. Init HW "
                          "without FW now..\n"));
                err = 1;
-               rtlhal->bfw_ready = false;
+               rtlhal->fw_ready = false;
                return err;
        } else {
-               rtlhal->bfw_ready = true;
+               rtlhal->fw_ready = true;
        }
 
        rtlhal->last_hmeboxnum = 0;
-       rtl92c_phy_mac_config(hw);
-       rtl92c_phy_bb_config(hw);
+       rtl92ce_phy_mac_config(hw);
+       rtl92ce_phy_bb_config(hw);
        rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
        rtl92c_phy_rf_config(hw);
        rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
@@ -1170,21 +1171,20 @@ void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-
        u32 u4b_ac_param;
+       u16 cw_min = le16_to_cpu(mac->ac[aci].cw_min);
+       u16 cw_max = le16_to_cpu(mac->ac[aci].cw_max);
+       u16 tx_op = le16_to_cpu(mac->ac[aci].tx_op);
 
        rtl92c_dm_init_edca_turbo(hw);
-
        u4b_ac_param = (u32) mac->ac[aci].aifs;
-       u4b_ac_param |=
-           ((u32) mac->ac[aci].cw_min & 0xF) << AC_PARAM_ECW_MIN_OFFSET;
-       u4b_ac_param |=
-           ((u32) mac->ac[aci].cw_max & 0xF) << AC_PARAM_ECW_MAX_OFFSET;
-       u4b_ac_param |= (u32) mac->ac[aci].tx_op << AC_PARAM_TXOP_LIMIT_OFFSET;
+       u4b_ac_param |= (u32) ((cw_min & 0xF) << AC_PARAM_ECW_MIN_OFFSET);
+       u4b_ac_param |= (u32) ((cw_max & 0xF) << AC_PARAM_ECW_MAX_OFFSET);
+       u4b_ac_param |= (u32) (tx_op << AC_PARAM_TXOP_OFFSET);
        RT_TRACE(rtlpriv, COMP_QOS, DBG_DMESG,
                 ("queue:%x, ac_param:%x aifs:%x cwmin:%x cwmax:%x txop:%x\n",
-                 aci, u4b_ac_param, mac->ac[aci].aifs, mac->ac[aci].cw_min,
-                 mac->ac[aci].cw_max, mac->ac[aci].tx_op));
+                 aci, u4b_ac_param, mac->ac[aci].aifs, cw_min,
+                 cw_max, tx_op));
        switch (aci) {
        case AC1_BK:
                rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param);
@@ -1237,7 +1237,7 @@ static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
        rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
        rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
        rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE0);
-       if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && rtlhal->bfw_ready)
+       if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && rtlhal->fw_ready)
                rtl92c_firmware_selfreset(hw);
        rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x51);
        rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
@@ -1335,19 +1335,6 @@ void rtl92ce_update_interrupt_mask(struct ieee80211_hw *hw,
        rtl92ce_enable_interrupt(hw);
 }
 
-static u8 _rtl92c_get_chnl_group(u8 chnl)
-{
-       u8 group;
-
-       if (chnl < 3)
-               group = 0;
-       else if (chnl < 9)
-               group = 1;
-       else
-               group = 2;
-       return group;
-}
-
 static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
                                                 bool autoload_fail,
                                                 u8 *hwinfo)
@@ -1568,7 +1555,7 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
        rtlefuse->eeprom_thermalmeter = (tempval & 0x1f);
 
        if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail)
-               rtlefuse->b_apk_thermalmeterignore = true;
+               rtlefuse->apk_thermalmeterignore = true;
 
        rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
        RTPRINT(rtlpriv, FINIT, INIT_TxPower,
@@ -1625,7 +1612,7 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
 
        rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
        rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
-       rtlefuse->b_txpwr_fromeprom = true;
+       rtlefuse->txpwr_fromeprom = true;
        rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
@@ -1668,7 +1655,7 @@ static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw)
 
        switch (rtlhal->oem_id) {
        case RT_CID_819x_HP:
-               pcipriv->ledctl.bled_opendrain = true;
+               pcipriv->ledctl.led_opendrain = true;
                break;
        case RT_CID_819x_Lenovo:
        case RT_CID_DEFAULT:
@@ -1693,10 +1680,10 @@ void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw)
 
        rtlhal->version = _rtl92ce_read_chip_version(hw);
        if (get_rf_type(rtlphy) == RF_1T1R)
-               rtlpriv->dm.brfpath_rxenable[0] = true;
+               rtlpriv->dm.rfpath_rxenable[0] = true;
        else
-               rtlpriv->dm.brfpath_rxenable[0] =
-                   rtlpriv->dm.brfpath_rxenable[1] = true;
+               rtlpriv->dm.rfpath_rxenable[0] =
+                   rtlpriv->dm.rfpath_rxenable[1] = true;
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n",
                                                rtlhal->version));
        tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
@@ -1725,18 +1712,18 @@ void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw)
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 
        u32 ratr_value = (u32) mac->basic_rates;
-       u8 *p_mcsrate = mac->mcs;
+       u8 *mcsrate = mac->mcs;
        u8 ratr_index = 0;
-       u8 b_nmode = mac->ht_enable;
+       u8 nmode = mac->ht_enable;
        u8 mimo_ps = 1;
        u16 shortgi_rate;
        u32 tmp_ratr_value;
-       u8 b_curtxbw_40mhz = mac->bw_40;
-       u8 b_curshortgi_40mhz = mac->sgi_40;
-       u8 b_curshortgi_20mhz = mac->sgi_20;
+       u8 curtxbw_40mhz = mac->bw_40;
+       u8 curshortgi_40mhz = mac->sgi_40;
+       u8 curshortgi_20mhz = mac->sgi_20;
        enum wireless_mode wirelessmode = mac->mode;
 
-       ratr_value |= EF2BYTE((*(u16 *) (p_mcsrate))) << 12;
+       ratr_value |= ((*(u16 *) (mcsrate))) << 12;
 
        switch (wirelessmode) {
        case WIRELESS_MODE_B:
@@ -1750,7 +1737,7 @@ void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw)
                break;
        case WIRELESS_MODE_N_24G:
        case WIRELESS_MODE_N_5G:
-               b_nmode = 1;
+               nmode = 1;
                if (mimo_ps == 0) {
                        ratr_value &= 0x0007F005;
                } else {
@@ -1776,9 +1763,8 @@ void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw)
 
        ratr_value &= 0x0FFFFFFF;
 
-       if (b_nmode && ((b_curtxbw_40mhz &&
-                        b_curshortgi_40mhz) || (!b_curtxbw_40mhz &&
-                                                b_curshortgi_20mhz))) {
+       if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) || (!curtxbw_40mhz &&
+                      curshortgi_20mhz))) {
 
                ratr_value |= 0x10000000;
                tmp_ratr_value = (ratr_value >> 12);
@@ -1806,11 +1792,11 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
        u32 ratr_bitmap = (u32) mac->basic_rates;
        u8 *p_mcsrate = mac->mcs;
        u8 ratr_index;
-       u8 b_curtxbw_40mhz = mac->bw_40;
-       u8 b_curshortgi_40mhz = mac->sgi_40;
-       u8 b_curshortgi_20mhz = mac->sgi_20;
+       u8 curtxbw_40mhz = mac->bw_40;
+       u8 curshortgi_40mhz = mac->sgi_40;
+       u8 curshortgi_20mhz = mac->sgi_20;
        enum wireless_mode wirelessmode = mac->mode;
-       bool b_shortgi = false;
+       bool shortgi = false;
        u8 rate_mask[5];
        u8 macid = 0;
        u8 mimops = 1;
@@ -1852,7 +1838,7 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
                } else {
                        if (rtlphy->rf_type == RF_1T2R ||
                            rtlphy->rf_type == RF_1T1R) {
-                               if (b_curtxbw_40mhz) {
+                               if (curtxbw_40mhz) {
                                        if (rssi_level == 1)
                                                ratr_bitmap &= 0x000f0000;
                                        else if (rssi_level == 2)
@@ -1868,7 +1854,7 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
                                                ratr_bitmap &= 0x000ff005;
                                }
                        } else {
-                               if (b_curtxbw_40mhz) {
+                               if (curtxbw_40mhz) {
                                        if (rssi_level == 1)
                                                ratr_bitmap &= 0x0f0f0000;
                                        else if (rssi_level == 2)
@@ -1886,13 +1872,13 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
                        }
                }
 
-               if ((b_curtxbw_40mhz && b_curshortgi_40mhz) ||
-                   (!b_curtxbw_40mhz && b_curshortgi_20mhz)) {
+               if ((curtxbw_40mhz && curshortgi_40mhz) ||
+                   (!curtxbw_40mhz && curshortgi_20mhz)) {
 
                        if (macid == 0)
-                               b_shortgi = true;
+                               shortgi = true;
                        else if (macid == 1)
-                               b_shortgi = false;
+                               shortgi = false;
                }
                break;
        default:
@@ -1906,9 +1892,9 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
        }
        RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
                 ("ratr_bitmap :%x\n", ratr_bitmap));
-       *(u32 *)&rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) |
-                                      (ratr_index << 28));
-       rate_mask[4] = macid | (b_shortgi ? 0x20 : 0x00) | 0x80;
+       *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
+                                      (ratr_index << 28);
+       rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
        RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("Rate_index:%x, "
                                                 "ratr_val:%x, %x:%x:%x:%x:%x\n",
                                                 ratr_index, ratr_bitmap,
@@ -1940,13 +1926,13 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
        u8 u1tmp;
-       bool b_actuallyset = false;
+       bool actuallyset = false;
        unsigned long flag;
 
        if ((rtlpci->up_first_time == 1) || (rtlpci->being_init_adapter))
                return false;
 
-       if (ppsc->b_swrf_processing)
+       if (ppsc->swrf_processing)
                return false;
 
        spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
@@ -1972,24 +1958,24 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
        u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
        e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF;
 
-       if ((ppsc->b_hwradiooff == true) && (e_rfpowerstate_toset == ERFON)) {
+       if ((ppsc->hwradiooff == true) && (e_rfpowerstate_toset == ERFON)) {
                RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
                         ("GPIOChangeRF  - HW Radio ON, RF ON\n"));
 
                e_rfpowerstate_toset = ERFON;
-               ppsc->b_hwradiooff = false;
-               b_actuallyset = true;
-       } else if ((ppsc->b_hwradiooff == false)
+               ppsc->hwradiooff = false;
+               actuallyset = true;
+       } else if ((ppsc->hwradiooff == false)
                   && (e_rfpowerstate_toset == ERFOFF)) {
                RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
                         ("GPIOChangeRF  - HW Radio OFF, RF OFF\n"));
 
                e_rfpowerstate_toset = ERFOFF;
-               ppsc->b_hwradiooff = true;
-               b_actuallyset = true;
+               ppsc->hwradiooff = true;
+               actuallyset = true;
        }
 
-       if (b_actuallyset) {
+       if (actuallyset) {
                if (e_rfpowerstate_toset == ERFON) {
                        if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
                            RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM)) {
@@ -2028,7 +2014,7 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
        }
 
        *valid = 1;
-       return !ppsc->b_hwradiooff;
+       return !ppsc->hwradiooff;
 
 }
 
index 305c819c8c78b4807caf8ccd029f36bd0807c3dd..a3dfdb6351688c4fb3a4075e33fc96d040e36ecc 100644
@@ -30,6 +30,8 @@
 #ifndef __RTL92CE_HW_H__
 #define __RTL92CE_HW_H__
 
+#define H2C_RA_MASK    6
+
 void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw);
 void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
@@ -53,5 +55,14 @@ void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw);
 void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
                     u8 *p_macaddr, bool is_group, u8 enc_algo,
                     bool is_wepkey, bool clear_all);
+bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
+void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
+void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+int rtl92c_download_fw(struct ieee80211_hw *hw);
+void rtl92c_firmware_selfreset(struct ieee80211_hw *hw);
+void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
+                        u8 element_id, u32 cmd_len, u8 *p_cmdbuffer);
+bool rtl92ce_phy_mac_config(struct ieee80211_hw *hw);
 
 #endif
index 78a0569208eada11cc342070998bf4b26b46694d..7b1da8d7508f2416dce677bbe214d90f6b2011f0 100644
@@ -57,7 +57,7 @@ void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
                         ("switch case not process\n"));
                break;
        }
-       pled->b_ledon = true;
+       pled->ledon = true;
 }
 
 void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
@@ -76,7 +76,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
                break;
        case LED_PIN_LED0:
                ledcfg &= 0xf0;
-               if (pcipriv->ledctl.bled_opendrain == true)
+               if (pcipriv->ledctl.led_opendrain == true)
                        rtl_write_byte(rtlpriv, REG_LEDCFG2,
                                       (ledcfg | BIT(1) | BIT(5) | BIT(6)));
                else
@@ -92,7 +92,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
                         ("switch case not process\n"));
                break;
        }
-       pled->b_ledon = false;
+       pled->ledon = false;
 }
 
 void rtl92ce_init_sw_leds(struct ieee80211_hw *hw)
index 45044117139a779de9f16f6dacc80041576cb482..d0541e8c6012eb80bf0ca046f9413610c2d105c3 100644
 #include "../ps.h"
 #include "reg.h"
 #include "def.h"
+#include "hw.h"
 #include "phy.h"
 #include "rf.h"
 #include "dm.h"
 #include "table.h"
 
-static u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
-                                        enum radio_path rfpath, u32 offset);
-static void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
-                                          enum radio_path rfpath, u32 offset,
-                                          u32 data);
-static u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
-                                     enum radio_path rfpath, u32 offset);
-static void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
-                                       enum radio_path rfpath, u32 offset,
-                                       u32 data);
-static u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
-static bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
-static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
-static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
-                                                 u8 configtype);
-static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
-                                                   u8 configtype);
-static void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
-static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
-                                            u32 cmdtableidx, u32 cmdtablesz,
-                                            enum swchnlcmd_id cmdid, u32 para1,
-                                            u32 para2, u32 msdelay);
-static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
-                                            u8 channel, u8 *stage, u8 *step,
-                                            u32 *delay);
-static u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
-                                      enum wireless_mode wirelessmode,
-                                      long power_indbm);
-static bool _rtl92c_phy_config_rf_external_pa(struct ieee80211_hw *hw,
-                                             enum radio_path rfpath);
-static long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
-                                        enum wireless_mode wirelessmode,
-                                        u8 txpwridx);
-u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 returnvalue, originalvalue, bitshift;
-
-       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
-                                              "bitmask(%#x)\n", regaddr,
-                                              bitmask));
-       originalvalue = rtl_read_dword(rtlpriv, regaddr);
-       bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
-       returnvalue = (originalvalue & bitmask) >> bitshift;
-
-       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("BBR MASK=0x%x "
-                                              "Addr[0x%x]=0x%x\n", bitmask,
-                                              regaddr, originalvalue));
-
-       return returnvalue;
-
-}
-
-void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
-                          u32 regaddr, u32 bitmask, u32 data)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 originalvalue, bitshift;
-
-       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
-                                              " data(%#x)\n", regaddr, bitmask,
-                                              data));
-
-       if (bitmask != MASKDWORD) {
-               originalvalue = rtl_read_dword(rtlpriv, regaddr);
-               bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
-               data = ((originalvalue & (~bitmask)) | (data << bitshift));
-       }
-
-       rtl_write_dword(rtlpriv, regaddr, data);
-
-       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
-                                              " data(%#x)\n", regaddr, bitmask,
-                                              data));
-
-}
-
-u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
+u32 rtl92ce_phy_query_rf_reg(struct ieee80211_hw *hw,
                            enum radio_path rfpath, u32 regaddr, u32 bitmask)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -149,7 +73,7 @@ u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
        return readback_value;
 }
 
-void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
+void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
                           enum radio_path rfpath,
                           u32 regaddr, u32 bitmask, u32 data)
 {
@@ -197,137 +121,25 @@ void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
                                               bitmask, data, rfpath));
 }
 
-static u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
-                                        enum radio_path rfpath, u32 offset)
-{
-       RT_ASSERT(false, ("deprecated!\n"));
-       return 0;
-}
-
-static void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
-                                          enum radio_path rfpath, u32 offset,
-                                          u32 data)
-{
-       RT_ASSERT(false, ("deprecated!\n"));
-}
-
-static u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
-                                     enum radio_path rfpath, u32 offset)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
-       u32 newoffset;
-       u32 tmplong, tmplong2;
-       u8 rfpi_enable = 0;
-       u32 retvalue;
-
-       offset &= 0x3f;
-       newoffset = offset;
-       if (RT_CANNOT_IO(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("return all one\n"));
-               return 0xFFFFFFFF;
-       }
-       tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
-       if (rfpath == RF90_PATH_A)
-               tmplong2 = tmplong;
-       else
-               tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
-       tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
-           (newoffset << 23) | BLSSIREADEDGE;
-       rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
-                     tmplong & (~BLSSIREADEDGE));
-       mdelay(1);
-       rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
-       mdelay(1);
-       rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
-                     tmplong | BLSSIREADEDGE);
-       mdelay(1);
-       if (rfpath == RF90_PATH_A)
-               rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
-                                                BIT(8));
-       else if (rfpath == RF90_PATH_B)
-               rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
-                                                BIT(8));
-       if (rfpi_enable)
-               retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
-                                        BLSSIREADBACKDATA);
-       else
-               retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
-                                        BLSSIREADBACKDATA);
-       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x]=0x%x\n",
-                                              rfpath, pphyreg->rflssi_readback,
-                                              retvalue));
-       return retvalue;
-}
-
-static void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
-                                       enum radio_path rfpath, u32 offset,
-                                       u32 data)
-{
-       u32 data_and_addr;
-       u32 newoffset;
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
-
-       if (RT_CANNOT_IO(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("stop\n"));
-               return;
-       }
-       offset &= 0x3f;
-       newoffset = offset;
-       data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
-       rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
-       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n",
-                                              rfpath, pphyreg->rf3wire_offset,
-                                              data_and_addr));
-}
-
-static u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
-{
-       u32 i;
-
-       for (i = 0; i <= 31; i++) {
-               if (((bitmask >> i) & 0x1) == 1)
-                       break;
-       }
-       return i;
-}
-
-static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw)
-{
-       rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
-       rtl_set_bbreg(hw, RFPGA1_TXINFO, 0x300033, 0x200022);
-       rtl_set_bbreg(hw, RCCK0_AFESETTING, MASKBYTE3, 0x45);
-       rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x23);
-       rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, 0x30, 0x1);
-       rtl_set_bbreg(hw, 0xe74, 0x0c000000, 0x2);
-       rtl_set_bbreg(hw, 0xe78, 0x0c000000, 0x2);
-       rtl_set_bbreg(hw, 0xe7c, 0x0c000000, 0x2);
-       rtl_set_bbreg(hw, 0xe80, 0x0c000000, 0x2);
-       rtl_set_bbreg(hw, 0xe88, 0x0c000000, 0x2);
-}
-
-bool rtl92c_phy_mac_config(struct ieee80211_hw *hw)
+bool rtl92ce_phy_mac_config(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        bool is92c = IS_92C_SERIAL(rtlhal->version);
-       bool rtstatus = _rtl92c_phy_config_mac_with_headerfile(hw);
+       bool rtstatus = _rtl92ce_phy_config_mac_with_headerfile(hw);
 
        if (is92c)
                rtl_write_byte(rtlpriv, 0x14, 0x71);
        return rtstatus;
 }
 
-bool rtl92c_phy_bb_config(struct ieee80211_hw *hw)
+bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw)
 {
        bool rtstatus = true;
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        u16 regval;
        u32 regvaldw;
-       u8 b_reg_hwparafile = 1;
+       u8 reg_hwparafile = 1;
 
        _rtl92c_phy_init_bb_rf_register_definition(hw);
        regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
@@ -342,56 +154,12 @@ bool rtl92c_phy_bb_config(struct ieee80211_hw *hw)
        rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
        regvaldw = rtl_read_dword(rtlpriv, REG_LEDCFG0);
        rtl_write_dword(rtlpriv, REG_LEDCFG0, regvaldw | BIT(23));
-       if (b_reg_hwparafile == 1)
+       if (reg_hwparafile == 1)
                rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw);
        return rtstatus;
 }
 
-bool rtl92c_phy_rf_config(struct ieee80211_hw *hw)
-{
-       return rtl92c_phy_rf6052_config(hw);
-}
-
-static bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-       bool rtstatus;
-
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("==>\n"));
-       rtstatus = _rtl92c_phy_config_bb_with_headerfile(hw,
-                                                BASEBAND_CONFIG_PHY_REG);
-       if (rtstatus != true) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!"));
-               return false;
-       }
-       if (rtlphy->rf_type == RF_1T2R) {
-               _rtl92c_phy_bb_config_1t(hw);
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Config to 1T!!\n"));
-       }
-       if (rtlefuse->autoload_failflag == false) {
-               rtlphy->pwrgroup_cnt = 0;
-               rtstatus = _rtl92c_phy_config_bb_with_pgheaderfile(hw,
-                                                  BASEBAND_CONFIG_PHY_REG);
-       }
-       if (rtstatus != true) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!"));
-               return false;
-       }
-       rtstatus = _rtl92c_phy_config_bb_with_headerfile(hw,
-                                                BASEBAND_CONFIG_AGC_TAB);
-       if (rtstatus != true) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("AGC Table Fail\n"));
-               return false;
-       }
-       rtlphy->bcck_high_power = (bool) (rtl_get_bbreg(hw,
-                                               RFPGA0_XA_HSSIPARAMETER2,
-                                               0x200));
-       return true;
-}
-
-static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
+bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        u32 i;
@@ -408,11 +176,7 @@ static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
        return true;
 }
 
-void rtl92c_phy_config_bb_external_pa(struct ieee80211_hw *hw)
-{
-}
-
-static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
                                                  u8 configtype)
 {
        int i;
@@ -456,7 +220,6 @@ static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
                                  phy_regarray_table[i],
                                  phy_regarray_table[i + 1]));
                }
-               rtl92c_phy_config_bb_external_pa(hw);
        } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
                for (i = 0; i < agctab_arraylen; i = i + 2) {
                        rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
@@ -472,175 +235,7 @@ static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
        return true;
 }
 
-static void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
-                                                  u32 regaddr, u32 bitmask,
-                                                  u32 data)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
-       if (regaddr == RTXAGC_A_RATE18_06) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][0] =
-                   data;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][0]));
-       }
-       if (regaddr == RTXAGC_A_RATE54_24) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][1] =
-                   data;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][1]));
-       }
-       if (regaddr == RTXAGC_A_CCK1_MCS32) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][6] =
-                   data;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][6]));
-       }
-       if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][7] =
-                   data;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][7]));
-       }
-       if (regaddr == RTXAGC_A_MCS03_MCS00) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][2] =
-                   data;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][2]));
-       }
-       if (regaddr == RTXAGC_A_MCS07_MCS04) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][3] =
-                   data;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][3]));
-       }
-       if (regaddr == RTXAGC_A_MCS11_MCS08) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][4] =
-                   data;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][4]));
-       }
-       if (regaddr == RTXAGC_A_MCS15_MCS12) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][5] =
-                   data;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][5]));
-       }
-       if (regaddr == RTXAGC_B_RATE18_06) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][8] =
-                   data;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][8]));
-       }
-       if (regaddr == RTXAGC_B_RATE54_24) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][9] =
-                   data;
-
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][9]));
-       }
-
-       if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][14] =
-                   data;
-
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][14]));
-       }
-
-       if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][15] =
-                   data;
-
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][15]));
-       }
-
-       if (regaddr == RTXAGC_B_MCS03_MCS00) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][10] =
-                   data;
-
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][10]));
-       }
-
-       if (regaddr == RTXAGC_B_MCS07_MCS04) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][11] =
-                   data;
-
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][11]));
-       }
-
-       if (regaddr == RTXAGC_B_MCS11_MCS08) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][12] =
-                   data;
-
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][12]));
-       }
-
-       if (regaddr == RTXAGC_B_MCS15_MCS12) {
-               rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][13] =
-                   data;
-
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                        ("MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
-                         rtlphy->pwrgroup_cnt,
-                         rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
-                                                           pwrgroup_cnt][13]));
-
-               rtlphy->pwrgroup_cnt++;
-       }
-}
-
-static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
                                                    u8 configtype)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -679,13 +274,7 @@ static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
        return true;
 }
 
-static bool _rtl92c_phy_config_rf_external_pa(struct ieee80211_hw *hw,
-                                             enum radio_path rfpath)
-{
-       return true;
-}
-
-bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+bool rtl92ce_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                                          enum radio_path rfpath)
 {
 
@@ -740,7 +329,6 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                                udelay(1);
                        }
                }
-               _rtl92c_phy_config_rf_external_pa(hw, rfpath);
                break;
        case RF90_PATH_B:
                for (i = 0; i < radiob_arraylen; i = i + 2) {
@@ -776,346 +364,7 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
        return true;
 }
 
-void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
-       rtlphy->default_initialgain[0] =
-           (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
-       rtlphy->default_initialgain[1] =
-           (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
-       rtlphy->default_initialgain[2] =
-           (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
-       rtlphy->default_initialgain[3] =
-           (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
-
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                ("Default initial gain (c50=0x%x, "
-                 "c58=0x%x, c60=0x%x, c68=0x%x\n",
-                 rtlphy->default_initialgain[0],
-                 rtlphy->default_initialgain[1],
-                 rtlphy->default_initialgain[2],
-                 rtlphy->default_initialgain[3]));
-
-       rtlphy->framesync = (u8) rtl_get_bbreg(hw,
-                                              ROFDM0_RXDETECTOR3, MASKBYTE0);
-       rtlphy->framesync_c34 = rtl_get_bbreg(hw,
-                                             ROFDM0_RXDETECTOR2, MASKDWORD);
-
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                ("Default framesync (0x%x) = 0x%x\n",
-                 ROFDM0_RXDETECTOR3, rtlphy->framesync));
-}
-
-static void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
-       rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
-       rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
-       rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
-       rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
-       rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
-       rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
-       rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
-       rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
-           RFPGA0_XA_LSSIPARAMETER;
-       rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
-           RFPGA0_XB_LSSIPARAMETER;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
-       rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
-       rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
-       rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
-       rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
-       rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
-       rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
-       rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
-       rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control =
-           RFPGA0_XAB_SWITCHCONTROL;
-       rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control =
-           RFPGA0_XAB_SWITCHCONTROL;
-       rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
-           RFPGA0_XCD_SWITCHCONTROL;
-       rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
-           RFPGA0_XCD_SWITCHCONTROL;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
-       rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
-       rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
-       rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
-       rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
-       rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
-       rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance =
-           ROFDM0_XARXIQIMBALANCE;
-       rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance =
-           ROFDM0_XBRXIQIMBALANCE;
-       rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
-           ROFDM0_XCRXIQIMBANLANCE;
-       rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
-           ROFDM0_XDRXIQIMBALANCE;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
-       rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
-       rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
-       rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance =
-           ROFDM0_XATXIQIMBALANCE;
-       rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance =
-           ROFDM0_XBTXIQIMBALANCE;
-       rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
-           ROFDM0_XCTXIQIMBALANCE;
-       rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
-           ROFDM0_XDTXIQIMBALANCE;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
-       rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
-       rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
-       rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback =
-           RFPGA0_XA_LSSIREADBACK;
-       rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback =
-           RFPGA0_XB_LSSIREADBACK;
-       rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
-           RFPGA0_XC_LSSIREADBACK;
-       rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
-           RFPGA0_XD_LSSIREADBACK;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi =
-           TRANSCEIVEA_HSPI_READBACK;
-       rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
-           TRANSCEIVEB_HSPI_READBACK;
-
-}
-
-void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-       u8 txpwr_level;
-       long txpwr_dbm;
-
-       txpwr_level = rtlphy->cur_cck_txpwridx;
-       txpwr_dbm = _rtl92c_phy_txpwr_idx_to_dbm(hw,
-                                                WIRELESS_MODE_B, txpwr_level);
-       txpwr_level = rtlphy->cur_ofdm24g_txpwridx +
-           rtlefuse->legacy_ht_txpowerdiff;
-       if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
-                                        WIRELESS_MODE_G,
-                                        txpwr_level) > txpwr_dbm)
-               txpwr_dbm =
-                   _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
-                                                txpwr_level);
-       txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
-       if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
-                                        WIRELESS_MODE_N_24G,
-                                        txpwr_level) > txpwr_dbm)
-               txpwr_dbm =
-                   _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
-                                                txpwr_level);
-       *powerlevel = txpwr_dbm;
-}
-
-static void _rtl92c_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
-                                     u8 *cckpowerlevel, u8 *ofdmpowerlevel)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-       u8 index = (channel - 1);
-
-       cckpowerlevel[RF90_PATH_A] =
-           rtlefuse->txpwrlevel_cck[RF90_PATH_A][index];
-       cckpowerlevel[RF90_PATH_B] =
-           rtlefuse->txpwrlevel_cck[RF90_PATH_B][index];
-       if (get_rf_type(rtlphy) == RF_1T2R || get_rf_type(rtlphy) == RF_1T1R) {
-               ofdmpowerlevel[RF90_PATH_A] =
-                   rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index];
-               ofdmpowerlevel[RF90_PATH_B] =
-                   rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index];
-       } else if (get_rf_type(rtlphy) == RF_2T2R) {
-               ofdmpowerlevel[RF90_PATH_A] =
-                   rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index];
-               ofdmpowerlevel[RF90_PATH_B] =
-                   rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index];
-       }
-}
-
-static void _rtl92c_ccxpower_index_check(struct ieee80211_hw *hw,
-                                        u8 channel, u8 *cckpowerlevel,
-                                        u8 *ofdmpowerlevel)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
-       rtlphy->cur_cck_txpwridx = cckpowerlevel[0];
-       rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0];
-}
-
-void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
-{
-       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-       u8 cckpowerlevel[2], ofdmpowerlevel[2];
-
-       if (rtlefuse->b_txpwr_fromeprom == false)
-               return;
-       _rtl92c_get_txpower_index(hw, channel,
-                                 &cckpowerlevel[0], &ofdmpowerlevel[0]);
-       _rtl92c_ccxpower_index_check(hw,
-                                    channel, &cckpowerlevel[0],
-                                    &ofdmpowerlevel[0]);
-       rtl92c_phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]);
-       rtl92c_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel);
-}
-
-bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-       u8 idx;
-       u8 rf_path;
-
-       u8 ccktxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
-                                                     WIRELESS_MODE_B,
-                                                     power_indbm);
-       u8 ofdmtxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
-                                                      WIRELESS_MODE_N_24G,
-                                                      power_indbm);
-       if (ofdmtxpwridx - rtlefuse->legacy_ht_txpowerdiff > 0)
-               ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff;
-       else
-               ofdmtxpwridx = 0;
-       RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE,
-                ("%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
-                 power_indbm, ccktxpwridx, ofdmtxpwridx));
-       for (idx = 0; idx < 14; idx++) {
-               for (rf_path = 0; rf_path < 2; rf_path++) {
-                       rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx;
-                       rtlefuse->txpwrlevel_ht40_1s[rf_path][idx] =
-                           ofdmtxpwridx;
-                       rtlefuse->txpwrlevel_ht40_2s[rf_path][idx] =
-                           ofdmtxpwridx;
-               }
-       }
-       rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
-       return true;
-}
-
-void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval)
-{
-}
-
-static u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
-                                      enum wireless_mode wirelessmode,
-                                      long power_indbm)
-{
-       u8 txpwridx;
-       long offset;
-
-       switch (wirelessmode) {
-       case WIRELESS_MODE_B:
-               offset = -7;
-               break;
-       case WIRELESS_MODE_G:
-       case WIRELESS_MODE_N_24G:
-               offset = -8;
-               break;
-       default:
-               offset = -8;
-               break;
-       }
-
-       if ((power_indbm - offset) > 0)
-               txpwridx = (u8) ((power_indbm - offset) * 2);
-       else
-               txpwridx = 0;
-
-       if (txpwridx > MAX_TXPWR_IDX_NMODE_92S)
-               txpwridx = MAX_TXPWR_IDX_NMODE_92S;
-
-       return txpwridx;
-}
-
-static long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
-                                        enum wireless_mode wirelessmode,
-                                        u8 txpwridx)
-{
-       long offset;
-       long pwrout_dbm;
-
-       switch (wirelessmode) {
-       case WIRELESS_MODE_B:
-               offset = -7;
-               break;
-       case WIRELESS_MODE_G:
-       case WIRELESS_MODE_N_24G:
-               offset = -8;
-               break;
-       default:
-               offset = -8;
-               break;
-       }
-       pwrout_dbm = txpwridx / 2 + offset;
-       return pwrout_dbm;
-}
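
[Editor's note] The two helpers removed just above implement a simple linear mapping in half-dB steps with a per-mode offset (-7 for CCK/802.11b, -8 for OFDM/HT and the default case). The standalone sketch below shows only that arithmetic; the cap value 63 is an assumed stand-in for the driver's MAX_TXPWR_IDX_NMODE_92S, and none of the names are driver API.

/* Sketch of the dBm <-> tx-power-index arithmetic from the removed
 * _rtl92c_phy_dbm_to_txpwr_Idx() / _rtl92c_phy_txpwr_idx_to_dbm().
 * TXPWR_IDX_CAP is an assumed stand-in for MAX_TXPWR_IDX_NMODE_92S.
 */
#include <stdio.h>

#define TXPWR_IDX_CAP 63        /* assumed cap; one index unit == 0.5 dB */

enum mode { MODE_B, MODE_G, MODE_N_24G };

static long mode_offset(enum mode m)
{
        return (m == MODE_B) ? -7 : -8; /* CCK uses -7, OFDM/HT use -8 */
}

static unsigned char dbm_to_idx(enum mode m, long dbm)
{
        long off = mode_offset(m);
        long idx = (dbm - off) > 0 ? (dbm - off) * 2 : 0; /* 0.5 dB steps */

        return idx > TXPWR_IDX_CAP ? TXPWR_IDX_CAP : (unsigned char)idx;
}

static long idx_to_dbm(enum mode m, unsigned char idx)
{
        return idx / 2 + mode_offset(m);        /* inverse mapping */
}

int main(void)
{
        unsigned char idx = dbm_to_idx(MODE_N_24G, 15);

        printf("15 dBm -> idx %u -> %ld dBm\n", idx, idx_to_dbm(MODE_N_24G, idx));
        return 0;
}
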
-
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       enum io_type iotype;
-
-       if (!is_hal_stop(rtlhal)) {
-               switch (operation) {
-               case SCAN_OPT_BACKUP:
-                       iotype = IO_CMD_PAUSE_DM_BY_SCAN;
-                       rtlpriv->cfg->ops->set_hw_reg(hw,
-                                                     HW_VAR_IO_CMD,
-                                                     (u8 *)&iotype);
-
-                       break;
-               case SCAN_OPT_RESTORE:
-                       iotype = IO_CMD_RESUME_DM_BY_SCAN;
-                       rtlpriv->cfg->ops->set_hw_reg(hw,
-                                                     HW_VAR_IO_CMD,
-                                                     (u8 *)&iotype);
-                       break;
-               default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                ("Unknown Scan Backup operation.\n"));
-                       break;
-               }
-       }
-}
-
-void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
+void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -1183,656 +432,18 @@ void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
        RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
 }
 
-void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
-                           enum nl80211_channel_type ch_type)
+void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
 {
+       u8 tmpreg;
+       u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       u8 tmp_bw = rtlphy->current_chan_bw;
 
-       if (rtlphy->set_bwmode_inprogress)
-               return;
-       rtlphy->set_bwmode_inprogress = true;
-       if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw)))
-               rtl92c_phy_set_bw_mode_callback(hw);
-       else {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
-                        ("FALSE driver sleep or unload\n"));
-               rtlphy->set_bwmode_inprogress = false;
-               rtlphy->current_chan_bw = tmp_bw;
-       }
-}
+       tmpreg = rtl_read_byte(rtlpriv, 0xd03);
 
-void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       u32 delay;
-
-       RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
-                ("switch to channel%d\n", rtlphy->current_channel));
-       if (is_hal_stop(rtlhal))
-               return;
-       do {
-               if (!rtlphy->sw_chnl_inprogress)
-                       break;
-               if (!_rtl92c_phy_sw_chnl_step_by_step
-                   (hw, rtlphy->current_channel, &rtlphy->sw_chnl_stage,
-                    &rtlphy->sw_chnl_step, &delay)) {
-                       if (delay > 0)
-                               mdelay(delay);
-                       else
-                               continue;
-               } else
-                       rtlphy->sw_chnl_inprogress = false;
-               break;
-       } while (true);
-       RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
-}
-
-u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
-       if (rtlphy->sw_chnl_inprogress)
-               return 0;
-       if (rtlphy->set_bwmode_inprogress)
-               return 0;
-       RT_ASSERT((rtlphy->current_channel <= 14),
-                 ("WIRELESS_MODE_G but channel>14"));
-       rtlphy->sw_chnl_inprogress = true;
-       rtlphy->sw_chnl_stage = 0;
-       rtlphy->sw_chnl_step = 0;
-       if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
-               rtl92c_phy_sw_chnl_callback(hw);
-               RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
-                        ("sw_chnl_inprogress false schdule workitem\n"));
-               rtlphy->sw_chnl_inprogress = false;
-       } else {
-               RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
-                        ("sw_chnl_inprogress false driver sleep or"
-                         " unload\n"));
-               rtlphy->sw_chnl_inprogress = false;
-       }
-       return 1;
-}
-
-static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
-                                            u8 channel, u8 *stage, u8 *step,
-                                            u32 *delay)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
-       u32 precommoncmdcnt;
-       struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
-       u32 postcommoncmdcnt;
-       struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
-       u32 rfdependcmdcnt;
-       struct swchnlcmd *currentcmd = NULL;
-       u8 rfpath;
-       u8 num_total_rfpath = rtlphy->num_total_rfpath;
-
-       precommoncmdcnt = 0;
-       _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
-                                        MAX_PRECMD_CNT,
-                                        CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0);
-       _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
-                                        MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
-
-       postcommoncmdcnt = 0;
-
-       _rtl92c_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
-                                        MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
-
-       rfdependcmdcnt = 0;
-
-       RT_ASSERT((channel >= 1 && channel <= 14),
-                 ("illegal channel for Zebra: %d\n", channel));
-
-       _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
-                                        MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
-                                        RF_CHNLBW, channel, 10);
-
-       _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
-                                        MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0,
-                                        0);
-
-       do {
-               switch (*stage) {
-               case 0:
-                       currentcmd = &precommoncmd[*step];
-                       break;
-               case 1:
-                       currentcmd = &rfdependcmd[*step];
-                       break;
-               case 2:
-                       currentcmd = &postcommoncmd[*step];
-                       break;
-               }
-
-               if (currentcmd->cmdid == CMDID_END) {
-                       if ((*stage) == 2) {
-                               return true;
-                       } else {
-                               (*stage)++;
-                               (*step) = 0;
-                               continue;
-                       }
-               }
-
-               switch (currentcmd->cmdid) {
-               case CMDID_SET_TXPOWEROWER_LEVEL:
-                       rtl92c_phy_set_txpower_level(hw, channel);
-                       break;
-               case CMDID_WRITEPORT_ULONG:
-                       rtl_write_dword(rtlpriv, currentcmd->para1,
-                                       currentcmd->para2);
-                       break;
-               case CMDID_WRITEPORT_USHORT:
-                       rtl_write_word(rtlpriv, currentcmd->para1,
-                                      (u16) currentcmd->para2);
-                       break;
-               case CMDID_WRITEPORT_UCHAR:
-                       rtl_write_byte(rtlpriv, currentcmd->para1,
-                                      (u8) currentcmd->para2);
-                       break;
-               case CMDID_RF_WRITEREG:
-                       for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
-                               rtlphy->rfreg_chnlval[rfpath] =
-                                   ((rtlphy->rfreg_chnlval[rfpath] &
-                                     0xfffffc00) | currentcmd->para2);
-
-                               rtl_set_rfreg(hw, (enum radio_path)rfpath,
-                                             currentcmd->para1,
-                                             RFREG_OFFSET_MASK,
-                                             rtlphy->rfreg_chnlval[rfpath]);
-                       }
-                       break;
-               default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                ("switch case not process\n"));
-                       break;
-               }
-
-               break;
-       } while (true);
-
-       (*delay) = currentcmd->msdelay;
-       (*step)++;
-       return false;
-}
-
-static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
-                                            u32 cmdtableidx, u32 cmdtablesz,
-                                            enum swchnlcmd_id cmdid,
-                                            u32 para1, u32 para2, u32 msdelay)
-{
-       struct swchnlcmd *pcmd;
-
-       if (cmdtable == NULL) {
-               RT_ASSERT(false, ("cmdtable cannot be NULL.\n"));
-               return false;
-       }
-
-       if (cmdtableidx >= cmdtablesz)
-               return false;
-
-       pcmd = cmdtable + cmdtableidx;
-       pcmd->cmdid = cmdid;
-       pcmd->para1 = para1;
-       pcmd->para2 = para2;
-       pcmd->msdelay = msdelay;
-       return true;
-}
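
[Editor's note] The removed channel-switch path is a small table-driven state machine: _rtl92c_phy_set_sw_chnl_cmdarray() fills per-stage command arrays (pre-common, RF-dependent, post-common) and _rtl92c_phy_sw_chnl_step_by_step() walks them one step at a time, handing a millisecond delay back to the caller after each step. The userspace sketch below mirrors only that staging/stepping structure; the command IDs, register value 0x18 and the printf dispatch are illustrative, not the driver's.

/* Sketch of the stage/step command-table walker pattern used by the
 * removed channel-switch code. Only the structure mirrors the driver;
 * the dispatch bodies are placeholders.
 */
#include <stdbool.h>
#include <stdio.h>

enum cmd_id { CMD_SET_TXPOWER, CMD_RF_WRITEREG, CMD_END };

struct swcmd {
        enum cmd_id id;
        unsigned int para1, para2, msdelay;
};

static bool add_cmd(struct swcmd *tbl, unsigned int idx, unsigned int sz,
                    enum cmd_id id, unsigned int p1, unsigned int p2,
                    unsigned int msdelay)
{
        if (idx >= sz)
                return false;
        tbl[idx] = (struct swcmd){ id, p1, p2, msdelay };
        return true;
}

/* One step: returns true once all three stages hit CMD_END, otherwise
 * stores the delay to wait before the next call in *delay. */
static bool step(struct swcmd *stages[3], unsigned char *stage,
                 unsigned char *stp, unsigned int *delay, unsigned int channel)
{
        struct swcmd *cur = &stages[*stage][*stp];

        if (cur->id == CMD_END) {
                if (*stage == 2)
                        return true;    /* post-common stage finished */
                (*stage)++;
                *stp = 0;
                return step(stages, stage, stp, delay, channel);
        }

        switch (cur->id) {
        case CMD_SET_TXPOWER:
                printf("set tx power for channel %u\n", channel);
                break;
        case CMD_RF_WRITEREG:
                /* 0x18 is only an example register offset */
                printf("write RF reg 0x%x <- channel %u\n", cur->para1, channel);
                break;
        default:
                break;
        }
        *delay = cur->msdelay;
        (*stp)++;
        return false;
}

int main(void)
{
        struct swcmd pre[4], rfdep[4], post[4];
        struct swcmd *stages[3] = { pre, rfdep, post };
        unsigned char stage = 0, stp = 0;
        unsigned int delay = 0, guard = 0;

        add_cmd(pre, 0, 4, CMD_SET_TXPOWER, 0, 0, 0);
        add_cmd(pre, 1, 4, CMD_END, 0, 0, 0);
        add_cmd(rfdep, 0, 4, CMD_RF_WRITEREG, 0x18, 6, 10);
        add_cmd(rfdep, 1, 4, CMD_END, 0, 0, 0);
        add_cmd(post, 0, 4, CMD_END, 0, 0, 0);

        while (!step(stages, &stage, &stp, &delay, 6) && guard++ < 16)
                ;       /* a real caller would mdelay(delay) here */
        printf("last requested delay: %u ms\n", delay);
        return 0;
}
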
-
-bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath)
-{
-       return true;
-}
-
-static u8 _rtl92c_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
-{
-       u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
-       u8 result = 0x00;
-
-       rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1f);
-       rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c1f);
-       rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140102);
-       rtl_set_bbreg(hw, 0xe3c, MASKDWORD,
-                     config_pathb ? 0x28160202 : 0x28160502);
-
-       if (config_pathb) {
-               rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x10008c22);
-               rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x10008c22);
-               rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140102);
-               rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x28160202);
-       }
-
-       rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x001028d1);
-       rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
-       rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
-
-       mdelay(IQK_DELAY_TIME);
-
-       reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
-       reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
-       reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
-       reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
-
-       if (!(reg_eac & BIT(28)) &&
-           (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
-           (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
-               result |= 0x01;
-       else
-               return result;
-
-       if (!(reg_eac & BIT(27)) &&
-           (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
-           (((reg_eac & 0x03FF0000) >> 16) != 0x36))
-               result |= 0x02;
-       return result;
-}
-
-static u8 _rtl92c_phy_path_b_iqk(struct ieee80211_hw *hw)
-{
-       u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
-       u8 result = 0x00;
-
-       rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
-       rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
-       mdelay(IQK_DELAY_TIME);
-       reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
-       reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
-       reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
-       reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
-       reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
-       if (!(reg_eac & BIT(31)) &&
-           (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) &&
-           (((reg_ebc & 0x03FF0000) >> 16) != 0x42))
-               result |= 0x01;
-       else
-               return result;
-
-       if (!(reg_eac & BIT(30)) &&
-           (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) &&
-           (((reg_ecc & 0x03FF0000) >> 16) != 0x36))
-               result |= 0x02;
-       return result;
-}
-
-static void _rtl92c_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
-                                              bool b_iqk_ok, long result[][8],
-                                              u8 final_candidate, bool btxonly)
-{
-       u32 oldval_0, x, tx0_a, reg;
-       long y, tx0_c;
-
-       if (final_candidate == 0xFF)
-               return;
-       else if (b_iqk_ok) {
-               oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
-                                         MASKDWORD) >> 22) & 0x3FF;
-               x = result[final_candidate][0];
-               if ((x & 0x00000200) != 0)
-                       x = x | 0xFFFFFC00;
-               tx0_a = (x * oldval_0) >> 8;
-               rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
-               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
-                             ((x * oldval_0 >> 7) & 0x1));
-               y = result[final_candidate][1];
-               if ((y & 0x00000200) != 0)
-                       y = y | 0xFFFFFC00;
-               tx0_c = (y * oldval_0) >> 8;
-               rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
-                             ((tx0_c & 0x3C0) >> 6));
-               rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
-                             (tx0_c & 0x3F));
-               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
-                             ((y * oldval_0 >> 7) & 0x1));
-               if (btxonly)
-                       return;
-               reg = result[final_candidate][2];
-               rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
-               reg = result[final_candidate][3] & 0x3F;
-               rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
-               reg = (result[final_candidate][3] >> 6) & 0xF;
-               rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
-       }
-}
-
-static void _rtl92c_phy_path_b_fill_iqk_matrix(struct ieee80211_hw *hw,
-                                              bool b_iqk_ok, long result[][8],
-                                              u8 final_candidate, bool btxonly)
-{
-       u32 oldval_1, x, tx1_a, reg;
-       long y, tx1_c;
-
-       if (final_candidate == 0xFF)
-               return;
-       else if (b_iqk_ok) {
-               oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
-                                         MASKDWORD) >> 22) & 0x3FF;
-               x = result[final_candidate][4];
-               if ((x & 0x00000200) != 0)
-                       x = x | 0xFFFFFC00;
-               tx1_a = (x * oldval_1) >> 8;
-               rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x3FF, tx1_a);
-               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(27),
-                             ((x * oldval_1 >> 7) & 0x1));
-               y = result[final_candidate][5];
-               if ((y & 0x00000200) != 0)
-                       y = y | 0xFFFFFC00;
-               tx1_c = (y * oldval_1) >> 8;
-               rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000,
-                             ((tx1_c & 0x3C0) >> 6));
-               rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x003F0000,
-                             (tx1_c & 0x3F));
-               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(25),
-                             ((y * oldval_1 >> 7) & 0x1));
-               if (btxonly)
-                       return;
-               reg = result[final_candidate][6];
-               rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg);
-               reg = result[final_candidate][7] & 0x3F;
-               rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg);
-               reg = (result[final_candidate][7] >> 6) & 0xF;
-               rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, reg);
-       }
-}
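
[Editor's note] Both fill_iqk_matrix helpers above treat the 10-bit calibration results as signed values (the `x & 0x200` test followed by OR-ing 0xFFFFFC00 is manual sign extension from bit 9), multiply them against the previous 10-bit coefficient read from the register, scale by 256, and write the result back under a 0x3FF mask. The tiny standalone illustration below uses invented values and is not the driver code.

/* Illustration of the sign-extension and >>8 scaling used when filling
 * the IQK matrix above. Input values are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

static int32_t sign_extend_10(uint32_t v)
{
        /* mirrors "if (v & 0x200) v |= 0xFFFFFC00" in the removed code */
        return (v & 0x200) ? (int32_t)(v | 0xFFFFFC00) : (int32_t)v;
}

int main(void)
{
        uint32_t oldval = 0x100;                /* previous 10-bit coefficient */
        uint32_t raw    = 0x3F5;                /* 10-bit IQK result, negative */
        int32_t  x      = sign_extend_10(raw);  /* -11 */
        uint32_t newval = ((uint32_t)(x * (int32_t)oldval) >> 8) & 0x3FF;

        printf("raw=0x%03x x=%d old=0x%03x new=0x%03x\n",
               (unsigned)raw, (int)x, (unsigned)oldval, (unsigned)newval);
        return 0;
}
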
-
-static void _rtl92c_phy_save_adda_registers(struct ieee80211_hw *hw,
-                                           u32 *addareg, u32 *addabackup,
-                                           u32 registernum)
-{
-       u32 i;
-
-       for (i = 0; i < registernum; i++)
-               addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
-}
-
-static void _rtl92c_phy_save_mac_registers(struct ieee80211_hw *hw,
-                                          u32 *macreg, u32 *macbackup)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 i;
-
-       for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
-               macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
-       macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
-}
-
-static void _rtl92c_phy_reload_adda_registers(struct ieee80211_hw *hw,
-                                             u32 *addareg, u32 *addabackup,
-                                             u32 regiesternum)
-{
-       u32 i;
-
-       for (i = 0; i < regiesternum; i++)
-               rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
-}
-
-static void _rtl92c_phy_reload_mac_registers(struct ieee80211_hw *hw,
-                                            u32 *macreg, u32 *macbackup)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 i;
-
-       for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
-               rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
-       rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
-}
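
[Editor's note] The four small helpers above form a symmetric backup/restore of the ADDA (BB) and MAC registers around calibration; for the MAC set only the last entry (0x040) is read and written as a 32-bit word while the earlier ones are handled as bytes. A generic save/restore sketch of the same pattern, with a plain array standing in for device I/O (nothing here is driver API):

/* Generic sketch of the snapshot/restore pattern used around IQK:
 * save a list of registers, let calibration clobber them, write back.
 */
#include <stdint.h>
#include <stdio.h>

#define NREGS 4

static uint32_t fake_regs[0x1000];      /* stand-in for MMIO space */

static void save_regs(const uint32_t *addrs, uint32_t *backup, unsigned int n)
{
        for (unsigned int i = 0; i < n; i++)
                backup[i] = fake_regs[addrs[i]];
}

static void restore_regs(const uint32_t *addrs, const uint32_t *backup,
                         unsigned int n)
{
        for (unsigned int i = 0; i < n; i++)
                fake_regs[addrs[i]] = backup[i];
}

int main(void)
{
        const uint32_t mac_regs[NREGS] = { 0x522, 0x550, 0x551, 0x040 };
        uint32_t backup[NREGS];

        fake_regs[0x522] = 0xab;
        save_regs(mac_regs, backup, NREGS);
        fake_regs[0x522] = 0xff;        /* calibration clobbers the register */
        restore_regs(mac_regs, backup, NREGS);
        printf("0x522 restored to 0x%x\n", (unsigned)fake_regs[0x522]);
        return 0;
}
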
-
-static void _rtl92c_phy_path_adda_on(struct ieee80211_hw *hw,
-                                    u32 *addareg, bool is_patha_on, bool is2t)
-{
-       u32 pathOn;
-       u32 i;
-
-       pathOn = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
-       if (false == is2t) {
-               pathOn = 0x0bdb25a0;
-               rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
-       } else {
-               rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathOn);
-       }
-
-       for (i = 1; i < IQK_ADDA_REG_NUM; i++)
-               rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathOn);
-}
-
-static void _rtl92c_phy_mac_setting_calibration(struct ieee80211_hw *hw,
-                                               u32 *macreg, u32 *macbackup)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 i;
-
-       rtl_write_byte(rtlpriv, macreg[0], 0x3F);
-
-       for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
-               rtl_write_byte(rtlpriv, macreg[i],
-                              (u8) (macbackup[i] & (~BIT(3))));
-       rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
-}
-
-static void _rtl92c_phy_path_a_standby(struct ieee80211_hw *hw)
-{
-       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
-       rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
-       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
-}
-
-static void _rtl92c_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
-{
-       u32 mode;
-
-       mode = pi_mode ? 0x01000100 : 0x01000000;
-       rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
-       rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
-}
-
-static bool _rtl92c_phy_simularity_compare(struct ieee80211_hw *hw,
-                                          long result[][8], u8 c1, u8 c2)
-{
-       u32 i, j, diff, simularity_bitmap, bound;
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
-       u8 final_candidate[2] = { 0xFF, 0xFF };
-       bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version);
-
-       if (is2t)
-               bound = 8;
-       else
-               bound = 4;
-
-       simularity_bitmap = 0;
-
-       for (i = 0; i < bound; i++) {
-               diff = (result[c1][i] > result[c2][i]) ?
-                   (result[c1][i] - result[c2][i]) :
-                   (result[c2][i] - result[c1][i]);
-
-               if (diff > MAX_TOLERANCE) {
-                       if ((i == 2 || i == 6) && !simularity_bitmap) {
-                               if (result[c1][i] + result[c1][i + 1] == 0)
-                                       final_candidate[(i / 4)] = c2;
-                               else if (result[c2][i] + result[c2][i + 1] == 0)
-                                       final_candidate[(i / 4)] = c1;
-                               else
-                                       simularity_bitmap = simularity_bitmap |
-                                           (1 << i);
-                       } else
-                               simularity_bitmap =
-                                   simularity_bitmap | (1 << i);
-               }
-       }
-
-       if (simularity_bitmap == 0) {
-               for (i = 0; i < (bound / 4); i++) {
-                       if (final_candidate[i] != 0xFF) {
-                               for (j = i * 4; j < (i + 1) * 4 - 2; j++)
-                                       result[3][j] =
-                                           result[final_candidate[i]][j];
-                               bresult = false;
-                       }
-               }
-               return bresult;
-       } else if (!(simularity_bitmap & 0x0F)) {
-               for (i = 0; i < 4; i++)
-                       result[3][i] = result[c1][i];
-               return false;
-       } else if (!(simularity_bitmap & 0xF0) && is2t) {
-               for (i = 4; i < 8; i++)
-                       result[3][i] = result[c1][i];
-               return false;
-       } else {
-               return false;
-       }
-
-}
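
[Editor's note] Restated plainly, _rtl92c_phy_simularity_compare() above compares two of the (up to three) IQK runs element-wise, sets a bit in a bitmap for every entry whose difference exceeds the driver's MAX_TOLERANCE, and treats an all-zero bitmap as agreement. The sketch below keeps only that core check: it drops the special-casing of the register pairs at indices 2 and 6, and TOLERANCE is a placeholder, not the driver constant.

/* Minimal sketch of the element-wise IQK similarity check: build a
 * bitmap of entries whose difference exceeds a tolerance.
 */
#include <stdio.h>
#include <stdlib.h>

#define TOLERANCE 16    /* placeholder, not the driver's MAX_TOLERANCE */

static unsigned int similarity_bitmap(const long a[], const long b[],
                                      unsigned int bound)
{
        unsigned int i, bitmap = 0;

        for (i = 0; i < bound; i++)
                if (labs(a[i] - b[i]) > TOLERANCE)
                        bitmap |= 1u << i;      /* entry i disagrees */
        return bitmap;
}

int main(void)
{
        long run0[4] = { 0x140, 0x40, 0x130, 0x35 };
        long run1[4] = { 0x142, 0x41, 0x100, 0x36 };
        unsigned int bm = similarity_bitmap(run0, run1, 4);

        printf("bitmap=0x%x (%s)\n", bm, bm ? "runs disagree" : "runs agree");
        return 0;
}
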
-
-static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
-                                    long result[][8], u8 t, bool is2t)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       u32 i;
-       u8 patha_ok, pathb_ok;
-       u32 adda_reg[IQK_ADDA_REG_NUM] = {
-               0x85c, 0xe6c, 0xe70, 0xe74,
-               0xe78, 0xe7c, 0xe80, 0xe84,
-               0xe88, 0xe8c, 0xed0, 0xed4,
-               0xed8, 0xedc, 0xee0, 0xeec
-       };
-
-       u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
-               0x522, 0x550, 0x551, 0x040
-       };
-
-       const u32 retrycount = 2;
-
-       u32 bbvalue;
-
-       if (t == 0) {
-               bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD);
-
-               _rtl92c_phy_save_adda_registers(hw, adda_reg,
-                                               rtlphy->adda_backup, 16);
-               _rtl92c_phy_save_mac_registers(hw, iqk_mac_reg,
-                                              rtlphy->iqk_mac_backup);
-       }
-       _rtl92c_phy_path_adda_on(hw, adda_reg, true, is2t);
-       if (t == 0) {
-               rtlphy->b_rfpi_enable = (u8) rtl_get_bbreg(hw,
-                                                  RFPGA0_XA_HSSIPARAMETER1,
-                                                  BIT(8));
-       }
-       if (!rtlphy->b_rfpi_enable)
-               _rtl92c_phy_pi_mode_switch(hw, true);
-       if (t == 0) {
-               rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD);
-               rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD);
-               rtlphy->reg_874 = rtl_get_bbreg(hw, 0x874, MASKDWORD);
-       }
-       rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
-       rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
-       rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);
-       if (is2t) {
-               rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
-               rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
-       }
-       _rtl92c_phy_mac_setting_calibration(hw, iqk_mac_reg,
-                                           rtlphy->iqk_mac_backup);
-       rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000);
-       if (is2t)
-               rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x00080000);
-       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
-       rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
-       rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
-       for (i = 0; i < retrycount; i++) {
-               patha_ok = _rtl92c_phy_path_a_iqk(hw, is2t);
-               if (patha_ok == 0x03) {
-                       result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
-                                       0x3FF0000) >> 16;
-                       result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
-                                       0x3FF0000) >> 16;
-                       result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
-                                       0x3FF0000) >> 16;
-                       result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
-                                       0x3FF0000) >> 16;
-                       break;
-               } else if (i == (retrycount - 1) && patha_ok == 0x01)
-                       result[t][0] = (rtl_get_bbreg(hw, 0xe94,
-                                                     MASKDWORD) & 0x3FF0000) >>
-                                                     16;
-               result[t][1] =
-                   (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) & 0x3FF0000) >> 16;
-
-       }
-
-       if (is2t) {
-               _rtl92c_phy_path_a_standby(hw);
-               _rtl92c_phy_path_adda_on(hw, adda_reg, false, is2t);
-               for (i = 0; i < retrycount; i++) {
-                       pathb_ok = _rtl92c_phy_path_b_iqk(hw);
-                       if (pathb_ok == 0x03) {
-                               result[t][4] = (rtl_get_bbreg(hw,
-                                                     0xeb4,
-                                                     MASKDWORD) &
-                                               0x3FF0000) >> 16;
-                               result[t][5] =
-                                   (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
-                                    0x3FF0000) >> 16;
-                               result[t][6] =
-                                   (rtl_get_bbreg(hw, 0xec4, MASKDWORD) &
-                                    0x3FF0000) >> 16;
-                               result[t][7] =
-                                   (rtl_get_bbreg(hw, 0xecc, MASKDWORD) &
-                                    0x3FF0000) >> 16;
-                               break;
-                       } else if (i == (retrycount - 1) && pathb_ok == 0x01) {
-                               result[t][4] = (rtl_get_bbreg(hw,
-                                                     0xeb4,
-                                                     MASKDWORD) &
-                                               0x3FF0000) >> 16;
-                       }
-                       result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
-                                       0x3FF0000) >> 16;
-               }
-       }
-       rtl_set_bbreg(hw, 0xc04, MASKDWORD, rtlphy->reg_c04);
-       rtl_set_bbreg(hw, 0x874, MASKDWORD, rtlphy->reg_874);
-       rtl_set_bbreg(hw, 0xc08, MASKDWORD, rtlphy->reg_c08);
-       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
-       rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3);
-       if (is2t)
-               rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
-       if (t != 0) {
-               if (!rtlphy->b_rfpi_enable)
-                       _rtl92c_phy_pi_mode_switch(hw, false);
-               _rtl92c_phy_reload_adda_registers(hw, adda_reg,
-                                                 rtlphy->adda_backup, 16);
-               _rtl92c_phy_reload_mac_registers(hw, iqk_mac_reg,
-                                                rtlphy->iqk_mac_backup);
-       }
-}
-
-static void _rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
-{
-       u8 tmpreg;
-       u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       tmpreg = rtl_read_byte(rtlpriv, 0xd03);
-
-       if ((tmpreg & 0x70) != 0)
-               rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
-       else
-               rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+       if ((tmpreg & 0x70) != 0)
+               rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
+       else
+               rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
 
        if ((tmpreg & 0x70) != 0) {
                rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS);
@@ -1866,666 +477,6 @@ static void _rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
        }
 }
 
-static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw,
-                                    char delta, bool is2t)
-{
-       /* This routine is deliberately dummied out for later fixes */
-#if 0
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
-
-       u32 reg_d[PATH_NUM];
-       u32 tmpreg, index, offset, path, i, pathbound = PATH_NUM, apkbound;
-
-       u32 bb_backup[APK_BB_REG_NUM];
-       u32 bb_reg[APK_BB_REG_NUM] = {
-               0x904, 0xc04, 0x800, 0xc08, 0x874
-       };
-       u32 bb_ap_mode[APK_BB_REG_NUM] = {
-               0x00000020, 0x00a05430, 0x02040000,
-               0x000800e4, 0x00204000
-       };
-       u32 bb_normal_ap_mode[APK_BB_REG_NUM] = {
-               0x00000020, 0x00a05430, 0x02040000,
-               0x000800e4, 0x22204000
-       };
-
-       u32 afe_backup[APK_AFE_REG_NUM];
-       u32 afe_reg[APK_AFE_REG_NUM] = {
-               0x85c, 0xe6c, 0xe70, 0xe74, 0xe78,
-               0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c,
-               0xed0, 0xed4, 0xed8, 0xedc, 0xee0,
-               0xeec
-       };
-
-       u32 mac_backup[IQK_MAC_REG_NUM];
-       u32 mac_reg[IQK_MAC_REG_NUM] = {
-               0x522, 0x550, 0x551, 0x040
-       };
-
-       u32 apk_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
-               {0x0852c, 0x1852c, 0x5852c, 0x1852c, 0x5852c},
-               {0x2852e, 0x0852e, 0x3852e, 0x0852e, 0x0852e}
-       };
-
-       u32 apk_normal_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
-               {0x0852c, 0x0a52c, 0x3a52c, 0x5a52c, 0x5a52c},
-               {0x0852c, 0x0a52c, 0x5a52c, 0x5a52c, 0x5a52c}
-       };
-
-       u32 apk_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
-               {0x52019, 0x52014, 0x52013, 0x5200f, 0x5208d},
-               {0x5201a, 0x52019, 0x52016, 0x52033, 0x52050}
-       };
-
-       u32 apk_normal_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
-               {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a},
-               {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}
-       };
-
-       u32 afe_on_off[PATH_NUM] = {
-               0x04db25a4, 0x0b1b25a4
-       };
-
-       u32 apk_offset[PATH_NUM] = { 0xb68, 0xb6c };
-
-       u32 apk_normal_offset[PATH_NUM] = { 0xb28, 0xb98 };
-
-       u32 apk_value[PATH_NUM] = { 0x92fc0000, 0x12fc0000 };
-
-       u32 apk_normal_value[PATH_NUM] = { 0x92680000, 0x12680000 };
-
-       const char apk_delta_mapping[APK_BB_REG_NUM][13] = {
-               {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
-               {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
-               {-6, -4, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
-               {-1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6},
-               {-11, -9, -7, -5, -3, -1, 0, 0, 0, 0, 0, 0, 0}
-       };
-
-       const u32 apk_normal_setting_value_1[13] = {
-               0x01017018, 0xf7ed8f84, 0x1b1a1816, 0x2522201e, 0x322e2b28,
-               0x433f3a36, 0x5b544e49, 0x7b726a62, 0xa69a8f84, 0xdfcfc0b3,
-               0x12680000, 0x00880000, 0x00880000
-       };
-
-       const u32 apk_normal_setting_value_2[16] = {
-               0x01c7021d, 0x01670183, 0x01000123, 0x00bf00e2, 0x008d00a3,
-               0x0068007b, 0x004d0059, 0x003a0042, 0x002b0031, 0x001f0025,
-               0x0017001b, 0x00110014, 0x000c000f, 0x0009000b, 0x00070008,
-               0x00050006
-       };
-
-       const u32 apk_result[PATH_NUM][APK_BB_REG_NUM];
-
-       long bb_offset, delta_v, delta_offset;
-
-       if (!is2t)
-               pathbound = 1;
-
-       for (index = 0; index < PATH_NUM; index++) {
-               apk_offset[index] = apk_normal_offset[index];
-               apk_value[index] = apk_normal_value[index];
-               afe_on_off[index] = 0x6fdb25a4;
-       }
-
-       for (index = 0; index < APK_BB_REG_NUM; index++) {
-               for (path = 0; path < pathbound; path++) {
-                       apk_rf_init_value[path][index] =
-                           apk_normal_rf_init_value[path][index];
-                       apk_rf_value_0[path][index] =
-                           apk_normal_rf_value_0[path][index];
-               }
-               bb_ap_mode[index] = bb_normal_ap_mode[index];
-
-               apkbound = 6;
-       }
-
-       for (index = 0; index < APK_BB_REG_NUM; index++) {
-               if (index == 0)
-                       continue;
-               bb_backup[index] = rtl_get_bbreg(hw, bb_reg[index], MASKDWORD);
-       }
-
-       _rtl92c_phy_save_mac_registers(hw, mac_reg, mac_backup);
-
-       _rtl92c_phy_save_adda_registers(hw, afe_reg, afe_backup, 16);
-
-       for (path = 0; path < pathbound; path++) {
-               if (path == RF90_PATH_A) {
-                       offset = 0xb00;
-                       for (index = 0; index < 11; index++) {
-                               rtl_set_bbreg(hw, offset, MASKDWORD,
-                                             apk_normal_setting_value_1
-                                             [index]);
-
-                               offset += 0x04;
-                       }
-
-                       rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
-
-                       offset = 0xb68;
-                       for (; index < 13; index++) {
-                               rtl_set_bbreg(hw, offset, MASKDWORD,
-                                             apk_normal_setting_value_1
-                                             [index]);
-
-                               offset += 0x04;
-                       }
-
-                       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
-
-                       offset = 0xb00;
-                       for (index = 0; index < 16; index++) {
-                               rtl_set_bbreg(hw, offset, MASKDWORD,
-                                             apk_normal_setting_value_2
-                                             [index]);
-
-                               offset += 0x04;
-                       }
-                       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
-               } else if (path == RF90_PATH_B) {
-                       offset = 0xb70;
-                       for (index = 0; index < 10; index++) {
-                               rtl_set_bbreg(hw, offset, MASKDWORD,
-                                             apk_normal_setting_value_1
-                                             [index]);
-
-                               offset += 0x04;
-                       }
-                       rtl_set_bbreg(hw, 0xb28, MASKDWORD, 0x12680000);
-                       rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
-
-                       offset = 0xb68;
-                       index = 11;
-                       for (; index < 13; index++) {
-                               rtl_set_bbreg(hw, offset, MASKDWORD,
-                                             apk_normal_setting_value_1
-                                             [index]);
-
-                               offset += 0x04;
-                       }
-
-                       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
-
-                       offset = 0xb60;
-                       for (index = 0; index < 16; index++) {
-                               rtl_set_bbreg(hw, offset, MASKDWORD,
-                                             apk_normal_setting_value_2
-                                             [index]);
-
-                               offset += 0x04;
-                       }
-                       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
-               }
-
-               reg_d[path] = rtl_get_rfreg(hw, (enum radio_path)path,
-                                           0xd, MASKDWORD);
-
-               for (index = 0; index < APK_AFE_REG_NUM; index++)
-                       rtl_set_bbreg(hw, afe_reg[index], MASKDWORD,
-                                     afe_on_off[path]);
-
-               if (path == RF90_PATH_A) {
-                       for (index = 0; index < APK_BB_REG_NUM; index++) {
-                               if (index == 0)
-                                       continue;
-                               rtl_set_bbreg(hw, bb_reg[index], MASKDWORD,
-                                             bb_ap_mode[index]);
-                       }
-               }
-
-               _rtl92c_phy_mac_setting_calibration(hw, mac_reg, mac_backup);
-
-               if (path == 0) {
-                       rtl_set_rfreg(hw, RF90_PATH_B, 0x0, MASKDWORD, 0x10000);
-               } else {
-                       rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASKDWORD,
-                                     0x10000);
-                       rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
-                                     0x1000f);
-                       rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
-                                     0x20103);
-               }
-
-               delta_offset = ((delta + 14) / 2);
-               if (delta_offset < 0)
-                       delta_offset = 0;
-               else if (delta_offset > 12)
-                       delta_offset = 12;
-
-               for (index = 0; index < APK_BB_REG_NUM; index++) {
-                       if (index != 1)
-                               continue;
-
-                       tmpreg = apk_rf_init_value[path][index];
-
-                       if (!rtlefuse->b_apk_thermalmeterignore) {
-                               bb_offset = (tmpreg & 0xF0000) >> 16;
-
-                               if (!(tmpreg & BIT(15)))
-                                       bb_offset = -bb_offset;
-
-                               delta_v =
-                                   apk_delta_mapping[index][delta_offset];
-
-                               bb_offset += delta_v;
-
-                               if (bb_offset < 0) {
-                                       tmpreg = tmpreg & (~BIT(15));
-                                       bb_offset = -bb_offset;
-                               } else {
-                                       tmpreg = tmpreg | BIT(15);
-                               }
-
-                               tmpreg =
-                                   (tmpreg & 0xFFF0FFFF) | (bb_offset << 16);
-                       }
-
-                       rtl_set_rfreg(hw, (enum radio_path)path, 0xc,
-                                     MASKDWORD, 0x8992e);
-                       rtl_set_rfreg(hw, (enum radio_path)path, 0x0,
-                                     MASKDWORD, apk_rf_value_0[path][index]);
-                       rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
-                                     MASKDWORD, tmpreg);
-
-                       i = 0;
-                       do {
-                               rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80000000);
-                               rtl_set_bbreg(hw, apk_offset[path],
-                                             MASKDWORD, apk_value[0]);
-                               RTPRINT(rtlpriv, FINIT, INIT_IQK,
-                                       ("PHY_APCalibrate() offset 0x%x "
-                                        "value 0x%x\n",
-                                        apk_offset[path],
-                                        rtl_get_bbreg(hw, apk_offset[path],
-                                                      MASKDWORD)));
-
-                               mdelay(3);
-
-                               rtl_set_bbreg(hw, apk_offset[path],
-                                             MASKDWORD, apk_value[1]);
-                               RTPRINT(rtlpriv, FINIT, INIT_IQK,
-                                       ("PHY_APCalibrate() offset 0x%x "
-                                        "value 0x%x\n",
-                                        apk_offset[path],
-                                        rtl_get_bbreg(hw, apk_offset[path],
-                                                      MASKDWORD)));
-
-                               mdelay(20);
-
-                               rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
-
-                               if (path == RF90_PATH_A)
-                                       tmpreg = rtl_get_bbreg(hw, 0xbd8,
-                                                              0x03E00000);
-                               else
-                                       tmpreg = rtl_get_bbreg(hw, 0xbd8,
-                                                              0xF8000000);
-
-                               RTPRINT(rtlpriv, FINIT, INIT_IQK,
-                                       ("PHY_APCalibrate() offset "
-                                        "0xbd8[25:21] %x\n", tmpreg));
-
-                               i++;
-
-                       } while (tmpreg > apkbound && i < 4);
-
-                       apk_result[path][index] = tmpreg;
-               }
-       }
-
-       _rtl92c_phy_reload_mac_registers(hw, mac_reg, mac_backup);
-
-       for (index = 0; index < APK_BB_REG_NUM; index++) {
-               if (index == 0)
-                       continue;
-               rtl_set_bbreg(hw, bb_reg[index], MASKDWORD, bb_backup[index]);
-       }
-
-       _rtl92c_phy_reload_adda_registers(hw, afe_reg, afe_backup, 16);
-
-       for (path = 0; path < pathbound; path++) {
-               rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
-                             MASKDWORD, reg_d[path]);
-
-               if (path == RF90_PATH_B) {
-                       rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
-                                     0x1000f);
-                       rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
-                                     0x20101);
-               }
-
-               if (apk_result[path][1] > 6)
-                       apk_result[path][1] = 6;
-       }
-
-       for (path = 0; path < pathbound; path++) {
-               rtl_set_rfreg(hw, (enum radio_path)path, 0x3, MASKDWORD,
-                             ((apk_result[path][1] << 15) |
-                              (apk_result[path][1] << 10) |
-                              (apk_result[path][1] << 5) |
-                              apk_result[path][1]));
-
-               if (path == RF90_PATH_A)
-                       rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
-                                     ((apk_result[path][1] << 15) |
-                                      (apk_result[path][1] << 10) |
-                                      (0x00 << 5) | 0x05));
-               else
-                       rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
-                                     ((apk_result[path][1] << 15) |
-                                      (apk_result[path][1] << 10) |
-                                      (0x02 << 5) | 0x05));
-
-               rtl_set_rfreg(hw, (enum radio_path)path, 0xe, MASKDWORD,
-                             ((0x08 << 15) | (0x08 << 10) | (0x08 << 5) |
-                              0x08));
-
-       }
-
-       rtlphy->b_apk_done = true;
-#endif
-}
-
-static void _rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw,
-                                         bool bmain, bool is2t)
-{
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
-       if (is_hal_stop(rtlhal)) {
-               rtl_set_bbreg(hw, REG_LEDCFG0, BIT(23), 0x01);
-               rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
-       }
-       if (is2t) {
-               if (bmain)
-                       rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
-                                     BIT(5) | BIT(6), 0x1);
-               else
-                       rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
-                                     BIT(5) | BIT(6), 0x2);
-       } else {
-               if (bmain)
-                       rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x2);
-               else
-                       rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x1);
-
-       }
-}
-
-#undef IQK_ADDA_REG_NUM
-#undef IQK_DELAY_TIME
-
-void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
-       long result[4][8];
-       u8 i, final_candidate;
-       bool b_patha_ok, b_pathb_ok;
-       long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
-           reg_ecc, reg_tmp = 0;
-       bool is12simular, is13simular, is23simular;
-       bool b_start_conttx = false, b_singletone = false;
-       u32 iqk_bb_reg[10] = {
-               ROFDM0_XARXIQIMBALANCE,
-               ROFDM0_XBRXIQIMBALANCE,
-               ROFDM0_ECCATHRESHOLD,
-               ROFDM0_AGCRSSITABLE,
-               ROFDM0_XATXIQIMBALANCE,
-               ROFDM0_XBTXIQIMBALANCE,
-               ROFDM0_XCTXIQIMBALANCE,
-               ROFDM0_XCTXAFE,
-               ROFDM0_XDTXAFE,
-               ROFDM0_RXIQEXTANTA
-       };
-
-       if (b_recovery) {
-               _rtl92c_phy_reload_adda_registers(hw,
-                                                 iqk_bb_reg,
-                                                 rtlphy->iqk_bb_backup, 10);
-               return;
-       }
-       if (b_start_conttx || b_singletone)
-               return;
-       for (i = 0; i < 8; i++) {
-               result[0][i] = 0;
-               result[1][i] = 0;
-               result[2][i] = 0;
-               result[3][i] = 0;
-       }
-       final_candidate = 0xff;
-       b_patha_ok = false;
-       b_pathb_ok = false;
-       is12simular = false;
-       is23simular = false;
-       is13simular = false;
-       for (i = 0; i < 3; i++) {
-               if (IS_92C_SERIAL(rtlhal->version))
-                       _rtl92c_phy_iq_calibrate(hw, result, i, true);
-               else
-                       _rtl92c_phy_iq_calibrate(hw, result, i, false);
-               if (i == 1) {
-                       is12simular = _rtl92c_phy_simularity_compare(hw,
-                                                                    result, 0,
-                                                                    1);
-                       if (is12simular) {
-                               final_candidate = 0;
-                               break;
-                       }
-               }
-               if (i == 2) {
-                       is13simular = _rtl92c_phy_simularity_compare(hw,
-                                                                    result, 0,
-                                                                    2);
-                       if (is13simular) {
-                               final_candidate = 0;
-                               break;
-                       }
-                       is23simular = _rtl92c_phy_simularity_compare(hw,
-                                                                    result, 1,
-                                                                    2);
-                       if (is23simular)
-                               final_candidate = 1;
-                       else {
-                               for (i = 0; i < 8; i++)
-                                       reg_tmp += result[3][i];
-
-                               if (reg_tmp != 0)
-                                       final_candidate = 3;
-                               else
-                                       final_candidate = 0xFF;
-                       }
-               }
-       }
-       for (i = 0; i < 4; i++) {
-               reg_e94 = result[i][0];
-               reg_e9c = result[i][1];
-               reg_ea4 = result[i][2];
-               reg_eac = result[i][3];
-               reg_eb4 = result[i][4];
-               reg_ebc = result[i][5];
-               reg_ec4 = result[i][6];
-               reg_ecc = result[i][7];
-       }
-       if (final_candidate != 0xff) {
-               rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
-               rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
-               reg_ea4 = result[final_candidate][2];
-               reg_eac = result[final_candidate][3];
-               rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
-               rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
-               reg_ec4 = result[final_candidate][6];
-               reg_ecc = result[final_candidate][7];
-               b_patha_ok = b_pathb_ok = true;
-       } else {
-               rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
-               rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0;
-       }
-       if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
-               _rtl92c_phy_path_a_fill_iqk_matrix(hw, b_patha_ok, result,
-                                                  final_candidate,
-                                                  (reg_ea4 == 0));
-       if (IS_92C_SERIAL(rtlhal->version)) {
-               if (reg_eb4 != 0) /*&&(reg_ec4 != 0) */
-                       _rtl92c_phy_path_b_fill_iqk_matrix(hw, b_pathb_ok,
-                                                          result,
-                                                          final_candidate,
-                                                          (reg_ec4 == 0));
-       }
-       _rtl92c_phy_save_adda_registers(hw, iqk_bb_reg,
-                                       rtlphy->iqk_bb_backup, 10);
-}
-
-void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw)
-{
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       bool b_start_conttx = false, b_singletone = false;
-
-       if (b_start_conttx || b_singletone)
-               return;
-       if (IS_92C_SERIAL(rtlhal->version))
-               _rtl92c_phy_lc_calibrate(hw, true);
-       else
-               _rtl92c_phy_lc_calibrate(hw, false);
-}
-
-void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
-       if (rtlphy->b_apk_done)
-               return;
-       if (IS_92C_SERIAL(rtlhal->version))
-               _rtl92c_phy_ap_calibrate(hw, delta, true);
-       else
-               _rtl92c_phy_ap_calibrate(hw, delta, false);
-}
-
-void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
-{
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-
-       if (IS_92C_SERIAL(rtlhal->version))
-               _rtl92c_phy_set_rfpath_switch(hw, bmain, true);
-       else
-               _rtl92c_phy_set_rfpath_switch(hw, bmain, false);
-}
-
-bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       bool b_postprocessing = false;
-
-       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
-                ("-->IO Cmd(%#x), set_io_inprogress(%d)\n",
-                 iotype, rtlphy->set_io_inprogress));
-       do {
-               switch (iotype) {
-               case IO_CMD_RESUME_DM_BY_SCAN:
-                       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
-                                ("[IO CMD] Resume DM after scan.\n"));
-                       b_postprocessing = true;
-                       break;
-               case IO_CMD_PAUSE_DM_BY_SCAN:
-                       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
-                                ("[IO CMD] Pause DM before scan.\n"));
-                       b_postprocessing = true;
-                       break;
-               default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                ("switch case not process\n"));
-                       break;
-               }
-       } while (false);
-       if (b_postprocessing && !rtlphy->set_io_inprogress) {
-               rtlphy->set_io_inprogress = true;
-               rtlphy->current_io_type = iotype;
-       } else {
-               return false;
-       }
-       rtl92c_phy_set_io(hw);
-       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, ("<--IO Type(%#x)\n", iotype));
-       return true;
-}
-
-void rtl92c_phy_set_io(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
-       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
-                ("--->Cmd(%#x), set_io_inprogress(%d)\n",
-                 rtlphy->current_io_type, rtlphy->set_io_inprogress));
-       switch (rtlphy->current_io_type) {
-       case IO_CMD_RESUME_DM_BY_SCAN:
-               dm_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1;
-               rtl92c_dm_write_dig(hw);
-               rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
-               break;
-       case IO_CMD_PAUSE_DM_BY_SCAN:
-               rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue;
-               dm_digtable.cur_igvalue = 0x17;
-               rtl92c_dm_write_dig(hw);
-               break;
-       default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        ("switch case not process\n"));
-               break;
-       }
-       rtlphy->set_io_inprogress = false;
-       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
-                ("<---(%#x)\n", rtlphy->current_io_type));
-}
-
-void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
-       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
-       rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
-       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
-       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
-       rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
-}
-
-static void _rtl92ce_phy_set_rf_sleep(struct ieee80211_hw *hw)
-{
-       u32 u4b_tmp;
-       u8 delay = 5;
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
-       rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
-       rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
-       u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
-       while (u4b_tmp != 0 && delay > 0) {
-               rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
-               rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
-               rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
-               u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
-               delay--;
-       }
-       if (delay == 0) {
-               rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
-               rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
-               rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
-               rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
-               RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
-                        ("Switch RF timeout !!!.\n"));
-               return;
-       }
-       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
-       rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
-}
-
 static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
                                            enum rf_pwrstate rfpwr_state)
 {
@@ -2648,7 +599,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
                                  jiffies_to_msecs(jiffies -
                                                   ppsc->last_awake_jiffies)));
                        ppsc->last_sleep_jiffies = jiffies;
-                       _rtl92ce_phy_set_rf_sleep(hw);
+                       _rtl92c_phy_set_rf_sleep(hw);
                        break;
                }
        default:
@@ -2663,7 +614,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
        return bresult;
 }
 
-bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
+bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
                                   enum rf_pwrstate rfpwr_state)
 {
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
index ca4daee6e9a8f86c469afe3e51e288c7bfb8b265..a37267e3fc22b2e825652e2e2991daf9d01235c9 100644
@@ -57,8 +57,6 @@
 #define IQK_MAC_REG_NUM                        4
 
 #define RF90_PATH_MAX                  2
-#define CHANNEL_MAX_NUMBER             14
-#define CHANNEL_GROUP_MAX              3
 
 #define CT_OFFSET_MAC_ADDR             0X16
 
@@ -78,9 +76,7 @@
 #define CT_OFFSET_CUSTOMER_ID          0x7F
 
 #define RTL92C_MAX_PATH_NUM            2
-#define CHANNEL_MAX_NUMBER             14
-#define CHANNEL_GROUP_MAX              3
-
+#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER        255
 enum swchnlcmd_id {
        CMDID_END,
        CMDID_SET_TXPOWEROWER_LEVEL,
@@ -195,11 +191,11 @@ extern void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
 extern u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
                                   enum radio_path rfpath, u32 regaddr,
                                   u32 bitmask);
-extern void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
+extern void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
                                  enum radio_path rfpath, u32 regaddr,
                                  u32 bitmask, u32 data);
 extern bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl92c_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw);
 extern bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
 extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
                                                 enum radio_path rfpath);
@@ -227,11 +223,32 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
 extern bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
                                              u32 rfpath);
 bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
+bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
                                          enum rf_pwrstate rfpwr_state);
-void rtl92c_phy_config_bb_external_pa(struct ieee80211_hw *hw);
 void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
 bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
 void rtl92c_phy_set_io(struct ieee80211_hw *hw);
+void rtl92c_bb_block_on(struct ieee80211_hw *hw);
+u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
+                                     enum radio_path rfpath, u32 offset);
+u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
+                                        enum radio_path rfpath, u32 offset);
+u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
+void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
+                                       enum radio_path rfpath, u32 offset,
+                                       u32 data);
+void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
+                                                  u32 regaddr, u32 bitmask,
+                                                  u32 data);
+void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
+                                          enum radio_path rfpath, u32 offset,
+                                          u32 data);
+void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
+                                                  u32 regaddr, u32 bitmask,
+                                                  u32 data);
+bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
+bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
+void _rtl92c_phy_set_rf_sleep(struct ieee80211_hw *hw);
 
 #endif
index 875d514652252bea60a58026ca70bb8bbd05bc96..b0868a613841f0f05e6cc316b3eea39b43f1bea8 100644
 #define REG_LEDCFG3                            0x004F
 #define REG_FSIMR                              0x0050
 #define REG_FSISR                              0x0054
-
+#define REG_HSIMR                              0x0058
+#define REG_HSISR                              0x005c
+
+/* RTL8723 WIFI/BT/GPS Multi-Function GPIO Pin Control. */
+#define REG_GPIO_PIN_CTRL_2                    0x0060
+/* RTL8723 WIFI/BT/GPS Multi-Function GPIO Select. */
+#define REG_GPIO_IO_SEL_2                      0x0062
+/* RTL8723 WIFI/BT/GPS Multi-Function control source. */
+#define REG_MULTI_FUNC_CTRL                    0x0068
 #define REG_MCUFWDL                            0x0080
 
 #define REG_HMEBOX_EXT_0                       0x0088
@@ -79,6 +87,7 @@
 #define REG_PCIE_MIO_INTD                      0x00E8
 #define REG_HPON_FSM                           0x00EC
 #define REG_SYS_CFG                            0x00F0
+#define REG_GPIO_OUTSTS                                0x00F4  /* For RTL8723 only.*/
 
 #define REG_CR                                 0x0100
 #define REG_PBP                                        0x0104
 #define REG_RDG_PIFS                           0x0513
 #define REG_SIFS_CTX                           0x0514
 #define REG_SIFS_TRX                           0x0516
+#define REG_SIFS_CCK                           0x0514
+#define REG_SIFS_OFDM                          0x0516
 #define REG_AGGR_BREAK_TIME                    0x051A
 #define REG_SLOT                               0x051B
 #define REG_TX_PTCL_CTRL                       0x0520
 #define REG_MAC_SPEC_SIFS                      0x063A
 #define REG_RESP_SIFS_CCK                      0x063C
 #define REG_RESP_SIFS_OFDM                     0x063E
+/* [15:8]SIFS_R2T_OFDM, [7:0]SIFS_R2T_CCK */
+#define REG_R2T_SIFS                           0x063C
+/* [15:8]SIFS_T2T_OFDM, [7:0]SIFS_T2T_CCK */
+#define REG_T2T_SIFS                           0x063E
 #define REG_ACKTO                              0x0640
 #define REG_CTS2TO                             0x0641
 #define REG_EIFS                               0x0642
 #define        STOPBE                                  BIT(1)
 #define        STOPBK                                  BIT(0)
 
-#define        RCR_APPFCS                              BIT(31)
+#define        RCR_APP_FCS                             BIT(31)
 #define        RCR_APP_MIC                             BIT(30)
 #define        RCR_APP_ICV                             BIT(29)
+#define        RCR_APP_PHYSTS                          BIT(28)
 #define        RCR_APP_PHYST_RXFF                      BIT(28)
 #define        RCR_APP_BA_SSN                          BIT(27)
 #define        RCR_ENMBID                              BIT(24)
 
 #define        BOOT_FROM_EEPROM                        BIT(4)
 #define        EEPROM_EN                               BIT(5)
+#define        EEPROMSEL                               BOOT_FROM_EEPROM
 
 #define AFE_BGEN                               BIT(0)
 #define AFE_MBEN                               BIT(1)
 #define BD_MAC2                                        BIT(9)
 #define BD_MAC1                                        BIT(10)
 #define IC_MACPHY_MODE                         BIT(11)
+#define BT_FUNC                                        BIT(16)
+#define VENDOR_ID                              BIT(19)
 #define PAD_HWPD_IDN                           BIT(22)
 #define TRP_VAUX_EN                            BIT(23)
 #define TRP_BT_EN                              BIT(24)
 #define BD_HCI_SEL                             BIT(26)
 #define TYPE_ID                                        BIT(27)
 
+/* REG_GPIO_OUTSTS (For RTL8723 only) */
+#define        EFS_HCI_SEL                             (BIT(0)|BIT(1))
+#define        PAD_HCI_SEL                             (BIT(2)|BIT(3))
+#define        HCI_SEL                                 (BIT(4)|BIT(5))
+#define        PKG_SEL_HCI                             BIT(6)
+#define        FEN_GPS                                 BIT(7)
+#define        FEN_BT                                  BIT(8)
+#define        FEN_WL                                  BIT(9)
+#define        FEN_PCI                                 BIT(10)
+#define        FEN_USB                                 BIT(11)
+#define        BTRF_HWPDN_N                            BIT(12)
+#define        WLRF_HWPDN_N                            BIT(13)
+#define        PDN_BT_N                                BIT(14)
+#define        PDN_GPS_N                               BIT(15)
+#define        BT_CTL_HWPDN                            BIT(16)
+#define        GPS_CTL_HWPDN                           BIT(17)
+#define        PPHY_SUSB                               BIT(20)
+#define        UPHY_SUSB                               BIT(21)
+#define        PCI_SUSEN                               BIT(22)
+#define        USB_SUSEN                               BIT(23)
+#define        RF_RL_ID                        (BIT(31) | BIT(30) | BIT(29) | BIT(28))
+
 #define CHIP_VER_RTL_MASK                      0xF000
 #define CHIP_VER_RTL_SHIFT                     12
 
 #define _RARF_RC7(x)                           (((x) & 0x1F) << 16)
 #define _RARF_RC8(x)                           (((x) & 0x1F) << 24)
 
-#define AC_PARAM_TXOP_LIMIT_OFFSET             16
+#define AC_PARAM_TXOP_OFFSET                   16
 #define AC_PARAM_ECW_MAX_OFFSET                        12
 #define AC_PARAM_ECW_MIN_OFFSET                        8
 #define AC_PARAM_AIFS_OFFSET                   0
 
 #define        HAL_8192C_HW_GPIO_WPS_BIT               BIT(2)
 
+/* REG_MULTI_FUNC_CTRL(For RTL8723 Only) */
+/* Enable GPIO[9] as WiFi HW PDn source */
+#define        WL_HWPDN_EN                             BIT(0)
+/* WiFi HW PDn polarity control */
+#define        WL_HWPDN_SL                             BIT(1)
+/* WiFi function enable */
+#define        WL_FUNC_EN                              BIT(2)
+/* Enable GPIO[9] as WiFi RF HW PDn source */
+#define        WL_HWROF_EN                             BIT(3)
+/* Enable GPIO[11] as BT HW PDn source */
+#define        BT_HWPDN_EN                             BIT(16)
+/* BT HW PDn polarity control */
+#define        BT_HWPDN_SL                             BIT(17)
+/* BT function enable */
+#define        BT_FUNC_EN                              BIT(18)
+/* Enable GPIO[11] as BT/GPS RF HW PDn source */
+#define        BT_HWROF_EN                             BIT(19)
+/* Enable GPIO[10] as GPS HW PDn source */
+#define        GPS_HWPDN_EN                            BIT(20)
+/* GPS HW PDn polarity control */
+#define        GPS_HWPDN_SL                            BIT(21)
+/* GPS function enable */
+#define        GPS_FUNC_EN                             BIT(22)
+
 #define        RPMAC_RESET                             0x100
 #define        RPMAC_TXSTART                           0x104
 #define        RPMAC_TXLEGACYSIG                       0x108
 #define        BTXHTSTBC                               0x30
 #define        BTXHTADVANCECODING                      0x40
 #define        BTXHTSHORTGI                            0x80
-#define        BTXHTNUMBERHT_LT        F               0x300
+#define        BTXHTNUMBERHT_LTF                       0x300
 #define        BTXHTCRC8                               0x3fc00
 #define        BCOUNTERRESET                           0x10000
 #define        BNUMOFOFDMTX                            0xffff
index ffd8e04c40287d48619fddcfd44240e6f3a24053..669b1168dbec8d75ce57477569b6f26efcb01933 100644
@@ -61,7 +61,7 @@ void rtl92c_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
        }
 }
 
-void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
                                       u8 *ppowerlevel)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -410,7 +410,7 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
        }
 }
 
-void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
                                        u8 *ppowerlevel, u8 channel)
 {
        u32 writeVal[2], powerBase0[2], powerBase1[2];
@@ -430,7 +430,7 @@ void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
        }
 }
 
-bool rtl92c_phy_rf6052_config(struct ieee80211_hw *hw)
+bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -484,11 +484,11 @@ static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
 
                switch (rfpath) {
                case RF90_PATH_A:
-                       rtstatus = rtl92c_phy_config_rf_with_headerfile(hw,
+                       rtstatus = rtl92ce_phy_config_rf_with_headerfile(hw,
                                        (enum radio_path) rfpath);
                        break;
                case RF90_PATH_B:
-                       rtstatus = rtl92c_phy_config_rf_with_headerfile(hw,
+                       rtstatus = rtl92ce_phy_config_rf_with_headerfile(hw,
                                        (enum radio_path) rfpath);
                        break;
                case RF90_PATH_C:
index d3014f99bb7b379ce2e3bf04f39f9e8d31e323cb..3aa520c1c171de26bd2a096c5be9a816714fd4bc 100644
@@ -40,5 +40,8 @@ extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
                                              u8 *ppowerlevel);
 extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
                                               u8 *ppowerlevel, u8 channel);
-extern bool rtl92c_phy_rf6052_config(struct ieee80211_hw *hw);
+bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
+bool rtl92ce_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+                                         enum radio_path rfpath);
+
 #endif
index b366e8862929047fd9442ee2b0173ab9a3722344..b1cc4d44f534b464198e63bfe7612b9da8b1ac1c 100644
@@ -37,6 +37,7 @@
 #include "phy.h"
 #include "dm.h"
 #include "hw.h"
+#include "rf.h"
 #include "sw.h"
 #include "trx.h"
 #include "led.h"
@@ -46,13 +47,13 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-       rtlpriv->dm.b_dm_initialgain_enable = 1;
+       rtlpriv->dm.dm_initialgain_enable = 1;
        rtlpriv->dm.dm_flag = 0;
-       rtlpriv->dm.b_disable_framebursting = 0;;
+       rtlpriv->dm.disable_framebursting = 0;
        rtlpriv->dm.thermalvalue = 0;
        rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);
 
-       rtlpci->receive_config = (RCR_APPFCS |
+       rtlpci->receive_config = (RCR_APP_FCS |
                                  RCR_AMF |
                                  RCR_ADF |
                                  RCR_APP_MIC |
@@ -122,7 +123,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
        .switch_channel = rtl92c_phy_sw_chnl,
        .dm_watchdog = rtl92c_dm_watchdog,
        .scan_operation_backup = rtl92c_phy_scan_operation_backup,
-       .set_rf_power_state = rtl92c_phy_set_rf_power_state,
+       .set_rf_power_state = rtl92ce_phy_set_rf_power_state,
        .led_control = rtl92ce_led_control,
        .set_desc = rtl92ce_set_desc,
        .get_desc = rtl92ce_get_desc,
@@ -133,8 +134,17 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
        .deinit_sw_leds = rtl92ce_deinit_sw_leds,
        .get_bbreg = rtl92c_phy_query_bb_reg,
        .set_bbreg = rtl92c_phy_set_bb_reg,
-       .get_rfreg = rtl92c_phy_query_rf_reg,
-       .set_rfreg = rtl92c_phy_set_rf_reg,
+       .get_rfreg = rtl92ce_phy_query_rf_reg,
+       .set_rfreg = rtl92ce_phy_set_rf_reg,
+       .cmd_send_packet = _rtl92c_cmd_send_packet,
+       .phy_rf6052_config = rtl92ce_phy_rf6052_config,
+       .phy_rf6052_set_cck_txpower = rtl92ce_phy_rf6052_set_cck_txpower,
+       .phy_rf6052_set_ofdm_txpower = rtl92ce_phy_rf6052_set_ofdm_txpower,
+       .config_bb_with_headerfile = _rtl92ce_phy_config_bb_with_headerfile,
+       .config_bb_with_pgheaderfile = _rtl92ce_phy_config_bb_with_pgheaderfile,
+       .phy_lc_calibrate = _rtl92ce_phy_lc_calibrate,
+       .phy_set_bw_mode_callback = rtl92ce_phy_set_bw_mode_callback,
+       .dm_dynamic_txpower = rtl92ce_dm_dynamic_txpower,
 };
 
 static struct rtl_mod_params rtl92ce_mod_params = {
index de1198c38d4e65a49b3a02014a6206d3e5c66baf..36e657668c1e720d974a07c71a0473b2812eaa2c 100644
 int rtl92c_init_sw_vars(struct ieee80211_hw *hw);
 void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw);
 void rtl92c_init_var_map(struct ieee80211_hw *hw);
+bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
+                            struct sk_buff *skb);
+void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                       u8 *ppowerlevel);
+void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                        u8 *ppowerlevel, u8 channel);
+bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+                                                 u8 configtype);
+bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+                                                   u8 configtype);
+void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
+u32 rtl92ce_phy_query_rf_reg(struct ieee80211_hw *hw,
+                           enum radio_path rfpath, u32 regaddr, u32 bitmask);
+void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
 
 #endif
index bf5852f2d634ca0fc0445e8d005a1cf393833c58..aa2b5815600fa8a5db87f37f9fa1eb180bdd1941 100644
@@ -36,7 +36,7 @@
 #include "trx.h"
 #include "led.h"
 
-static enum rtl_desc_qsel _rtl92ce_map_hwqueue_to_fwqueue(u16 fc,
+static enum rtl_desc_qsel _rtl92ce_map_hwqueue_to_fwqueue(__le16 fc,
                                                          unsigned int
                                                          skb_queue)
 {
@@ -245,24 +245,24 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
                                       struct rtl_stats *pstats,
                                       struct rx_desc_92c *pdesc,
                                       struct rx_fwinfo_92c *p_drvinfo,
-                                      bool bpacket_match_bssid,
-                                      bool bpacket_toself,
-                                      bool b_packet_beacon)
+                                      bool packet_match_bssid,
+                                      bool packet_toself,
+                                      bool packet_beacon)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct phy_sts_cck_8192s_t *cck_buf;
        s8 rx_pwr_all, rx_pwr[4];
-       u8 rf_rx_num, evm, pwdb_all;
+       u8 evm, pwdb_all, rf_rx_num = 0;
        u8 i, max_spatial_stream;
-       u32 rssi, total_rssi;
+       u32 rssi, total_rssi = 0;
        bool is_cck_rate;
 
        is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
-       pstats->b_packet_matchbssid = bpacket_match_bssid;
-       pstats->b_packet_toself = bpacket_toself;
-       pstats->b_is_cck = is_cck_rate;
-       pstats->b_packet_beacon = b_packet_beacon;
-       pstats->b_is_cck = is_cck_rate;
+       pstats->packet_matchbssid = packet_match_bssid;
+       pstats->packet_toself = packet_toself;
+       pstats->is_cck = is_cck_rate;
+       pstats->packet_beacon = packet_beacon;
+       pstats->is_cck = is_cck_rate;
        pstats->rx_mimo_signalquality[0] = -1;
        pstats->rx_mimo_signalquality[1] = -1;
 
@@ -315,7 +315,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
                pstats->rx_pwdb_all = pwdb_all;
                pstats->recvsignalpower = rx_pwr_all;
 
-               if (bpacket_match_bssid) {
+               if (packet_match_bssid) {
                        u8 sq;
                        if (pstats->rx_pwdb_all > 40)
                                sq = 100;
@@ -334,10 +334,10 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
                        pstats->rx_mimo_signalquality[1] = -1;
                }
        } else {
-               rtlpriv->dm.brfpath_rxenable[0] =
-                   rtlpriv->dm.brfpath_rxenable[1] = true;
+               rtlpriv->dm.rfpath_rxenable[0] =
+                   rtlpriv->dm.rfpath_rxenable[1] = true;
                for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) {
-                       if (rtlpriv->dm.brfpath_rxenable[i])
+                       if (rtlpriv->dm.rfpath_rxenable[i])
                                rf_rx_num++;
 
                        rx_pwr[i] =
@@ -347,7 +347,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
                        rtlpriv->stats.rx_snr_db[i] =
                            (long)(p_drvinfo->rxsnr[i] / 2);
 
-                       if (bpacket_match_bssid)
+                       if (packet_match_bssid)
                                pstats->rx_mimo_signalstrength[i] = (u8) rssi;
                }
 
@@ -366,7 +366,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
                for (i = 0; i < max_spatial_stream; i++) {
                        evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]);
 
-                       if (bpacket_match_bssid) {
+                       if (packet_match_bssid) {
                                if (i == 0)
                                        pstats->signalquality =
                                            (u8) (evm & 0xff);
@@ -393,7 +393,7 @@ static void _rtl92ce_process_ui_rssi(struct ieee80211_hw *hw,
        u8 rfpath;
        u32 last_rssi, tmpval;
 
-       if (pstats->b_packet_toself || pstats->b_packet_beacon) {
+       if (pstats->packet_toself || pstats->packet_beacon) {
                rtlpriv->stats.rssi_calculate_cnt++;
 
                if (rtlpriv->stats.ui_rssi.total_num++ >=
@@ -421,7 +421,7 @@ static void _rtl92ce_process_ui_rssi(struct ieee80211_hw *hw,
                pstats->rssi = rtlpriv->stats.signal_strength;
        }
 
-       if (!pstats->b_is_cck && pstats->b_packet_toself) {
+       if (!pstats->is_cck && pstats->packet_toself) {
                for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
                     rfpath++) {
 
@@ -463,7 +463,7 @@ static void _rtl92ce_update_rxsignalstatistics(struct ieee80211_hw *hw,
                                               struct rtl_stats *pstats)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       int weighting;
+       int weighting = 0;
 
        if (rtlpriv->stats.recv_signal_power == 0)
                rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
@@ -493,7 +493,7 @@ static void _rtl92ce_process_pwdb(struct ieee80211_hw *hw,
                    rtlpriv->dm.undecorated_smoothed_pwdb;
        }
 
-       if (pstats->b_packet_toself || pstats->b_packet_beacon) {
+       if (pstats->packet_toself || pstats->packet_beacon) {
                if (undecorated_smoothed_pwdb < 0)
                        undecorated_smoothed_pwdb = pstats->rx_pwdb_all;
 
@@ -525,7 +525,7 @@ static void _rtl92ce_process_ui_link_quality(struct ieee80211_hw *hw,
        u32 last_evm, n_spatialstream, tmpval;
 
        if (pstats->signalquality != 0) {
-               if (pstats->b_packet_toself || pstats->b_packet_beacon) {
+               if (pstats->packet_toself || pstats->packet_beacon) {
 
                        if (rtlpriv->stats.ui_link_quality.total_num++ >=
                            PHY_LINKQUALITY_SLID_WIN_MAX) {
@@ -595,8 +595,8 @@ static void _rtl92ce_process_phyinfo(struct ieee80211_hw *hw,
                                     struct rtl_stats *pcurrent_stats)
 {
 
-       if (!pcurrent_stats->b_packet_matchbssid &&
-           !pcurrent_stats->b_packet_beacon)
+       if (!pcurrent_stats->packet_matchbssid &&
+           !pcurrent_stats->packet_beacon)
                return;
 
        _rtl92ce_process_ui_rssi(hw, pcurrent_stats);
@@ -617,34 +617,36 @@ static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw,
        u8 *tmp_buf;
        u8 *praddr;
        u8 *psaddr;
-       u16 fc, type;
-       bool b_packet_matchbssid, b_packet_toself, b_packet_beacon;
+       __le16 fc;
+       u16 type, c_fc;
+       bool packet_matchbssid, packet_toself, packet_beacon;
 
        tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
 
        hdr = (struct ieee80211_hdr *)tmp_buf;
-       fc = le16_to_cpu(hdr->frame_control);
+       fc = hdr->frame_control;
+       c_fc = le16_to_cpu(fc);
        type = WLAN_FC_GET_TYPE(fc);
        praddr = hdr->addr1;
        psaddr = hdr->addr2;
 
-       b_packet_matchbssid =
+       packet_matchbssid =
            ((IEEE80211_FTYPE_CTL != type) &&
             (!compare_ether_addr(mac->bssid,
-                                 (fc & IEEE80211_FCTL_TODS) ?
-                                 hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ?
+                                 (c_fc & IEEE80211_FCTL_TODS) ?
+                                 hdr->addr1 : (c_fc & IEEE80211_FCTL_FROMDS) ?
                                  hdr->addr2 : hdr->addr3)) &&
-            (!pstats->b_hwerror) && (!pstats->b_crc) && (!pstats->b_icv));
+            (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
 
-       b_packet_toself = b_packet_matchbssid &&
+       packet_toself = packet_matchbssid &&
            (!compare_ether_addr(praddr, rtlefuse->dev_addr));
 
        if (ieee80211_is_beacon(fc))
-               b_packet_beacon = true;
+               packet_beacon = true;
 
        _rtl92ce_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
-                                  b_packet_matchbssid, b_packet_toself,
-                                  b_packet_beacon);
+                                  packet_matchbssid, packet_toself,
+                                  packet_beacon);
 
        _rtl92ce_process_phyinfo(hw, tmp_buf, pstats);
 }
@@ -662,14 +664,14 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
        stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
            RX_DRV_INFO_SIZE_UNIT;
        stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
-       stats->b_icv = (u16) GET_RX_DESC_ICV(pdesc);
-       stats->b_crc = (u16) GET_RX_DESC_CRC32(pdesc);
-       stats->b_hwerror = (stats->b_crc | stats->b_icv);
+       stats->icv = (u16) GET_RX_DESC_ICV(pdesc);
+       stats->crc = (u16) GET_RX_DESC_CRC32(pdesc);
+       stats->hwerror = (stats->crc | stats->icv);
        stats->decrypted = !GET_RX_DESC_SWDEC(pdesc);
        stats->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
-       stats->b_shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
-       stats->b_isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
-       stats->b_isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
+       stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
+       stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
+       stats->isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
                                   && (GET_RX_DESC_FAGGR(pdesc) == 1));
        stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
        stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
@@ -689,7 +691,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
        if (GET_RX_DESC_RXHT(pdesc))
                rx_status->flag |= RX_FLAG_HT;
 
-       rx_status->flag |= RX_FLAG_TSFT;
+       rx_status->flag |= RX_FLAG_MACTIME_MPDU;
 
        if (stats->decrypted)
                rx_status->flag |= RX_FLAG_DECRYPTED;
@@ -727,27 +729,24 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-       bool b_defaultadapter = true;
-
-       struct ieee80211_sta *sta = ieee80211_find_sta(mac->vif, mac->bssid);
-
+       bool defaultadapter = true;
+       struct ieee80211_sta *sta;
        u8 *pdesc = (u8 *) pdesc_tx;
        struct rtl_tcb_desc tcb_desc;
        u8 *qc = ieee80211_get_qos_ctl(hdr);
        u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
        u16 seq_number;
-       u16 fc = le16_to_cpu(hdr->frame_control);
+       __le16 fc = hdr->frame_control;
        u8 rate_flag = info->control.rates[0].flags;
 
        enum rtl_desc_qsel fw_qsel =
-           _rtl92ce_map_hwqueue_to_fwqueue(le16_to_cpu(hdr->frame_control),
-                                           queue_index);
+           _rtl92ce_map_hwqueue_to_fwqueue(fc, queue_index);
 
-       bool b_firstseg = ((hdr->seq_ctrl &
-                           cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
+       bool firstseg = ((hdr->seq_ctrl &
+                         cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
 
-       bool b_lastseg = ((hdr->frame_control &
-                          cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
+       bool lastseg = ((hdr->frame_control &
+                        cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
 
        dma_addr_t mapping = pci_map_single(rtlpci->pdev,
                                            skb->data, skb->len,
@@ -759,7 +758,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
 
        CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92c));
 
-       if (b_firstseg) {
+       if (firstseg) {
                SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
 
                SET_TX_DESC_TX_RATE(pdesc, tcb_desc.hw_rate);
@@ -774,25 +773,25 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
                }
                SET_TX_DESC_SEQ(pdesc, seq_number);
 
-               SET_TX_DESC_RTS_ENABLE(pdesc, ((tcb_desc.b_rts_enable &&
+               SET_TX_DESC_RTS_ENABLE(pdesc, ((tcb_desc.rts_enable &&
                                                !tcb_desc.
-                                               b_cts_enable) ? 1 : 0));
+                                               cts_enable) ? 1 : 0));
                SET_TX_DESC_HW_RTS_ENABLE(pdesc,
-                                         ((tcb_desc.b_rts_enable
-                                           || tcb_desc.b_cts_enable) ? 1 : 0));
-               SET_TX_DESC_CTS2SELF(pdesc, ((tcb_desc.b_cts_enable) ? 1 : 0));
-               SET_TX_DESC_RTS_STBC(pdesc, ((tcb_desc.b_rts_stbc) ? 1 : 0));
+                                         ((tcb_desc.rts_enable
+                                           || tcb_desc.cts_enable) ? 1 : 0));
+               SET_TX_DESC_CTS2SELF(pdesc, ((tcb_desc.cts_enable) ? 1 : 0));
+               SET_TX_DESC_RTS_STBC(pdesc, ((tcb_desc.rts_stbc) ? 1 : 0));
 
                SET_TX_DESC_RTS_RATE(pdesc, tcb_desc.rts_rate);
                SET_TX_DESC_RTS_BW(pdesc, 0);
                SET_TX_DESC_RTS_SC(pdesc, tcb_desc.rts_sc);
                SET_TX_DESC_RTS_SHORT(pdesc,
                                      ((tcb_desc.rts_rate <= DESC92C_RATE54M) ?
-                                     (tcb_desc.b_rts_use_shortpreamble ? 1 : 0)
-                                     : (tcb_desc.b_rts_use_shortgi ? 1 : 0)));
+                                     (tcb_desc.rts_use_shortpreamble ? 1 : 0)
+                                     : (tcb_desc.rts_use_shortgi ? 1 : 0)));
 
                if (mac->bw_40) {
-                       if (tcb_desc.b_packet_bw) {
+                       if (tcb_desc.packet_bw) {
                                SET_TX_DESC_DATA_BW(pdesc, 1);
                                SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
                        } else {
@@ -811,10 +810,13 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
                SET_TX_DESC_LINIP(pdesc, 0);
                SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len);
 
+               rcu_read_lock();
+               sta = ieee80211_find_sta(mac->vif, mac->bssid);
                if (sta) {
                        u8 ampdu_density = sta->ht_cap.ampdu_density;
                        SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
                }
+               rcu_read_unlock();
 
                if (info->control.hw_key) {
                        struct ieee80211_key_conf *keyconf =
@@ -854,14 +856,14 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
                }
        }
 
-       SET_TX_DESC_FIRST_SEG(pdesc, (b_firstseg ? 1 : 0));
-       SET_TX_DESC_LAST_SEG(pdesc, (b_lastseg ? 1 : 0));
+       SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
+       SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
 
        SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
 
        SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
 
-       if (rtlpriv->dm.b_useramask) {
+       if (rtlpriv->dm.useramask) {
                SET_TX_DESC_RATE_ID(pdesc, tcb_desc.ratr_index);
                SET_TX_DESC_MACID(pdesc, tcb_desc.mac_id);
        } else {
@@ -869,16 +871,16 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
                SET_TX_DESC_MACID(pdesc, tcb_desc.ratr_index);
        }
 
-       if ((!ieee80211_is_data_qos(fc)) && ppsc->b_leisure_ps &&
-           ppsc->b_fwctrl_lps) {
+       if ((!ieee80211_is_data_qos(fc)) && ppsc->leisure_ps &&
+           ppsc->fwctrl_lps) {
                SET_TX_DESC_HWSEQ_EN(pdesc, 1);
                SET_TX_DESC_PKT_ID(pdesc, 8);
 
-               if (!b_defaultadapter)
+               if (!defaultadapter)
                        SET_TX_DESC_QOS(pdesc, 1);
        }
 
-       SET_TX_DESC_MORE_FRAG(pdesc, (b_lastseg ? 0 : 1));
+       SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
 
        if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
            is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
@@ -889,8 +891,8 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
 }
 
 void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
-                            u8 *pdesc, bool b_firstseg,
-                            bool b_lastseg, struct sk_buff *skb)
+                            u8 *pdesc, bool firstseg,
+                            bool lastseg, struct sk_buff *skb)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -901,11 +903,11 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
                                            PCI_DMA_TODEVICE);
 
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
-       u16 fc = le16_to_cpu(hdr->frame_control);
+       __le16 fc = hdr->frame_control;
 
        CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
 
-       if (b_firstseg)
+       if (firstseg)
                SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
 
        SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
@@ -1029,3 +1031,36 @@ void rtl92ce_tx_polling(struct ieee80211_hw *hw, unsigned int hw_queue)
                               BIT(0) << (hw_queue));
        }
 }
+
+bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
+                            struct sk_buff *skb)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+       struct rtl8192_tx_ring *ring;
+       struct rtl_tx_desc *pdesc;
+       u8 own;
+       unsigned long flags;
+       struct sk_buff *pskb = NULL;
+
+       ring = &rtlpci->tx_ring[BEACON_QUEUE];
+
+       spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+
+       pskb = __skb_dequeue(&ring->queue);
+       if (pskb)
+               kfree_skb(pskb);
+
+       pdesc = &ring->desc[0];
+       own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
+
+       rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
+
+       __skb_queue_tail(&ring->queue, skb);
+
+       spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+       rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
+
+       return true;
+}
index 53d0e0a5af5c42cb639019d5d1da3760a4f98455..803adcc80c969539d9ce417ae13752b081998ec8 100644
 #define USB_HWDESC_HEADER_LEN                  32
 #define CRCLENGTH                              4
 
+/* Define a macro that takes a le32 word, converts it to host ordering,
+ * right shifts by a specified count, creates a mask of the specified
+ * bit count, and extracts that number of bits.
+ */
+
+#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask)            \
+       ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
+       BIT_LEN_MASK_32(__mask))
+
+/* Define a macro that clears a bit field in an le32 word and
+ * sets the specified value into that bit field. The resulting
+ * value remains in le32 ordering; however, it is properly converted
+ * to host ordering for the clear and set operations before conversion
+ * back to le32.
+ */
+
+#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val)     \
+       (*(__le32 *)(__pdesc) =                                 \
+       (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) &     \
+       (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) |                \
+       (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))));
+
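
As an illustration of the shift-and-mask pattern the two comments above describe, here is a minimal host-order sketch; get_bits()/set_bits() are hypothetical helpers, not driver code, and the le32_to_cpu()/cpu_to_le32() steps the real macros perform are deliberately skipped. The field positions used (packet size in bits [15:0], OWN in bit 31) mirror the descriptor macros that follow.

#include <stdint.h>
#include <stdio.h>

/* Extract 'len' bits (len < 32 assumed) starting at 'shift' from a host-order word. */
static uint32_t get_bits(uint32_t word, unsigned int shift, unsigned int len)
{
        return (word >> shift) & ((1u << len) - 1);
}

/* Clear the 'len'-bit field at 'shift' and write 'val' into it. */
static uint32_t set_bits(uint32_t word, unsigned int shift, unsigned int len,
                         uint32_t val)
{
        uint32_t mask = ((1u << len) - 1) << shift;

        return (word & ~mask) | ((val & ((1u << len) - 1)) << shift);
}

int main(void)
{
        uint32_t desc = 0;

        desc = set_bits(desc, 0, 16, 1500); /* packet size lives in bits [15:0] */
        desc = set_bits(desc, 31, 1, 1);    /* OWN flag lives in bit 31 */
        printf("pkt_size=%u own=%u\n",
               get_bits(desc, 0, 16), get_bits(desc, 31, 1));
        return 0;
}

The real macros differ only in converting the little-endian descriptor word to host order before the arithmetic (and back again on writes), so the same bit manipulation works regardless of CPU endianness.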
+/* macros to read/write various fields in RX or TX descriptors */
+
 #define SET_TX_DESC_PKT_SIZE(__pdesc, __val)           \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 0, 16, __val)
 #define SET_TX_DESC_OFFSET(__pdesc, __val)             \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 16, 8, __val)
 #define SET_TX_DESC_BMC(__pdesc, __val)                        \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 24, 1, __val)
 #define SET_TX_DESC_HTC(__pdesc, __val)                        \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 25, 1, __val)
 #define SET_TX_DESC_LAST_SEG(__pdesc, __val)           \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val)
 #define SET_TX_DESC_FIRST_SEG(__pdesc, __val)          \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val)
 #define SET_TX_DESC_LINIP(__pdesc, __val)              \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val)
 #define SET_TX_DESC_NO_ACM(__pdesc, __val)             \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val)
 #define SET_TX_DESC_GF(__pdesc, __val)                 \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
 #define SET_TX_DESC_OWN(__pdesc, __val)                        \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
 
 #define GET_TX_DESC_PKT_SIZE(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc, 0, 16)
+       SHIFT_AND_MASK_LE(__pdesc, 0, 16)
 #define GET_TX_DESC_OFFSET(__pdesc)                    \
-       LE_BITS_TO_4BYTE(__pdesc, 16, 8)
+       SHIFT_AND_MASK_LE(__pdesc, 16, 8)
 #define GET_TX_DESC_BMC(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc, 24, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 24, 1)
 #define GET_TX_DESC_HTC(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc, 25, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 25, 1)
 #define GET_TX_DESC_LAST_SEG(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc, 26, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 26, 1)
 #define GET_TX_DESC_FIRST_SEG(__pdesc)                 \
-       LE_BITS_TO_4BYTE(__pdesc, 27, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 27, 1)
 #define GET_TX_DESC_LINIP(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc, 28, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 28, 1)
 #define GET_TX_DESC_NO_ACM(__pdesc)                    \
-       LE_BITS_TO_4BYTE(__pdesc, 29, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 29, 1)
 #define GET_TX_DESC_GF(__pdesc)                                \
-       LE_BITS_TO_4BYTE(__pdesc, 30, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 30, 1)
 #define GET_TX_DESC_OWN(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc, 31, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 31, 1)
 
 #define SET_TX_DESC_MACID(__pdesc, __val)              \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 5, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 0, 5, __val)
 #define SET_TX_DESC_AGG_BREAK(__pdesc, __val)          \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 5, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 5, 1, __val)
 #define SET_TX_DESC_BK(__pdesc, __val)                 \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 6, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 6, 1, __val)
 #define SET_TX_DESC_RDG_ENABLE(__pdesc, __val)         \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 7, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 7, 1, __val)
 #define SET_TX_DESC_QUEUE_SEL(__pdesc, __val)          \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 8, 5, __val)
 #define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val)        \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 13, 1, __val)
 #define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val)       \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 14, 1, __val)
 #define SET_TX_DESC_PIFS(__pdesc, __val)               \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 15, 1, __val)
 #define SET_TX_DESC_RATE_ID(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 16, 4, __val)
 #define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val)        \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 20, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 20, 1, __val)
 #define SET_TX_DESC_EN_DESC_ID(__pdesc, __val)         \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 21, 1, __val)
 #define SET_TX_DESC_SEC_TYPE(__pdesc, __val)           \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 22, 2, __val)
 #define SET_TX_DESC_PKT_OFFSET(__pdesc, __val)         \
-       SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 8, __val)
+       SET_BITS_OFFSET_LE(__pdesc+4, 24, 8, __val)
 
 #define GET_TX_DESC_MACID(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc+4, 0, 5)
+       SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
 #define GET_TX_DESC_AGG_ENABLE(__pdesc)                        \
-       LE_BITS_TO_4BYTE(__pdesc+4, 5, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 5, 1)
 #define GET_TX_DESC_AGG_BREAK(__pdesc)                 \
-       LE_BITS_TO_4BYTE(__pdesc+4, 6, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 6, 1)
 #define GET_TX_DESC_RDG_ENABLE(__pdesc)                        \
-       LE_BITS_TO_4BYTE(__pdesc+4, 7, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 7, 1)
 #define GET_TX_DESC_QUEUE_SEL(__pdesc)                 \
-       LE_BITS_TO_4BYTE(__pdesc+4, 8, 5)
+       SHIFT_AND_MASK_LE(__pdesc+4, 8, 5)
 #define GET_TX_DESC_RDG_NAV_EXT(__pdesc)               \
-       LE_BITS_TO_4BYTE(__pdesc+4, 13, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 13, 1)
 #define GET_TX_DESC_LSIG_TXOP_EN(__pdesc)              \
-       LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
 #define GET_TX_DESC_PIFS(__pdesc)                      \
-       LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
 #define GET_TX_DESC_RATE_ID(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
+       SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
 #define GET_TX_DESC_NAV_USE_HDR(__pdesc)               \
-       LE_BITS_TO_4BYTE(__pdesc+4, 20, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 20, 1)
 #define GET_TX_DESC_EN_DESC_ID(__pdesc)                        \
-       LE_BITS_TO_4BYTE(__pdesc+4, 21, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 21, 1)
 #define GET_TX_DESC_SEC_TYPE(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+4, 22, 2)
+       SHIFT_AND_MASK_LE(__pdesc+4, 22, 2)
 #define GET_TX_DESC_PKT_OFFSET(__pdesc)                        \
-       LE_BITS_TO_4BYTE(__pdesc+4, 24, 8)
+       SHIFT_AND_MASK_LE(__pdesc+4, 24, 8)
 
 #define SET_TX_DESC_RTS_RC(__pdesc, __val)             \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 6, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 0, 6, __val)
 #define SET_TX_DESC_DATA_RC(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 6, 6, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 6, 6, __val)
 #define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val)         \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 14, 2, __val)
 #define SET_TX_DESC_MORE_FRAG(__pdesc, __val)          \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 17, 1, __val)
 #define SET_TX_DESC_RAW(__pdesc, __val)                        \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 18, 1, __val)
 #define SET_TX_DESC_CCX(__pdesc, __val)                        \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 19, 1, __val)
 #define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val)      \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 20, 3, __val)
 #define SET_TX_DESC_ANTSEL_A(__pdesc, __val)           \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 24, 1, __val)
 #define SET_TX_DESC_ANTSEL_B(__pdesc, __val)           \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 25, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 25, 1, __val)
 #define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val)         \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 26, 2, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 26, 2, __val)
 #define SET_TX_DESC_TX_ANTL(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 28, 2, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 28, 2, __val)
 #define SET_TX_DESC_TX_ANT_HT(__pdesc, __val)          \
-       SET_BITS_TO_LE_4BYTE(__pdesc+8, 30, 2, __val)
+       SET_BITS_OFFSET_LE(__pdesc+8, 30, 2, __val)
 
 #define GET_TX_DESC_RTS_RC(__pdesc)                    \
-       LE_BITS_TO_4BYTE(__pdesc+8, 0, 6)
+       SHIFT_AND_MASK_LE(__pdesc+8, 0, 6)
 #define GET_TX_DESC_DATA_RC(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+8, 6, 6)
+       SHIFT_AND_MASK_LE(__pdesc+8, 6, 6)
 #define GET_TX_DESC_BAR_RTY_TH(__pdesc)                        \
-       LE_BITS_TO_4BYTE(__pdesc+8, 14, 2)
+       SHIFT_AND_MASK_LE(__pdesc+8, 14, 2)
 #define GET_TX_DESC_MORE_FRAG(__pdesc)                 \
-       LE_BITS_TO_4BYTE(__pdesc+8, 17, 1)
+       SHIFT_AND_MASK_LE(__pdesc+8, 17, 1)
 #define GET_TX_DESC_RAW(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+8, 18, 1)
+       SHIFT_AND_MASK_LE(__pdesc+8, 18, 1)
 #define GET_TX_DESC_CCX(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+8, 19, 1)
+       SHIFT_AND_MASK_LE(__pdesc+8, 19, 1)
 #define GET_TX_DESC_AMPDU_DENSITY(__pdesc)             \
-       LE_BITS_TO_4BYTE(__pdesc+8, 20, 3)
+       SHIFT_AND_MASK_LE(__pdesc+8, 20, 3)
 #define GET_TX_DESC_ANTSEL_A(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+8, 24, 1)
+       SHIFT_AND_MASK_LE(__pdesc+8, 24, 1)
 #define GET_TX_DESC_ANTSEL_B(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+8, 25, 1)
+       SHIFT_AND_MASK_LE(__pdesc+8, 25, 1)
 #define GET_TX_DESC_TX_ANT_CCK(__pdesc)                        \
-       LE_BITS_TO_4BYTE(__pdesc+8, 26, 2)
+       SHIFT_AND_MASK_LE(__pdesc+8, 26, 2)
 #define GET_TX_DESC_TX_ANTL(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+8, 28, 2)
+       SHIFT_AND_MASK_LE(__pdesc+8, 28, 2)
 #define GET_TX_DESC_TX_ANT_HT(__pdesc)                 \
-       LE_BITS_TO_4BYTE(__pdesc+8, 30, 2)
+       SHIFT_AND_MASK_LE(__pdesc+8, 30, 2)
 
 #define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val)     \
-       SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 8, __val)
+       SET_BITS_OFFSET_LE(__pdesc+12, 0, 8, __val)
 #define SET_TX_DESC_TAIL_PAGE(__pdesc, __val)          \
-       SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 8, __val)
+       SET_BITS_OFFSET_LE(__pdesc+12, 8, 8, __val)
 #define SET_TX_DESC_SEQ(__pdesc, __val)                        \
-       SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 12, __val)
+       SET_BITS_OFFSET_LE(__pdesc+12, 16, 12, __val)
 #define SET_TX_DESC_PKT_ID(__pdesc, __val)             \
-       SET_BITS_TO_LE_4BYTE(__pdesc+12, 28, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+12, 28, 4, __val)
 
 #define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc)            \
-       LE_BITS_TO_4BYTE(__pdesc+12, 0, 8)
+       SHIFT_AND_MASK_LE(__pdesc+12, 0, 8)
 #define GET_TX_DESC_TAIL_PAGE(__pdesc)                 \
-       LE_BITS_TO_4BYTE(__pdesc+12, 8, 8)
+       SHIFT_AND_MASK_LE(__pdesc+12, 8, 8)
 #define GET_TX_DESC_SEQ(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+12, 16, 12)
+       SHIFT_AND_MASK_LE(__pdesc+12, 16, 12)
 #define GET_TX_DESC_PKT_ID(__pdesc)                    \
-       LE_BITS_TO_4BYTE(__pdesc+12, 28, 4)
+       SHIFT_AND_MASK_LE(__pdesc+12, 28, 4)
 
 #define SET_TX_DESC_RTS_RATE(__pdesc, __val)           \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 5, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 0, 5, __val)
 #define SET_TX_DESC_AP_DCFE(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 5, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 5, 1, __val)
 #define SET_TX_DESC_QOS(__pdesc, __val)                        \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 6, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 6, 1, __val)
 #define SET_TX_DESC_HWSEQ_EN(__pdesc, __val)           \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 7, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 7, 1, __val)
 #define SET_TX_DESC_USE_RATE(__pdesc, __val)           \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 8, 1, __val)
 #define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val)     \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 9, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 9, 1, __val)
 #define SET_TX_DESC_DISABLE_FB(__pdesc, __val)         \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 10, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 10, 1, __val)
 #define SET_TX_DESC_CTS2SELF(__pdesc, __val)           \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 11, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 11, 1, __val)
 #define SET_TX_DESC_RTS_ENABLE(__pdesc, __val)         \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 12, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 12, 1, __val)
 #define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val)      \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 13, 1, __val)
 #define SET_TX_DESC_PORT_ID(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 14, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 14, 1, __val)
 #define SET_TX_DESC_WAIT_DCTS(__pdesc, __val)          \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 18, 1, __val)
 #define SET_TX_DESC_CTS2AP_EN(__pdesc, __val)          \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 19, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 19, 1, __val)
 #define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val)     \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 20, 2, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 20, 2, __val)
 #define SET_TX_DESC_TX_STBC(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 22, 2, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 22, 2, __val)
 #define SET_TX_DESC_DATA_SHORT(__pdesc, __val)         \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 24, 1, __val)
 #define SET_TX_DESC_DATA_BW(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 25, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 25, 1, __val)
 #define SET_TX_DESC_RTS_SHORT(__pdesc, __val)          \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 26, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 26, 1, __val)
 #define SET_TX_DESC_RTS_BW(__pdesc, __val)             \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 27, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 27, 1, __val)
 #define SET_TX_DESC_RTS_SC(__pdesc, __val)             \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 28, 2, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 28, 2, __val)
 #define SET_TX_DESC_RTS_STBC(__pdesc, __val)           \
-       SET_BITS_TO_LE_4BYTE(__pdesc+16, 30, 2, __val)
+       SET_BITS_OFFSET_LE(__pdesc+16, 30, 2, __val)
 
 #define GET_TX_DESC_RTS_RATE(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+16, 0, 5)
+       SHIFT_AND_MASK_LE(__pdesc+16, 0, 5)
 #define GET_TX_DESC_AP_DCFE(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+16, 5, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 5, 1)
 #define GET_TX_DESC_QOS(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+16, 6, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 6, 1)
 #define GET_TX_DESC_HWSEQ_EN(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+16, 7, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 7, 1)
 #define GET_TX_DESC_USE_RATE(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+16, 8, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 8, 1)
 #define GET_TX_DESC_DISABLE_RTS_FB(__pdesc)            \
-       LE_BITS_TO_4BYTE(__pdesc+16, 9, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 9, 1)
 #define GET_TX_DESC_DISABLE_FB(__pdesc)                        \
-       LE_BITS_TO_4BYTE(__pdesc+16, 10, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 10, 1)
 #define GET_TX_DESC_CTS2SELF(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+16, 11, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 11, 1)
 #define GET_TX_DESC_RTS_ENABLE(__pdesc)                        \
-       LE_BITS_TO_4BYTE(__pdesc+16, 12, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 12, 1)
 #define GET_TX_DESC_HW_RTS_ENABLE(__pdesc)             \
-       LE_BITS_TO_4BYTE(__pdesc+16, 13, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 13, 1)
 #define GET_TX_DESC_PORT_ID(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+16, 14, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 14, 1)
 #define GET_TX_DESC_WAIT_DCTS(__pdesc)                 \
-       LE_BITS_TO_4BYTE(__pdesc+16, 18, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 18, 1)
 #define GET_TX_DESC_CTS2AP_EN(__pdesc)                 \
-       LE_BITS_TO_4BYTE(__pdesc+16, 19, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 19, 1)
 #define GET_TX_DESC_TX_SUB_CARRIER(__pdesc)            \
-       LE_BITS_TO_4BYTE(__pdesc+16, 20, 2)
+       SHIFT_AND_MASK_LE(__pdesc+16, 20, 2)
 #define GET_TX_DESC_TX_STBC(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+16, 22, 2)
+       SHIFT_AND_MASK_LE(__pdesc+16, 22, 2)
 #define GET_TX_DESC_DATA_SHORT(__pdesc)                        \
-       LE_BITS_TO_4BYTE(__pdesc+16, 24, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 24, 1)
 #define GET_TX_DESC_DATA_BW(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+16, 25, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 25, 1)
 #define GET_TX_DESC_RTS_SHORT(__pdesc)                 \
-       LE_BITS_TO_4BYTE(__pdesc+16, 26, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 26, 1)
 #define GET_TX_DESC_RTS_BW(__pdesc)                    \
-       LE_BITS_TO_4BYTE(__pdesc+16, 27, 1)
+       SHIFT_AND_MASK_LE(__pdesc+16, 27, 1)
 #define GET_TX_DESC_RTS_SC(__pdesc)                    \
-       LE_BITS_TO_4BYTE(__pdesc+16, 28, 2)
+       SHIFT_AND_MASK_LE(__pdesc+16, 28, 2)
 #define GET_TX_DESC_RTS_STBC(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+16, 30, 2)
+       SHIFT_AND_MASK_LE(__pdesc+16, 30, 2)
 
 #define SET_TX_DESC_TX_RATE(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 6, __val)
+       SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val)
 #define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val)       \
-       SET_BITS_TO_LE_4BYTE(__pdesc+20, 6, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val)
 #define SET_TX_DESC_CCX_TAG(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val)
 #define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
-       SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 5, __val)
+       SET_BITS_OFFSET_LE(__pdesc+20, 8, 5, __val)
 #define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val)  \
-       SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+20, 13, 4, __val)
 #define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
-       SET_BITS_TO_LE_4BYTE(__pdesc+20, 17, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+20, 17, 1, __val)
 #define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val)   \
-       SET_BITS_TO_LE_4BYTE(__pdesc+20, 18, 6, __val)
+       SET_BITS_OFFSET_LE(__pdesc+20, 18, 6, __val)
 #define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val)      \
-       SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 8, __val)
+       SET_BITS_OFFSET_LE(__pdesc+20, 24, 8, __val)
 
 #define GET_TX_DESC_TX_RATE(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+20, 0, 6)
+       SHIFT_AND_MASK_LE(__pdesc+20, 0, 6)
 #define GET_TX_DESC_DATA_SHORTGI(__pdesc)              \
-       LE_BITS_TO_4BYTE(__pdesc+20, 6, 1)
+       SHIFT_AND_MASK_LE(__pdesc+20, 6, 1)
 #define GET_TX_DESC_CCX_TAG(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+20, 7, 1)
+       SHIFT_AND_MASK_LE(__pdesc+20, 7, 1)
 #define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc)                \
-       LE_BITS_TO_4BYTE(__pdesc+20, 8, 5)
+       SHIFT_AND_MASK_LE(__pdesc+20, 8, 5)
 #define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc)         \
-       LE_BITS_TO_4BYTE(__pdesc+20, 13, 4)
+       SHIFT_AND_MASK_LE(__pdesc+20, 13, 4)
 #define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc)                \
-       LE_BITS_TO_4BYTE(__pdesc+20, 17, 1)
+       SHIFT_AND_MASK_LE(__pdesc+20, 17, 1)
 #define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc)          \
-       LE_BITS_TO_4BYTE(__pdesc+20, 18, 6)
+       SHIFT_AND_MASK_LE(__pdesc+20, 18, 6)
 #define GET_TX_DESC_USB_TXAGG_NUM(__pdesc)             \
-       LE_BITS_TO_4BYTE(__pdesc+20, 24, 8)
+       SHIFT_AND_MASK_LE(__pdesc+20, 24, 8)
 
 #define SET_TX_DESC_TXAGC_A(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 5, __val)
+       SET_BITS_OFFSET_LE(__pdesc+24, 0, 5, __val)
 #define SET_TX_DESC_TXAGC_B(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc+24, 5, 5, __val)
+       SET_BITS_OFFSET_LE(__pdesc+24, 5, 5, __val)
 #define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val)                \
-       SET_BITS_TO_LE_4BYTE(__pdesc+24, 10, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc+24, 10, 1, __val)
 #define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val)                \
-       SET_BITS_TO_LE_4BYTE(__pdesc+24, 11, 5, __val)
+       SET_BITS_OFFSET_LE(__pdesc+24, 11, 5, __val)
 #define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val)      \
-       SET_BITS_TO_LE_4BYTE(__pdesc+24, 16, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+24, 16, 4, __val)
 #define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val)      \
-       SET_BITS_TO_LE_4BYTE(__pdesc+24, 20, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+24, 20, 4, __val)
 #define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val)      \
-       SET_BITS_TO_LE_4BYTE(__pdesc+24, 24, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+24, 24, 4, __val)
 #define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val)   \
-       SET_BITS_TO_LE_4BYTE(__pdesc+24, 28, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+24, 28, 4, __val)
 
 #define GET_TX_DESC_TXAGC_A(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+24, 0, 5)
+       SHIFT_AND_MASK_LE(__pdesc+24, 0, 5)
 #define GET_TX_DESC_TXAGC_B(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc+24, 5, 5)
+       SHIFT_AND_MASK_LE(__pdesc+24, 5, 5)
 #define GET_TX_DESC_USE_MAX_LEN(__pdesc)               \
-       LE_BITS_TO_4BYTE(__pdesc+24, 10, 1)
+       SHIFT_AND_MASK_LE(__pdesc+24, 10, 1)
 #define GET_TX_DESC_MAX_AGG_NUM(__pdesc)               \
-       LE_BITS_TO_4BYTE(__pdesc+24, 11, 5)
+       SHIFT_AND_MASK_LE(__pdesc+24, 11, 5)
 #define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc)             \
-       LE_BITS_TO_4BYTE(__pdesc+24, 16, 4)
+       SHIFT_AND_MASK_LE(__pdesc+24, 16, 4)
 #define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc)             \
-       LE_BITS_TO_4BYTE(__pdesc+24, 20, 4)
+       SHIFT_AND_MASK_LE(__pdesc+24, 20, 4)
 #define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc)             \
-       LE_BITS_TO_4BYTE(__pdesc+24, 24, 4)
+       SHIFT_AND_MASK_LE(__pdesc+24, 24, 4)
 #define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc)          \
-       LE_BITS_TO_4BYTE(__pdesc+24, 28, 4)
+       SHIFT_AND_MASK_LE(__pdesc+24, 28, 4)
 
 #define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val)     \
-       SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val)
+       SET_BITS_OFFSET_LE(__pdesc+28, 0, 16, __val)
 #define SET_TX_DESC_MCSG4_MAX_LEN(__pdesc, __val)      \
-       SET_BITS_TO_LE_4BYTE(__pdesc+28, 16, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+28, 16, 4, __val)
 #define SET_TX_DESC_MCSG5_MAX_LEN(__pdesc, __val)      \
-       SET_BITS_TO_LE_4BYTE(__pdesc+28, 20, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+28, 20, 4, __val)
 #define SET_TX_DESC_MCSG6_MAX_LEN(__pdesc, __val)      \
-       SET_BITS_TO_LE_4BYTE(__pdesc+28, 24, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+28, 24, 4, __val)
 #define SET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc, __val)  \
-       SET_BITS_TO_LE_4BYTE(__pdesc+28, 28, 4, __val)
+       SET_BITS_OFFSET_LE(__pdesc+28, 28, 4, __val)
 
 #define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc)            \
-       LE_BITS_TO_4BYTE(__pdesc+28, 0, 16)
+       SHIFT_AND_MASK_LE(__pdesc+28, 0, 16)
 #define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc)             \
-       LE_BITS_TO_4BYTE(__pdesc+28, 16, 4)
+       SHIFT_AND_MASK_LE(__pdesc+28, 16, 4)
 #define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc)             \
-       LE_BITS_TO_4BYTE(__pdesc+28, 20, 4)
+       SHIFT_AND_MASK_LE(__pdesc+28, 20, 4)
 #define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc)             \
-       LE_BITS_TO_4BYTE(__pdesc+28, 24, 4)
+       SHIFT_AND_MASK_LE(__pdesc+28, 24, 4)
 #define GET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc)         \
-       LE_BITS_TO_4BYTE(__pdesc+28, 28, 4)
+       SHIFT_AND_MASK_LE(__pdesc+28, 28, 4)
 
 #define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val)  \
-       SET_BITS_TO_LE_4BYTE(__pdesc+32, 0, 32, __val)
+       SET_BITS_OFFSET_LE(__pdesc+32, 0, 32, __val)
 #define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \
-       SET_BITS_TO_LE_4BYTE(__pdesc+36, 0, 32, __val)
+       SET_BITS_OFFSET_LE(__pdesc+36, 0, 32, __val)
 
 #define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc)         \
-       LE_BITS_TO_4BYTE(__pdesc+32, 0, 32)
+       SHIFT_AND_MASK_LE(__pdesc+32, 0, 32)
 #define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc)       \
-       LE_BITS_TO_4BYTE(__pdesc+36, 0, 32)
+       SHIFT_AND_MASK_LE(__pdesc+36, 0, 32)
 
 #define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val)  \
-       SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val)
+       SET_BITS_OFFSET_LE(__pdesc+40, 0, 32, __val)
 #define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \
-       SET_BITS_TO_LE_4BYTE(__pdesc+44, 0, 32, __val)
+       SET_BITS_OFFSET_LE(__pdesc+44, 0, 32, __val)
 
 #define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc)         \
-       LE_BITS_TO_4BYTE(__pdesc+40, 0, 32)
+       SHIFT_AND_MASK_LE(__pdesc+40, 0, 32)
 #define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc)       \
-       LE_BITS_TO_4BYTE(__pdesc+44, 0, 32)
+       SHIFT_AND_MASK_LE(__pdesc+44, 0, 32)
 
 #define GET_RX_DESC_PKT_LEN(__pdesc)                   \
-       LE_BITS_TO_4BYTE(__pdesc, 0, 14)
+       SHIFT_AND_MASK_LE(__pdesc, 0, 14)
 #define GET_RX_DESC_CRC32(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc, 14, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 14, 1)
 #define GET_RX_DESC_ICV(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc, 15, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 15, 1)
 #define GET_RX_DESC_DRV_INFO_SIZE(__pdesc)             \
-       LE_BITS_TO_4BYTE(__pdesc, 16, 4)
+       SHIFT_AND_MASK_LE(__pdesc, 16, 4)
 #define GET_RX_DESC_SECURITY(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc, 20, 3)
+       SHIFT_AND_MASK_LE(__pdesc, 20, 3)
 #define GET_RX_DESC_QOS(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc, 23, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 23, 1)
 #define GET_RX_DESC_SHIFT(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc, 24, 2)
+       SHIFT_AND_MASK_LE(__pdesc, 24, 2)
 #define GET_RX_DESC_PHYST(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc, 26, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 26, 1)
 #define GET_RX_DESC_SWDEC(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc, 27, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 27, 1)
 #define GET_RX_DESC_LS(__pdesc)                                \
-       LE_BITS_TO_4BYTE(__pdesc, 28, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 28, 1)
 #define GET_RX_DESC_FS(__pdesc)                                \
-       LE_BITS_TO_4BYTE(__pdesc, 29, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 29, 1)
 #define GET_RX_DESC_EOR(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc, 30, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 30, 1)
 #define GET_RX_DESC_OWN(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc, 31, 1)
+       SHIFT_AND_MASK_LE(__pdesc, 31, 1)
 
 #define SET_RX_DESC_PKT_LEN(__pdesc, __val)            \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 0, 14, __val)
 #define SET_RX_DESC_EOR(__pdesc, __val)                        \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
 #define SET_RX_DESC_OWN(__pdesc, __val)                        \
-       SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
+       SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
 
 #define GET_RX_DESC_MACID(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc+4, 0, 5)
+       SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
 #define GET_RX_DESC_TID(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+4, 5, 4)
+       SHIFT_AND_MASK_LE(__pdesc+4, 5, 4)
 #define GET_RX_DESC_HWRSVD(__pdesc)                    \
-       LE_BITS_TO_4BYTE(__pdesc+4, 9, 5)
+       SHIFT_AND_MASK_LE(__pdesc+4, 9, 5)
 #define GET_RX_DESC_PAGGR(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
 #define GET_RX_DESC_FAGGR(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
 #define GET_RX_DESC_A1_FIT(__pdesc)                    \
-       LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
+       SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
 #define GET_RX_DESC_A2_FIT(__pdesc)                    \
-       LE_BITS_TO_4BYTE(__pdesc+4, 20, 4)
+       SHIFT_AND_MASK_LE(__pdesc+4, 20, 4)
 #define GET_RX_DESC_PAM(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+4, 24, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 24, 1)
 #define GET_RX_DESC_PWR(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+4, 25, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 25, 1)
 #define GET_RX_DESC_MD(__pdesc)                                \
-       LE_BITS_TO_4BYTE(__pdesc+4, 26, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 26, 1)
 #define GET_RX_DESC_MF(__pdesc)                                \
-       LE_BITS_TO_4BYTE(__pdesc+4, 27, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 27, 1)
 #define GET_RX_DESC_TYPE(__pdesc)                      \
-       LE_BITS_TO_4BYTE(__pdesc+4, 28, 2)
+       SHIFT_AND_MASK_LE(__pdesc+4, 28, 2)
 #define GET_RX_DESC_MC(__pdesc)                                \
-       LE_BITS_TO_4BYTE(__pdesc+4, 30, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 30, 1)
 #define GET_RX_DESC_BC(__pdesc)                                \
-       LE_BITS_TO_4BYTE(__pdesc+4, 31, 1)
+       SHIFT_AND_MASK_LE(__pdesc+4, 31, 1)
 #define GET_RX_DESC_SEQ(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+8, 0, 12)
+       SHIFT_AND_MASK_LE(__pdesc+8, 0, 12)
 #define GET_RX_DESC_FRAG(__pdesc)                      \
-       LE_BITS_TO_4BYTE(__pdesc+8, 12, 4)
+       SHIFT_AND_MASK_LE(__pdesc+8, 12, 4)
 #define GET_RX_DESC_NEXT_PKT_LEN(__pdesc)              \
-       LE_BITS_TO_4BYTE(__pdesc+8, 16, 14)
+       SHIFT_AND_MASK_LE(__pdesc+8, 16, 14)
 #define GET_RX_DESC_NEXT_IND(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+8, 30, 1)
+       SHIFT_AND_MASK_LE(__pdesc+8, 30, 1)
 #define GET_RX_DESC_RSVD(__pdesc)                      \
-       LE_BITS_TO_4BYTE(__pdesc+8, 31, 1)
+       SHIFT_AND_MASK_LE(__pdesc+8, 31, 1)
 
 #define GET_RX_DESC_RXMCS(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc+12, 0, 6)
+       SHIFT_AND_MASK_LE(__pdesc+12, 0, 6)
 #define GET_RX_DESC_RXHT(__pdesc)                      \
-       LE_BITS_TO_4BYTE(__pdesc+12, 6, 1)
+       SHIFT_AND_MASK_LE(__pdesc+12, 6, 1)
 #define GET_RX_DESC_SPLCP(__pdesc)                     \
-       LE_BITS_TO_4BYTE(__pdesc+12, 8, 1)
+       SHIFT_AND_MASK_LE(__pdesc+12, 8, 1)
 #define GET_RX_DESC_BW(__pdesc)                                \
-       LE_BITS_TO_4BYTE(__pdesc+12, 9, 1)
+       SHIFT_AND_MASK_LE(__pdesc+12, 9, 1)
 #define GET_RX_DESC_HTC(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+12, 10, 1)
+       SHIFT_AND_MASK_LE(__pdesc+12, 10, 1)
 #define GET_RX_DESC_HWPC_ERR(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+12, 14, 1)
+       SHIFT_AND_MASK_LE(__pdesc+12, 14, 1)
 #define GET_RX_DESC_HWPC_IND(__pdesc)                  \
-       LE_BITS_TO_4BYTE(__pdesc+12, 15, 1)
+       SHIFT_AND_MASK_LE(__pdesc+12, 15, 1)
 #define GET_RX_DESC_IV0(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+12, 16, 16)
+       SHIFT_AND_MASK_LE(__pdesc+12, 16, 16)
 
 #define GET_RX_DESC_IV1(__pdesc)                       \
-       LE_BITS_TO_4BYTE(__pdesc+16, 0, 32)
+       SHIFT_AND_MASK_LE(__pdesc+16, 0, 32)
 #define GET_RX_DESC_TSFL(__pdesc)                      \
-       LE_BITS_TO_4BYTE(__pdesc+20, 0, 32)
+       SHIFT_AND_MASK_LE(__pdesc+20, 0, 32)
 
 #define GET_RX_DESC_BUFF_ADDR(__pdesc)                 \
-       LE_BITS_TO_4BYTE(__pdesc+24, 0, 32)
+       SHIFT_AND_MASK_LE(__pdesc+24, 0, 32)
 #define GET_RX_DESC_BUFF_ADDR64(__pdesc)               \
-       LE_BITS_TO_4BYTE(__pdesc+28, 0, 32)
+       SHIFT_AND_MASK_LE(__pdesc+28, 0, 32)
 
 #define SET_RX_DESC_BUFF_ADDR(__pdesc, __val)          \
-       SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val)
+       SET_BITS_OFFSET_LE(__pdesc+24, 0, 32, __val)
 #define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val)        \
-       SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val)
+       SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val)
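Note that the hunks above are a pure rename of the little-endian descriptor field accessors: LE_BITS_TO_4BYTE becomes SHIFT_AND_MASK_LE and SET_BITS_TO_LE_4BYTE becomes SET_BITS_OFFSET_LE, while the dword offset, start-bit and bit-length arguments are unchanged. As a rough, self-contained sketch of what a helper pair of this shape does (the names desc_get_bits_le/desc_set_bits_le are made up for illustration, and a real kernel implementation would convert byte order with le32_to_cpu/cpu_to_le32 rather than assuming a little-endian host as this sketch does):

#include <stdint.h>
#include <string.h>

/* Read the field of 'len' bits starting at bit 'start' from the 32-bit
 * little-endian word stored at descriptor offset 'p'. */
static uint32_t desc_get_bits_le(const uint8_t *p, unsigned int start,
                                 unsigned int len)
{
        uint32_t dword;
        uint32_t mask = (len >= 32) ? 0xffffffffu : ((1u << len) - 1u);

        memcpy(&dword, p, sizeof(dword));  /* sketch: little-endian host assumed */
        return (dword >> start) & mask;
}

/* Read-modify-write the same field with a new value. */
static void desc_set_bits_le(uint8_t *p, unsigned int start,
                             unsigned int len, uint32_t val)
{
        uint32_t dword;
        uint32_t mask = (len >= 32) ? 0xffffffffu : ((1u << len) - 1u);

        memcpy(&dword, p, sizeof(dword));
        dword &= ~(mask << start);
        dword |= (val & mask) << start;
        memcpy(p, &dword, sizeof(dword));
}

In that spirit, GET_TX_DESC_SEQ(__pdesc) above corresponds to reading 12 bits starting at bit 16 of the dword at offset 12, i.e. desc_get_bits_le(pdesc + 12, 16, 12).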
 
 #define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size)      \
 do {                                                   \
@@ -711,4 +735,6 @@ void rtl92ce_tx_polling(struct ieee80211_hw *hw, unsigned int hw_queue);
 void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
                             bool b_firstseg, bool b_lastseg,
                             struct sk_buff *skb);
+bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
+
 #endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile b/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile
new file mode 100644 (file)
index 0000000..ad2de6b
--- /dev/null
@@ -0,0 +1,14 @@
+rtl8192cu-objs :=              \
+               dm.o            \
+               hw.o            \
+               led.o           \
+               mac.o           \
+               phy.o           \
+               rf.o            \
+               sw.o            \
+               table.o         \
+               trx.o
+
+obj-$(CONFIG_RTL8192CU) += rtl8192cu.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
new file mode 100644 (file)
index 0000000..c54940e
--- /dev/null
@@ -0,0 +1,62 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../rtl8192ce/def.h"
+
+/*-------------------------------------------------------------------------
+ *     Chip specific
+ *-------------------------------------------------------------------------*/
+#define CHIP_8723                      BIT(2) /* RTL8723 With BT feature */
+#define CHIP_8723_DRV_REV              BIT(3) /* RTL8723 Driver Revised */
+#define NORMAL_CHIP                    BIT(4)
+#define CHIP_VENDOR_UMC                        BIT(5)
+#define CHIP_VENDOR_UMC_B_CUT          BIT(6)
+
+#define IS_NORMAL_CHIP(version)                \
+       (((version) & NORMAL_CHIP) ? true : false)
+
+#define IS_8723_SERIES(version)                \
+       (((version) & CHIP_8723) ? true : false)
+
+#define IS_92C_1T2R(version)           \
+       (((version) & CHIP_92C) && ((version) & CHIP_92C_1T2R))
+
+#define IS_VENDOR_UMC(version)         \
+       (((version) & CHIP_VENDOR_UMC) ? true : false)
+
+#define IS_VENDOR_UMC_A_CUT(version)   \
+       (((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6) | BIT(7))) ? \
+       false : true) : false)
+
+#define IS_VENDOR_8723_A_CUT(version)  \
+       (((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6))) ? \
+       false : true) : false)
+
+#define CHIP_BONDING_92C_1T2R  0x1
+#define CHIP_BONDING_IDENTIFIER(_value)        (((_value) >> 22) & 0x3)
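The version-bit macros above all test single bits of one chip-version word. A minimal stand-alone illustration of how they combine, using a made-up sample version value (BIT() here is the usual single-bit helper):

#include <stdio.h>

#define BIT(x)                 (1u << (x))
#define CHIP_8723              BIT(2)
#define NORMAL_CHIP            BIT(4)
#define CHIP_VENDOR_UMC        BIT(5)

#define IS_NORMAL_CHIP(v)      (((v) & NORMAL_CHIP) ? 1 : 0)
#define IS_VENDOR_UMC(v)       (((v) & CHIP_VENDOR_UMC) ? 1 : 0)
/* A-cut: UMC vendor bit set while neither cut bit 6 nor bit 7 is set. */
#define IS_VENDOR_UMC_A_CUT(v) (IS_VENDOR_UMC(v) && !((v) & (BIT(6) | BIT(7))))

int main(void)
{
        unsigned int version = NORMAL_CHIP | CHIP_VENDOR_UMC;  /* made-up sample */

        printf("normal=%d umc=%d umc_a_cut=%d 8723=%d\n",
               IS_NORMAL_CHIP(version), IS_VENDOR_UMC(version),
               IS_VENDOR_UMC_A_CUT(version),
               (version & CHIP_8723) ? 1 : 0);
        return 0;
}

For the sample value this prints normal=1 umc=1 umc_a_cut=1 8723=0.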
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
new file mode 100644 (file)
index 0000000..f311bae
--- /dev/null
@@ -0,0 +1,113 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../base.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+
+void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       long undecorated_smoothed_pwdb;
+
+       if (!rtlpriv->dm.dynamic_txpower_enable)
+               return;
+
+       if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
+               rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+               return;
+       }
+
+       if ((mac->link_state < MAC80211_LINKED) &&
+           (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
+                        ("Not connected to any\n"));
+
+               rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+
+               rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
+               return;
+       }
+
+       if (mac->link_state >= MAC80211_LINKED) {
+               if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+                       undecorated_smoothed_pwdb =
+                           rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+                       RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+                                ("AP Client PWDB = 0x%lx\n",
+                                 undecorated_smoothed_pwdb));
+               } else {
+                       undecorated_smoothed_pwdb =
+                           rtlpriv->dm.undecorated_smoothed_pwdb;
+                       RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+                                ("STA Default Port PWDB = 0x%lx\n",
+                                 undecorated_smoothed_pwdb));
+               }
+       } else {
+               undecorated_smoothed_pwdb =
+                   rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+                        ("AP Ext Port PWDB = 0x%lx\n",
+                         undecorated_smoothed_pwdb));
+       }
+
+       if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
+               rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+                        ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n"));
+       } else if ((undecorated_smoothed_pwdb <
+                   (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
+                  (undecorated_smoothed_pwdb >=
+                   TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
+
+               rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+                        ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n"));
+       } else if (undecorated_smoothed_pwdb <
+                  (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
+               rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+                        ("TXHIGHPWRLEVEL_NORMAL\n"));
+       }
+
+       if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+                        ("PHY_SetTxPowerLevel8192S() Channel = %d\n",
+                         rtlphy->current_channel));
+               rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
+       }
+
+       rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
new file mode 100644 (file)
index 0000000..7f966c6
--- /dev/null
@@ -0,0 +1,32 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../rtl8192ce/dm.h"
+
+void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
new file mode 100644 (file)
index 0000000..9444e76
--- /dev/null
@@ -0,0 +1,2504 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../efuse.h"
+#include "../base.h"
+#include "../cam.h"
+#include "../ps.h"
+#include "../usb.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "mac.h"
+#include "dm.h"
+#include "hw.h"
+#include "trx.h"
+#include "led.h"
+#include "table.h"
+
+static void _rtl92cu_phy_param_tab_init(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
+
+       rtlphy->hwparam_tables[MAC_REG].length = RTL8192CUMAC_2T_ARRAYLENGTH;
+       rtlphy->hwparam_tables[MAC_REG].pdata = RTL8192CUMAC_2T_ARRAY;
+       if (IS_HIGHT_PA(rtlefuse->board_type)) {
+               rtlphy->hwparam_tables[PHY_REG_PG].length =
+                       RTL8192CUPHY_REG_Array_PG_HPLength;
+               rtlphy->hwparam_tables[PHY_REG_PG].pdata =
+                       RTL8192CUPHY_REG_Array_PG_HP;
+       } else {
+               rtlphy->hwparam_tables[PHY_REG_PG].length =
+                       RTL8192CUPHY_REG_ARRAY_PGLENGTH;
+               rtlphy->hwparam_tables[PHY_REG_PG].pdata =
+                       RTL8192CUPHY_REG_ARRAY_PG;
+       }
+       /* 2T */
+       rtlphy->hwparam_tables[PHY_REG_2T].length =
+                       RTL8192CUPHY_REG_2TARRAY_LENGTH;
+       rtlphy->hwparam_tables[PHY_REG_2T].pdata =
+                       RTL8192CUPHY_REG_2TARRAY;
+       rtlphy->hwparam_tables[RADIOA_2T].length =
+                       RTL8192CURADIOA_2TARRAYLENGTH;
+       rtlphy->hwparam_tables[RADIOA_2T].pdata =
+                       RTL8192CURADIOA_2TARRAY;
+       rtlphy->hwparam_tables[RADIOB_2T].length =
+                       RTL8192CURADIOB_2TARRAYLENGTH;
+       rtlphy->hwparam_tables[RADIOB_2T].pdata =
+                       RTL8192CU_RADIOB_2TARRAY;
+       rtlphy->hwparam_tables[AGCTAB_2T].length =
+                       RTL8192CUAGCTAB_2TARRAYLENGTH;
+       rtlphy->hwparam_tables[AGCTAB_2T].pdata =
+                       RTL8192CUAGCTAB_2TARRAY;
+       /* 1T */
+       if (IS_HIGHT_PA(rtlefuse->board_type)) {
+               rtlphy->hwparam_tables[PHY_REG_1T].length =
+                       RTL8192CUPHY_REG_1T_HPArrayLength;
+               rtlphy->hwparam_tables[PHY_REG_1T].pdata =
+                       RTL8192CUPHY_REG_1T_HPArray;
+               rtlphy->hwparam_tables[RADIOA_1T].length =
+                       RTL8192CURadioA_1T_HPArrayLength;
+               rtlphy->hwparam_tables[RADIOA_1T].pdata =
+                       RTL8192CURadioA_1T_HPArray;
+               rtlphy->hwparam_tables[RADIOB_1T].length =
+                       RTL8192CURADIOB_1TARRAYLENGTH;
+               rtlphy->hwparam_tables[RADIOB_1T].pdata =
+                       RTL8192CU_RADIOB_1TARRAY;
+               rtlphy->hwparam_tables[AGCTAB_1T].length =
+                       RTL8192CUAGCTAB_1T_HPArrayLength;
+               rtlphy->hwparam_tables[AGCTAB_1T].pdata =
+                       Rtl8192CUAGCTAB_1T_HPArray;
+       } else {
+               rtlphy->hwparam_tables[PHY_REG_1T].length =
+                        RTL8192CUPHY_REG_1TARRAY_LENGTH;
+               rtlphy->hwparam_tables[PHY_REG_1T].pdata =
+                       RTL8192CUPHY_REG_1TARRAY;
+               rtlphy->hwparam_tables[RADIOA_1T].length =
+                       RTL8192CURADIOA_1TARRAYLENGTH;
+               rtlphy->hwparam_tables[RADIOA_1T].pdata =
+                       RTL8192CU_RADIOA_1TARRAY;
+               rtlphy->hwparam_tables[RADIOB_1T].length =
+                       RTL8192CURADIOB_1TARRAYLENGTH;
+               rtlphy->hwparam_tables[RADIOB_1T].pdata =
+                       RTL8192CU_RADIOB_1TARRAY;
+               rtlphy->hwparam_tables[AGCTAB_1T].length =
+                       RTL8192CUAGCTAB_1TARRAYLENGTH;
+               rtlphy->hwparam_tables[AGCTAB_1T].pdata =
+                       RTL8192CUAGCTAB_1TARRAY;
+       }
+}
+
+static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+                                                bool autoload_fail,
+                                                u8 *hwinfo)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u8 rf_path, index, tempval;
+       u16 i;
+
+       for (rf_path = 0; rf_path < 2; rf_path++) {
+               for (i = 0; i < 3; i++) {
+                       if (!autoload_fail) {
+                               rtlefuse->
+                                   eeprom_chnlarea_txpwr_cck[rf_path][i] =
+                                   hwinfo[EEPROM_TXPOWERCCK + rf_path * 3 + i];
+                               rtlefuse->
+                                   eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] =
+                                   hwinfo[EEPROM_TXPOWERHT40_1S + rf_path * 3 +
+                                          i];
+                       } else {
+                               rtlefuse->
+                                   eeprom_chnlarea_txpwr_cck[rf_path][i] =
+                                   EEPROM_DEFAULT_TXPOWERLEVEL;
+                               rtlefuse->
+                                   eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] =
+                                   EEPROM_DEFAULT_TXPOWERLEVEL;
+                       }
+               }
+       }
+       for (i = 0; i < 3; i++) {
+               if (!autoload_fail)
+                       tempval = hwinfo[EEPROM_TXPOWERHT40_2SDIFF + i];
+               else
+                       tempval = EEPROM_DEFAULT_HT40_2SDIFF;
+               rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_A][i] =
+                   (tempval & 0xf);
+               rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_B][i] =
+                   ((tempval & 0xf0) >> 4);
+       }
+       for (rf_path = 0; rf_path < 2; rf_path++)
+               for (i = 0; i < 3; i++)
+                       RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+                               ("RF(%d) EEPROM CCK Area(%d) = 0x%x\n", rf_path,
+                                i, rtlefuse->
+                                eeprom_chnlarea_txpwr_cck[rf_path][i]));
+       for (rf_path = 0; rf_path < 2; rf_path++)
+               for (i = 0; i < 3; i++)
+                       RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+                               ("RF(%d) EEPROM HT40 1S Area(%d) = 0x%x\n",
+                                rf_path, i,
+                                rtlefuse->
+                                eeprom_chnlarea_txpwr_ht40_1s[rf_path][i]));
+       for (rf_path = 0; rf_path < 2; rf_path++)
+               for (i = 0; i < 3; i++)
+                       RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+                               ("RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
+                                rf_path, i,
+                                rtlefuse->
+                                eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
+                                [i]));
+       for (rf_path = 0; rf_path < 2; rf_path++) {
+               for (i = 0; i < 14; i++) {
+                       index = _rtl92c_get_chnl_group((u8) i);
+                       rtlefuse->txpwrlevel_cck[rf_path][i] =
+                           rtlefuse->eeprom_chnlarea_txpwr_cck[rf_path][index];
+                       rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
+                           rtlefuse->
+                           eeprom_chnlarea_txpwr_ht40_1s[rf_path][index];
+                       if ((rtlefuse->
+                            eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] -
+                            rtlefuse->
+                            eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][index])
+                           > 0) {
+                               rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
+                                   rtlefuse->
+                                   eeprom_chnlarea_txpwr_ht40_1s[rf_path]
+                                   [index] - rtlefuse->
+                                   eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
+                                   [index];
+                       } else {
+                               rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0;
+                       }
+               }
+               for (i = 0; i < 14; i++) {
+                       RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+                               ("RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = "
+                                "[0x%x / 0x%x / 0x%x]\n", rf_path, i,
+                                rtlefuse->txpwrlevel_cck[rf_path][i],
+                                rtlefuse->txpwrlevel_ht40_1s[rf_path][i],
+                                rtlefuse->txpwrlevel_ht40_2s[rf_path][i]));
+               }
+       }
+       for (i = 0; i < 3; i++) {
+               if (!autoload_fail) {
+                       rtlefuse->eeprom_pwrlimit_ht40[i] =
+                           hwinfo[EEPROM_TXPWR_GROUP + i];
+                       rtlefuse->eeprom_pwrlimit_ht20[i] =
+                           hwinfo[EEPROM_TXPWR_GROUP + 3 + i];
+               } else {
+                       rtlefuse->eeprom_pwrlimit_ht40[i] = 0;
+                       rtlefuse->eeprom_pwrlimit_ht20[i] = 0;
+               }
+       }
+       for (rf_path = 0; rf_path < 2; rf_path++) {
+               for (i = 0; i < 14; i++) {
+                       index = _rtl92c_get_chnl_group((u8) i);
+                       if (rf_path == RF90_PATH_A) {
+                               rtlefuse->pwrgroup_ht20[rf_path][i] =
+                                   (rtlefuse->eeprom_pwrlimit_ht20[index]
+                                    & 0xf);
+                               rtlefuse->pwrgroup_ht40[rf_path][i] =
+                                   (rtlefuse->eeprom_pwrlimit_ht40[index]
+                                    & 0xf);
+                       } else if (rf_path == RF90_PATH_B) {
+                               rtlefuse->pwrgroup_ht20[rf_path][i] =
+                                   ((rtlefuse->eeprom_pwrlimit_ht20[index]
+                                     & 0xf0) >> 4);
+                               rtlefuse->pwrgroup_ht40[rf_path][i] =
+                                   ((rtlefuse->eeprom_pwrlimit_ht40[index]
+                                     & 0xf0) >> 4);
+                       }
+                       RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+                               ("RF-%d pwrgroup_ht20[%d] = 0x%x\n",
+                                rf_path, i,
+                                rtlefuse->pwrgroup_ht20[rf_path][i]));
+                       RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+                               ("RF-%d pwrgroup_ht40[%d] = 0x%x\n",
+                                rf_path, i,
+                                rtlefuse->pwrgroup_ht40[rf_path][i]));
+               }
+       }
+       for (i = 0; i < 14; i++) {
+               index = _rtl92c_get_chnl_group((u8) i);
+               if (!autoload_fail)
+                       tempval = hwinfo[EEPROM_TXPOWERHT20DIFF + index];
+               else
+                       tempval = EEPROM_DEFAULT_HT20_DIFF;
+               rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] = (tempval & 0xF);
+               rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] =
+                   ((tempval >> 4) & 0xF);
+               if (rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] & BIT(3))
+                       rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] |= 0xF0;
+               if (rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] & BIT(3))
+                       rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] |= 0xF0;
+               index = _rtl92c_get_chnl_group((u8) i);
+               if (!autoload_fail)
+                       tempval = hwinfo[EEPROM_TXPOWER_OFDMDIFF + index];
+               else
+                       tempval = EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF;
+               rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i] = (tempval & 0xF);
+               rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i] =
+                   ((tempval >> 4) & 0xF);
+       }
+       rtlefuse->legacy_ht_txpowerdiff =
+           rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][7];
+       for (i = 0; i < 14; i++)
+               RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+                       ("RF-A Ht20 to HT40 Diff[%d] = 0x%x\n", i,
+                        rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]));
+       for (i = 0; i < 14; i++)
+               RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+                       ("RF-A Legacy to Ht40 Diff[%d] = 0x%x\n", i,
+                        rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]));
+       for (i = 0; i < 14; i++)
+               RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+                       ("RF-B Ht20 to HT40 Diff[%d] = 0x%x\n", i,
+                        rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]));
+       for (i = 0; i < 14; i++)
+               RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+                       ("RF-B Legacy to HT40 Diff[%d] = 0x%x\n", i,
+                        rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]));
+       if (!autoload_fail)
+               rtlefuse->eeprom_regulatory = (hwinfo[RF_OPTION1] & 0x7);
+       else
+               rtlefuse->eeprom_regulatory = 0;
+       RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+               ("eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory));
+       if (!autoload_fail) {
+               rtlefuse->eeprom_tssi[RF90_PATH_A] = hwinfo[EEPROM_TSSI_A];
+               rtlefuse->eeprom_tssi[RF90_PATH_B] = hwinfo[EEPROM_TSSI_B];
+       } else {
+               rtlefuse->eeprom_tssi[RF90_PATH_A] = EEPROM_DEFAULT_TSSI;
+               rtlefuse->eeprom_tssi[RF90_PATH_B] = EEPROM_DEFAULT_TSSI;
+       }
+       RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+               ("TSSI_A = 0x%x, TSSI_B = 0x%x\n",
+                rtlefuse->eeprom_tssi[RF90_PATH_A],
+                rtlefuse->eeprom_tssi[RF90_PATH_B]));
+       if (!autoload_fail)
+               tempval = hwinfo[EEPROM_THERMAL_METER];
+       else
+               tempval = EEPROM_DEFAULT_THERMALMETER;
+       rtlefuse->eeprom_thermalmeter = (tempval & 0x1f);
+       if (rtlefuse->eeprom_thermalmeter < 0x06 ||
+           rtlefuse->eeprom_thermalmeter > 0x1c)
+               rtlefuse->eeprom_thermalmeter = 0x12;
+       if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail)
+               rtlefuse->apk_thermalmeterignore = true;
+       rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
+       RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+               ("thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter));
+}
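The routine above repeats one unpacking idiom many times: each EEPROM byte packs a 4-bit value for RF path A in its low nibble and for RF path B in its high nibble, and the HT20 diff fields are signed 4-bit quantities whose sign bit (BIT(3)) is extended by the "|= 0xF0" step. A stand-alone sketch of just that idiom (the helper names and the sample byte are made up):

#include <stdint.h>
#include <stdio.h>

/* One EEPROM byte carries path A in the low nibble, path B in the high nibble. */
static void split_nibbles(uint8_t raw, uint8_t *path_a, uint8_t *path_b)
{
        *path_a = raw & 0x0f;
        *path_b = (raw >> 4) & 0x0f;
}

/* Same effect as the "if (val & BIT(3)) val |= 0xF0;" idiom in the driver. */
static int8_t sign_extend4(uint8_t nibble)
{
        return (nibble & 0x08) ? (int8_t)(nibble | 0xf0) : (int8_t)nibble;
}

int main(void)
{
        uint8_t a, b;

        split_nibbles(0x9c, &a, &b);    /* made-up EEPROM byte */
        printf("A=%d B=%d\n", sign_extend4(a), sign_extend4(b));  /* prints A=-4 B=-7 */
        return 0;
}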
+
+static void _rtl92cu_read_board_type(struct ieee80211_hw *hw, u8 *contents)
+{
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u8 boardType;
+
+       if (IS_NORMAL_CHIP(rtlhal->version)) {
+               boardType = ((contents[EEPROM_RF_OPT1]) &
+                           BOARD_TYPE_NORMAL_MASK) >> 5; /*bit[7:5]*/
+       } else {
+               boardType = contents[EEPROM_RF_OPT4];
+               boardType &= BOARD_TYPE_TEST_MASK;
+       }
+       rtlefuse->board_type = boardType;
+       if (IS_HIGHT_PA(rtlefuse->board_type))
+               rtlefuse->external_pa = 1;
+       printk(KERN_INFO "rtl8192cu: Board Type %x\n", rtlefuse->board_type);
+
+#ifdef CONFIG_ANTENNA_DIVERSITY
+       /* Antenna Diversity setting. */
+       if (registry_par->antdiv_cfg == 2) /* 2: From Efuse */
+               rtlefuse->antenna_cfg = (contents[EEPROM_RF_OPT1]&0x18)>>3;
+       else
+               rtlefuse->antenna_cfg = registry_par->antdiv_cfg; /* 0:OFF, */
+
+       printk(KERN_INFO "rtl8192cu: Antenna Config %x\n",
+              rtlefuse->antenna_cfg);
+#endif
+}
+
+#ifdef CONFIG_BT_COEXIST
+static void _update_bt_param(_adapter *padapter)
+{
+       struct btcoexist_priv    *pbtpriv = &(padapter->halpriv.bt_coexist);
+       struct registry_priv    *registry_par = &padapter->registrypriv;
+       if (2 != registry_par->bt_iso) {
+               /* 0:Low, 1:High, 2:From Efuse */
+               pbtpriv->BT_Ant_isolation = registry_par->bt_iso;
+       }
+       if (registry_par->bt_sco == 1) {
+               /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter, 4.Busy,
+                * 5.OtherBusy */
+               pbtpriv->BT_Service = BT_OtherAction;
+       } else if (registry_par->bt_sco == 2) {
+               pbtpriv->BT_Service = BT_SCO;
+       } else if (registry_par->bt_sco == 4) {
+               pbtpriv->BT_Service = BT_Busy;
+       } else if (registry_par->bt_sco == 5) {
+               pbtpriv->BT_Service = BT_OtherBusy;
+       } else {
+               pbtpriv->BT_Service = BT_Idle;
+       }
+       pbtpriv->BT_Ampdu = registry_par->bt_ampdu;
+       pbtpriv->bCOBT = _TRUE;
+       pbtpriv->BtEdcaUL = 0;
+       pbtpriv->BtEdcaDL = 0;
+       pbtpriv->BtRssiState = 0xff;
+       pbtpriv->bInitSet = _FALSE;
+       pbtpriv->bBTBusyTraffic = _FALSE;
+       pbtpriv->bBTTrafficModeSet = _FALSE;
+       pbtpriv->bBTNonTrafficModeSet = _FALSE;
+       pbtpriv->CurrentState = 0;
+       pbtpriv->PreviousState = 0;
+       printk(KERN_INFO "rtl8192cu: BT Coexistence = %s\n",
+              (pbtpriv->BT_Coexist == _TRUE) ? "enable" : "disable");
+       if (pbtpriv->BT_Coexist) {
+               if (pbtpriv->BT_Ant_Num == Ant_x2)
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+                              "Ant_Num = Antx2\n");
+               else if (pbtpriv->BT_Ant_Num == Ant_x1)
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+                              "Ant_Num = Antx1\n");
+               switch (pbtpriv->BT_CoexistType) {
+               case BT_2Wire:
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+                              "CoexistType = BT_2Wire\n");
+                       break;
+               case BT_ISSC_3Wire:
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+                              "CoexistType = BT_ISSC_3Wire\n");
+                       break;
+               case BT_Accel:
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+                              "CoexistType = BT_Accel\n");
+                       break;
+               case BT_CSR_BC4:
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+                              "CoexistType = BT_CSR_BC4\n");
+                       break;
+               case BT_CSR_BC8:
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+                              "CoexistType = BT_CSR_BC8\n");
+                       break;
+               case BT_RTL8756:
+                       printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+                              "CoexistType = BT_RTL8756\n");
+                       break;
+               default:
+                       printk(KERN_INFO "rtl8192cu: Bluetooth BT_"
+                              "CoexistType = Unknown\n");
+                       break;
+               }
+               printk(KERN_INFO "rtl8192cu: Bluetooth BT_Ant_isolation = %d\n",
+                      pbtpriv->BT_Ant_isolation);
+               switch (pbtpriv->BT_Service) {
+               case BT_OtherAction:
+                       printk(KERN_INFO "rtl8192cu: Bluetooth BT_Service = "
+                              "BT_OtherAction\n");
+                       break;
+               case BT_SCO:
+                       printk(KERN_INFO "rtl8192cu: Bluetooth BT_Service = "
+                              "BT_SCO\n");
+                       break;
+               case BT_Busy:
+                       printk(KERN_INFO "rtl8192cu: Bluetooth BT_Service = "
+                              "BT_Busy\n");
+                       break;
+               case BT_OtherBusy:
+                       printk(KERN_INFO "rtl8192cu: Bluetooth BT_Service = "
+                              "BT_OtherBusy\n");
+                       break;
+               default:
+                       printk(KERN_INFO "rtl8192cu: Bluetooth BT_Service = "
+                              "BT_Idle\n");
+                       break;
+               }
+               printk(KERN_INFO "rtl8192cu: BT_RadioSharedType = 0x%x\n",
+                      pbtpriv->BT_RadioSharedType);
+       }
+}
+
+#define GET_BT_COEXIST(priv) (&(priv)->bt_coexist)
+
+static void _rtl92cu_read_bluetooth_coexistInfo(struct ieee80211_hw *hw,
+                                               u8 *contents,
+                                               bool bautoloadfailed)
+{
+       HAL_DATA_TYPE   *pHalData = GET_HAL_DATA(Adapter);
+       bool isNormal = IS_NORMAL_CHIP(pHalData->VersionID);
+       struct btcoexist_priv    *pbtpriv = &pHalData->bt_coexist;
+       u8      rf_opt4;
+
+       _rtw_memset(pbtpriv, 0, sizeof(struct btcoexist_priv));
+       if (bautoloadfailed) {
+               pbtpriv->BT_Coexist = _FALSE;
+               pbtpriv->BT_CoexistType = BT_2Wire;
+               pbtpriv->BT_Ant_Num = Ant_x2;
+               pbtpriv->BT_Ant_isolation = 0;
+               pbtpriv->BT_RadioSharedType = BT_Radio_Shared;
+               return;
+       }
+       if (isNormal) {
+               if (pHalData->BoardType == BOARD_USB_COMBO)
+                       pbtpriv->BT_Coexist = _TRUE;
+               else
+                       pbtpriv->BT_Coexist = ((contents[EEPROM_RF_OPT3] &
+                                             0x20) >> 5); /* bit[5] */
+               rf_opt4 = contents[EEPROM_RF_OPT4];
+               pbtpriv->BT_CoexistType = ((rf_opt4&0xe)>>1); /* bit [3:1] */
+               pbtpriv->BT_Ant_Num = (rf_opt4&0x1); /* bit [0] */
+               pbtpriv->BT_Ant_isolation = ((rf_opt4&0x10)>>4); /* bit [4] */
+               pbtpriv->BT_RadioSharedType = ((rf_opt4&0x20)>>5); /* bit [5] */
+       } else {
+               pbtpriv->BT_Coexist = (contents[EEPROM_RF_OPT4] >> 4) ?
+                                      _TRUE : _FALSE;
+       }
+       _update_bt_param(Adapter);
+}
+#endif
+
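+/*
+ * Parse the efuse/EEPROM shadow map: validate the EEPROM ID, then extract
+ * the MAC address, TX power data, VID/DID, channel plan, EEPROM version and
+ * OEM customer ID, and finally the board type.
+ */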
+static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u16 i, usvalue;
+       u8 hwinfo[HWSET_MAX_SIZE] = {0};
+       u16 eeprom_id;
+
+       if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+               rtl_efuse_shadow_map_update(hw);
+               memcpy((void *)hwinfo,
+                      (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+                      HWSET_MAX_SIZE);
+       } else if (rtlefuse->epromtype == EEPROM_93C46) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("RTL819X not booted from EEPROM, check it!!"));
+       }
+       RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"),
+                     hwinfo, HWSET_MAX_SIZE);
+       eeprom_id = *((u16 *)&hwinfo[0]);
+       if (eeprom_id != RTL8190_EEPROM_ID) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
+               rtlefuse->autoload_failflag = true;
+       } else {
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+               rtlefuse->autoload_failflag = false;
+       }
+       if (rtlefuse->autoload_failflag == true)
+               return;
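+       /* The MAC address occupies six bytes starting at EEPROM_MAC_ADDR;
+        * copy it out one 16-bit word at a time. */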
+       for (i = 0; i < 6; i += 2) {
+               usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
+               *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
+       }
+       printk(KERN_INFO "rtl8192cu: MAC address: %pM\n", rtlefuse->dev_addr);
+       _rtl92cu_read_txpower_info_from_hwpg(hw,
+                                          rtlefuse->autoload_failflag, hwinfo);
+       rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
+       rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                (" VID = 0x%02x PID = 0x%02x\n",
+                rtlefuse->eeprom_vid, rtlefuse->eeprom_did));
+       rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
+       rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
+       rtlefuse->txpwr_fromeprom = true;
+       rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid));
+       if (rtlhal->oem_id == RT_CID_DEFAULT) {
+               switch (rtlefuse->eeprom_oemid) {
+               case EEPROM_CID_DEFAULT:
+                       if (rtlefuse->eeprom_did == 0x8176) {
+                               if ((rtlefuse->eeprom_svid == 0x103C &&
+                                    rtlefuse->eeprom_smid == 0x1629))
+                                       rtlhal->oem_id = RT_CID_819x_HP;
+                               else
+                                       rtlhal->oem_id = RT_CID_DEFAULT;
+                       } else {
+                               rtlhal->oem_id = RT_CID_DEFAULT;
+                       }
+                       break;
+               case EEPROM_CID_TOSHIBA:
+                       rtlhal->oem_id = RT_CID_TOSHIBA;
+                       break;
+               case EEPROM_CID_QMI:
+                       rtlhal->oem_id = RT_CID_819x_QMI;
+                       break;
+               case EEPROM_CID_WHQL:
+               default:
+                       rtlhal->oem_id = RT_CID_DEFAULT;
+                       break;
+               }
+       }
+       _rtl92cu_read_board_type(hw, hwinfo);
+#ifdef CONFIG_BT_COEXIST
+       _rtl92cu_read_bluetooth_coexistInfo(hw, hwinfo,
+                                           rtlefuse->autoload_failflag);
+#endif
+}
+
+static void _rtl92cu_hal_customized_behavior(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       switch (rtlhal->oem_id) {
+       case RT_CID_819x_HP:
+               usb_priv->ledctl.led_opendrain = true;
+               break;
+       case RT_CID_819x_Lenovo:
+       case RT_CID_DEFAULT:
+       case RT_CID_TOSHIBA:
+       case RT_CID_CCX:
+       case RT_CID_819x_Acer:
+       case RT_CID_WHQL:
+       default:
+               break;
+       }
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                ("RT Customized ID: 0x%02X\n", rtlhal->oem_id));
+}
+
+void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw)
+{
+
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u8 tmp_u1b;
+
+       if (!IS_NORMAL_CHIP(rtlhal->version))
+               return;
+       tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
+       rtlefuse->epromtype = (tmp_u1b & EEPROMSEL) ?
+                              EEPROM_93C46 : EEPROM_BOOT_EFUSE;
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from %s\n",
+                (tmp_u1b & EEPROMSEL) ? "EEPROM" : "EFUSE"));
+       rtlefuse->autoload_failflag = (tmp_u1b & EEPROM_EN) ? false : true;
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload %s\n",
+                (tmp_u1b & EEPROM_EN) ? "OK!!" : "ERR!!"));
+       _rtl92cu_read_adapter_info(hw);
+       _rtl92cu_hal_customized_behavior(hw);
+       return;
+}
+
+static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       int             status = 0;
+       u16             value16;
+       u8              value8;
+       /*  polling autoload done. */
+       u32     pollingCount = 0;
+
+       do {
+               if (rtl_read_byte(rtlpriv, REG_APS_FSMCO) & PFM_ALDN) {
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                                ("Autoload Done!\n"));
+                       break;
+               }
+               if (pollingCount++ > 100) {
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+                                ("Polling of REG_APS_FSMCO[PFM_ALDN]"
+                                " failed!\n"));
+                       return -ENODEV;
+               }
+       } while (true);
+       /* 0. RSV_CTRL 0x1C[7:0] = 0 unlock ISO/CLK/Power control register */
+       rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0);
+       /* Power on when re-enter from IPS/Radio off/card disable */
+       /* enable SPS into PWM mode */
+       rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+       udelay(100);
+       value8 = rtl_read_byte(rtlpriv, REG_LDOV12D_CTRL);
+       if (0 == (value8 & LDV12_EN)) {
+               value8 |= LDV12_EN;
+               rtl_write_byte(rtlpriv, REG_LDOV12D_CTRL, value8);
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                        (" power-on :REG_LDOV12D_CTRL Reg0x21:0x%02x.\n",
+                        value8));
+               udelay(100);
+               value8 = rtl_read_byte(rtlpriv, REG_SYS_ISO_CTRL);
+               value8 &= ~ISO_MD2PP;
+               rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, value8);
+       }
+       /*  auto enable WLAN */
+       pollingCount = 0;
+       value16 = rtl_read_word(rtlpriv, REG_APS_FSMCO);
+       value16 |= APFM_ONMAC;
+       rtl_write_word(rtlpriv, REG_APS_FSMCO, value16);
+       do {
+               if (!(rtl_read_word(rtlpriv, REG_APS_FSMCO) & APFM_ONMAC)) {
+                       printk(KERN_INFO "rtl8192cu: MAC auto ON okay!\n");
+                       break;
+               }
+               if (pollingCount++ > 100) {
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+                                ("Polling of REG_APS_FSMCO[APFM_ONMAC]"
+                                " failed!\n"));
+                       return -ENODEV;
+               }
+       } while (true);
+       /* Enable Radio, GPIO, and LED functions */
+       rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x0812);
+       /* release RF digital isolation */
+       value16 = rtl_read_word(rtlpriv, REG_SYS_ISO_CTRL);
+       value16 &= ~ISO_DIOR;
+       rtl_write_word(rtlpriv, REG_SYS_ISO_CTRL, value16);
+       /* Reconsider when to do this operation after asking HWSD. */
+       pollingCount = 0;
+       rtl_write_byte(rtlpriv, REG_APSD_CTRL, (rtl_read_byte(rtlpriv,
+                                               REG_APSD_CTRL) & ~BIT(6)));
+       do {
+               pollingCount++;
+       } while ((pollingCount < 200) &&
+                (rtl_read_byte(rtlpriv, REG_APSD_CTRL) & BIT(7)));
+       /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */
+       value16 = rtl_read_word(rtlpriv,  REG_CR);
+       value16 |= (HCI_TXDMA_EN | HCI_RXDMA_EN | TXDMA_EN | RXDMA_EN |
+                   PROTOCOL_EN | SCHEDULE_EN | MACTXEN | MACRXEN | ENSEC);
+       rtl_write_word(rtlpriv, REG_CR, value16);
+       return status;
+}
+
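+/*
+ * Reserve a fixed public page pool and split the remaining TX buffer pages
+ * among the high/low/normal priority queues (evenly per USB out endpoint in
+ * the non-WMM case), then program the result into REG_RQPN.
+ */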
+static void _rtl92cu_init_queue_reserved_page(struct ieee80211_hw *hw,
+                                             bool wmm_enable,
+                                             u8 out_ep_num,
+                                             u8 queue_sel)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       bool isChipN = IS_NORMAL_CHIP(rtlhal->version);
+       u32 outEPNum = (u32)out_ep_num;
+       u32 numHQ = 0;
+       u32 numLQ = 0;
+       u32 numNQ = 0;
+       u32 numPubQ;
+       u32 value32;
+       u8 value8;
+       u32 txQPageNum, txQPageUnit, txQRemainPage;
+
+       if (!wmm_enable) {
+               numPubQ = (isChipN) ? CHIP_B_PAGE_NUM_PUBQ :
+                         CHIP_A_PAGE_NUM_PUBQ;
+               txQPageNum = TX_TOTAL_PAGE_NUMBER - numPubQ;
+
+               txQPageUnit = txQPageNum/outEPNum;
+               txQRemainPage = txQPageNum % outEPNum;
+               if (queue_sel & TX_SELE_HQ)
+                       numHQ = txQPageUnit;
+               if (queue_sel & TX_SELE_LQ)
+                       numLQ = txQPageUnit;
+               /* The HIGH priority queue is always present in the 2 out-ep
+                * configuration; remaining pages are assigned to it. */
+               if ((outEPNum > 1) && (txQRemainPage))
+                       numHQ += txQRemainPage;
+               /* NOTE: This step is done before writing REG_RQPN. */
+               if (isChipN) {
+                       if (queue_sel & TX_SELE_NQ)
+                               numNQ = txQPageUnit;
+                       value8 = (u8)_NPQ(numNQ);
+                       rtl_write_byte(rtlpriv,  REG_RQPN_NPQ, value8);
+               }
+       } else {
+               /* for WMM, the number of out-eps must be at least 2! */
+               numPubQ = isChipN ? WMM_CHIP_B_PAGE_NUM_PUBQ :
+                         WMM_CHIP_A_PAGE_NUM_PUBQ;
+               if (queue_sel & TX_SELE_HQ) {
+                       numHQ = isChipN ? WMM_CHIP_B_PAGE_NUM_HPQ :
+                               WMM_CHIP_A_PAGE_NUM_HPQ;
+               }
+               if (queue_sel & TX_SELE_LQ) {
+                       numLQ = isChipN ? WMM_CHIP_B_PAGE_NUM_LPQ :
+                               WMM_CHIP_A_PAGE_NUM_LPQ;
+               }
+               /* NOTE: This step is done before writing REG_RQPN. */
+               if (isChipN) {
+                       if (queue_sel & TX_SELE_NQ)
+                               numNQ = WMM_CHIP_B_PAGE_NUM_NPQ;
+                       value8 = (u8)_NPQ(numNQ);
+                       rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8);
+               }
+       }
+       /* TX DMA */
+       value32 = _HPQ(numHQ) | _LPQ(numLQ) | _PUBQ(numPubQ) | LD_RQPN;
+       rtl_write_dword(rtlpriv, REG_RQPN, value32);
+}
+
+static void _rtl92c_init_trx_buffer(struct ieee80211_hw *hw, bool wmm_enable)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u8      txpktbuf_bndy;
+       u8      value8;
+
+       if (!wmm_enable)
+               txpktbuf_bndy = TX_PAGE_BOUNDARY;
+       else /* for WMM */
+               txpktbuf_bndy = (IS_NORMAL_CHIP(rtlhal->version))
+                                               ? WMM_CHIP_B_TX_PAGE_BOUNDARY
+                                               : WMM_CHIP_A_TX_PAGE_BOUNDARY;
+       rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
+       rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
+       rtl_write_byte(rtlpriv, REG_TXPKTBUF_WMAC_LBK_BF_HD, txpktbuf_bndy);
+       rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy);
+       rtl_write_byte(rtlpriv, REG_TDECTRL+1, txpktbuf_bndy);
+       rtl_write_word(rtlpriv,  (REG_TRXFF_BNDY + 2), 0x27FF);
+       value8 = _PSRX(RX_PAGE_SIZE_REG_VALUE) | _PSTX(PBP_128);
+       rtl_write_byte(rtlpriv, REG_PBP, value8);
+}
+
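+/*
+ * Map the BE/BK/VI/VO/management/high TX queues onto DMA queues by
+ * programming the per-queue mapping fields of REG_TRXDMA_CTRL.
+ */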
+static void _rtl92c_init_chipN_reg_priority(struct ieee80211_hw *hw, u16 beQ,
+                                           u16 bkQ, u16 viQ, u16 voQ,
+                                           u16 mgtQ, u16 hiQ)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u16 value16 = (rtl_read_word(rtlpriv, REG_TRXDMA_CTRL) & 0x7);
+
+       value16 |= _TXDMA_BEQ_MAP(beQ) | _TXDMA_BKQ_MAP(bkQ) |
+                  _TXDMA_VIQ_MAP(viQ) | _TXDMA_VOQ_MAP(voQ) |
+                  _TXDMA_MGQ_MAP(mgtQ) | _TXDMA_HIQ_MAP(hiQ);
+       rtl_write_word(rtlpriv,  REG_TRXDMA_CTRL, value16);
+}
+
+static void _rtl92cu_init_chipN_one_out_ep_priority(struct ieee80211_hw *hw,
+                                                   bool wmm_enable,
+                                                   u8 queue_sel)
+{
+       u16 uninitialized_var(value);
+
+       switch (queue_sel) {
+       case TX_SELE_HQ:
+               value = QUEUE_HIGH;
+               break;
+       case TX_SELE_LQ:
+               value = QUEUE_LOW;
+               break;
+       case TX_SELE_NQ:
+               value = QUEUE_NORMAL;
+               break;
+       default:
+               WARN_ON(1); /* Shall not reach here! */
+               break;
+       }
+       _rtl92c_init_chipN_reg_priority(hw, value, value, value, value,
+                                       value, value);
+       printk(KERN_INFO "rtl8192cu: Tx queue select: 0x%02x\n", queue_sel);
+}
+
+static void _rtl92cu_init_chipN_two_out_ep_priority(struct ieee80211_hw *hw,
+                                                               bool wmm_enable,
+                                                               u8 queue_sel)
+{
+       u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
+       u16 uninitialized_var(valueHi);
+       u16 uninitialized_var(valueLow);
+
+       switch (queue_sel) {
+       case (TX_SELE_HQ | TX_SELE_LQ):
+               valueHi = QUEUE_HIGH;
+               valueLow = QUEUE_LOW;
+               break;
+       case (TX_SELE_NQ | TX_SELE_LQ):
+               valueHi = QUEUE_NORMAL;
+               valueLow = QUEUE_LOW;
+               break;
+       case (TX_SELE_HQ | TX_SELE_NQ):
+               valueHi = QUEUE_HIGH;
+               valueLow = QUEUE_NORMAL;
+               break;
+       default:
+               WARN_ON(1);
+               break;
+       }
+       if (!wmm_enable) {
+               beQ = valueLow;
+               bkQ = valueLow;
+               viQ = valueHi;
+               voQ = valueHi;
+               mgtQ = valueHi;
+               hiQ = valueHi;
+       } else { /* for WMM, CONFIG_OUT_EP_WIFI_MODE */
+               beQ = valueHi;
+               bkQ = valueLow;
+               viQ = valueLow;
+               voQ = valueHi;
+               mgtQ = valueHi;
+               hiQ = valueHi;
+       }
+       _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
+       printk(KERN_INFO "rtl8192cu: Tx queue select: 0x%02x\n", queue_sel);
+}
+
+static void _rtl92cu_init_chipN_three_out_ep_priority(struct ieee80211_hw *hw,
+                                                     bool wmm_enable,
+                                                     u8 queue_sel)
+{
+       u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       if (!wmm_enable) { /* typical setting */
+               beQ     = QUEUE_LOW;
+               bkQ     = QUEUE_LOW;
+               viQ     = QUEUE_NORMAL;
+               voQ     = QUEUE_HIGH;
+               mgtQ    = QUEUE_HIGH;
+               hiQ     = QUEUE_HIGH;
+       } else { /* for WMM */
+               beQ     = QUEUE_LOW;
+               bkQ     = QUEUE_NORMAL;
+               viQ     = QUEUE_NORMAL;
+               voQ     = QUEUE_HIGH;
+               mgtQ    = QUEUE_HIGH;
+               hiQ     = QUEUE_HIGH;
+       }
+       _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+                ("Tx queue select: 0x%02x\n", queue_sel));
+}
+
+static void _rtl92cu_init_chipN_queue_priority(struct ieee80211_hw *hw,
+                                              bool wmm_enable,
+                                              u8 out_ep_num,
+                                              u8 queue_sel)
+{
+       switch (out_ep_num) {
+       case 1:
+               _rtl92cu_init_chipN_one_out_ep_priority(hw, wmm_enable,
+                                                       queue_sel);
+               break;
+       case 2:
+               _rtl92cu_init_chipN_two_out_ep_priority(hw, wmm_enable,
+                                                       queue_sel);
+               break;
+       case 3:
+               _rtl92cu_init_chipN_three_out_ep_priority(hw, wmm_enable,
+                                                         queue_sel);
+               break;
+       default:
+               WARN_ON(1); /* Shall not reach here! */
+               break;
+       }
+}
+
+static void _rtl92cu_init_chipT_queue_priority(struct ieee80211_hw *hw,
+                                              bool wmm_enable,
+                                              u8 out_ep_num,
+                                              u8 queue_sel)
+{
+       u8      hq_sele;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       switch (out_ep_num) {
+       case 2: /* (TX_SELE_HQ|TX_SELE_LQ) */
+               if (!wmm_enable) /* typical setting */
+                       hq_sele =  HQSEL_VOQ | HQSEL_VIQ | HQSEL_MGTQ |
+                                  HQSEL_HIQ;
+               else    /* for WMM */
+                       hq_sele = HQSEL_VOQ | HQSEL_BEQ | HQSEL_MGTQ |
+                                 HQSEL_HIQ;
+               break;
+       case 1:
+               if (TX_SELE_LQ == queue_sel) {
+                       /* map all endpoint to Low queue */
+                       hq_sele = 0;
+               } else if (TX_SELE_HQ == queue_sel) {
+                       /* map all endpoint to High queue */
+                       hq_sele =  HQSEL_VOQ | HQSEL_VIQ | HQSEL_BEQ |
+                                  HQSEL_BKQ | HQSEL_MGTQ | HQSEL_HIQ;
+               }
+               break;
+       default:
+               WARN_ON(1); /* Shall not reach here! */
+               break;
+       }
+       rtl_write_byte(rtlpriv, (REG_TRXDMA_CTRL+1), hq_sele);
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+                ("Tx queue select: 0x%02x\n", hq_sele));
+}
+
+static void _rtl92cu_init_queue_priority(struct ieee80211_hw *hw,
+                                               bool wmm_enable,
+                                               u8 out_ep_num,
+                                               u8 queue_sel)
+{
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       if (IS_NORMAL_CHIP(rtlhal->version))
+               _rtl92cu_init_chipN_queue_priority(hw, wmm_enable, out_ep_num,
+                                                  queue_sel);
+       else
+               _rtl92cu_init_chipT_queue_priority(hw, wmm_enable, out_ep_num,
+                                                  queue_sel);
+}
+
+static void _rtl92cu_init_usb_aggregation(struct ieee80211_hw *hw)
+{
+}
+
+static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw)
+{
+       u16                     value16;
+
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+       mac->rx_conf = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APP_FCS |
+                     RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
+                     RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32);
+       rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf);
+       /* Accept all multicast addresses */
+       rtl_write_dword(rtlpriv,  REG_MAR, 0xFFFFFFFF);
+       rtl_write_dword(rtlpriv,  REG_MAR + 4, 0xFFFFFFFF);
+       /* Accept all management frames */
+       value16 = 0xFFFF;
+       rtl92c_set_mgt_filter(hw, value16);
+       /* Reject all control frames - default value is 0 */
+       rtl92c_set_ctrl_filter(hw, 0x0);
+       /* Accept all data frames */
+       value16 = 0xFFFF;
+       rtl92c_set_data_filter(hw, value16);
+}
+
+static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+       int err = 0;
+       u32     boundary = 0;
+       u8 wmm_enable = false; /* TODO */
+       u8 out_ep_nums = rtlusb->out_ep_nums;
+       u8 queue_sel = rtlusb->out_queue_sel;
+       err = _rtl92cu_init_power_on(hw);
+
+       if (err) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                       ("Failed to init power on!\n"));
+               return err;
+       }
+       if (!wmm_enable) {
+               boundary = TX_PAGE_BOUNDARY;
+       } else { /* for WMM */
+               boundary = (IS_NORMAL_CHIP(rtlhal->version))
+                                       ? WMM_CHIP_B_TX_PAGE_BOUNDARY
+                                       : WMM_CHIP_A_TX_PAGE_BOUNDARY;
+       }
+       if (false == rtl92c_init_llt_table(hw, boundary)) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                       ("Failed to init LLT Table!\n"));
+               return -EINVAL;
+       }
+       _rtl92cu_init_queue_reserved_page(hw, wmm_enable, out_ep_nums,
+                                         queue_sel);
+       _rtl92c_init_trx_buffer(hw, wmm_enable);
+       _rtl92cu_init_queue_priority(hw, wmm_enable, out_ep_nums,
+                                    queue_sel);
+       /* Get Rx PHY status in order to report RSSI and others. */
+       rtl92c_init_driver_info_size(hw, RTL92C_DRIVER_INFO_SIZE);
+       rtl92c_init_interrupt(hw);
+       rtl92c_init_network_type(hw);
+       _rtl92cu_init_wmac_setting(hw);
+       rtl92c_init_adaptive_ctrl(hw);
+       rtl92c_init_edca(hw);
+       rtl92c_init_rate_fallback(hw);
+       rtl92c_init_retry_function(hw);
+       _rtl92cu_init_usb_aggregation(hw);
+       rtlpriv->cfg->ops->set_bw_mode(hw, NL80211_CHAN_HT20);
+       rtl92c_set_min_space(hw, IS_92C_SERIAL(rtlhal->version));
+       rtl92c_init_beacon_parameters(hw, rtlhal->version);
+       rtl92c_init_ampdu_aggregation(hw);
+       rtl92c_init_beacon_max_error(hw, true);
+       return err;
+}
+
+void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 sec_reg_value = 0x0;
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                ("PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+                 rtlpriv->sec.pairwise_enc_algorithm,
+                 rtlpriv->sec.group_enc_algorithm));
+       if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
+               RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+                        ("sw crypto in use, skip hw security config\n"));
+               return;
+       }
+       sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable;
+       if (rtlpriv->sec.use_defaultkey) {
+               sec_reg_value |= SCR_TxUseDK;
+               sec_reg_value |= SCR_RxUseDK;
+       }
+       if (IS_NORMAL_CHIP(rtlhal->version))
+               sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
+       rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
+       RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+                ("The SECR-value %x\n", sec_reg_value));
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
+}
+
+static void _rtl92cu_hw_configure(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       /* Fix MAC loopback mode failure. */
+       rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
+       rtl_write_byte(rtlpriv, 0x15, 0xe9);
+       /* HW SEQ CTRL */
+       /* set 0x0 to 0xFF by tynli. Default enable HW SEQ NUM. */
+       rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF);
+       /* Fix USB interface interference issue. */
+       rtl_write_byte(rtlpriv, 0xfe40, 0xe0);
+       rtl_write_byte(rtlpriv, 0xfe41, 0x8d);
+       rtl_write_byte(rtlpriv, 0xfe42, 0x80);
+       rtlusb->reg_bcn_ctrl_val = 0x18;
+       rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8)rtlusb->reg_bcn_ctrl_val);
+}
+
+static void _InitPABias(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u8 pa_setting;
+
+       /* FIXED PA current issue */
+       pa_setting = efuse_read_1byte(hw, 0x1FA);
+       if (!(pa_setting & BIT(0))) {
+               rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0x0F406);
+               rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0x4F406);
+               rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0x8F406);
+               rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0xCF406);
+       }
+       if (!(pa_setting & BIT(1)) && IS_NORMAL_CHIP(rtlhal->version) &&
+           IS_92C_SERIAL(rtlhal->version)) {
+               rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0x0F406);
+               rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0x4F406);
+               rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0x8F406);
+               rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0xCF406);
+       }
+       if (!(pa_setting & BIT(4))) {
+               pa_setting = rtl_read_byte(rtlpriv, 0x16);
+               pa_setting &= 0x0F;
+               rtl_write_byte(rtlpriv, 0x16, pa_setting | 0x90);
+       }
+}
+
+static void _InitAntenna_Selection(struct ieee80211_hw *hw)
+{
+#ifdef CONFIG_ANTENNA_DIVERSITY
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       if (pHalData->AntDivCfg == 0)
+               return;
+
+       if (rtlphy->rf_type == RF_1T1R) {
+               rtl_write_dword(rtlpriv, REG_LEDCFG0,
+                               rtl_read_dword(rtlpriv,
+                               REG_LEDCFG0)|BIT(23));
+               rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
+               if (rtl_get_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300) ==
+                   Antenna_A)
+                       pHalData->CurAntenna = Antenna_A;
+               else
+                       pHalData->CurAntenna = Antenna_B;
+       }
+#endif
+}
+
+static void _dump_registers(struct ieee80211_hw *hw)
+{
+}
+
+static void _update_mac_setting(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+       mac->rx_conf = rtl_read_dword(rtlpriv, REG_RCR);
+       mac->rx_mgt_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP0);
+       mac->rx_ctrl_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP1);
+       mac->rx_data_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
+}
+
+int rtl92cu_hw_init(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       int err = 0;
+       static bool iqk_initialized;
+
+       rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU;
+       err = _rtl92cu_init_mac(hw);
+       if (err) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("init mac failed!\n"));
+               return err;
+       }
+       err = rtl92c_download_fw(hw);
+       if (err) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                        ("Failed to download FW. Aborting HW init.\n"));
+               err = 1;
+               rtlhal->fw_ready = false;
+               return err;
+       } else {
+               rtlhal->fw_ready = true;
+       }
+       rtlhal->last_hmeboxnum = 0; /* h2c */
+       _rtl92cu_phy_param_tab_init(hw);
+       rtl92cu_phy_mac_config(hw);
+       rtl92cu_phy_bb_config(hw);
+       rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
+       rtl92c_phy_rf_config(hw);
+       if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
+           !IS_92C_SERIAL(rtlhal->version)) {
+               rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD, 0x30255);
+               rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G2, MASKDWORD, 0x50a00);
+       }
+       rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
+                                                RF_CHNLBW, RFREG_OFFSET_MASK);
+       rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
+                                                RF_CHNLBW, RFREG_OFFSET_MASK);
+       rtl92cu_bb_block_on(hw);
+       rtl_cam_reset_all_entry(hw);
+       rtl92cu_enable_hw_security_config(hw);
+       ppsc->rfpwr_state = ERFON;
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
+       if (ppsc->rfpwr_state == ERFON) {
+               rtl92c_phy_set_rfpath_switch(hw, 1);
+               if (iqk_initialized) {
+                       rtl92c_phy_iq_calibrate(hw, false);
+               } else {
+                       rtl92c_phy_iq_calibrate(hw, false);
+                       iqk_initialized = true;
+               }
+               rtl92c_dm_check_txpower_tracking(hw);
+               rtl92c_phy_lc_calibrate(hw);
+       }
+       _rtl92cu_hw_configure(hw);
+       _InitPABias(hw);
+       _InitAntenna_Selection(hw);
+       _update_mac_setting(hw);
+       rtl92c_dm_init(hw);
+       _dump_registers(hw);
+       return err;
+}
+
+static void _DisableRFAFEAndResetBB(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+/**************************************
+a.     TXPAUSE 0x522[7:0] = 0xFF       Pause MAC TX queue
+b.     RF path 0 offset 0x00 = 0x00    disable RF
+c.     APSD_CTRL 0x600[7:0] = 0x40
+d.     SYS_FUNC_EN 0x02[7:0] = 0x16    reset BB state machine
+e.     SYS_FUNC_EN 0x02[7:0] = 0x14    reset BB state machine
+***************************************/
+       u8 eRFPath = 0, value8 = 0;
+       rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+       rtl_set_rfreg(hw, (enum radio_path)eRFPath, 0x0, MASKBYTE0, 0x0);
+
+       value8 |= APSDOFF;
+       rtl_write_byte(rtlpriv, REG_APSD_CTRL, value8); /*0x40*/
+       value8 = 0;
+       value8 |= (FEN_USBD | FEN_USBA | FEN_BB_GLB_RSTn);
+       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, value8);/*0x16*/
+       value8 &= (~FEN_BB_GLB_RSTn);
+       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, value8); /*0x14*/
+}
+
+static void  _ResetDigitalProcedure1(struct ieee80211_hw *hw, bool bWithoutHWSM)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       if (rtlhal->fw_version <=  0x20) {
+               /*****************************
+               f. MCUFWDL 0x80[7:0]=0          reset MCU ready status
+               g. SYS_FUNC_EN 0x02[10]= 0      reset MCU reg, (8051 reset)
+               h. SYS_FUNC_EN 0x02[15-12]= 5   reset MAC reg, DCORE
+               i. SYS_FUNC_EN 0x02[10]= 1      enable MCU reg, (8051 enable)
+               ******************************/
+               u16 valu16 = 0;
+
+               rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
+               valu16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+               rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (valu16 &
+                              (~FEN_CPUEN))); /* reset MCU ,8051 */
+               valu16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN)&0x0FFF;
+               rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (valu16 |
+                             (FEN_HWPDN|FEN_ELDR))); /* reset MAC */
+               valu16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+               rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (valu16 |
+                              FEN_CPUEN)); /* enable MCU ,8051 */
+       } else {
+               u8 retry_cnts = 0;
+
+               /* IF fw in RAM code, do reset */
+               if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(1)) {
+                       /* reset MCU ready status */
+                       rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
+                       if (rtlhal->fw_ready) {
+                               /* 8051 reset by self */
+                               rtl_write_byte(rtlpriv, REG_HMETFR+3, 0x20);
+                               while ((retry_cnts++ < 100) &&
+                                      (FEN_CPUEN & rtl_read_word(rtlpriv,
+                                      REG_SYS_FUNC_EN))) {
+                                       udelay(50);
+                               }
+                               if (retry_cnts >= 100) {
+                                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                               ("#####=> 8051 reset failed!.."
+                                               ".......................\n"));
+                                       /* if 8051 reset fail, reset MAC. */
+                                       rtl_write_byte(rtlpriv,
+                                                      REG_SYS_FUNC_EN + 1,
+                                                      0x50);
+                                       udelay(100);
+                               }
+                       }
+               }
+               /* Reset MAC and Enable 8051 */
+               rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x54);
+               rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
+       }
+       if (bWithoutHWSM) {
+               /*****************************
+                 Without HW auto state machine
+               g.SYS_CLKR 0x08[15:0] = 0x30A3          disable MAC clock
+               h.AFE_PLL_CTRL 0x28[7:0] = 0x80         disable AFE PLL
+               i.AFE_XTAL_CTRL 0x24[15:0] = 0x880F     gated AFE DIG_CLOCK
+j.SYS_ISO_CTRL 0x00[7:0] = 0xF9         isolated digital to PON
+               ******************************/
+               rtl_write_word(rtlpriv, REG_SYS_CLKR, 0x70A3);
+               rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80);
+               rtl_write_word(rtlpriv, REG_AFE_XTAL_CTRL, 0x880F);
+               rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, 0xF9);
+       }
+}
+
+static void _ResetDigitalProcedure2(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+/*****************************
+k. SYS_FUNC_EN 0x03[7:0] = 0x44                disable ELDR function
+l. SYS_CLKR 0x08[15:0] = 0x3083                disable ELDR clock
+m. SYS_ISO_CTRL 0x01[7:0] = 0x83       isolated ELDR to PON
+******************************/
+       rtl_write_word(rtlpriv, REG_SYS_CLKR, 0x70A3);
+       rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL+1, 0x82);
+}
+
+static void _DisableGPIO(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+/***************************************
+j. GPIO_PIN_CTRL 0x44[31:0]=0x000
+k. Value = GPIO_PIN_CTRL[7:0]
+l.  GPIO_PIN_CTRL 0x44[31:0] = 0x00FF0000 | (value <<8); write ext PIN level
+m. GPIO_MUXCFG 0x42 [15:0] = 0x0780
+n. LEDCFG 0x4C[15:0] = 0x8080
+***************************************/
+       u8      value8;
+       u16     value16;
+       u32     value32;
+
+       /* 1. Disable GPIO[7:0] */
+       rtl_write_word(rtlpriv, REG_GPIO_PIN_CTRL+2, 0x0000);
+       value32 = rtl_read_dword(rtlpriv, REG_GPIO_PIN_CTRL) & 0xFFFF00FF;
+       value8 = (u8) (value32&0x000000FF);
+       value32 |= ((value8<<8) | 0x00FF0000);
+       rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, value32);
+       /* 2. Disable GPIO[10:8] */
+       rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG+3, 0x00);
+       value16 = rtl_read_word(rtlpriv, REG_GPIO_MUXCFG+2) & 0xFF0F;
+       value8 = (u8) (value16&0x000F);
+       value16 |= ((value8<<4) | 0x0780);
+       rtl_write_word(rtlpriv, REG_GPIO_PIN_CTRL+2, value16);
+       /* 3. Disable LED0 & 1 */
+       rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8080);
+}
+
+static void _DisableAnalog(struct ieee80211_hw *hw, bool bWithoutHWSM)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u16 value16 = 0;
+       u8 value8 = 0;
+
+       if (bWithoutHWSM) {
+               /*****************************
+               n. LDOA15_CTRL 0x20[7:0] = 0x04  disable A15 power
+               o. LDOV12D_CTRL 0x21[7:0] = 0x54 disable digital core power
+               r. When the driver calls disable, the ASIC turns off the
+                  remaining clocks automatically
+               ******************************/
+               rtl_write_byte(rtlpriv, REG_LDOA15_CTRL, 0x04);
+               value8 = rtl_read_byte(rtlpriv, REG_LDOV12D_CTRL);
+               value8 &= (~LDV12_EN);
+               rtl_write_byte(rtlpriv, REG_LDOV12D_CTRL, value8);
+       }
+
+/*****************************
+h. SPS0_CTRL 0x11[7:0] = 0x23          enter PFM mode
+i. APS_FSMCO 0x04[15:0] = 0x4802       set USB suspend
+******************************/
+       rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23);
+       value16 |= (APDM_HOST | AFSM_HSUS | PFM_ALDN);
+       rtl_write_word(rtlpriv, REG_APS_FSMCO, (u16)value16);
+       rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0E);
+}
+
+static void _CardDisableHWSM(struct ieee80211_hw *hw)
+{
+       /* ==== RF Off Sequence ==== */
+       _DisableRFAFEAndResetBB(hw);
+       /* ==== Reset digital sequence   ====== */
+       _ResetDigitalProcedure1(hw, false);
+       /*  ==== Pull GPIO PIN to balance level and LED control ====== */
+       _DisableGPIO(hw);
+       /* ==== Disable analog sequence === */
+       _DisableAnalog(hw, false);
+}
+
+static void _CardDisableWithoutHWSM(struct ieee80211_hw *hw)
+{
+       /*==== RF Off Sequence ==== */
+       _DisableRFAFEAndResetBB(hw);
+       /*  ==== Reset digital sequence   ====== */
+       _ResetDigitalProcedure1(hw, true);
+       /*  ==== Pull GPIO PIN to balance level and LED control ====== */
+       _DisableGPIO(hw);
+       /*  ==== Reset digital sequence   ====== */
+       _ResetDigitalProcedure2(hw);
+       /*  ==== Disable analog sequence === */
+       _DisableAnalog(hw, true);
+}
+
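+/* Apply set/clear masks to the cached BCN_CTRL value and write it back. */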
+static void _rtl92cu_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
+                                     u8 set_bits, u8 clear_bits)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       rtlusb->reg_bcn_ctrl_val |= set_bits;
+       rtlusb->reg_bcn_ctrl_val &= ~clear_bits;
+       rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlusb->reg_bcn_ctrl_val);
+}
+
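+/*
+ * Normal chips gate beacon transmission through FWHW_TXQ_CTRL and
+ * TBTT_PROHIBIT; test chips use REG_TXPAUSE bit 6 instead.
+ */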
+static void _rtl92cu_stop_tx_beacon(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       u8 tmp1byte = 0;
+       if (IS_NORMAL_CHIP(rtlhal->version)) {
+               tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+               rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+                              tmp1byte & (~BIT(6)));
+               rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
+               tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+               tmp1byte &= ~(BIT(0));
+               rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+       } else {
+               rtl_write_byte(rtlpriv, REG_TXPAUSE,
+                              rtl_read_byte(rtlpriv, REG_TXPAUSE) | BIT(6));
+       }
+}
+
+static void _rtl92cu_resume_tx_beacon(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       u8 tmp1byte = 0;
+
+       if (IS_NORMAL_CHIP(rtlhal->version)) {
+               tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+               rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+                              tmp1byte | BIT(6));
+               rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
+               tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+               tmp1byte |= BIT(0);
+               rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+       } else {
+               rtl_write_byte(rtlpriv, REG_TXPAUSE,
+                              rtl_read_byte(rtlpriv, REG_TXPAUSE) & (~BIT(6)));
+       }
+}
+
+static void _rtl92cu_enable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+       if (IS_NORMAL_CHIP(rtlhal->version))
+               _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(1));
+       else
+               _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
+}
+
+static void _rtl92cu_disable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+       if (IS_NORMAL_CHIP(rtlhal->version))
+               _rtl92cu_set_bcn_ctrl_reg(hw, BIT(1), 0);
+       else
+               _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
+}
+
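+/*
+ * Program the MSR network-type field and the beacon-related functions for
+ * the requested interface type; returns non-zero for unsupported types.
+ */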
+static int _rtl92cu_set_media_status(struct ieee80211_hw *hw,
+                                    enum nl80211_iftype type)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
+       enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
+
+       bt_msr &= 0xfc;
+       rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF);
+       if (type == NL80211_IFTYPE_UNSPECIFIED || type ==
+           NL80211_IFTYPE_STATION) {
+               _rtl92cu_stop_tx_beacon(hw);
+               _rtl92cu_enable_bcn_sub_func(hw);
+       } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP) {
+               _rtl92cu_resume_tx_beacon(hw);
+               _rtl92cu_disable_bcn_sub_func(hw);
+       } else {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("Set HW_VAR_MEDIA_"
+                        "STATUS: no such media status (%x).\n", type));
+       }
+       switch (type) {
+       case NL80211_IFTYPE_UNSPECIFIED:
+               bt_msr |= MSR_NOLINK;
+               ledaction = LED_CTL_LINK;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("Set Network type to NO LINK!\n"));
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               bt_msr |= MSR_ADHOC;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("Set Network type to Ad Hoc!\n"));
+               break;
+       case NL80211_IFTYPE_STATION:
+               bt_msr |= MSR_INFRA;
+               ledaction = LED_CTL_LINK;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("Set Network type to STA!\n"));
+               break;
+       case NL80211_IFTYPE_AP:
+               bt_msr |= MSR_AP;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("Set Network type to AP!\n"));
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("Network type %d not supported!\n", type));
+               goto error_out;
+       }
+       rtl_write_byte(rtlpriv, (MSR), bt_msr);
+       rtlpriv->cfg->ops->led_control(hw, ledaction);
+       if ((bt_msr & 0xfc) == MSR_AP)
+               rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+       else
+               rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
+       return 0;
+error_out:
+       return 1;
+}
+
+void rtl92cu_card_disable(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       enum nl80211_iftype opmode;
+
+       mac->link_state = MAC80211_NOLINK;
+       opmode = NL80211_IFTYPE_UNSPECIFIED;
+       _rtl92cu_set_media_status(hw, opmode);
+       rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
+       RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+       if (rtlusb->disableHWSM)
+               _CardDisableHWSM(hw);
+       else
+               _CardDisableWithoutHWSM(hw);
+}
+
+void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
+{
+       /* dummy routine needed for callback from rtl_op_configure_filter() */
+}
+
+/*========================================================================== */
+
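+/*
+ * Enable or disable BSSID filtering (the RCR CBSSID bits) according to the
+ * interface type, keeping TSF update control in step with the filter.
+ */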
+static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw,
+                             enum nl80211_iftype type)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u8 filterout_non_associated_bssid = false;
+
+       switch (type) {
+       case NL80211_IFTYPE_ADHOC:
+       case NL80211_IFTYPE_STATION:
+               filterout_non_associated_bssid = true;
+               break;
+       case NL80211_IFTYPE_UNSPECIFIED:
+       case NL80211_IFTYPE_AP:
+       default:
+               break;
+       }
+       if (filterout_non_associated_bssid) {
+               if (IS_NORMAL_CHIP(rtlhal->version)) {
+                       switch (rtlphy->current_io_type) {
+                       case IO_CMD_RESUME_DM_BY_SCAN:
+                               reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+                               rtlpriv->cfg->ops->set_hw_reg(hw,
+                                                HW_VAR_RCR, (u8 *)(&reg_rcr));
+                               /* enable update TSF */
+                               _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
+                               break;
+                       case IO_CMD_PAUSE_DM_BY_SCAN:
+                               reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+                               rtlpriv->cfg->ops->set_hw_reg(hw,
+                                                HW_VAR_RCR, (u8 *)(&reg_rcr));
+                               /* disable update TSF */
+                               _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
+                               break;
+                       }
+               } else {
+                       reg_rcr |= (RCR_CBSSID);
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+                                                     (u8 *)(&reg_rcr));
+                       _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5)));
+               }
+       } else {
+               if (IS_NORMAL_CHIP(rtlhal->version)) {
+                       reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+                                                     (u8 *)(&reg_rcr));
+                       _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
+               } else {
+                       reg_rcr &= (~RCR_CBSSID);
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+                                                     (u8 *)(&reg_rcr));
+                       _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0);
+               }
+       }
+}
+
+int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
+{
+       if (_rtl92cu_set_media_status(hw, type))
+               return -EOPNOTSUPP;
+       _rtl92cu_set_check_bssid(hw, type);
+       return 0;
+}
+
+static void _InitBeaconParameters(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+       rtl_write_word(rtlpriv, REG_BCN_CTRL, 0x1010);
+
+       /* TODO: Remove these magic number */
+       rtl_write_word(rtlpriv, REG_TBTT_PROHIBIT, 0x6404);
+       rtl_write_byte(rtlpriv, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME);
+       rtl_write_byte(rtlpriv, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME);
+       /* Change beacon AIFS to the largest number because the test chip
+        * does not do contention before sending a beacon. */
+       if (IS_NORMAL_CHIP(rtlhal->version))
+               rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660F);
+       else
+               rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF);
+}
+
+static void _beacon_function_enable(struct ieee80211_hw *hw, bool Enable,
+                                   bool Linked)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4) | BIT(3) | BIT(1)), 0x00);
+       rtl_write_byte(rtlpriv, REG_RD_CTRL+1, 0x6F);
+}
+
+void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw)
+{
+
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       u16 bcn_interval, atim_window;
+       u32 value32;
+
+       bcn_interval = mac->beacon_interval;
+       atim_window = 2;        /*FIX MERGE */
+       rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
+       rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+       _InitBeaconParameters(hw);
+       rtl_write_byte(rtlpriv, REG_SLOT, 0x09);
+       /*
+        * Force beacon frame transmission even after receiving beacon frame
+        * from other ad hoc STA
+        *
+        *
+        * Reset TSF Timer to zero, added by Roger. 2008.06.24
+        */
+       value32 = rtl_read_dword(rtlpriv, REG_TCR);
+       value32 &= ~TSFRST;
+       rtl_write_dword(rtlpriv, REG_TCR, value32);
+       value32 |= TSFRST;
+       rtl_write_dword(rtlpriv, REG_TCR, value32);
+       RT_TRACE(rtlpriv, COMP_INIT|COMP_BEACON, DBG_LOUD,
+                ("SetBeaconRelatedRegisters8192CUsb(): Set TCR(%x)\n",
+                value32));
+       /* TODO: Modify later (Find the right parameters)
+        * NOTE: Fix test chip's bug (about contention window randomness) */
+       if ((mac->opmode == NL80211_IFTYPE_ADHOC) ||
+           (mac->opmode == NL80211_IFTYPE_AP)) {
+               rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x50);
+               rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x50);
+       }
+       _beacon_function_enable(hw, true, true);
+}
+
+void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       u16 bcn_interval = mac->beacon_interval;
+
+       RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
+                ("beacon_interval:%d\n", bcn_interval));
+       rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+}
+
+void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw,
+                                  u32 add_msr, u32 rm_msr)
+{
+}
+
+void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+       switch (variable) {
+       case HW_VAR_RCR:
+               *((u32 *)(val)) = mac->rx_conf;
+               break;
+       case HW_VAR_RF_STATE:
+               *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
+               break;
+       case HW_VAR_FWLPS_RF_ON:{
+                       enum rf_pwrstate rfState;
+                       u32 val_rcr;
+
+                       rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE,
+                                                     (u8 *)(&rfState));
+                       if (rfState == ERFOFF) {
+                               *((bool *) (val)) = true;
+                       } else {
+                               val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+                               val_rcr &= 0x00070000;
+                               if (val_rcr)
+                                       *((bool *) (val)) = false;
+                               else
+                                       *((bool *) (val)) = true;
+                       }
+                       break;
+               }
+       case HW_VAR_FW_PSMODE_STATUS:
+               *((bool *) (val)) = ppsc->fw_current_inpsmode;
+               break;
+       case HW_VAR_CORRECT_TSF:{
+                       u64 tsf;
+                       u32 *ptsf_low = (u32 *)&tsf;
+                       u32 *ptsf_high = ((u32 *)&tsf) + 1;
+
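+                       /* Assemble the 64-bit TSF from the two 32-bit
+                        * halves of REG_TSFTR. */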
+                       *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
+                       *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+                       *((u64 *)(val)) = tsf;
+                       break;
+               }
+       case HW_VAR_MGT_FILTER:
+               *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP0);
+               break;
+       case HW_VAR_CTRL_FILTER:
+               *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP1);
+               break;
+       case HW_VAR_DATA_FILTER:
+               *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("switch case not process\n"));
+               break;
+       }
+}
+
+void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+       enum wireless_mode wirelessmode = mac->mode;
+       u8 idx = 0;
+
+       switch (variable) {
+       case HW_VAR_ETHER_ADDR:{
+                       for (idx = 0; idx < ETH_ALEN; idx++) {
+                               rtl_write_byte(rtlpriv, (REG_MACID + idx),
+                                              val[idx]);
+                       }
+                       break;
+               }
+       case HW_VAR_BASIC_RATE:{
+                       u16 rate_cfg = ((u16 *) val)[0];
+                       u8 rate_index = 0;
+
+                       rate_cfg &= 0x15f;
+                       /* TODO */
+                       /* if (mac->current_network.vender == HT_IOT_PEER_CISCO
+                        *     && ((rate_cfg & 0x150) == 0)) {
+                        *        rate_cfg |= 0x010;
+                        * } */
+                       rate_cfg |= 0x01;
+                       rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
+                       rtl_write_byte(rtlpriv, REG_RRSR + 1,
+                                      (rate_cfg >> 8) & 0xff);
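+                       /* rate_index ends up as the bit position of the
+                        * highest rate left in rate_cfg and is written to
+                        * REG_INIRTS_RATE_SEL below.
+                        */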
+                       while (rate_cfg > 0x1) {
+                               rate_cfg >>= 1;
+                               rate_index++;
+                       }
+                       rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
+                                      rate_index);
+                       break;
+               }
+       case HW_VAR_BSSID:{
+                       for (idx = 0; idx < ETH_ALEN; idx++) {
+                               rtl_write_byte(rtlpriv, (REG_BSSID + idx),
+                                              val[idx]);
+                       }
+                       break;
+               }
+       case HW_VAR_SIFS:{
+                       rtl_write_byte(rtlpriv, REG_SIFS_CCK + 1, val[0]);
+                       rtl_write_byte(rtlpriv, REG_SIFS_OFDM + 1, val[1]);
+                       rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
+                       rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
+                       rtl_write_byte(rtlpriv, REG_R2T_SIFS+1, val[0]);
+                       rtl_write_byte(rtlpriv, REG_T2T_SIFS+1, val[0]);
+                       RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+                                ("HW_VAR_SIFS\n"));
+                       break;
+               }
+       case HW_VAR_SLOT_TIME:{
+                       u8 e_aci;
+                       u8 QOS_MODE = 1;
+
+                       rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
+                       RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+                                ("HW_VAR_SLOT_TIME %x\n", val[0]));
+                       if (QOS_MODE) {
+                               for (e_aci = 0; e_aci < AC_MAX; e_aci++)
+                                       rtlpriv->cfg->ops->set_hw_reg(hw,
+                                                               HW_VAR_AC_PARAM,
+                                                               (u8 *)(&e_aci));
+                       } else {
+                               u8 sifstime = 0;
+                               u8 u1bAIFS;
+
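+                               /* Without QoS, program a common AIFS of
+                                * SIFS + 2 * slot time into all four EDCA
+                                * parameter registers.
+                                */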
+                               if (IS_WIRELESS_MODE_A(wirelessmode) ||
+                                   IS_WIRELESS_MODE_N_24G(wirelessmode) ||
+                                   IS_WIRELESS_MODE_N_5G(wirelessmode))
+                                       sifstime = 16;
+                               else
+                                       sifstime = 10;
+                               u1bAIFS = sifstime + (2 *  val[0]);
+                               rtl_write_byte(rtlpriv, REG_EDCA_VO_PARAM,
+                                              u1bAIFS);
+                               rtl_write_byte(rtlpriv, REG_EDCA_VI_PARAM,
+                                              u1bAIFS);
+                               rtl_write_byte(rtlpriv, REG_EDCA_BE_PARAM,
+                                              u1bAIFS);
+                               rtl_write_byte(rtlpriv, REG_EDCA_BK_PARAM,
+                                              u1bAIFS);
+                       }
+                       break;
+               }
+       case HW_VAR_ACK_PREAMBLE:{
+                       u8 reg_tmp = 0;
+                       u8 short_preamble = (bool)(*val);
+
+                       if (short_preamble)
+                               reg_tmp |= 0x80;
+                       rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp);
+                       break;
+               }
+       case HW_VAR_AMPDU_MIN_SPACE:{
+                       u8 min_spacing_to_set;
+                       u8 sec_min_space;
+
+                       min_spacing_to_set = *((u8 *) val);
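+                       /* Raise the requested MPDU density to the minimum
+                        * required by the pairwise cipher before writing it
+                        * to REG_AMPDU_MIN_SPACE.
+                        */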
+                       if (min_spacing_to_set <= 7) {
+                               switch (rtlpriv->sec.pairwise_enc_algorithm) {
+                               case NO_ENCRYPTION:
+                               case AESCCMP_ENCRYPTION:
+                                       sec_min_space = 0;
+                                       break;
+                               case WEP40_ENCRYPTION:
+                               case WEP104_ENCRYPTION:
+                               case TKIP_ENCRYPTION:
+                                       sec_min_space = 6;
+                                       break;
+                               default:
+                                       sec_min_space = 7;
+                                       break;
+                               }
+                               if (min_spacing_to_set < sec_min_space)
+                                       min_spacing_to_set = sec_min_space;
+                               mac->min_space_cfg = ((mac->min_space_cfg &
+                                                    0xf8) |
+                                                    min_spacing_to_set);
+                               *val = min_spacing_to_set;
+                               RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+                                       ("Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+                                       mac->min_space_cfg));
+                               rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+                                              mac->min_space_cfg);
+                       }
+                       break;
+               }
+       case HW_VAR_SHORTGI_DENSITY:{
+                       u8 density_to_set;
+
+                       density_to_set = *((u8 *) val);
+                       density_to_set &= 0x1f;
+                       mac->min_space_cfg &= 0x07;
+                       mac->min_space_cfg |= (density_to_set << 3);
+                       RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+                                ("Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+                                 mac->min_space_cfg));
+                       rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+                                      mac->min_space_cfg);
+                       break;
+               }
+       case HW_VAR_AMPDU_FACTOR:{
+                       u8 regtoset_normal[4] = {0x41, 0xa8, 0x72, 0xb9};
+                       u8 factor_toset;
+                       u8 *p_regtoset = regtoset_normal;
+                       u8 index = 0;
+
+                       factor_toset = *((u8 *) val);
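+                       /* Convert the AMPDU factor to a 4-bit length limit
+                        * and cap both nibbles of each REG_AGGLEN_LMT byte
+                        * at that value.
+                        */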
+                       if (factor_toset <= 3) {
+                               factor_toset = (1 << (factor_toset + 2));
+                               if (factor_toset > 0xf)
+                                       factor_toset = 0xf;
+                               for (index = 0; index < 4; index++) {
+                                       if ((p_regtoset[index] & 0xf0) >
+                                           (factor_toset << 4))
+                                               p_regtoset[index] =
+                                                    (p_regtoset[index] & 0x0f)
+                                                    | (factor_toset << 4);
+                                       if ((p_regtoset[index] & 0x0f) >
+                                            factor_toset)
+                                               p_regtoset[index] =
+                                                    (p_regtoset[index] & 0xf0)
+                                                    | (factor_toset);
+                                       rtl_write_byte(rtlpriv,
+                                                      (REG_AGGLEN_LMT + index),
+                                                      p_regtoset[index]);
+                               }
+                               RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+                                        ("Set HW_VAR_AMPDU_FACTOR: %#x\n",
+                                         factor_toset));
+                       }
+                       break;
+               }
+       case HW_VAR_AC_PARAM:{
+                       u8 e_aci = *((u8 *) val);
+                       u32 u4b_ac_param;
+                       u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min);
+                       u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max);
+                       u16 tx_op = le16_to_cpu(mac->ac[e_aci].tx_op);
+
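+                       /* Pack AIFS, ECWmin, ECWmax and TXOP into the EDCA
+                        * parameter dword for the selected access category.
+                        */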
+                       u4b_ac_param = (u32) mac->ac[e_aci].aifs;
+                       u4b_ac_param |= (u32) ((cw_min & 0xF) <<
+                                        AC_PARAM_ECW_MIN_OFFSET);
+                       u4b_ac_param |= (u32) ((cw_max & 0xF) <<
+                                        AC_PARAM_ECW_MAX_OFFSET);
+                       u4b_ac_param |= (u32) tx_op << AC_PARAM_TXOP_OFFSET;
+                       RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+                                ("queue:%x, ac_param:%x\n", e_aci,
+                                 u4b_ac_param));
+                       switch (e_aci) {
+                       case AC1_BK:
+                               rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM,
+                                               u4b_ac_param);
+                               break;
+                       case AC0_BE:
+                               rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
+                                               u4b_ac_param);
+                               break;
+                       case AC2_VI:
+                               rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM,
+                                               u4b_ac_param);
+                               break;
+                       case AC3_VO:
+                               rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM,
+                                               u4b_ac_param);
+                               break;
+                       default:
+                               RT_ASSERT(false, ("invalid aci: %d!\n",
+                                         e_aci));
+                               break;
+                       }
+                       if (rtlusb->acm_method != eAcmWay2_SW)
+                               rtlpriv->cfg->ops->set_hw_reg(hw,
+                                        HW_VAR_ACM_CTRL, (u8 *)(&e_aci));
+                       break;
+               }
+       case HW_VAR_ACM_CTRL:{
+                       u8 e_aci = *((u8 *) val);
+                       union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)
+                                                       (&(mac->ac[0].aifs));
+                       u8 acm = p_aci_aifsn->f.acm;
+                       u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
+
+                       acm_ctrl |= (rtlusb->acm_method == eAcmWay2_SW) ?
+                                   0x0 : 0x1;
+                       if (acm) {
+                               switch (e_aci) {
+                               case AC0_BE:
+                                       acm_ctrl |= AcmHw_BeqEn;
+                                       break;
+                               case AC2_VI:
+                                       acm_ctrl |= AcmHw_ViqEn;
+                                       break;
+                               case AC3_VO:
+                                       acm_ctrl |= AcmHw_VoqEn;
+                                       break;
+                               default:
+                                       RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                                                ("HW_VAR_ACM_CTRL acm set "
+                                                 "failed: eACI is %d\n",
+                                                 e_aci));
+                                       break;
+                               }
+                       } else {
+                               switch (e_aci) {
+                               case AC0_BE:
+                                       acm_ctrl &= (~AcmHw_BeqEn);
+                                       break;
+                               case AC2_VI:
+                                       acm_ctrl &= (~AcmHw_ViqEn);
+                                       break;
+                               case AC3_VO:
+                                       acm_ctrl &= (~AcmHw_VoqEn);
+                                       break;
+                               default:
+                                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                                ("switch case not process\n"));
+                                       break;
+                               }
+                       }
+                       RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
+                                ("[HW_VAR_ACM_CTRL] Write 0x%X\n", acm_ctrl));
+                       rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
+                       break;
+               }
+       case HW_VAR_RCR:{
+                       rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]);
+                       mac->rx_conf = ((u32 *) (val))[0];
+                       RT_TRACE(rtlpriv, COMP_RECV, DBG_DMESG,
+                                ("### Set RCR(0x%08x) ###\n", mac->rx_conf));
+                       break;
+               }
+       case HW_VAR_RETRY_LIMIT:{
+                       u8 retry_limit = ((u8 *) (val))[0];
+
+                       rtl_write_word(rtlpriv, REG_RL,
+                                      retry_limit << RETRY_LIMIT_SHORT_SHIFT |
+                                      retry_limit << RETRY_LIMIT_LONG_SHIFT);
+                       RT_TRACE(rtlpriv, COMP_MLME, DBG_DMESG, ("Set HW_VAR_R"
+                                "ETRY_LIMIT(0x%08x)\n", retry_limit));
+                       break;
+               }
+       case HW_VAR_DUAL_TSF_RST:
+               rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
+               break;
+       case HW_VAR_EFUSE_BYTES:
+               rtlefuse->efuse_usedbytes = *((u16 *) val);
+               break;
+       case HW_VAR_EFUSE_USAGE:
+               rtlefuse->efuse_usedpercentage = *((u8 *) val);
+               break;
+       case HW_VAR_IO_CMD:
+               rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val));
+               break;
+       case HW_VAR_WPA_CONFIG:
+               rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+               break;
+       case HW_VAR_SET_RPWM:{
+                       u8 rpwm_val = rtl_read_byte(rtlpriv, REG_USB_HRPWM);
+
+                       if (rpwm_val & BIT(7))
+                               rtl_write_byte(rtlpriv, REG_USB_HRPWM,
+                                              (*(u8 *)val));
+                       else
+                               rtl_write_byte(rtlpriv, REG_USB_HRPWM,
+                                              ((*(u8 *)val) | BIT(7)));
+                       break;
+               }
+       case HW_VAR_H2C_FW_PWRMODE:{
+                       u8 psmode = (*(u8 *) val);
+
+                       if ((psmode != FW_PS_ACTIVE_MODE) &&
+                          (!IS_92C_SERIAL(rtlhal->version)))
+                               rtl92c_dm_rf_saving(hw, true);
+                       rtl92c_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
+                       break;
+               }
+       case HW_VAR_FW_PSMODE_STATUS:
+               ppsc->fw_current_inpsmode = *((bool *) val);
+               break;
+       case HW_VAR_H2C_FW_JOINBSSRPT:{
+                       u8 mstatus = (*(u8 *) val);
+                       u8 tmp_reg422;
+                       bool recover = false;
+
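+                       /* On association, temporarily adjust the beacon
+                        * control bits so the reserved page packet can be
+                        * downloaded to the firmware, then restore them.
+                        */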
+                       if (mstatus == RT_MEDIA_CONNECT) {
+                               rtlpriv->cfg->ops->set_hw_reg(hw,
+                                                        HW_VAR_AID, NULL);
+                               rtl_write_byte(rtlpriv, REG_CR + 1, 0x03);
+                               _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(3));
+                               _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
+                               tmp_reg422 = rtl_read_byte(rtlpriv,
+                                                       REG_FWHW_TXQ_CTRL + 2);
+                               if (tmp_reg422 & BIT(6))
+                                       recover = true;
+                               rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+                                              tmp_reg422 & (~BIT(6)));
+                               rtl92c_set_fw_rsvdpagepkt(hw, 0);
+                               _rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0);
+                               _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
+                               if (recover)
+                                       rtl_write_byte(rtlpriv,
+                                                REG_FWHW_TXQ_CTRL + 2,
+                                               tmp_reg422 | BIT(6));
+                               rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
+                       }
+                       rtl92c_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+                       break;
+               }
+       case HW_VAR_AID:{
+                       u16 u2btmp;
+
+                       u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
+                       u2btmp &= 0xC000;
+                       rtl_write_word(rtlpriv, REG_BCN_PSR_RPT,
+                                      (u2btmp | mac->assoc_id));
+                       break;
+               }
+       case HW_VAR_CORRECT_TSF:{
+                       u8 btype_ibss = ((u8 *) (val))[0];
+
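+                       /* Stop beacon TX (IBSS only), load the 64-bit TSF
+                        * into REG_TSFTR low/high, then resume.
+                        */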
+                       if (btype_ibss)
+                               _rtl92cu_stop_tx_beacon(hw);
+                       _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(3));
+                       rtl_write_dword(rtlpriv, REG_TSFTR, (u32)(mac->tsf &
+                                       0xffffffff));
+                       rtl_write_dword(rtlpriv, REG_TSFTR + 4,
+                                       (u32)((mac->tsf >> 32) & 0xffffffff));
+                       _rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0);
+                       if (btype_ibss)
+                               _rtl92cu_resume_tx_beacon(hw);
+                       break;
+               }
+       case HW_VAR_MGT_FILTER:
+               rtl_write_word(rtlpriv, REG_RXFLTMAP0, *(u16 *)val);
+               break;
+       case HW_VAR_CTRL_FILTER:
+               rtl_write_word(rtlpriv, REG_RXFLTMAP1, *(u16 *)val);
+               break;
+       case HW_VAR_DATA_FILTER:
+               rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *)val);
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case "
+                                                       "not process\n"));
+               break;
+       }
+}
+
+void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       u32 ratr_value = (u32) mac->basic_rates;
+       u8 *mcsrate = mac->mcs;
+       u8 ratr_index = 0;
+       u8 nmode = mac->ht_enable;
+       u8 mimo_ps = 1;
+       u16 shortgi_rate = 0;
+       u32 tmp_ratr_value = 0;
+       u8 curtxbw_40mhz = mac->bw_40;
+       u8 curshortgi_40mhz = mac->sgi_40;
+       u8 curshortgi_20mhz = mac->sgi_20;
+       enum wireless_mode wirelessmode = mac->mode;
+
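+       /* Merge the legacy basic rates with the MCS rate map, then trim the
+        * result according to the current wireless mode and RF type.
+        */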
+       ratr_value |= ((*(u16 *) (mcsrate))) << 12;
+       switch (wirelessmode) {
+       case WIRELESS_MODE_B:
+               if (ratr_value & 0x0000000c)
+                       ratr_value &= 0x0000000d;
+               else
+                       ratr_value &= 0x0000000f;
+               break;
+       case WIRELESS_MODE_G:
+               ratr_value &= 0x00000FF5;
+               break;
+       case WIRELESS_MODE_N_24G:
+       case WIRELESS_MODE_N_5G:
+               nmode = 1;
+               if (mimo_ps == 0) {
+                       ratr_value &= 0x0007F005;
+               } else {
+                       u32 ratr_mask;
+
+                       if (get_rf_type(rtlphy) == RF_1T2R ||
+                           get_rf_type(rtlphy) == RF_1T1R)
+                               ratr_mask = 0x000ff005;
+                       else
+                               ratr_mask = 0x0f0ff005;
+                       if (curtxbw_40mhz)
+                               ratr_mask |= 0x00000010;
+                       ratr_value &= ratr_mask;
+               }
+               break;
+       default:
+               if (rtlphy->rf_type == RF_1T2R)
+                       ratr_value &= 0x000ff0ff;
+               else
+                       ratr_value &= 0x0f0ff0ff;
+               break;
+       }
+       ratr_value &= 0x0FFFFFFF;
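+       /* Set the short-GI bit when short GI applies to the current
+        * bandwidth (40 MHz SGI on a 40 MHz channel, 20 MHz SGI otherwise).
+        */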
+       if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) ||
+           (!curtxbw_40mhz && curshortgi_20mhz))) {
+               ratr_value |= 0x10000000;
+               tmp_ratr_value = (ratr_value >> 12);
+               for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
+                       if ((1 << shortgi_rate) & tmp_ratr_value)
+                               break;
+               }
+               shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
+                              (shortgi_rate << 4) | (shortgi_rate);
+       }
+       rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
+       RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("%x\n", rtl_read_dword(rtlpriv,
+                REG_ARFR0)));
+}
+
+void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       u32 ratr_bitmap = (u32) mac->basic_rates;
+       u8 *p_mcsrate = mac->mcs;
+       u8 ratr_index = 0;
+       u8 curtxbw_40mhz = mac->bw_40;
+       u8 curshortgi_40mhz = mac->sgi_40;
+       u8 curshortgi_20mhz = mac->sgi_20;
+       enum wireless_mode wirelessmode = mac->mode;
+       bool shortgi = false;
+       u8 rate_mask[5];
+       u8 macid = 0;
+       u8 mimops = 1;
+
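+       /* Build the rate bitmap from the basic rates and both MCS bytes,
+        * then trim it by wireless mode, RF type and RSSI level.
+        */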
+       ratr_bitmap |= (p_mcsrate[1] << 20) | (p_mcsrate[0] << 12);
+       switch (wirelessmode) {
+       case WIRELESS_MODE_B:
+               ratr_index = RATR_INX_WIRELESS_B;
+               if (ratr_bitmap & 0x0000000c)
+                       ratr_bitmap &= 0x0000000d;
+               else
+                       ratr_bitmap &= 0x0000000f;
+               break;
+       case WIRELESS_MODE_G:
+               ratr_index = RATR_INX_WIRELESS_GB;
+               if (rssi_level == 1)
+                       ratr_bitmap &= 0x00000f00;
+               else if (rssi_level == 2)
+                       ratr_bitmap &= 0x00000ff0;
+               else
+                       ratr_bitmap &= 0x00000ff5;
+               break;
+       case WIRELESS_MODE_A:
+               ratr_index = RATR_INX_WIRELESS_A;
+               ratr_bitmap &= 0x00000ff0;
+               break;
+       case WIRELESS_MODE_N_24G:
+       case WIRELESS_MODE_N_5G:
+               ratr_index = RATR_INX_WIRELESS_NGB;
+               if (mimops == 0) {
+                       if (rssi_level == 1)
+                               ratr_bitmap &= 0x00070000;
+                       else if (rssi_level == 2)
+                               ratr_bitmap &= 0x0007f000;
+                       else
+                               ratr_bitmap &= 0x0007f005;
+               } else {
+                       if (rtlphy->rf_type == RF_1T2R ||
+                           rtlphy->rf_type == RF_1T1R) {
+                               if (curtxbw_40mhz) {
+                                       if (rssi_level == 1)
+                                               ratr_bitmap &= 0x000f0000;
+                                       else if (rssi_level == 2)
+                                               ratr_bitmap &= 0x000ff000;
+                                       else
+                                               ratr_bitmap &= 0x000ff015;
+                               } else {
+                                       if (rssi_level == 1)
+                                               ratr_bitmap &= 0x000f0000;
+                                       else if (rssi_level == 2)
+                                               ratr_bitmap &= 0x000ff000;
+                                       else
+                                               ratr_bitmap &= 0x000ff005;
+                               }
+                       } else {
+                               if (curtxbw_40mhz) {
+                                       if (rssi_level == 1)
+                                               ratr_bitmap &= 0x0f0f0000;
+                                       else if (rssi_level == 2)
+                                               ratr_bitmap &= 0x0f0ff000;
+                                       else
+                                               ratr_bitmap &= 0x0f0ff015;
+                               } else {
+                                       if (rssi_level == 1)
+                                               ratr_bitmap &= 0x0f0f0000;
+                                       else if (rssi_level == 2)
+                                               ratr_bitmap &= 0x0f0ff000;
+                                       else
+                                               ratr_bitmap &= 0x0f0ff005;
+                               }
+                       }
+               }
+               if ((curtxbw_40mhz && curshortgi_40mhz) ||
+                   (!curtxbw_40mhz && curshortgi_20mhz)) {
+                       if (macid == 0)
+                               shortgi = true;
+                       else if (macid == 1)
+                               shortgi = false;
+               }
+               break;
+       default:
+               ratr_index = RATR_INX_WIRELESS_NGB;
+               if (rtlphy->rf_type == RF_1T2R)
+                       ratr_bitmap &= 0x000ff0ff;
+               else
+                       ratr_bitmap &= 0x0f0ff0ff;
+               break;
+       }
+       RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("ratr_bitmap :%x\n",
+                ratr_bitmap));
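+       /* H2C rate-mask payload: bytes 0-3 carry the rate bitmap with the
+        * RATR index in the top nibble; byte 4 carries the MAC ID, the
+        * short-GI flag (0x20) and bit 7 set.
+        */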
+       *(u32 *)&rate_mask = ((ratr_bitmap & 0x0fffffff) |
+                                     ratr_index << 28);
+       rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
+       RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("Rate_index:%x, "
+                                               "ratr_val:%x, %x:%x:%x:%x:%x\n",
+                                               ratr_index, ratr_bitmap,
+                                               rate_mask[0], rate_mask[1],
+                                               rate_mask[2], rate_mask[3],
+                                               rate_mask[4]));
+       rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
+}
+
+void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       u16 sifs_timer;
+
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
+                                     (u8 *)&mac->slot_time);
+       if (!mac->ht_enable)
+               sifs_timer = 0x0a0a;
+       else
+               sifs_timer = 0x0e0e;
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
+}
+
+bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
+       u8 u1tmp = 0;
+       bool actuallyset = false;
+       unsigned long flag = 0;
+       /* to do - usb autosuspend */
+       u8 usb_autosuspend = 0;
+
+       if (ppsc->swrf_processing)
+               return false;
+       spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+       if (ppsc->rfchange_inprogress) {
+               spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+               return false;
+       } else {
+               ppsc->rfchange_inprogress = true;
+               spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+       }
+       cur_rfstate = ppsc->rfpwr_state;
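+       /* Sample the requested RF state either from the power-down status
+        * bit (REG_HSISR BIT7) or from the GPIO input, depending on the
+        * power-down mode.
+        */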
+       if (usb_autosuspend) {
+               /* to do................... */
+       } else {
+               if (ppsc->pwrdown_mode) {
+                       u1tmp = rtl_read_byte(rtlpriv, REG_HSISR);
+                       e_rfpowerstate_toset = (u1tmp & BIT(7)) ?
+                                              ERFOFF : ERFON;
+                       RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
+                                ("pwrdown, 0x5c(BIT7)=%02x\n", u1tmp));
+               } else {
+                       rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG,
+                                      rtl_read_byte(rtlpriv,
+                                      REG_MAC_PINMUX_CFG) & ~(BIT(3)));
+                       u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
+                       e_rfpowerstate_toset  = (u1tmp & BIT(3)) ?
+                                                ERFON : ERFOFF;
+                       RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
+                               ("GPIO_IN=%02x\n", u1tmp));
+               }
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("N-SS RF =%x\n",
+                        e_rfpowerstate_toset));
+       }
+       if ((ppsc->hwradiooff) && (e_rfpowerstate_toset == ERFON)) {
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("GPIOChangeRF  - HW "
+                        "Radio ON, RF ON\n"));
+               ppsc->hwradiooff = false;
+               actuallyset = true;
+       } else if ((!ppsc->hwradiooff) && (e_rfpowerstate_toset  ==
+                   ERFOFF)) {
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("GPIOChangeRF  - HW"
+                        " Radio OFF\n"));
+               ppsc->hwradiooff = true;
+               actuallyset = true;
+       } else {
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+                        ("hwradiooff and e_rfpowerstate_toset do not match:"
+                        " hwradiooff %x, e_rfpowerstate_toset %x\n",
+                        ppsc->hwradiooff, e_rfpowerstate_toset));
+       }
+       if (actuallyset) {
+               if (e_rfpowerstate_toset == ERFON) {
+                       if ((ppsc->reg_rfps_level  & RT_RF_OFF_LEVL_ASPM) &&
+                            RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM))
+                               RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
+                       else if ((ppsc->reg_rfps_level  & RT_RF_OFF_LEVL_PCI_D3)
+                                && RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3))
+                               RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3);
+               }
+               spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+               ppsc->rfchange_inprogress = false;
+               spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+               /* For the power down module, we need to enable the register
+                * block control register at 0x1c, then set the power down
+                * control bits of register 0x04, BIT4 and BIT15, to 1.
+                */
+               if (ppsc->pwrdown_mode && e_rfpowerstate_toset == ERFOFF) {
+                       /* Enable register area 0x0-0xc. */
+                       rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0);
+                       if (IS_HARDWARE_TYPE_8723U(rtlhal)) {
+                               /*
+                                * Configure the HW PDn source for WiFi only;
+                                * the HW enters power-down mode once the PDn
+                                * sources of all functions are configured.
+                                */
+                               u1tmp = rtl_read_byte(rtlpriv,
+                                                     REG_MULTI_FUNC_CTRL);
+                               rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL,
+                                              (u1tmp|WL_HWPDN_EN));
+                       } else {
+                               rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812);
+                       }
+               }
+               if (e_rfpowerstate_toset == ERFOFF) {
+                       if (ppsc->reg_rfps_level  & RT_RF_OFF_LEVL_ASPM)
+                               RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
+                       else if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_PCI_D3)
+                               RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3);
+               }
+       } else if (e_rfpowerstate_toset == ERFOFF || cur_rfstate == ERFOFF) {
+               /* Enter D3 or ASPM after GPIO had been done. */
+               if (ppsc->reg_rfps_level  & RT_RF_OFF_LEVL_ASPM)
+                       RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
+               else if (ppsc->reg_rfps_level  & RT_RF_OFF_LEVL_PCI_D3)
+                       RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3);
+               spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+               ppsc->rfchange_inprogress = false;
+               spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+       } else {
+               spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+               ppsc->rfchange_inprogress = false;
+               spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+       }
+       *valid = 1;
+       return !ppsc->hwradiooff;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
new file mode 100644 (file)
index 0000000..62af555
--- /dev/null
@@ -0,0 +1,116 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_HW_H__
+#define __RTL92CU_HW_H__
+
+#define H2C_RA_MASK    6
+
+#define LLT_POLLING_LLT_THRESHOLD              20
+#define LLT_POLLING_READY_TIMEOUT_COUNT                100
+#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER                255
+
+#define RX_PAGE_SIZE_REG_VALUE                 PBP_128
+/* Note: The number of pages is divided equally among the queues
+ * other than the public queue. */
+#define TX_TOTAL_PAGE_NUMBER                   0xF8
+#define TX_PAGE_BOUNDARY                       (TX_TOTAL_PAGE_NUMBER + 1)
+
+
+#define CHIP_B_PAGE_NUM_PUBQ                   0xE7
+
+/* For the test chip setting,
+ * (HPQ + LPQ + PUBQ) shall equal TX_TOTAL_PAGE_NUMBER */
+#define CHIP_A_PAGE_NUM_PUBQ                   0x7E
+
+
+/* For Chip A Setting */
+#define WMM_CHIP_A_TX_TOTAL_PAGE_NUMBER                0xF5
+#define WMM_CHIP_A_TX_PAGE_BOUNDARY            \
+       (WMM_CHIP_A_TX_TOTAL_PAGE_NUMBER + 1) /* F6 */
+
+#define WMM_CHIP_A_PAGE_NUM_PUBQ               0xA3
+#define WMM_CHIP_A_PAGE_NUM_HPQ                        0x29
+#define WMM_CHIP_A_PAGE_NUM_LPQ                        0x29
+
+/* Note: For Chip B setting, modify later */
+#define WMM_CHIP_B_TX_TOTAL_PAGE_NUMBER                0xF5
+#define WMM_CHIP_B_TX_PAGE_BOUNDARY            \
+       (WMM_CHIP_B_TX_TOTAL_PAGE_NUMBER + 1) /* F6 */
+
+#define WMM_CHIP_B_PAGE_NUM_PUBQ               0xB0
+#define WMM_CHIP_B_PAGE_NUM_HPQ                        0x29
+#define WMM_CHIP_B_PAGE_NUM_LPQ                        0x1C
+#define WMM_CHIP_B_PAGE_NUM_NPQ                        0x1C
+
+#define BOARD_TYPE_NORMAL_MASK                 0xE0
+#define BOARD_TYPE_TEST_MASK                   0x0F
+
+/* should be renamed and moved to another file */
+enum _BOARD_TYPE_8192CUSB {
+       BOARD_USB_DONGLE                = 0,    /* USB dongle */
+       BOARD_USB_High_PA               = 1,    /* USB dongle - high power PA */
+       BOARD_MINICARD                  = 2,    /* Minicard */
+       BOARD_USB_SOLO                  = 3,    /* USB solo-Slim module */
+       BOARD_USB_COMBO                 = 4,    /* USB Combo-Slim module */
+};
+
+#define IS_HIGHT_PA(boardtype)         \
+       ((boardtype) == BOARD_USB_High_PA)
+
+#define RTL92C_DRIVER_INFO_SIZE                                4
+void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw);
+void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw);
+int rtl92cu_hw_init(struct ieee80211_hw *hw);
+void rtl92cu_card_disable(struct ieee80211_hw *hw);
+int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
+void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw);
+void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw);
+void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw,
+                                  u32 add_msr, u32 rm_msr);
+void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw);
+void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level);
+
+void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw);
+bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
+void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
+u8 _rtl92c_get_chnl_group(u8 chnl);
+int rtl92c_download_fw(struct ieee80211_hw *hw);
+void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
+void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished);
+void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
+                        u8 element_id, u32 cmd_len, u8 *p_cmdbuffer);
+bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/rtlwifi/rtl8192cu/led.c
new file mode 100644 (file)
index 0000000..332c743
--- /dev/null
@@ -0,0 +1,142 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../usb.h"
+#include "reg.h"
+#include "led.h"
+
+static void _rtl92cu_init_led(struct ieee80211_hw *hw,
+                             struct rtl_led *pled, enum rtl_led_pin ledpin)
+{
+       pled->hw = hw;
+       pled->ledpin = ledpin;
+       pled->ledon = false;
+}
+
+static void _rtl92cu_deInit_led(struct rtl_led *pled)
+{
+}
+
+void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+       u8 ledcfg;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
+                ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+       ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+       switch (pled->ledpin) {
+       case LED_PIN_GPIO0:
+               break;
+       case LED_PIN_LED0:
+               rtl_write_byte(rtlpriv,
+                              REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5) | BIT(6));
+               break;
+       case LED_PIN_LED1:
+               rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5));
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("switch case not process\n"));
+               break;
+       }
+       pled->ledon = true;
+}
+
+void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
+       u8 ledcfg;
+
+       RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
+                ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+       ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+       switch (pled->ledpin) {
+       case LED_PIN_GPIO0:
+               break;
+       case LED_PIN_LED0:
+               ledcfg &= 0xf0;
+               if (usbpriv->ledctl.led_opendrain)
+                       rtl_write_byte(rtlpriv, REG_LEDCFG2,
+                                      (ledcfg | BIT(1) | BIT(5) | BIT(6)));
+               else
+                       rtl_write_byte(rtlpriv, REG_LEDCFG2,
+                                      (ledcfg | BIT(3) | BIT(5) | BIT(6)));
+               break;
+       case LED_PIN_LED1:
+               ledcfg &= 0x0f;
+               rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3)));
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("switch case not process\n"));
+               break;
+       }
+       pled->ledon = false;
+}
+
+void rtl92cu_init_sw_leds(struct ieee80211_hw *hw)
+{
+       struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
+       _rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led0), LED_PIN_LED0);
+       _rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led1), LED_PIN_LED1);
+}
+
+void rtl92cu_deinit_sw_leds(struct ieee80211_hw *hw)
+{
+       struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
+       _rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led0));
+       _rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led1));
+}
+
+static void _rtl92cu_sw_led_control(struct ieee80211_hw *hw,
+                                   enum led_ctl_mode ledaction)
+{
+}
+
+void rtl92cu_led_control(struct ieee80211_hw *hw,
+                       enum led_ctl_mode ledaction)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+       if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
+           (ledaction == LED_CTL_TX ||
+            ledaction == LED_CTL_RX ||
+            ledaction == LED_CTL_SITE_SURVEY ||
+            ledaction == LED_CTL_LINK ||
+            ledaction == LED_CTL_NO_LINK ||
+            ledaction == LED_CTL_START_TO_LINK ||
+            ledaction == LED_CTL_POWER_ON)) {
+               return;
+       }
+       RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, ("ledaction %d\n", ledaction));
+       _rtl92cu_sw_led_control(hw, ledaction);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.h b/drivers/net/wireless/rtlwifi/rtl8192cu/led.h
new file mode 100644 (file)
index 0000000..decaee4
--- /dev/null
@@ -0,0 +1,37 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_LED_H__
+#define __RTL92CU_LED_H__
+
+void rtl92cu_init_sw_leds(struct ieee80211_hw *hw);
+void rtl92cu_deinit_sw_leds(struct ieee80211_hw *hw);
+void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl92cu_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
new file mode 100644 (file)
index 0000000..f8514cb
--- /dev/null
@@ -0,0 +1,1144 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#include <linux/module.h>
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../usb.h"
+#include "../ps.h"
+#include "../cam.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+#include "mac.h"
+#include "trx.h"
+
+/* macros to shorten lines */
+
+#define LINK_Q ui_link_quality
+#define RX_EVM rx_evm_percentage
+#define RX_SIGQ        rx_mimo_signalquality
+
+
+void rtl92c_read_chip_version(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       enum version_8192c chip_version = VERSION_UNKNOWN;
+       u32 value32;
+
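+       /* Decode the chip version from REG_SYS_CFG: test vs. normal chip,
+        * 88C vs. 92C, vendor, BT function and cut revision.
+        */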
+       value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
+       if (value32 & TRP_VAUX_EN) {
+               chip_version = (value32 & TYPE_ID) ? VERSION_TEST_CHIP_92C :
+                              VERSION_TEST_CHIP_88C;
+       } else {
+               /* Normal mass production chip. */
+               chip_version = NORMAL_CHIP;
+               chip_version |= ((value32 & TYPE_ID) ? CHIP_92C : 0);
+               chip_version |= ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0);
+               /* RTL8723 with BT function. */
+               chip_version |= ((value32 & BT_FUNC) ? CHIP_8723 : 0);
+               if (IS_VENDOR_UMC(chip_version))
+                       chip_version |= ((value32 & CHIP_VER_RTL_MASK) ?
+                                        CHIP_VENDOR_UMC_B_CUT : 0);
+               if (IS_92C_SERIAL(chip_version)) {
+                       value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM);
+                       chip_version |= ((CHIP_BONDING_IDENTIFIER(value32) ==
+                                CHIP_BONDING_92C_1T2R) ? CHIP_92C_1T2R : 0);
+               } else if (IS_8723_SERIES(chip_version)) {
+                       value32 = rtl_read_dword(rtlpriv, REG_GPIO_OUTSTS);
+                       chip_version |= ((value32 & RF_RL_ID) ?
+                                         CHIP_8723_DRV_REV : 0);
+               }
+       }
+       rtlhal->version  = (enum version_8192c)chip_version;
+       switch (rtlhal->version) {
+       case VERSION_NORMAL_TSMC_CHIP_92C_1T2R:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                ("Chip Version ID: VERSION_B_CHIP_92C.\n"));
+               break;
+       case VERSION_NORMAL_TSMC_CHIP_92C:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_92C.\n"));
+               break;
+       case VERSION_NORMAL_TSMC_CHIP_88C:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_88C.\n"));
+               break;
+       case VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_NORMAL_UMC_CHIP_"
+                       "92C_1T2R_A_CUT.\n"));
+               break;
+       case VERSION_NORMAL_UMC_CHIP_92C_A_CUT:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_NORMAL_UMC_CHIP_"
+                       "92C_A_CUT.\n"));
+               break;
+       case VERSION_NORMAL_UMC_CHIP_88C_A_CUT:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
+                       "_88C_A_CUT.\n"));
+               break;
+       case VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
+                       "_92C_1T2R_B_CUT.\n"));
+               break;
+       case VERSION_NORMAL_UMC_CHIP_92C_B_CUT:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
+                       "_92C_B_CUT.\n"));
+               break;
+       case VERSION_NORMAL_UMC_CHIP_88C_B_CUT:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
+                       "_88C_B_CUT.\n"));
+               break;
+       case VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_NORMA_UMC_CHIP"
+                       "_8723_1T1R_A_CUT.\n"));
+               break;
+       case VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_NORMA_UMC_CHIP"
+                       "_8723_1T1R_B_CUT.\n"));
+               break;
+       case VERSION_TEST_CHIP_92C:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_TEST_CHIP_92C.\n"));
+               break;
+       case VERSION_TEST_CHIP_88C:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: VERSION_TEST_CHIP_88C.\n"));
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       ("Chip Version ID: Unknown.\n"));
+               break;
+       }
+       if (IS_92C_SERIAL(rtlhal->version))
+               rtlphy->rf_type =
+                        (IS_92C_1T2R(rtlhal->version)) ? RF_1T2R : RF_2T2R;
+       else
+               rtlphy->rf_type = RF_1T1R;
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                ("Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
+                 "RF_2T2R" : "RF_1T1R"));
+       if (get_rf_type(rtlphy) == RF_1T1R)
+               rtlpriv->dm.rfpath_rxenable[0] = true;
+       else
+               rtlpriv->dm.rfpath_rxenable[0] =
+                   rtlpriv->dm.rfpath_rxenable[1] = true;
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n",
+                                               rtlhal->version));
+}
+
+/**
+ * rtl92c_llt_write - LLT table write access
+ * @hw: ieee80211 hardware context
+ * @address: LLT logical address.
+ * @data: LLT data content
+ *
+ * Realtek hardware access function.
+ *
+ */
+bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       bool status = true;
+       long count = 0;
+       u32 value = _LLT_INIT_ADDR(address) |
+           _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
+
+       rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
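+       /* Poll until the LLT write completes or POLLING_LLT_THRESHOLD
+        * iterations have elapsed.
+        */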
+       do {
+               value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
+               if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
+                       break;
+               if (count > POLLING_LLT_THRESHOLD) {
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                ("Polling for LLT write completion failed at"
+                                " address %d! _LLT_OP_VALUE(%x)\n",
+                                address, _LLT_OP_VALUE(value)));
+                       status = false;
+                       break;
+               }
+       } while (++count);
+       return status;
+}
+
+/**
+ * rtl92c_init_llt_table - Init LLT table
+ * @hw: ieee80211 hardware context
+ * @boundary: TX page boundary; entries from this index onward form the
+ *           ring buffer area
+ *
+ * Realtek hardware access function.
+ *
+ */
+bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary)
+{
+       bool rst = true;
+       u32     i;
+
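+       /* Chain entries 0 .. boundary-2 of the TX packet buffer and
+        * terminate the list at entry boundary-1.
+        */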
+       for (i = 0; i < (boundary - 1); i++) {
+               rst = rtl92c_llt_write(hw, i, i + 1);
+               if (!rst) {
+                       printk(KERN_ERR "===> %s #1 fail\n", __func__);
+                       return rst;
+               }
+       }
+       /* end of list */
+       rst = rtl92c_llt_write(hw, (boundary - 1), 0xFF);
+       if (!rst) {
+               printk(KERN_ERR "===> %s #2 fail\n", __func__);
+               return rst;
+       }
+       /* Chain the remaining pages into a ring buffer.
+        * It is used as the beacon buffer when this MAC is configured for
+        * dual-MAC transfer, and as the local loopback buffer otherwise.
+        */
+       for (i = boundary; i < LLT_LAST_ENTRY_OF_TX_PKT_BUFFER; i++) {
+               rst = rtl92c_llt_write(hw, i, (i + 1));
+               if (!rst) {
+                       printk(KERN_ERR "===> %s #3 fail\n", __func__);
+                       return rst;
+               }
+       }
+       /* Let last entry point to the start entry of ring buffer */
+       rst = rtl92c_llt_write(hw, LLT_LAST_ENTRY_OF_TX_PKT_BUFFER, boundary);
+       if (!rst) {
+               printk(KERN_ERR "===> %s #4 fail\n", __func__);
+               return rst;
+       }
+       return rst;
+}
+void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
+                    u8 *p_macaddr, bool is_group, u8 enc_algo,
+                    bool is_wepkey, bool clear_all)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u8 *macaddr = p_macaddr;
+       u32 entry_id = 0;
+       bool is_pairwise = false;
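+       /* Fixed CAM addresses: one placeholder per default/WEP key index,
+        * plus the broadcast address used for group keys.
+        */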
+       static u8 cam_const_addr[4][6] = {
+               {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+               {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+               {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
+               {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
+       };
+       static u8 cam_const_broad[] = {
+               0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+       };
+
+       if (clear_all) {
+               u8 idx = 0;
+               u8 cam_offset = 0;
+               u8 clear_number = 5;
+
+               RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("clear_all\n"));
+               for (idx = 0; idx < clear_number; idx++) {
+                       rtl_cam_mark_invalid(hw, cam_offset + idx);
+                       rtl_cam_empty_entry(hw, cam_offset + idx);
+                       if (idx < 5) {
+                               memset(rtlpriv->sec.key_buf[idx], 0,
+                                      MAX_KEY_LEN);
+                               rtlpriv->sec.key_len[idx] = 0;
+                       }
+               }
+       } else {
+               switch (enc_algo) {
+               case WEP40_ENCRYPTION:
+                       enc_algo = CAM_WEP40;
+                       break;
+               case WEP104_ENCRYPTION:
+                       enc_algo = CAM_WEP104;
+                       break;
+               case TKIP_ENCRYPTION:
+                       enc_algo = CAM_TKIP;
+                       break;
+               case AESCCMP_ENCRYPTION:
+                       enc_algo = CAM_AES;
+                       break;
+               default:
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                               ("illegal switch case\n"));
+                       enc_algo = CAM_TKIP;
+                       break;
+               }
+               if (is_wepkey || rtlpriv->sec.use_defaultkey) {
+                       macaddr = cam_const_addr[key_index];
+                       entry_id = key_index;
+               } else {
+                       if (is_group) {
+                               macaddr = cam_const_broad;
+                               entry_id = key_index;
+                       } else {
+                               key_index = PAIRWISE_KEYIDX;
+                               entry_id = CAM_PAIRWISE_KEY_POSITION;
+                               is_pairwise = true;
+                       }
+               }
+               if (rtlpriv->sec.key_len[key_index] == 0) {
+                       RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+                                ("delete one entry\n"));
+                       rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
+               } else {
+                       RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+                                ("The inserted KEY length is %d\n",
+                                 rtlpriv->sec.key_len[PAIRWISE_KEYIDX]));
+                       RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+                                ("The inserted KEY is %x %x\n",
+                                 rtlpriv->sec.key_buf[0][0],
+                                 rtlpriv->sec.key_buf[0][1]));
+                       RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+                                ("add one entry\n"));
+                       if (is_pairwise) {
+                               RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
+                                             "Pairwise Key content :",
+                                             rtlpriv->sec.pairwise_key,
+                                             rtlpriv->sec.
+                                             key_len[PAIRWISE_KEYIDX]);
+                               RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+                                        ("set Pairwise key\n"));
+
+                               rtl_cam_add_one_entry(hw, macaddr, key_index,
+                                               entry_id, enc_algo,
+                                               CAM_CONFIG_NO_USEDK,
+                                               rtlpriv->sec.
+                                               key_buf[key_index]);
+                       } else {
+                               RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+                                        ("set group key\n"));
+                               if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+                                       rtl_cam_add_one_entry(hw,
+                                               rtlefuse->dev_addr,
+                                               PAIRWISE_KEYIDX,
+                                               CAM_PAIRWISE_KEY_POSITION,
+                                               enc_algo,
+                                               CAM_CONFIG_NO_USEDK,
+                                               rtlpriv->sec.key_buf
+                                               [entry_id]);
+                               }
+                               rtl_cam_add_one_entry(hw, macaddr, key_index,
+                                               entry_id, enc_algo,
+                                               CAM_CONFIG_NO_USEDK,
+                                               rtlpriv->sec.key_buf[entry_id]);
+                       }
+               }
+       }
+}
+
+u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       return rtl_read_dword(rtlpriv, REG_TXDMA_STATUS);
+}
+
+void rtl92c_enable_interrupt(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       if (IS_HARDWARE_TYPE_8192CE(rtlhal)) {
+               rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] &
+                               0xFFFFFFFF);
+               rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] &
+                               0xFFFFFFFF);
+               rtlpci->irq_enabled = true;
+       } else {
+               rtl_write_dword(rtlpriv, REG_HIMR, rtlusb->irq_mask[0] &
+                               0xFFFFFFFF);
+               rtl_write_dword(rtlpriv, REG_HIMRE, rtlusb->irq_mask[1] &
+                               0xFFFFFFFF);
+               rtlusb->irq_enabled = true;
+       }
+}
+
+void rtl92c_init_interrupt(struct ieee80211_hw *hw)
+{
+       rtl92c_enable_interrupt(hw);
+}
+
+void rtl92c_disable_interrupt(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED);
+       rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED);
+       if (IS_HARDWARE_TYPE_8192CE(rtlhal))
+               rtlpci->irq_enabled = false;
+       else if (IS_HARDWARE_TYPE_8192CU(rtlhal))
+               rtlusb->irq_enabled = false;
+}
+
+void rtl92c_set_qos(struct ieee80211_hw *hw, int aci)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       u32 u4b_ac_param;
+
+       rtl92c_dm_init_edca_turbo(hw);
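+       /* Pack AIFS, ECWmin, ECWmax and TXOP into the per-AC EDCA
+        * parameter register layout.
+        */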
+       u4b_ac_param = (u32) mac->ac[aci].aifs;
+       u4b_ac_param |=
+           ((u32) le16_to_cpu(mac->ac[aci].cw_min) & 0xF) <<
+           AC_PARAM_ECW_MIN_OFFSET;
+       u4b_ac_param |=
+           ((u32) le16_to_cpu(mac->ac[aci].cw_max) & 0xF) <<
+           AC_PARAM_ECW_MAX_OFFSET;
+       u4b_ac_param |= (u32) le16_to_cpu(mac->ac[aci].tx_op) <<
+                        AC_PARAM_TXOP_OFFSET;
+       RT_TRACE(rtlpriv, COMP_QOS, DBG_LOUD,
+                ("queue:%x, ac_param:%x\n", aci, u4b_ac_param));
+       switch (aci) {
+       case AC1_BK:
+               rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param);
+               break;
+       case AC0_BE:
+               rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param);
+               break;
+       case AC2_VI:
+               rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, u4b_ac_param);
+               break;
+       case AC3_VO:
+               rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, u4b_ac_param);
+               break;
+       default:
+               RT_ASSERT(false, ("invalid aci: %d !\n", aci));
+               break;
+       }
+}
+
+/*-------------------------------------------------------------------------
+ * HW MAC Address
+ *-------------------------------------------------------------------------*/
+void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr)
+{
+       u32 i;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       for (i = 0 ; i < ETH_ALEN ; i++)
+               rtl_write_byte(rtlpriv, (REG_MACID + i), *(addr+i));
+
+       RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, ("MAC Address: %02X-%02X-%02X-"
+               "%02X-%02X-%02X\n",
+               rtl_read_byte(rtlpriv, REG_MACID),
+               rtl_read_byte(rtlpriv, REG_MACID+1),
+               rtl_read_byte(rtlpriv, REG_MACID+2),
+               rtl_read_byte(rtlpriv, REG_MACID+3),
+               rtl_read_byte(rtlpriv, REG_MACID+4),
+               rtl_read_byte(rtlpriv, REG_MACID+5)));
+}
+
+void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, size);
+}
+
+int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
+{
+       u8 value;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       switch (type) {
+       case NL80211_IFTYPE_UNSPECIFIED:
+               value = NT_NO_LINK;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                       ("Set Network type to NO LINK!\n"));
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               value = NT_LINK_AD_HOC;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                       ("Set Network type to Ad Hoc!\n"));
+               break;
+       case NL80211_IFTYPE_STATION:
+               value = NT_LINK_AP;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                       ("Set Network type to STA!\n"));
+               break;
+       case NL80211_IFTYPE_AP:
+               value = NT_AS_AP;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                       ("Set Network type to AP!\n"));
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                       ("Network type %d not supported!\n", type));
+               return -EOPNOTSUPP;
+       }
+       rtl_write_byte(rtlpriv, (REG_CR + 2), value);
+       return 0;
+}
+
+void rtl92c_init_network_type(struct ieee80211_hw *hw)
+{
+       rtl92c_set_network_type(hw, NL80211_IFTYPE_UNSPECIFIED);
+}
+
+void rtl92c_init_adaptive_ctrl(struct ieee80211_hw *hw)
+{
+       u16     value16;
+       u32     value32;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       /* Response Rate Set */
+       value32 = rtl_read_dword(rtlpriv, REG_RRSR);
+       value32 &= ~RATE_BITMAP_ALL;
+       value32 |= RATE_RRSR_CCK_ONLY_1M;
+       rtl_write_dword(rtlpriv, REG_RRSR, value32);
+       /* SIFS (used in NAV) */
+       value16 = _SPEC_SIFS_CCK(0x10) | _SPEC_SIFS_OFDM(0x10);
+       rtl_write_word(rtlpriv,  REG_SPEC_SIFS, value16);
+       /* Retry Limit */
+       value16 = _LRL(0x30) | _SRL(0x30);
+       rtl_write_dword(rtlpriv,  REG_RL, value16);
+}
+
+void rtl92c_init_rate_fallback(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       /* Set Data Auto Rate Fallback Retry Count register. */
+       rtl_write_dword(rtlpriv,  REG_DARFRC, 0x00000000);
+       rtl_write_dword(rtlpriv,  REG_DARFRC+4, 0x10080404);
+       rtl_write_dword(rtlpriv,  REG_RARFRC, 0x04030201);
+       rtl_write_dword(rtlpriv,  REG_RARFRC+4, 0x08070605);
+}
+
+static void rtl92c_set_cck_sifs(struct ieee80211_hw *hw, u8 trx_sifs,
+                               u8 ctx_sifs)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_byte(rtlpriv, REG_SIFS_CCK, trx_sifs);
+       rtl_write_byte(rtlpriv, (REG_SIFS_CCK + 1), ctx_sifs);
+}
+
+static void rtl92c_set_ofdm_sifs(struct ieee80211_hw *hw, u8 trx_sifs,
+                                u8 ctx_sifs)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_byte(rtlpriv, REG_SIFS_OFDM, trx_sifs);
+       rtl_write_byte(rtlpriv, (REG_SIFS_OFDM + 1), ctx_sifs);
+}
+
+void rtl92c_init_edca_param(struct ieee80211_hw *hw,
+                           u16 queue, u16 txop, u8 cw_min, u8 cw_max, u8 aifs)
+{
+       /* sequence: VO, VI, BE, BK ==> the same as 92C hardware design.
+        * reference: enum nl80211_txq_q or the ieee80211_set_wmm_default function.
+        */
+       u32 value;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
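+       /* Register bit layout: AIFS[7:0], ECWmin[11:8], ECWmax[15:12],
+        * TXOP[31:16].
+        */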
+       value = (u32)aifs;
+       value |= ((u32)cw_min & 0xF) << 8;
+       value |= ((u32)cw_max & 0xF) << 12;
+       value |= (u32)txop << 16;
+       /* 92C hardware register sequence is the same as queue number. */
+       rtl_write_dword(rtlpriv, (REG_EDCA_VO_PARAM + (queue * 4)), value);
+}
+
+void rtl92c_init_edca(struct ieee80211_hw *hw)
+{
+       u16 value16;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       /* disable EDCCA count down, to reduce collision and retry */
+       value16 = rtl_read_word(rtlpriv, REG_RD_CTRL);
+       value16 |= DIS_EDCA_CNT_DWN;
+       rtl_write_word(rtlpriv, REG_RD_CTRL, value16);
+       /* Update SIFS timing (the vendor driver uses
+        * pHalData->SifsTime = 0x0e0e0a0a). */
+       rtl92c_set_cck_sifs(hw, 0xa, 0xa);
+       rtl92c_set_ofdm_sifs(hw, 0xe, 0xe);
+       /* Set CCK SIFS to 10us and OFDM SIFS to 16us. */
+       rtl_write_word(rtlpriv, REG_SIFS_CCK, 0x0a0a);
+       rtl_write_word(rtlpriv, REG_SIFS_OFDM, 0x1010);
+       rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x0204);
+       rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0x014004);
+       /* TXOP */
+       rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, 0x005EA42B);
+       rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0x0000A44F);
+       rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x005EA324);
+       rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x002FA226);
+       /* PIFS */
+       rtl_write_byte(rtlpriv, REG_PIFS, 0x1C);
+       /* AGGR BREAK TIME Register */
+       rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
+       rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0040);
+       rtl_write_byte(rtlpriv, REG_BCNDMATIM, 0x02);
+       rtl_write_byte(rtlpriv, REG_ATIMWND, 0x02);
+}
+
+void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x99997631);
+       rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
+       /* Init the AMPDU aggregation number; tuned for TX throughput. */
+       rtl_write_word(rtlpriv, 0x4CA, 0x0708);
+}
+
+void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF);
+}
+
+void rtl92c_init_rdg_setting(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_byte(rtlpriv, REG_RD_CTRL, 0xFF);
+       rtl_write_word(rtlpriv, REG_RD_NAV_NXT, 0x200);
+       rtl_write_byte(rtlpriv, REG_RD_RESP_PKT_TH, 0x05);
+}
+
+void rtl92c_init_retry_function(struct ieee80211_hw *hw)
+{
+       u8      value8;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       value8 = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL);
+       value8 |= EN_AMPDU_RTY_NEW;
+       rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL, value8);
+       /* Set ACK timeout */
+       rtl_write_byte(rtlpriv, REG_ACKTO, 0x40);
+}
+
+void rtl92c_init_beacon_parameters(struct ieee80211_hw *hw,
+                                  enum version_8192c version)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+       rtl_write_word(rtlpriv, REG_TBTT_PROHIBIT, 0x6404);/* ms */
+       rtl_write_byte(rtlpriv, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME);/*ms*/
+       rtl_write_byte(rtlpriv, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME);
+       if (IS_NORMAL_CHIP(rtlhal->version))
+               rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660F);
+       else
+               rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF);
+}
+
+void rtl92c_disable_fast_edca(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_word(rtlpriv, REG_FAST_EDCA_CTRL, 0);
+}
+
+void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 value = is2T ? MAX_MSS_DENSITY_2T : MAX_MSS_DENSITY_1T;
+
+       rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, value);
+}
+
+u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       return rtl_read_word(rtlpriv, REG_RXFLTMAP0);
+}
+
+void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_word(rtlpriv, REG_RXFLTMAP0, filter);
+}
+
+u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       return rtl_read_word(rtlpriv, REG_RXFLTMAP1);
+}
+
+void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_word(rtlpriv, REG_RXFLTMAP1, filter);
+}
+
+u16 rtl92c_get_data_filter(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       return rtl_read_word(rtlpriv,  REG_RXFLTMAP2);
+}
+
+void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_word(rtlpriv, REG_RXFLTMAP2, filter);
+}
+/*==============================================================*/
+
+static u8 _rtl92c_query_rxpwrpercentage(char antpower)
+{
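+       /* Map antenna power in dBm onto a 0-100 scale: out-of-range values
+        * report 0, anything at or above 0 dBm saturates at 100.
+        */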
+       if ((antpower <= -100) || (antpower >= 20))
+               return 0;
+       else if (antpower >= 0)
+               return 100;
+       else
+               return 100 + antpower;
+}
+
+static u8 _rtl92c_evm_db_to_percentage(char value)
+{
+       char ret_val;
+
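+       /* Convert a (negative) EVM reading in dB to a percentage: clamp to
+        * [-33, 0] and scale by 3, so -33 dB maps to roughly 100%.
+        */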
+       ret_val = value;
+       if (ret_val >= 0)
+               ret_val = 0;
+       if (ret_val <= -33)
+               ret_val = -33;
+       ret_val = 0 - ret_val;
+       ret_val *= 3;
+       if (ret_val == 99)
+               ret_val = 100;
+       return ret_val;
+}
+
+static long _rtl92c_translate_todbm(struct ieee80211_hw *hw,
+                                    u8 signal_strength_index)
+{
+       long signal_power;
+
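+       /* Translate the 0-100 UI signal-strength index back to an
+        * approximate dBm value (index / 2 - 95).
+        */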
+       signal_power = (long)((signal_strength_index + 1) >> 1);
+       signal_power -= 95;
+       return signal_power;
+}
+
+static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
+               long currsig)
+{
+       long retsig;
+
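+       /* Piecewise-linear remapping of the 0-100 signal value used for
+        * reporting: high values are compressed and very low values stretched.
+        */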
+       if (currsig >= 61 && currsig <= 100)
+               retsig = 90 + ((currsig - 60) / 4);
+       else if (currsig >= 41 && currsig <= 60)
+               retsig = 78 + ((currsig - 40) / 2);
+       else if (currsig >= 31 && currsig <= 40)
+               retsig = 66 + (currsig - 30);
+       else if (currsig >= 21 && currsig <= 30)
+               retsig = 54 + (currsig - 20);
+       else if (currsig >= 5 && currsig <= 20)
+               retsig = 42 + (((currsig - 5) * 2) / 3);
+       else if (currsig == 4)
+               retsig = 36;
+       else if (currsig == 3)
+               retsig = 27;
+       else if (currsig == 2)
+               retsig = 18;
+       else if (currsig == 1)
+               retsig = 9;
+       else
+               retsig = currsig;
+       return retsig;
+}
+
+static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
+                                     struct rtl_stats *pstats,
+                                     struct rx_desc_92c *pdesc,
+                                     struct rx_fwinfo_92c *p_drvinfo,
+                                     bool packet_match_bssid,
+                                     bool packet_toself,
+                                     bool packet_beacon)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct phy_sts_cck_8192s_t *cck_buf;
+       s8 rx_pwr_all = 0, rx_pwr[4];
+       u8 rf_rx_num = 0, evm, pwdb_all;
+       u8 i, max_spatial_stream;
+       u32 rssi, total_rssi = 0;
+       bool in_powersavemode = false;
+       bool is_cck_rate;
+
+       is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
+       pstats->packet_matchbssid = packet_match_bssid;
+       pstats->packet_toself = packet_toself;
+       pstats->is_cck = is_cck_rate;
+       pstats->packet_beacon = packet_beacon;
+       pstats->RX_SIGQ[0] = -1;
+       pstats->RX_SIGQ[1] = -1;
+       if (is_cck_rate) {
+               u8 report, cck_highpwr;
+               cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
+               if (!in_powersavemode)
+                       cck_highpwr = rtlphy->cck_high_power;
+               else
+                       cck_highpwr = false;
+               if (!cck_highpwr) {
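+                       /* The top two bits of the CCK AGC report select the
+                        * base power offset; bits [5:1] are subtracted as
+                        * attenuation within that range.
+                        */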
+                       u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+                       report = cck_buf->cck_agc_rpt & 0xc0;
+                       report = report >> 6;
+                       switch (report) {
+                       case 0x3:
+                               rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
+                               break;
+                       case 0x2:
+                               rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
+                               break;
+                       case 0x1:
+                               rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
+                               break;
+                       case 0x0:
+                               rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
+                               break;
+                       }
+               } else {
+                       u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+                       report = p_drvinfo->cfosho[0] & 0x60;
+                       report = report >> 5;
+                       switch (report) {
+                       case 0x3:
+                               rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f) << 1);
+                               break;
+                       case 0x2:
+                               rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f) << 1);
+                               break;
+                       case 0x1:
+                               rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f) << 1);
+                               break;
+                       case 0x0:
+                               rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f) << 1);
+                               break;
+                       }
+               }
+               pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
+               pstats->rx_pwdb_all = pwdb_all;
+               pstats->recvsignalpower = rx_pwr_all;
+               if (packet_match_bssid) {
+                       u8 sq;
+                       if (pstats->rx_pwdb_all > 40)
+                               sq = 100;
+                       else {
+                               sq = cck_buf->sq_rpt;
+                               if (sq > 64)
+                                       sq = 0;
+                               else if (sq < 20)
+                                       sq = 100;
+                               else
+                                       sq = ((64 - sq) * 100) / 44;
+                       }
+                       pstats->signalquality = sq;
+                       pstats->RX_SIGQ[0] = sq;
+                       pstats->RX_SIGQ[1] = -1;
+               }
+       } else {
+               rtlpriv->dm.rfpath_rxenable[0] =
+                   rtlpriv->dm.rfpath_rxenable[1] = true;
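+               /* OFDM/HT frames report per-path gain and SNR; average the
+                * RSSI over all enabled RX paths.
+                */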
+               for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) {
+                       if (rtlpriv->dm.rfpath_rxenable[i])
+                               rf_rx_num++;
+                       rx_pwr[i] =
+                           ((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110;
+                       rssi = _rtl92c_query_rxpwrpercentage(rx_pwr[i]);
+                       total_rssi += rssi;
+                       rtlpriv->stats.rx_snr_db[i] =
+                           (long)(p_drvinfo->rxsnr[i] / 2);
+
+                       if (packet_match_bssid)
+                               pstats->rx_mimo_signalstrength[i] = (u8) rssi;
+               }
+               rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
+               pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
+               pstats->rx_pwdb_all = pwdb_all;
+               pstats->rxpower = rx_pwr_all;
+               pstats->recvsignalpower = rx_pwr_all;
+               if (GET_RX_DESC_RX_MCS(pdesc) &&
+                   GET_RX_DESC_RX_MCS(pdesc) >= DESC92C_RATEMCS8 &&
+                   GET_RX_DESC_RX_MCS(pdesc) <= DESC92C_RATEMCS15)
+                       max_spatial_stream = 2;
+               else
+                       max_spatial_stream = 1;
+               for (i = 0; i < max_spatial_stream; i++) {
+                       evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]);
+                       if (packet_match_bssid) {
+                               if (i == 0)
+                                       pstats->signalquality =
+                                           (u8) (evm & 0xff);
+                               pstats->RX_SIGQ[i] =
+                                   (u8) (evm & 0xff);
+                       }
+               }
+       }
+       if (is_cck_rate)
+               pstats->signalstrength =
+                   (u8) (_rtl92c_signal_scale_mapping(hw, pwdb_all));
+       else if (rf_rx_num != 0)
+               pstats->signalstrength =
+                   (u8) (_rtl92c_signal_scale_mapping
+                         (hw, total_rssi / rf_rx_num));
+}
+
+static void _rtl92c_process_ui_rssi(struct ieee80211_hw *hw,
+               struct rtl_stats *pstats)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u8 rfpath;
+       u32 last_rssi, tmpval;
+
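+       /* Keep a sliding window of recent signal-strength samples, translate
+        * the running average to dBm for the UI, then smooth the per-path
+        * RSSI with RX_SMOOTH_FACTOR.
+        */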
+       if (pstats->packet_toself || pstats->packet_beacon) {
+               rtlpriv->stats.rssi_calculate_cnt++;
+               if (rtlpriv->stats.ui_rssi.total_num++ >=
+                   PHY_RSSI_SLID_WIN_MAX) {
+                       rtlpriv->stats.ui_rssi.total_num =
+                           PHY_RSSI_SLID_WIN_MAX;
+                       last_rssi =
+                           rtlpriv->stats.ui_rssi.elements[rtlpriv->
+                                                          stats.ui_rssi.index];
+                       rtlpriv->stats.ui_rssi.total_val -= last_rssi;
+               }
+               rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
+               rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.
+                                       index++] = pstats->signalstrength;
+               if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
+                       rtlpriv->stats.ui_rssi.index = 0;
+               tmpval = rtlpriv->stats.ui_rssi.total_val /
+                   rtlpriv->stats.ui_rssi.total_num;
+               rtlpriv->stats.signal_strength =
+                   _rtl92c_translate_todbm(hw, (u8) tmpval);
+               pstats->rssi = rtlpriv->stats.signal_strength;
+       }
+       if (!pstats->is_cck && pstats->packet_toself) {
+               for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+                    rfpath++) {
+                       if (!rtl8192_phy_check_is_legal_rfpath(hw, rfpath))
+                               continue;
+                       if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
+                               rtlpriv->stats.rx_rssi_percentage[rfpath] =
+                                   pstats->rx_mimo_signalstrength[rfpath];
+                       }
+                       if (pstats->rx_mimo_signalstrength[rfpath] >
+                           rtlpriv->stats.rx_rssi_percentage[rfpath]) {
+                               rtlpriv->stats.rx_rssi_percentage[rfpath] =
+                                   ((rtlpriv->stats.
+                                     rx_rssi_percentage[rfpath] *
+                                     (RX_SMOOTH_FACTOR - 1)) +
+                                    (pstats->rx_mimo_signalstrength[rfpath])) /
+                                   (RX_SMOOTH_FACTOR);
+
+                               rtlpriv->stats.rx_rssi_percentage[rfpath] =
+                                   rtlpriv->stats.rx_rssi_percentage[rfpath] +
+                                   1;
+                       } else {
+                               rtlpriv->stats.rx_rssi_percentage[rfpath] =
+                                   ((rtlpriv->stats.
+                                     rx_rssi_percentage[rfpath] *
+                                     (RX_SMOOTH_FACTOR - 1)) +
+                                    (pstats->rx_mimo_signalstrength[rfpath])) /
+                                   (RX_SMOOTH_FACTOR);
+                       }
+               }
+       }
+}
+
+static void _rtl92c_update_rxsignalstatistics(struct ieee80211_hw *hw,
+                                              struct rtl_stats *pstats)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       int weighting = 0;
+
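+       /* Smooth the received signal power: weight the previous average 5:1
+        * against the new sample and nudge by +/-5 toward it.
+        */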
+       if (rtlpriv->stats.recv_signal_power == 0)
+               rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
+       if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
+               weighting = 5;
+       else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
+               weighting = (-5);
+       rtlpriv->stats.recv_signal_power =
+           (rtlpriv->stats.recv_signal_power * 5 +
+            pstats->recvsignalpower + weighting) / 6;
+}
+
+static void _rtl92c_process_pwdb(struct ieee80211_hw *hw,
+               struct rtl_stats *pstats)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       long undecorated_smoothed_pwdb = 0;
+
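+       /* Smooth the per-packet PWDB into a running average weighted
+        * (RX_SMOOTH_FACTOR - 1) : 1, rounding up when the new sample is
+        * higher than the current average.
+        */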
+       if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+               return;
+       } else {
+               undecorated_smoothed_pwdb =
+                   rtlpriv->dm.undecorated_smoothed_pwdb;
+       }
+       if (pstats->packet_toself || pstats->packet_beacon) {
+               if (undecorated_smoothed_pwdb < 0)
+                       undecorated_smoothed_pwdb = pstats->rx_pwdb_all;
+               if (pstats->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) {
+                       undecorated_smoothed_pwdb =
+                           (((undecorated_smoothed_pwdb) *
+                             (RX_SMOOTH_FACTOR - 1)) +
+                            (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+                       undecorated_smoothed_pwdb = undecorated_smoothed_pwdb
+                           + 1;
+               } else {
+                       undecorated_smoothed_pwdb =
+                           (((undecorated_smoothed_pwdb) *
+                             (RX_SMOOTH_FACTOR - 1)) +
+                            (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+               }
+               rtlpriv->dm.undecorated_smoothed_pwdb =
+                   undecorated_smoothed_pwdb;
+               _rtl92c_update_rxsignalstatistics(hw, pstats);
+       }
+}
+
+static void _rtl92c_process_LINK_Q(struct ieee80211_hw *hw,
+                                            struct rtl_stats *pstats)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 last_evm = 0, n_stream, tmpval;
+
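+       /* Sliding-window average of per-packet signal quality, plus
+        * per-stream EVM smoothing with RX_SMOOTH_FACTOR.
+        */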
+       if (pstats->signalquality != 0) {
+               if (pstats->packet_toself || pstats->packet_beacon) {
+                       if (rtlpriv->stats.LINK_Q.total_num++ >=
+                           PHY_LINKQUALITY_SLID_WIN_MAX) {
+                               rtlpriv->stats.LINK_Q.total_num =
+                                   PHY_LINKQUALITY_SLID_WIN_MAX;
+                               last_evm =
+                                   rtlpriv->stats.LINK_Q.elements
+                                   [rtlpriv->stats.LINK_Q.index];
+                               rtlpriv->stats.LINK_Q.total_val -=
+                                   last_evm;
+                       }
+                       rtlpriv->stats.LINK_Q.total_val +=
+                           pstats->signalquality;
+                       rtlpriv->stats.LINK_Q.elements
+                          [rtlpriv->stats.LINK_Q.index++] =
+                           pstats->signalquality;
+                       if (rtlpriv->stats.LINK_Q.index >=
+                           PHY_LINKQUALITY_SLID_WIN_MAX)
+                               rtlpriv->stats.LINK_Q.index = 0;
+                       tmpval = rtlpriv->stats.LINK_Q.total_val /
+                           rtlpriv->stats.LINK_Q.total_num;
+                       rtlpriv->stats.signal_quality = tmpval;
+                       rtlpriv->stats.last_sigstrength_inpercent = tmpval;
+                       for (n_stream = 0; n_stream < 2;
+                            n_stream++) {
+                               if (pstats->RX_SIGQ[n_stream] != -1) {
+                                       if (!rtlpriv->stats.RX_EVM[n_stream]) {
+                                               rtlpriv->stats.RX_EVM[n_stream]
+                                                = pstats->RX_SIGQ[n_stream];
+                                       }
+                                       rtlpriv->stats.RX_EVM[n_stream] =
+                                           ((rtlpriv->stats.RX_EVM
+                                           [n_stream] *
+                                           (RX_SMOOTH_FACTOR - 1)) +
+                                           (pstats->RX_SIGQ
+                                           [n_stream] * 1)) /
+                                           (RX_SMOOTH_FACTOR);
+                               }
+                       }
+               }
+       }
+}
+
+static void _rtl92c_process_phyinfo(struct ieee80211_hw *hw,
+                                    u8 *buffer,
+                                    struct rtl_stats *pcurrent_stats)
+{
+       if (!pcurrent_stats->packet_matchbssid &&
+           !pcurrent_stats->packet_beacon)
+               return;
+       _rtl92c_process_ui_rssi(hw, pcurrent_stats);
+       _rtl92c_process_pwdb(hw, pcurrent_stats);
+       _rtl92c_process_LINK_Q(hw, pcurrent_stats);
+}
+
+void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+                                              struct sk_buff *skb,
+                                              struct rtl_stats *pstats,
+                                              struct rx_desc_92c *pdesc,
+                                              struct rx_fwinfo_92c *p_drvinfo)
+{
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       struct ieee80211_hdr *hdr;
+       u8 *tmp_buf;
+       u8 *praddr;
+       u8 *psaddr;
+       __le16 fc;
+       u16 type, cpu_fc;
+       bool packet_matchbssid, packet_toself, packet_beacon;
+
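+       /* Locate the 802.11 header behind the driver info block and classify
+        * the frame (matches our BSSID, addressed to us, beacon) before
+        * deriving the PHY statistics.
+        */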
+       tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
+       hdr = (struct ieee80211_hdr *)tmp_buf;
+       fc = hdr->frame_control;
+       cpu_fc = le16_to_cpu(fc);
+       type = WLAN_FC_GET_TYPE(fc);
+       praddr = hdr->addr1;
+       psaddr = hdr->addr2;
+       packet_matchbssid =
+           ((IEEE80211_FTYPE_CTL != type) &&
+            (!compare_ether_addr(mac->bssid,
+                         (cpu_fc & IEEE80211_FCTL_TODS) ?
+                         hdr->addr1 : (cpu_fc & IEEE80211_FCTL_FROMDS) ?
+                         hdr->addr2 : hdr->addr3)) &&
+            (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
+
+       packet_toself = packet_matchbssid &&
+           (!compare_ether_addr(praddr, rtlefuse->dev_addr));
+       packet_beacon = ieee80211_is_beacon(fc);
+       _rtl92c_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
+                                  packet_matchbssid, packet_toself,
+                                  packet_beacon);
+       _rtl92c_process_phyinfo(hw, tmp_buf, pstats);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
new file mode 100644 (file)
index 0000000..298fdb7
--- /dev/null
@@ -0,0 +1,180 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92C_MAC_H__
+#define __RTL92C_MAC_H__
+
+#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER                255
+#define DRIVER_EARLY_INT_TIME                                  0x05
+#define BCN_DMA_ATIME_INT_TIME                         0x02
+
+void rtl92c_read_chip_version(struct ieee80211_hw *hw);
+bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data);
+bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary);
+void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
+                    u8 *p_macaddr, bool is_group, u8 enc_algo,
+                    bool is_wepkey, bool clear_all);
+void rtl92c_enable_interrupt(struct ieee80211_hw *hw);
+void rtl92c_disable_interrupt(struct ieee80211_hw *hw);
+void rtl92c_set_qos(struct ieee80211_hw *hw, int aci);
+
+
+/*---------------------------------------------------------------
+ *     Hardware init functions
+ *---------------------------------------------------------------*/
+void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr);
+void rtl92c_init_interrupt(struct ieee80211_hw *hw);
+void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size);
+
+int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
+void rtl92c_init_network_type(struct ieee80211_hw *hw);
+void rtl92c_init_adaptive_ctrl(struct ieee80211_hw *hw);
+void rtl92c_init_rate_fallback(struct ieee80211_hw *hw);
+
+void rtl92c_init_edca_param(struct ieee80211_hw *hw,
+                           u16 queue, u16 txop, u8 cw_min, u8 cw_max, u8 aifs);
+
+void rtl92c_init_edca(struct ieee80211_hw *hw);
+void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw);
+void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode);
+void rtl92c_init_rdg_setting(struct ieee80211_hw *hw);
+void rtl92c_init_retry_function(struct ieee80211_hw *hw);
+
+void rtl92c_init_beacon_parameters(struct ieee80211_hw *hw,
+                                  enum version_8192c version);
+
+void rtl92c_disable_fast_edca(struct ieee80211_hw *hw);
+void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T);
+
+/* For filter */
+u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw);
+void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter);
+u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw);
+void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter);
+u16 rtl92c_get_data_filter(struct ieee80211_hw *hw);
+void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter);
+
+
+u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw);
+
+#define RX_HAL_IS_CCK_RATE(_pdesc)\
+       (GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE1M ||\
+        GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE2M ||\
+        GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE5_5M ||\
+        GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE11M)
+
+struct rx_fwinfo_92c {
+       u8 gain_trsw[4];
+       u8 pwdb_all;
+       u8 cfosho[4];
+       u8 cfotail[4];
+       char rxevm[2];
+       char rxsnr[4];
+       u8 pdsnr[2];
+       u8 csi_current[2];
+       u8 csi_target[2];
+       u8 sigevm;
+       u8 max_ex_pwr;
+       u8 ex_intf_flag:1;
+       u8 sgi_en:1;
+       u8 rxsc:2;
+       u8 reserve:4;
+} __packed;
+
+struct rx_desc_92c {
+       u32 length:14;
+       u32 crc32:1;
+       u32 icverror:1;
+       u32 drv_infosize:4;
+       u32 security:3;
+       u32 qos:1;
+       u32 shift:2;
+       u32 phystatus:1;
+       u32 swdec:1;
+       u32 lastseg:1;
+       u32 firstseg:1;
+       u32 eor:1;
+       u32 own:1;
+       u32 macid:5;    /* word 1 */
+       u32 tid:4;
+       u32 hwrsvd:5;
+       u32 paggr:1;
+       u32 faggr:1;
+       u32 a1_fit:4;
+       u32 a2_fit:4;
+       u32 pam:1;
+       u32 pwr:1;
+       u32 moredata:1;
+       u32 morefrag:1;
+       u32 type:2;
+       u32 mc:1;
+       u32 bc:1;
+       u32 seq:12;     /* word 2 */
+       u32 frag:4;
+       u32 nextpktlen:14;
+       u32 nextind:1;
+       u32 rsvd:1;
+       u32 rxmcs:6;    /* word 3 */
+       u32 rxht:1;
+       u32 amsdu:1;
+       u32 splcp:1;
+       u32 bandwidth:1;
+       u32 htc:1;
+       u32 tcpchk_rpt:1;
+       u32 ipcchk_rpt:1;
+       u32 tcpchk_valid:1;
+       u32 hwpcerr:1;
+       u32 hwpcind:1;
+       u32 iv0:16;
+       u32 iv1;        /* word 4 */
+       u32 tsfl;       /* word 5 */
+       u32 bufferaddress;      /* word 6 */
+       u32 bufferaddress64;    /* word 7 */
+} __packed;
+
+enum rtl_desc_qsel rtl92c_map_hwqueue_to_fwqueue(u16 fc,
+                                                unsigned int skb_queue);
+void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+                                     struct sk_buff *skb,
+                                     struct rtl_stats *pstats,
+                                     struct rx_desc_92c *pdesc,
+                                     struct rx_fwinfo_92c *p_drvinfo);
+
+/*---------------------------------------------------------------
+ *     Card disable functions
+ *---------------------------------------------------------------*/
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
new file mode 100644 (file)
index 0000000..4e020e6
--- /dev/null
@@ -0,0 +1,607 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../ps.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+#include "table.h"
+
+u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
+                           enum radio_path rfpath, u32 regaddr, u32 bitmask)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 original_value, readback_value, bitshift;
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
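+       /* Read the raw RF register, directly or through the firmware path,
+        * then mask and shift out the requested bit field.
+        */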
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+                                              "rfpath(%#x), bitmask(%#x)\n",
+                                              regaddr, rfpath, bitmask));
+       if (rtlphy->rf_mode != RF_OP_BY_FW) {
+               original_value = _rtl92c_phy_rf_serial_read(hw,
+                                                           rfpath, regaddr);
+       } else {
+               original_value = _rtl92c_phy_fw_rf_serial_read(hw,
+                                                              rfpath, regaddr);
+       }
+       bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+       readback_value = (original_value & bitmask) >> bitshift;
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+                ("regaddr(%#x), rfpath(%#x), "
+                 "bitmask(%#x), original_value(%#x)\n",
+                 regaddr, rfpath, bitmask, original_value));
+       return readback_value;
+}
+
+void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
+                          enum radio_path rfpath,
+                          u32 regaddr, u32 bitmask, u32 data)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u32 original_value, bitshift;
+
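+       /* Read-modify-write unless the mask covers the whole register; the
+        * calculated bit shift aligns the new data with the masked field.
+        */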
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+                ("regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+                 regaddr, bitmask, data, rfpath));
+       if (rtlphy->rf_mode != RF_OP_BY_FW) {
+               if (bitmask != RFREG_OFFSET_MASK) {
+                       original_value = _rtl92c_phy_rf_serial_read(hw,
+                                                                   rfpath,
+                                                                   regaddr);
+                       bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+                       data =
+                           ((original_value & (~bitmask)) |
+                            (data << bitshift));
+               }
+               _rtl92c_phy_rf_serial_write(hw, rfpath, regaddr, data);
+       } else {
+               if (bitmask != RFREG_OFFSET_MASK) {
+                       original_value = _rtl92c_phy_fw_rf_serial_read(hw,
+                                                                      rfpath,
+                                                                      regaddr);
+                       bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+                       data =
+                           ((original_value & (~bitmask)) |
+                            (data << bitshift));
+               }
+               _rtl92c_phy_fw_rf_serial_write(hw, rfpath, regaddr, data);
+       }
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+                                              "bitmask(%#x), data(%#x), rfpath(%#x)\n",
+                                              regaddr, bitmask, data, rfpath));
+}
+
+bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw)
+{
+       bool rtstatus;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       bool is92c = IS_92C_SERIAL(rtlhal->version);
+
+       rtstatus = _rtl92cu_phy_config_mac_with_headerfile(hw);
+       if (is92c && IS_HARDWARE_TYPE_8192CE(rtlhal))
+               rtl_write_byte(rtlpriv, 0x14, 0x71);
+       return rtstatus;
+}
+
+bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw)
+{
+       bool rtstatus = true;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u16 regval;
+       u8 b_reg_hwparafile = 1;
+
+       _rtl92c_phy_init_bb_rf_register_definition(hw);
+       regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+       rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, regval | BIT(13) |
+                      BIT(0) | BIT(1));
+       rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x83);
+       rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL + 1, 0xdb);
+       rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB);
+       if (IS_HARDWARE_TYPE_8192CE(rtlhal)) {
+               rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_PPLL | FEN_PCIEA |
+                              FEN_DIO_PCIE |   FEN_BB_GLB_RSTn | FEN_BBRSTB);
+       } else if (IS_HARDWARE_TYPE_8192CU(rtlhal)) {
+               rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_USBA | FEN_USBD |
+                              FEN_BB_GLB_RSTn | FEN_BBRSTB);
+               rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
+       }
+       rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
+       if (b_reg_hwparafile == 1)
+               rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw);
+       return rtstatus;
+}
+
+bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u32 i;
+       u32 arraylength;
+       u32 *ptrarray;
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Read Rtl819XMACPHY_Array\n"));
+       arraylength = rtlphy->hwparam_tables[MAC_REG].length;
+       ptrarray = rtlphy->hwparam_tables[MAC_REG].pdata;
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                ("Img:RTL8192CEMAC_2T_ARRAY\n"));
+       for (i = 0; i < arraylength; i = i + 2)
+               rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
+       return true;
+}
+
+bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+                                                 u8 configtype)
+{
+       int i;
+       u32 *phy_regarray_table;
+       u32 *agctab_array_table;
+       u16 phy_reg_arraylen, agctab_arraylen;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       if (IS_92C_SERIAL(rtlhal->version)) {
+               agctab_arraylen = rtlphy->hwparam_tables[AGCTAB_2T].length;
+               agctab_array_table =  rtlphy->hwparam_tables[AGCTAB_2T].pdata;
+               phy_reg_arraylen = rtlphy->hwparam_tables[PHY_REG_2T].length;
+               phy_regarray_table = rtlphy->hwparam_tables[PHY_REG_2T].pdata;
+       } else {
+               agctab_arraylen = rtlphy->hwparam_tables[AGCTAB_1T].length;
+               agctab_array_table =  rtlphy->hwparam_tables[AGCTAB_1T].pdata;
+               phy_reg_arraylen = rtlphy->hwparam_tables[PHY_REG_1T].length;
+               phy_regarray_table = rtlphy->hwparam_tables[PHY_REG_1T].pdata;
+       }
+       if (configtype == BASEBAND_CONFIG_PHY_REG) {
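+               /* Table entries with addresses 0xf9-0xfe are delay markers
+                * rather than real registers (the same convention is used by
+                * the PG and RF tables below).
+                */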
+               for (i = 0; i < phy_reg_arraylen; i = i + 2) {
+                       if (phy_regarray_table[i] == 0xfe)
+                               mdelay(50);
+                       else if (phy_regarray_table[i] == 0xfd)
+                               mdelay(5);
+                       else if (phy_regarray_table[i] == 0xfc)
+                               mdelay(1);
+                       else if (phy_regarray_table[i] == 0xfb)
+                               udelay(50);
+                       else if (phy_regarray_table[i] == 0xfa)
+                               udelay(5);
+                       else if (phy_regarray_table[i] == 0xf9)
+                               udelay(1);
+                       rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
+                                     phy_regarray_table[i + 1]);
+                       udelay(1);
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                                ("phy_regarray_table[%d] is %x,"
+                                 " value is %x\n", i,
+                                 phy_regarray_table[i],
+                                 phy_regarray_table[i + 1]));
+               }
+       } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
+               for (i = 0; i < agctab_arraylen; i = i + 2) {
+                       rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
+                                     agctab_array_table[i + 1]);
+                       udelay(1);
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                                ("agctab_array_table[%d] is "
+                                 "%x, value is %x\n", i,
+                                 agctab_array_table[i],
+                                 agctab_array_table[i + 1]));
+               }
+       }
+       return true;
+}
+
+bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+                                                   u8 configtype)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       int i;
+       u32 *phy_regarray_table_pg;
+       u16 phy_regarray_pg_len;
+
+       rtlphy->pwrgroup_cnt = 0;
+       phy_regarray_pg_len = rtlphy->hwparam_tables[PHY_REG_PG].length;
+       phy_regarray_table_pg = rtlphy->hwparam_tables[PHY_REG_PG].pdata;
+       if (configtype == BASEBAND_CONFIG_PHY_REG) {
+               for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
+                       if (phy_regarray_table_pg[i] == 0xfe)
+                               mdelay(50);
+                       else if (phy_regarray_table_pg[i] == 0xfd)
+                               mdelay(5);
+                       else if (phy_regarray_table_pg[i] == 0xfc)
+                               mdelay(1);
+                       else if (phy_regarray_table_pg[i] == 0xfb)
+                               udelay(50);
+                       else if (phy_regarray_table_pg[i] == 0xfa)
+                               udelay(5);
+                       else if (phy_regarray_table_pg[i] == 0xf9)
+                               udelay(1);
+                       _rtl92c_store_pwrIndex_diffrate_offset(hw,
+                                                 phy_regarray_table_pg[i],
+                                                 phy_regarray_table_pg[i + 1],
+                                                 phy_regarray_table_pg[i + 2]);
+               }
+       } else {
+               RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+                        ("configtype != BaseBand_Config_PHY_REG\n"));
+       }
+       return true;
+}
+
+bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+                                         enum radio_path rfpath)
+{
+       int i;
+       u32 *radioa_array_table;
+       u32 *radiob_array_table;
+       u16 radioa_arraylen, radiob_arraylen;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       if (IS_92C_SERIAL(rtlhal->version)) {
+               radioa_arraylen = rtlphy->hwparam_tables[RADIOA_2T].length;
+               radioa_array_table = rtlphy->hwparam_tables[RADIOA_2T].pdata;
+               radiob_arraylen = rtlphy->hwparam_tables[RADIOB_2T].length;
+               radiob_array_table = rtlphy->hwparam_tables[RADIOB_2T].pdata;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("Radio_A:RTL8192CERADIOA_2TARRAY\n"));
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("Radio_B:RTL8192CE_RADIOB_2TARRAY\n"));
+       } else {
+               radioa_arraylen = rtlphy->hwparam_tables[RADIOA_1T].length;
+               radioa_array_table = rtlphy->hwparam_tables[RADIOA_1T].pdata;
+               radiob_arraylen = rtlphy->hwparam_tables[RADIOB_1T].length;
+               radiob_array_table = rtlphy->hwparam_tables[RADIOB_1T].pdata;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("Radio_A:RTL8192CE_RADIOA_1TARRAY\n"));
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        ("Radio_B:RTL8192CE_RADIOB_1TARRAY\n"));
+       }
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Radio No %x\n", rfpath));
+       switch (rfpath) {
+       case RF90_PATH_A:
+               for (i = 0; i < radioa_arraylen; i = i + 2) {
+                       if (radioa_array_table[i] == 0xfe)
+                               mdelay(50);
+                       else if (radioa_array_table[i] == 0xfd)
+                               mdelay(5);
+                       else if (radioa_array_table[i] == 0xfc)
+                               mdelay(1);
+                       else if (radioa_array_table[i] == 0xfb)
+                               udelay(50);
+                       else if (radioa_array_table[i] == 0xfa)
+                               udelay(5);
+                       else if (radioa_array_table[i] == 0xf9)
+                               udelay(1);
+                       else {
+                               rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
+                                             RFREG_OFFSET_MASK,
+                                             radioa_array_table[i + 1]);
+                               udelay(1);
+                       }
+               }
+               break;
+       case RF90_PATH_B:
+               for (i = 0; i < radiob_arraylen; i = i + 2) {
+                       if (radiob_array_table[i] == 0xfe)
+                               mdelay(50);
+                       else if (radiob_array_table[i] == 0xfd)
+                               mdelay(5);
+                       else if (radiob_array_table[i] == 0xfc)
+                               mdelay(1);
+                       else if (radiob_array_table[i] == 0xfb)
+                               udelay(50);
+                       else if (radiob_array_table[i] == 0xfa)
+                               udelay(5);
+                       else if (radiob_array_table[i] == 0xf9)
+                               udelay(1);
+                       else {
+                               rtl_set_rfreg(hw, rfpath, radiob_array_table[i],
+                                             RFREG_OFFSET_MASK,
+                                             radiob_array_table[i + 1]);
+                               udelay(1);
+                       }
+               }
+               break;
+       case RF90_PATH_C:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("switch case not process\n"));
+               break;
+       case RF90_PATH_D:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("switch case not process\n"));
+               break;
+       }
+       return true;
+}
+
+void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       u8 reg_bw_opmode;
+       u8 reg_prsr_rsc;
+
+       RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
+                ("Switch to %s bandwidth\n",
+                 rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+                 "20MHz" : "40MHz"));
+       if (is_hal_stop(rtlhal)) {
+               rtlphy->set_bwmode_inprogress = false;
+               return;
+       }
+       reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
+       reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);
+       switch (rtlphy->current_chan_bw) {
+       case HT_CHANNEL_WIDTH_20:
+               reg_bw_opmode |= BW_OPMODE_20MHZ;
+               rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+               break;
+       case HT_CHANNEL_WIDTH_20_40:
+               reg_bw_opmode &= ~BW_OPMODE_20MHZ;
+               rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+               reg_prsr_rsc =
+                   (reg_prsr_rsc & 0x90) | (mac->cur_40_prime_sc << 5);
+               rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+               break;
+       }
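+       /* Mirror the opmode change in the baseband: RF mode, CCK sideband
+        * and OFDM LSTF select the primary 20 MHz sub-channel when running
+        * in 40 MHz mode. */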
+       switch (rtlphy->current_chan_bw) {
+       case HT_CHANNEL_WIDTH_20:
+               rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0);
+               rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0);
+               rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);
+               break;
+       case HT_CHANNEL_WIDTH_20_40:
+               rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1);
+               rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1);
+               rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND,
+                             (mac->cur_40_prime_sc >> 1));
+               rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc);
+               rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 0);
+               rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)),
+                             (mac->cur_40_prime_sc ==
+                              HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+               break;
+       }
+       rtl92cu_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
+       rtlphy->set_bwmode_inprogress = false;
+       RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
+}
+
+void rtl92cu_bb_block_on(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       mutex_lock(&rtlpriv->io.bb_mutex);
+       rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
+       rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);
+       mutex_unlock(&rtlpriv->io.bb_mutex);
+}
+
+void _rtl92cu_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
+{
+       u8 tmpreg;
+       u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
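+       /* If TX is active (0xd03 bits [6:4] set), temporarily reprogram RF
+        * register 0x00 on the active path(s); otherwise pause TX through
+        * REG_TXPAUSE.  The LC calibration is then kicked off via RF
+        * register 0x18 and given 100 ms before the old state is restored. */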
+       tmpreg = rtl_read_byte(rtlpriv, 0xd03);
+
+       if ((tmpreg & 0x70) != 0)
+               rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
+       else
+               rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+
+       if ((tmpreg & 0x70) != 0) {
+               rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS);
+               if (is2t)
+                       rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00,
+                                                 MASK12BITS);
+               rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS,
+                             (rf_a_mode & 0x8FFFF) | 0x10000);
+               if (is2t)
+                       rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
+                                     (rf_b_mode & 0x8FFFF) | 0x10000);
+       }
+       lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS);
+       rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, lc_cal | 0x08000);
+       mdelay(100);
+       if ((tmpreg & 0x70) != 0) {
+               rtl_write_byte(rtlpriv, 0xd03, tmpreg);
+               rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode);
+               if (is2t)
+                       rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
+                                     rf_b_mode);
+       } else {
+               rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+       }
+}
+
+bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
+                                           enum rf_pwrstate rfpwr_state)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       bool bresult = true;
+       u8 i, queue_id;
+       struct rtl8192_tx_ring *ring = NULL;
+
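+       /* ERFON: re-initialize the NIC (or just turn the RF back on) and
+        * update the LED state.  ERFOFF/ERFSLEEP: wait for the TX queues to
+        * drain, bounded by MAX_DOZE_WAITING_TIMES_9x, before halting the
+        * NIC or putting the RF to sleep. */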
+       ppsc->set_rfpowerstate_inprogress = true;
+       switch (rfpwr_state) {
+       case ERFON:
+               if ((ppsc->rfpwr_state == ERFOFF) &&
+                   RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
+                       bool rtstatus;
+                       u32 InitializeCount = 0;
+
+                       do {
+                               InitializeCount++;
+                               RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+                                        ("IPS Set eRf nic enable\n"));
+                               rtstatus = rtl_ps_enable_nic(hw);
+                       } while ((rtstatus != true)
+                                && (InitializeCount < 10));
+                       RT_CLEAR_PS_LEVEL(ppsc,
+                                         RT_RF_OFF_LEVL_HALT_NIC);
+               } else {
+                       RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+                                ("Set ERFON sleeped:%d ms\n",
+                                 jiffies_to_msecs(jiffies -
+                                                  ppsc->
+                                                  last_sleep_jiffies)));
+                       ppsc->last_awake_jiffies = jiffies;
+                       rtl92ce_phy_set_rf_on(hw);
+               }
+               if (mac->link_state == MAC80211_LINKED) {
+                       rtlpriv->cfg->ops->led_control(hw,
+                                                      LED_CTL_LINK);
+               } else {
+                       rtlpriv->cfg->ops->led_control(hw,
+                                                      LED_CTL_NO_LINK);
+               }
+               break;
+       case ERFOFF:
+               for (queue_id = 0, i = 0;
+                    queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+                       ring = &pcipriv->dev.tx_ring[queue_id];
+                       if (skb_queue_len(&ring->queue) == 0 ||
+                               queue_id == BEACON_QUEUE) {
+                               queue_id++;
+                               continue;
+                       } else {
+                               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                                        ("eRf Off/Sleep: %d times "
+                                         "TcbBusyQueue[%d] "
+                                         "=%d before doze!\n", (i + 1),
+                                         queue_id,
+                                         skb_queue_len(&ring->queue)));
+                               udelay(10);
+                               i++;
+                       }
+                       if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+                               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                                        ("\nERFOFF: %d times "
+                                         "TcbBusyQueue[%d] = %d !\n",
+                                         MAX_DOZE_WAITING_TIMES_9x,
+                                         queue_id,
+                                         skb_queue_len(&ring->queue)));
+                               break;
+                       }
+               }
+               if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
+                       RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+                                ("IPS Set eRf nic disable\n"));
+                       rtl_ps_disable_nic(hw);
+                       RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+               } else {
+                       if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
+                               rtlpriv->cfg->ops->led_control(hw,
+                                                        LED_CTL_NO_LINK);
+                       } else {
+                               rtlpriv->cfg->ops->led_control(hw,
+                                                        LED_CTL_POWER_OFF);
+                       }
+               }
+               break;
+       case ERFSLEEP:
+               if (ppsc->rfpwr_state == ERFOFF)
+                       break;
+               for (queue_id = 0, i = 0;
+                    queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+                       ring = &pcipriv->dev.tx_ring[queue_id];
+                       if (skb_queue_len(&ring->queue) == 0) {
+                               queue_id++;
+                               continue;
+                       } else {
+                               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                                        ("eRf Off/Sleep: %d times "
+                                         "TcbBusyQueue[%d] =%d before "
+                                         "doze!\n", (i + 1), queue_id,
+                                         skb_queue_len(&ring->queue)));
+                               udelay(10);
+                               i++;
+                       }
+                       if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+                               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                                        ("\n ERFSLEEP: %d times "
+                                         "TcbBusyQueue[%d] = %d !\n",
+                                         MAX_DOZE_WAITING_TIMES_9x,
+                                         queue_id,
+                                         skb_queue_len(&ring->queue)));
+                               break;
+                       }
+               }
+               RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+                        ("Set ERFSLEEP awaked:%d ms\n",
+                         jiffies_to_msecs(jiffies -
+                                          ppsc->last_awake_jiffies)));
+               ppsc->last_sleep_jiffies = jiffies;
+               _rtl92c_phy_set_rf_sleep(hw);
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("switch case not process\n"));
+               bresult = false;
+               break;
+       }
+       if (bresult)
+               ppsc->rfpwr_state = rfpwr_state;
+       ppsc->set_rfpowerstate_inprogress = false;
+       return bresult;
+}
+
+bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
+                                  enum rf_pwrstate rfpwr_state)
+{
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       bool bresult = false;
+
+       if (rfpwr_state == ppsc->rfpwr_state)
+               return bresult;
+       bresult = _rtl92cu_phy_set_rf_power_state(hw, rfpwr_state);
+       return bresult;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
new file mode 100644 (file)
index 0000000..0629955
--- /dev/null
@@ -0,0 +1,36 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../rtl8192ce/phy.h"
+
+void rtl92cu_bb_block_on(struct ieee80211_hw *hw);
+bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath);
+void rtl92c_phy_set_io(struct ieee80211_hw *hw);
+bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h b/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h
new file mode 100644 (file)
index 0000000..7f1be61
--- /dev/null
@@ -0,0 +1,30 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../rtl8192ce/reg.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
new file mode 100644 (file)
index 0000000..1c79c22
--- /dev/null
@@ -0,0 +1,493 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+
+static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw);
+
+void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       switch (bandwidth) {
+       case HT_CHANNEL_WIDTH_20:
+               rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
+                                            0xfffff3ff) | 0x0400);
+               rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
+                             rtlphy->rfreg_chnlval[0]);
+               break;
+       case HT_CHANNEL_WIDTH_20_40:
+               rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
+                                            0xfffff3ff));
+               rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
+                             rtlphy->rfreg_chnlval[0]);
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("unknown bandwidth: %#X\n", bandwidth));
+               break;
+       }
+}
+
+void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                      u8 *ppowerlevel)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u32 tx_agc[2] = { 0, 0 }, tmpval = 0;
+       bool turbo_scanoff = false;
+       u8 idx1, idx2;
+       u8 *ptr;
+
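+       /* Build one 32-bit TX AGC word per RF path (four per-rate bytes),
+        * clamp every byte to RF6052_MAX_TX_PWR and spread the result over
+        * the CCK TXAGC baseband registers. */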
+       if (rtlhal->interface == INTF_PCI) {
+               if (rtlefuse->eeprom_regulatory != 0)
+                       turbo_scanoff = true;
+       } else {
+               if ((rtlefuse->eeprom_regulatory != 0) ||
+                   (rtlefuse->external_pa))
+                       turbo_scanoff = true;
+       }
+       if (mac->act_scanning == true) {
+               tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
+               tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
+               if (turbo_scanoff) {
+                       for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+                               tx_agc[idx1] = ppowerlevel[idx1] |
+                                   (ppowerlevel[idx1] << 8) |
+                                   (ppowerlevel[idx1] << 16) |
+                                   (ppowerlevel[idx1] << 24);
+                               if (rtlhal->interface == INTF_USB) {
+                                       if (tx_agc[idx1] > 0x20 &&
+                                           rtlefuse->external_pa)
+                                               tx_agc[idx1] = 0x20;
+                               }
+                       }
+               }
+       } else {
+               if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+                   TXHIGHPWRLEVEL_LEVEL1) {
+                       tx_agc[RF90_PATH_A] = 0x10101010;
+                       tx_agc[RF90_PATH_B] = 0x10101010;
+               } else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+                          TXHIGHPWRLEVEL_LEVEL2) {
+                       tx_agc[RF90_PATH_A] = 0x00000000;
+                       tx_agc[RF90_PATH_B] = 0x00000000;
+               } else {
+                       for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+                               tx_agc[idx1] = ppowerlevel[idx1] |
+                                   (ppowerlevel[idx1] << 8) |
+                                   (ppowerlevel[idx1] << 16) |
+                                   (ppowerlevel[idx1] << 24);
+                       }
+                       if (rtlefuse->eeprom_regulatory == 0) {
+                               tmpval = (rtlphy->mcs_txpwrlevel_origoffset
+                                       [0][6]) +
+                                       (rtlphy->mcs_txpwrlevel_origoffset
+                                       [0][7] <<  8);
+                               tx_agc[RF90_PATH_A] += tmpval;
+                               tmpval = (rtlphy->mcs_txpwrlevel_origoffset
+                                       [0][14]) +
+                                       (rtlphy->mcs_txpwrlevel_origoffset
+                                       [0][15] << 24);
+                               tx_agc[RF90_PATH_B] += tmpval;
+                       }
+               }
+       }
+       for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+               ptr = (u8 *) (&(tx_agc[idx1]));
+               for (idx2 = 0; idx2 < 4; idx2++) {
+                       if (*ptr > RF6052_MAX_TX_PWR)
+                               *ptr = RF6052_MAX_TX_PWR;
+                       ptr++;
+               }
+       }
+       tmpval = tx_agc[RF90_PATH_A] & 0xff;
+       rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
+
+       RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+               ("CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+                RTXAGC_A_CCK1_MCS32));
+
+       tmpval = tx_agc[RF90_PATH_A] >> 8;
+       if (mac->mode == WIRELESS_MODE_B)
+               tmpval = tmpval & 0xff00ffff;
+       rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
+       RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+               ("CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+                RTXAGC_B_CCK11_A_CCK2_11));
+       tmpval = tx_agc[RF90_PATH_B] >> 24;
+       rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
+       RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+               ("CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+                RTXAGC_B_CCK11_A_CCK2_11));
+       tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
+       rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);
+       RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+               ("CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+                RTXAGC_B_CCK1_55_MCS32));
+}
+
+static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw,
+                                     u8 *ppowerlevel, u8 channel,
+                                     u32 *ofdmbase, u32 *mcsbase)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u32 powerBase0, powerBase1;
+       u8 legacy_pwrdiff = 0, ht20_pwrdiff = 0;
+       u8 i, powerlevel[2];
+
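+       /* powerBase0: input level plus the legacy/HT diff (legacy OFDM
+        * rates); powerBase1: input level plus the HT20 diff when in 20 MHz
+        * (MCS rates).  Each is replicated into all four bytes of the
+        * 32-bit word. */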
+       for (i = 0; i < 2; i++) {
+               powerlevel[i] = ppowerlevel[i];
+               legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1];
+               powerBase0 = powerlevel[i] + legacy_pwrdiff;
+               powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) |
+                   (powerBase0 << 8) | powerBase0;
+               *(ofdmbase + i) = powerBase0;
+               RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+                       (" [OFDM power base index rf(%c) = 0x%x]\n",
+                        ((i == 0) ? 'A' : 'B'), *(ofdmbase + i)));
+       }
+       for (i = 0; i < 2; i++) {
+               if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) {
+                       ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1];
+                       powerlevel[i] += ht20_pwrdiff;
+               }
+               powerBase1 = powerlevel[i];
+               powerBase1 = (powerBase1 << 24) |
+                   (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
+               *(mcsbase + i) = powerBase1;
+               RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+                       (" [MCS power base index rf(%c) = 0x%x]\n",
+                        ((i == 0) ? 'A' : 'B'), *(mcsbase + i)));
+       }
+}
+
+static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
+                                                      u8 channel, u8 index,
+                                                      u32 *powerBase0,
+                                                      u32 *powerBase1,
+                                                      u32 *p_outwriteval)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u8 i, chnlgroup = 0, pwr_diff_limit[4];
+       u32 writeVal, customer_limit, rf;
+
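+       /* eeprom_regulatory selects how the per-rate offsets are applied:
+        * 0/default - Realtek-defined offsets, 1 - channel-group based
+        * offsets, 2 - power base only, 3 - customer limits taken from
+        * pwrgroup_ht20/ht40.  Dynamic high-power levels then override the
+        * computed value. */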
+       for (rf = 0; rf < 2; rf++) {
+               switch (rtlefuse->eeprom_regulatory) {
+               case 0:
+                       chnlgroup = 0;
+                       writeVal = rtlphy->mcs_txpwrlevel_origoffset
+                           [chnlgroup][index + (rf ? 8 : 0)]
+                           + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+                       RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+                               ("RTK better performance,writeVal(%c) = 0x%x\n",
+                               ((rf == 0) ? 'A' : 'B'), writeVal));
+                       break;
+               case 1:
+                       if (rtlphy->pwrgroup_cnt == 1)
+                               chnlgroup = 0;
+                       if (rtlphy->pwrgroup_cnt >= 3) {
+                               if (channel <= 3)
+                                       chnlgroup = 0;
+                               else if (channel >= 4 && channel <= 9)
+                                       chnlgroup = 1;
+                               else if (channel > 9)
+                                       chnlgroup = 2;
+                               if (rtlphy->current_chan_bw ==
+                                   HT_CHANNEL_WIDTH_20)
+                                       chnlgroup++;
+                               else
+                                       chnlgroup += 4;
+                       }
+                       writeVal = rtlphy->mcs_txpwrlevel_origoffset
+                                       [chnlgroup][index +
+                                       (rf ? 8 : 0)] +
+                                       ((index < 2) ? powerBase0[rf] :
+                                       powerBase1[rf]);
+                       RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+                               ("Realtek regulatory, 20MHz, "
+                               "writeVal(%c) = 0x%x\n",
+                               ((rf == 0) ? 'A' : 'B'), writeVal));
+                       break;
+               case 2:
+                       writeVal = ((index < 2) ? powerBase0[rf] :
+                                  powerBase1[rf]);
+                       RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+                               ("Better regulatory,writeVal(%c) = 0x%x\n",
+                                ((rf == 0) ? 'A' : 'B'), writeVal));
+                       break;
+               case 3:
+                       chnlgroup = 0;
+                       if (rtlphy->current_chan_bw ==
+                           HT_CHANNEL_WIDTH_20_40) {
+                               RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+                                       ("customer's limit, 40MHzrf(%c) = "
+                                       "0x%x\n", ((rf == 0) ? 'A' : 'B'),
+                                       rtlefuse->pwrgroup_ht40[rf]
+                                       [channel - 1]));
+                       } else {
+                               RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+                                       ("customer's limit, 20MHz rf(%c) = "
+                                       "0x%x\n", ((rf == 0) ? 'A' : 'B'),
+                                       rtlefuse->pwrgroup_ht20[rf]
+                                       [channel - 1]));
+                       }
+                       for (i = 0; i < 4; i++) {
+                               pwr_diff_limit[i] =
+                                   (u8) ((rtlphy->mcs_txpwrlevel_origoffset
+                                   [chnlgroup][index + (rf ? 8 : 0)]
+                                   & (0x7f << (i * 8))) >> (i * 8));
+                               if (rtlphy->current_chan_bw ==
+                                   HT_CHANNEL_WIDTH_20_40) {
+                                       if (pwr_diff_limit[i] >
+                                           rtlefuse->pwrgroup_ht40[rf]
+                                               [channel - 1])
+                                               pwr_diff_limit[i] = rtlefuse->
+                                                   pwrgroup_ht40[rf]
+                                                   [channel - 1];
+                               } else {
+                                       if (pwr_diff_limit[i] >
+                                           rtlefuse->pwrgroup_ht20[rf]
+                                               [channel - 1])
+                                               pwr_diff_limit[i] =
+                                                   rtlefuse->pwrgroup_ht20[rf]
+                                                   [channel - 1];
+                               }
+                       }
+                       customer_limit = (pwr_diff_limit[3] << 24) |
+                           (pwr_diff_limit[2] << 16) |
+                           (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]);
+                       RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+                               ("Customer's limit rf(%c) = 0x%x\n",
+                                ((rf == 0) ? 'A' : 'B'), customer_limit));
+                       writeVal = customer_limit + ((index < 2) ?
+                                  powerBase0[rf] : powerBase1[rf]);
+                       RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+                               ("Customer, writeVal rf(%c)= 0x%x\n",
+                                ((rf == 0) ? 'A' : 'B'), writeVal));
+                       break;
+               default:
+                       chnlgroup = 0;
+                       writeVal = rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
+                                  [index + (rf ? 8 : 0)] + ((index < 2) ?
+                                  powerBase0[rf] : powerBase1[rf]);
+                       RTPRINT(rtlpriv, FPHY, PHY_TXPWR, ("RTK better "
+                               "performance, writeValrf(%c) = 0x%x\n",
+                               ((rf == 0) ? 'A' : 'B'), writeVal));
+                       break;
+               }
+               if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+                   TXHIGHPWRLEVEL_LEVEL1)
+                       writeVal = 0x14141414;
+               else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+                        TXHIGHPWRLEVEL_LEVEL2)
+                       writeVal = 0x00000000;
+               if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
+                       writeVal = writeVal - 0x06060606;
+               /* TXHIGHPWRLEVEL_BT2: leave writeVal unchanged */
+               *(p_outwriteval + rf) = writeVal;
+       }
+}
+
+static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
+                                        u8 index, u32 *pValue)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u16 regoffset_a[6] = {
+               RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24,
+               RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
+               RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
+       };
+       u16 regoffset_b[6] = {
+               RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24,
+               RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
+               RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
+       };
+       u8 i, rf, pwr_val[4];
+       u32 writeVal;
+       u16 regoffset;
+
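+       /* Clamp each per-rate byte to RF6052_MAX_TX_PWR, write the packed
+        * word to the per-path TXAGC register and, for the highest MCS
+        * group, also program the 0xc90/0xc98 byte registers with a
+        * reduced value. */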
+       for (rf = 0; rf < 2; rf++) {
+               writeVal = pValue[rf];
+               for (i = 0; i < 4; i++) {
+                       pwr_val[i] = (u8)((writeVal & (0x7f << (i * 8))) >>
+                                         (i * 8));
+                       if (pwr_val[i] > RF6052_MAX_TX_PWR)
+                               pwr_val[i] = RF6052_MAX_TX_PWR;
+               }
+               writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
+                   (pwr_val[1] << 8) | pwr_val[0];
+               if (rf == 0)
+                       regoffset = regoffset_a[index];
+               else
+                       regoffset = regoffset_b[index];
+               rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal);
+               RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+                       ("Set 0x%x = %08x\n", regoffset, writeVal));
+               if (((get_rf_type(rtlphy) == RF_2T2R) &&
+                    (regoffset == RTXAGC_A_MCS15_MCS12 ||
+                     regoffset == RTXAGC_B_MCS15_MCS12)) ||
+                   ((get_rf_type(rtlphy) != RF_2T2R) &&
+                    (regoffset == RTXAGC_A_MCS07_MCS04 ||
+                     regoffset == RTXAGC_B_MCS07_MCS04))) {
+                       writeVal = pwr_val[3];
+                       if (regoffset == RTXAGC_A_MCS15_MCS12 ||
+                           regoffset == RTXAGC_A_MCS07_MCS04)
+                               regoffset = 0xc90;
+                       if (regoffset == RTXAGC_B_MCS15_MCS12 ||
+                           regoffset == RTXAGC_B_MCS07_MCS04)
+                               regoffset = 0xc98;
+                       for (i = 0; i < 3; i++) {
+                               writeVal = (writeVal > 6) ? (writeVal - 6) : 0;
+                               rtl_write_byte(rtlpriv, (u32)(regoffset + i),
+                                             (u8)writeVal);
+                       }
+               }
+       }
+}
+
+void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                       u8 *ppowerlevel, u8 channel)
+{
+       u32 writeVal[2], powerBase0[2], powerBase1[2];
+       u8 index = 0;
+
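+       /* Six TXAGC groups per path (legacy 18-6M, 54-24M and MCS15-0 in
+        * blocks of four rates), each derived from the power base plus the
+        * regulatory-specific offset. */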
+       rtl92c_phy_get_power_base(hw, ppowerlevel,
+                                 channel, &powerBase0[0], &powerBase1[0]);
+       for (index = 0; index < 6; index++) {
+               _rtl92c_get_txpower_writeval_by_regulatory(hw,
+                                                          channel, index,
+                                                          &powerBase0[0],
+                                                          &powerBase1[0],
+                                                          &writeVal[0]);
+               _rtl92c_write_ofdm_power_reg(hw, index, &writeVal[0]);
+       }
+}
+
+bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       bool rtstatus = true;
+       u8 b_reg_hwparafile = 1;
+
+       if (rtlphy->rf_type == RF_1T1R)
+               rtlphy->num_total_rfpath = 1;
+       else
+               rtlphy->num_total_rfpath = 2;
+       if (b_reg_hwparafile == 1)
+               rtstatus = _rtl92c_phy_rf6052_config_parafile(hw);
+       return rtstatus;
+}
+
+static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u32 u4_regvalue = 0;
+       u8 rfpath;
+       bool rtstatus = true;
+       struct bb_reg_def *pphyreg;
+
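+       /* For each RF path: save the RF_ENV setting, switch the interface
+        * to 3-wire mode, load the radio A/B table via
+        * rtl92cu_phy_config_rf_with_headerfile(), then restore RF_ENV. */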
+       for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+               pphyreg = &rtlphy->phyreg_def[rfpath];
+               switch (rfpath) {
+               case RF90_PATH_A:
+               case RF90_PATH_C:
+                       u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+                                                   BRFSI_RFENV);
+                       break;
+               case RF90_PATH_B:
+               case RF90_PATH_D:
+                       u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+                                                   BRFSI_RFENV << 16);
+                       break;
+               }
+               rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
+               udelay(1);
+               rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
+               udelay(1);
+               rtl_set_bbreg(hw, pphyreg->rfhssi_para2,
+                             B3WIREADDREAALENGTH, 0x0);
+               udelay(1);
+               rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
+               udelay(1);
+               switch (rfpath) {
+               case RF90_PATH_A:
+                       rtstatus = rtl92cu_phy_config_rf_with_headerfile(hw,
+                                       (enum radio_path) rfpath);
+                       break;
+               case RF90_PATH_B:
+                       rtstatus = rtl92cu_phy_config_rf_with_headerfile(hw,
+                                       (enum radio_path) rfpath);
+                       break;
+               case RF90_PATH_C:
+                       break;
+               case RF90_PATH_D:
+                       break;
+               }
+               switch (rfpath) {
+               case RF90_PATH_A:
+               case RF90_PATH_C:
+                       rtl_set_bbreg(hw, pphyreg->rfintfs,
+                                     BRFSI_RFENV, u4_regvalue);
+                       break;
+               case RF90_PATH_B:
+               case RF90_PATH_D:
+                       rtl_set_bbreg(hw, pphyreg->rfintfs,
+                                     BRFSI_RFENV << 16, u4_regvalue);
+                       break;
+               }
+               if (rtstatus != true) {
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                                ("Radio[%d] Fail!!", rfpath));
+                       goto phy_rf_cfg_fail;
+               }
+       }
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("<---\n"));
+       return rtstatus;
+phy_rf_cfg_fail:
+       return rtstatus;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
new file mode 100644 (file)
index 0000000..86c2728
--- /dev/null
@@ -0,0 +1,47 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_RF_H__
+#define __RTL92CU_RF_H__
+
+#define RF6052_MAX_TX_PWR              0x3F
+#define RF6052_MAX_REG                 0x3F
+#define RF6052_MAX_PATH                        2
+
+extern void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
+                                           u8 bandwidth);
+extern void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                              u8 *ppowerlevel);
+extern void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                               u8 *ppowerlevel, u8 channel);
+bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw);
+bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+                                         enum radio_path rfpath);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
new file mode 100644 (file)
index 0000000..71244a3
--- /dev/null
@@ -0,0 +1,336 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../core.h"
+#include "../usb.h"
+#include "../efuse.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "mac.h"
+#include "dm.h"
+#include "rf.h"
+#include "sw.h"
+#include "trx.h"
+#include "led.h"
+#include "hw.h"
+#include <linux/vmalloc.h>
+
+MODULE_AUTHOR("Georgia         <georgia@realtek.com>");
+MODULE_AUTHOR("Ziv Huang       <ziv_huang@realtek.com>");
+MODULE_AUTHOR("Larry Finger    <Larry.Finger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n USB wireless");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw.bin");
+
+static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtlpriv->dm.dm_initialgain_enable = 1;
+       rtlpriv->dm.dm_flag = 0;
+       rtlpriv->dm.disable_framebursting = 0;
+       rtlpriv->dm.thermalvalue = 0;
+       rtlpriv->rtlhal.pfirmware = vmalloc(0x4000);
+       if (!rtlpriv->rtlhal.pfirmware) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("Can't alloc buffer for fw.\n"));
+               return 1;
+       }
+       return 0;
+}
+
+static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       if (rtlpriv->rtlhal.pfirmware) {
+               vfree(rtlpriv->rtlhal.pfirmware);
+               rtlpriv->rtlhal.pfirmware = NULL;
+       }
+}
+
+static struct rtl_hal_ops rtl8192cu_hal_ops = {
+       .init_sw_vars = rtl92cu_init_sw_vars,
+       .deinit_sw_vars = rtl92cu_deinit_sw_vars,
+       .read_chip_version = rtl92c_read_chip_version,
+       .read_eeprom_info = rtl92cu_read_eeprom_info,
+       .enable_interrupt = rtl92c_enable_interrupt,
+       .disable_interrupt = rtl92c_disable_interrupt,
+       .hw_init = rtl92cu_hw_init,
+       .hw_disable = rtl92cu_card_disable,
+       .set_network_type = rtl92cu_set_network_type,
+       .set_chk_bssid = rtl92cu_set_check_bssid,
+       .set_qos = rtl92c_set_qos,
+       .set_bcn_reg = rtl92cu_set_beacon_related_registers,
+       .set_bcn_intv = rtl92cu_set_beacon_interval,
+       .update_interrupt_mask = rtl92cu_update_interrupt_mask,
+       .get_hw_reg = rtl92cu_get_hw_reg,
+       .set_hw_reg = rtl92cu_set_hw_reg,
+       .update_rate_table = rtl92cu_update_hal_rate_table,
+       .update_rate_mask = rtl92cu_update_hal_rate_mask,
+       .fill_tx_desc = rtl92cu_tx_fill_desc,
+       .fill_fake_txdesc = rtl92cu_fill_fake_txdesc,
+       .fill_tx_cmddesc = rtl92cu_tx_fill_cmddesc,
+       .cmd_send_packet = rtl92cu_cmd_send_packet,
+       .query_rx_desc = rtl92cu_rx_query_desc,
+       .set_channel_access = rtl92cu_update_channel_access_setting,
+       .radio_onoff_checking = rtl92cu_gpio_radio_on_off_checking,
+       .set_bw_mode = rtl92c_phy_set_bw_mode,
+       .switch_channel = rtl92c_phy_sw_chnl,
+       .dm_watchdog = rtl92c_dm_watchdog,
+       .scan_operation_backup = rtl92c_phy_scan_operation_backup,
+       .set_rf_power_state = rtl92cu_phy_set_rf_power_state,
+       .led_control = rtl92cu_led_control,
+       .enable_hw_sec = rtl92cu_enable_hw_security_config,
+       .set_key = rtl92c_set_key,
+       .init_sw_leds = rtl92cu_init_sw_leds,
+       .deinit_sw_leds = rtl92cu_deinit_sw_leds,
+       .get_bbreg = rtl92c_phy_query_bb_reg,
+       .set_bbreg = rtl92c_phy_set_bb_reg,
+       .get_rfreg = rtl92cu_phy_query_rf_reg,
+       .set_rfreg = rtl92cu_phy_set_rf_reg,
+       .phy_rf6052_config = rtl92cu_phy_rf6052_config,
+       .phy_rf6052_set_cck_txpower = rtl92cu_phy_rf6052_set_cck_txpower,
+       .phy_rf6052_set_ofdm_txpower = rtl92cu_phy_rf6052_set_ofdm_txpower,
+       .config_bb_with_headerfile = _rtl92cu_phy_config_bb_with_headerfile,
+       .config_bb_with_pgheaderfile = _rtl92cu_phy_config_bb_with_pgheaderfile,
+       .phy_lc_calibrate = _rtl92cu_phy_lc_calibrate,
+       .phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback,
+       .dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower,
+};
+
+static struct rtl_mod_params rtl92cu_mod_params = {
+       .sw_crypto = 0,
+};
+
+static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
+       /* rx */
+       .in_ep_num = RTL92C_USB_BULK_IN_NUM,
+       .rx_urb_num = RTL92C_NUM_RX_URBS,
+       .rx_max_size = RTL92C_SIZE_MAX_RX_BUFFER,
+       .usb_rx_hdl = rtl8192cu_rx_hdl,
+       .usb_rx_segregate_hdl = NULL, /* rtl8192c_rx_segregate_hdl; */
+       /* tx */
+       .usb_tx_cleanup = rtl8192c_tx_cleanup,
+       .usb_tx_post_hdl = rtl8192c_tx_post_hdl,
+       .usb_tx_aggregate_hdl = rtl8192c_tx_aggregate_hdl,
+       /* endpoint mapping */
+       .usb_endpoint_mapping = rtl8192cu_endpoint_mapping,
+       .usb_mq_to_hwq = rtl8192cu_mq_to_hwq,
+};
+
+static struct rtl_hal_cfg rtl92cu_hal_cfg = {
+       .name = "rtl92c_usb",
+       .fw_name = "rtlwifi/rtl8192cufw.bin",
+       .ops = &rtl8192cu_hal_ops,
+       .mod_params = &rtl92cu_mod_params,
+       .usb_interface_cfg = &rtl92cu_interface_cfg,
+
+       .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
+       .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
+       .maps[SYS_CLK] = REG_SYS_CLKR,
+       .maps[MAC_RCR_AM] = AM,
+       .maps[MAC_RCR_AB] = AB,
+       .maps[MAC_RCR_ACRC32] = ACRC32,
+       .maps[MAC_RCR_ACF] = ACF,
+       .maps[MAC_RCR_AAP] = AAP,
+
+       .maps[EFUSE_TEST] = REG_EFUSE_TEST,
+       .maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
+       .maps[EFUSE_CLK] = 0,
+       .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
+       .maps[EFUSE_PWC_EV12V] = PWC_EV12V,
+       .maps[EFUSE_FEN_ELDR] = FEN_ELDR,
+       .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
+       .maps[EFUSE_ANA8M] = EFUSE_ANA8M,
+       .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
+       .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
+       .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
+
+       .maps[RWCAM] = REG_CAMCMD,
+       .maps[WCAMI] = REG_CAMWRITE,
+       .maps[RCAMO] = REG_CAMREAD,
+       .maps[CAMDBG] = REG_CAMDBG,
+       .maps[SECR] = REG_SECCFG,
+       .maps[SEC_CAM_NONE] = CAM_NONE,
+       .maps[SEC_CAM_WEP40] = CAM_WEP40,
+       .maps[SEC_CAM_TKIP] = CAM_TKIP,
+       .maps[SEC_CAM_AES] = CAM_AES,
+       .maps[SEC_CAM_WEP104] = CAM_WEP104,
+
+       .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
+       .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
+       .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
+       .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
+       .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
+       .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
+       .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8,
+       .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
+       .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
+       .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
+       .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
+       .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
+       .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
+       .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
+       .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,
+       .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,
+
+       .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
+       .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
+       .maps[RTL_IMR_BcnInt] = IMR_BCNINT,
+       .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
+       .maps[RTL_IMR_RDU] = IMR_RDU,
+       .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
+       .maps[RTL_IMR_BDOK] = IMR_BDOK,
+       .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
+       .maps[RTL_IMR_TBDER] = IMR_TBDER,
+       .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
+       .maps[RTL_IMR_TBDOK] = IMR_TBDOK,
+       .maps[RTL_IMR_BKDOK] = IMR_BKDOK,
+       .maps[RTL_IMR_BEDOK] = IMR_BEDOK,
+       .maps[RTL_IMR_VIDOK] = IMR_VIDOK,
+       .maps[RTL_IMR_VODOK] = IMR_VODOK,
+       .maps[RTL_IMR_ROK] = IMR_ROK,
+       .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
+
+       .maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M,
+       .maps[RTL_RC_CCK_RATE2M] = DESC92C_RATE2M,
+       .maps[RTL_RC_CCK_RATE5_5M] = DESC92C_RATE5_5M,
+       .maps[RTL_RC_CCK_RATE11M] = DESC92C_RATE11M,
+       .maps[RTL_RC_OFDM_RATE6M] = DESC92C_RATE6M,
+       .maps[RTL_RC_OFDM_RATE9M] = DESC92C_RATE9M,
+       .maps[RTL_RC_OFDM_RATE12M] = DESC92C_RATE12M,
+       .maps[RTL_RC_OFDM_RATE18M] = DESC92C_RATE18M,
+       .maps[RTL_RC_OFDM_RATE24M] = DESC92C_RATE24M,
+       .maps[RTL_RC_OFDM_RATE36M] = DESC92C_RATE36M,
+       .maps[RTL_RC_OFDM_RATE48M] = DESC92C_RATE48M,
+       .maps[RTL_RC_OFDM_RATE54M] = DESC92C_RATE54M,
+       .maps[RTL_RC_HT_RATEMCS7] = DESC92C_RATEMCS7,
+       .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15,
+};
+
+#define USB_VENDER_ID_REALTEK          0x0bda
+
+/* 2010-10-19 DID_USB_V3.4 */
+static struct usb_device_id rtl8192c_usb_ids[] = {
+
+       /*=== Realtek demoboard ===*/
+       /* Default ID */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)},
+
+       /****** 8188CU ********/
+       /* 8188CE-VAU USB minCard */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8170, rtl92cu_hal_cfg)},
+       /* 8188cu 1*1 dongle */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8176, rtl92cu_hal_cfg)},
+       /* 8188cu 1*1 dongle, (b/g mode only) */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
+       /* 8188cu Slim Solo */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817a, rtl92cu_hal_cfg)},
+       /* 8188cu Slim Combo */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
+       /* 8188RU High-power USB Dongle */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)},
+       /* 8188CE-VAU USB minCard (b/g mode only) */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
+       /* 8188 Combo for BC4 */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
+
+       /****** 8192CU ********/
+       /* 8191cu 1*2 */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
+       /* 8192cu 2*2 */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
+       /* 8192CE-VAU USB minCard */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)},
+
+       /*=== Customer ID ===*/
+       /****** 8188CU ********/
+       {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/
+       {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
+       {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
+       {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+       {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+       {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
+       /* HP - Lite-On ,8188CUS Slim Combo */
+       {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
+       {RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/
+       {RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/
+       {RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/
+       {RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/
+       {RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/
+       {RTL_USB_DEVICE(0x13d3, 0x3358, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/
+       /* Russian customer - Azwave (8188CE-VAU, b/g mode only) */
+       {RTL_USB_DEVICE(0x13d3, 0x3359, rtl92cu_hal_cfg)},
+
+       /****** 8192CU ********/
+       {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/
+       {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/
+       {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
+       {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Abocom -Abocom*/
+       {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
+       {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+       {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+       {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
+       {RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/
+       {}
+};
+
+MODULE_DEVICE_TABLE(usb, rtl8192c_usb_ids);
+
+static struct usb_driver rtl8192cu_driver = {
+       .name = "rtl8192cu",
+       .probe = rtl_usb_probe,
+       .disconnect = rtl_usb_disconnect,
+       .id_table = rtl8192c_usb_ids,
+
+#ifdef CONFIG_PM
+       /* .suspend = rtl_usb_suspend, */
+       /* .resume = rtl_usb_resume, */
+       /* .reset_resume = rtl8192c_resume, */
+#endif /* CONFIG_PM */
+#ifdef CONFIG_AUTOSUSPEND
+       .supports_autosuspend = 1,
+#endif
+};
+
+static int __init rtl8192cu_init(void)
+{
+       return usb_register(&rtl8192cu_driver);
+}
+
+static void __exit rtl8192cu_exit(void)
+{
+       usb_deregister(&rtl8192cu_driver);
+}
+
+module_init(rtl8192cu_init);
+module_exit(rtl8192cu_exit);
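
The id table above is what the USB core matches a newly attached dongle against: each RTL_USB_DEVICE() entry pins one vendor/product ID pair and, presumably through the id table's driver_info field, hands rtl_usb_probe() the rtl92cu_hal_cfg to use for that device. A minimal user-space sketch of that matching step follows; the struct and names below are illustrative stand-ins, not the driver's real types.

#include <stdio.h>
#include <stdint.h>

/*
 * Illustration only, not part of the patch: struct demo_id is a simplified
 * stand-in for struct usb_device_id, and driver_data stands in for the
 * rtl92cu_hal_cfg pointer the real entries presumably carry in driver_info.
 */
struct demo_id {
        uint16_t vendor;
        uint16_t product;
        const char *driver_data;
};

static const struct demo_id demo_table[] = {
        { 0x0bda, 0x8191, "rtl92cu_hal_cfg" },  /* Realtek default ID */
        { 0x0bda, 0x8176, "rtl92cu_hal_cfg" },  /* 8188cu 1*1 dongle */
        { 0, 0, NULL }                          /* terminator, like the {} above */
};

static const char *demo_match(uint16_t vendor, uint16_t product)
{
        const struct demo_id *e;

        /* Walk the table until the empty terminator, return the first hit. */
        for (e = demo_table; e->vendor; e++)
                if (e->vendor == vendor && e->product == product)
                        return e->driver_data;
        return NULL;
}

int main(void)
{
        const char *cfg = demo_match(0x0bda, 0x8176);

        printf("0bda:8176 -> %s\n", cfg ? cfg : "no matching entry");
        return 0;
}
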
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
new file mode 100644 (file)
index 0000000..43b1177
--- /dev/null
@@ -0,0 +1,53 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_SW_H__
+#define __RTL92CU_SW_H__
+
+#define EFUSE_MAX_SECTION      16
+
+void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                       u8 *powerlevel);
+void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                       u8 *ppowerlevel, u8 channel);
+bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+                                           u8 configtype);
+bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+                                                   u8 configtype);
+void _rtl92cu_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
+void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
+                          enum radio_path rfpath,
+                          u32 regaddr, u32 bitmask, u32 data);
+bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
+                                  enum rf_pwrstate rfpwr_state);
+u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
+                           enum radio_path rfpath, u32 regaddr, u32 bitmask);
+void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.c b/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
new file mode 100644 (file)
index 0000000..d57ef5e
--- /dev/null
@@ -0,0 +1,1888 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "table.h"
+
+u32 RTL8192CUPHY_REG_2TARRAY[RTL8192CUPHY_REG_2TARRAY_LENGTH] = {
+       0x024, 0x0011800f,
+       0x028, 0x00ffdb83,
+       0x800, 0x80040002,
+       0x804, 0x00000003,
+       0x808, 0x0000fc00,
+       0x80c, 0x0000000a,
+       0x810, 0x10005388,
+       0x814, 0x020c3d10,
+       0x818, 0x02200385,
+       0x81c, 0x00000000,
+       0x820, 0x01000100,
+       0x824, 0x00390004,
+       0x828, 0x01000100,
+       0x82c, 0x00390004,
+       0x830, 0x27272727,
+       0x834, 0x27272727,
+       0x838, 0x27272727,
+       0x83c, 0x27272727,
+       0x840, 0x00010000,
+       0x844, 0x00010000,
+       0x848, 0x27272727,
+       0x84c, 0x27272727,
+       0x850, 0x00000000,
+       0x854, 0x00000000,
+       0x858, 0x569a569a,
+       0x85c, 0x0c1b25a4,
+       0x860, 0x66e60230,
+       0x864, 0x061f0130,
+       0x868, 0x27272727,
+       0x86c, 0x2b2b2b27,
+       0x870, 0x07000700,
+       0x874, 0x22184000,
+       0x878, 0x08080808,
+       0x87c, 0x00000000,
+       0x880, 0xc0083070,
+       0x884, 0x000004d5,
+       0x888, 0x00000000,
+       0x88c, 0xcc0000c0,
+       0x890, 0x00000800,
+       0x894, 0xfffffffe,
+       0x898, 0x40302010,
+       0x89c, 0x00706050,
+       0x900, 0x00000000,
+       0x904, 0x00000023,
+       0x908, 0x00000000,
+       0x90c, 0x81121313,
+       0xa00, 0x00d047c8,
+       0xa04, 0x80ff000c,
+       0xa08, 0x8c838300,
+       0xa0c, 0x2e68120f,
+       0xa10, 0x9500bb78,
+       0xa14, 0x11144028,
+       0xa18, 0x00881117,
+       0xa1c, 0x89140f00,
+       0xa20, 0x1a1b0000,
+       0xa24, 0x090e1317,
+       0xa28, 0x00000204,
+       0xa2c, 0x00d30000,
+       0xa70, 0x101fbf00,
+       0xa74, 0x00000007,
+       0xc00, 0x48071d40,
+       0xc04, 0x03a05633,
+       0xc08, 0x000000e4,
+       0xc0c, 0x6c6c6c6c,
+       0xc10, 0x08800000,
+       0xc14, 0x40000100,
+       0xc18, 0x08800000,
+       0xc1c, 0x40000100,
+       0xc20, 0x00000000,
+       0xc24, 0x00000000,
+       0xc28, 0x00000000,
+       0xc2c, 0x00000000,
+       0xc30, 0x69e9ac44,
+       0xc34, 0x469652cf,
+       0xc38, 0x49795994,
+       0xc3c, 0x0a97971c,
+       0xc40, 0x1f7c403f,
+       0xc44, 0x000100b7,
+       0xc48, 0xec020107,
+       0xc4c, 0x007f037f,
+       0xc50, 0x6954341e,
+       0xc54, 0x43bc0094,
+       0xc58, 0x6954341e,
+       0xc5c, 0x433c0094,
+       0xc60, 0x00000000,
+       0xc64, 0x5116848b,
+       0xc68, 0x47c00bff,
+       0xc6c, 0x00000036,
+       0xc70, 0x2c7f000d,
+       0xc74, 0x0186115b,
+       0xc78, 0x0000001f,
+       0xc7c, 0x00b99612,
+       0xc80, 0x40000100,
+       0xc84, 0x20f60000,
+       0xc88, 0x40000100,
+       0xc8c, 0x20200000,
+       0xc90, 0x00121820,
+       0xc94, 0x00000000,
+       0xc98, 0x00121820,
+       0xc9c, 0x00007f7f,
+       0xca0, 0x00000000,
+       0xca4, 0x00000080,
+       0xca8, 0x00000000,
+       0xcac, 0x00000000,
+       0xcb0, 0x00000000,
+       0xcb4, 0x00000000,
+       0xcb8, 0x00000000,
+       0xcbc, 0x28000000,
+       0xcc0, 0x00000000,
+       0xcc4, 0x00000000,
+       0xcc8, 0x00000000,
+       0xccc, 0x00000000,
+       0xcd0, 0x00000000,
+       0xcd4, 0x00000000,
+       0xcd8, 0x64b22427,
+       0xcdc, 0x00766932,
+       0xce0, 0x00222222,
+       0xce4, 0x00000000,
+       0xce8, 0x37644302,
+       0xcec, 0x2f97d40c,
+       0xd00, 0x00080740,
+       0xd04, 0x00020403,
+       0xd08, 0x0000907f,
+       0xd0c, 0x20010201,
+       0xd10, 0xa0633333,
+       0xd14, 0x3333bc43,
+       0xd18, 0x7a8f5b6b,
+       0xd2c, 0xcc979975,
+       0xd30, 0x00000000,
+       0xd34, 0x80608000,
+       0xd38, 0x00000000,
+       0xd3c, 0x00027293,
+       0xd40, 0x00000000,
+       0xd44, 0x00000000,
+       0xd48, 0x00000000,
+       0xd4c, 0x00000000,
+       0xd50, 0x6437140a,
+       0xd54, 0x00000000,
+       0xd58, 0x00000000,
+       0xd5c, 0x30032064,
+       0xd60, 0x4653de68,
+       0xd64, 0x04518a3c,
+       0xd68, 0x00002101,
+       0xd6c, 0x2a201c16,
+       0xd70, 0x1812362e,
+       0xd74, 0x322c2220,
+       0xd78, 0x000e3c24,
+       0xe00, 0x2a2a2a2a,
+       0xe04, 0x2a2a2a2a,
+       0xe08, 0x03902a2a,
+       0xe10, 0x2a2a2a2a,
+       0xe14, 0x2a2a2a2a,
+       0xe18, 0x2a2a2a2a,
+       0xe1c, 0x2a2a2a2a,
+       0xe28, 0x00000000,
+       0xe30, 0x1000dc1f,
+       0xe34, 0x10008c1f,
+       0xe38, 0x02140102,
+       0xe3c, 0x681604c2,
+       0xe40, 0x01007c00,
+       0xe44, 0x01004800,
+       0xe48, 0xfb000000,
+       0xe4c, 0x000028d1,
+       0xe50, 0x1000dc1f,
+       0xe54, 0x10008c1f,
+       0xe58, 0x02140102,
+       0xe5c, 0x28160d05,
+       0xe60, 0x00000010,
+       0xe68, 0x001b25a4,
+       0xe6c, 0x63db25a4,
+       0xe70, 0x63db25a4,
+       0xe74, 0x0c1b25a4,
+       0xe78, 0x0c1b25a4,
+       0xe7c, 0x0c1b25a4,
+       0xe80, 0x0c1b25a4,
+       0xe84, 0x63db25a4,
+       0xe88, 0x0c1b25a4,
+       0xe8c, 0x63db25a4,
+       0xed0, 0x63db25a4,
+       0xed4, 0x63db25a4,
+       0xed8, 0x63db25a4,
+       0xedc, 0x001b25a4,
+       0xee0, 0x001b25a4,
+       0xeec, 0x6fdb25a4,
+       0xf14, 0x00000003,
+       0xf4c, 0x00000000,
+       0xf00, 0x00000300,
+};
+
+u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH] = {
+       0x024, 0x0011800f,
+       0x028, 0x00ffdb83,
+       0x800, 0x80040000,
+       0x804, 0x00000001,
+       0x808, 0x0000fc00,
+       0x80c, 0x0000000a,
+       0x810, 0x10005388,
+       0x814, 0x020c3d10,
+       0x818, 0x02200385,
+       0x81c, 0x00000000,
+       0x820, 0x01000100,
+       0x824, 0x00390004,
+       0x828, 0x00000000,
+       0x82c, 0x00000000,
+       0x830, 0x00000000,
+       0x834, 0x00000000,
+       0x838, 0x00000000,
+       0x83c, 0x00000000,
+       0x840, 0x00010000,
+       0x844, 0x00000000,
+       0x848, 0x00000000,
+       0x84c, 0x00000000,
+       0x850, 0x00000000,
+       0x854, 0x00000000,
+       0x858, 0x569a569a,
+       0x85c, 0x001b25a4,
+       0x860, 0x66e60230,
+       0x864, 0x061f0130,
+       0x868, 0x00000000,
+       0x86c, 0x32323200,
+       0x870, 0x07000700,
+       0x874, 0x22004000,
+       0x878, 0x00000808,
+       0x87c, 0x00000000,
+       0x880, 0xc0083070,
+       0x884, 0x000004d5,
+       0x888, 0x00000000,
+       0x88c, 0xccc000c0,
+       0x890, 0x00000800,
+       0x894, 0xfffffffe,
+       0x898, 0x40302010,
+       0x89c, 0x00706050,
+       0x900, 0x00000000,
+       0x904, 0x00000023,
+       0x908, 0x00000000,
+       0x90c, 0x81121111,
+       0xa00, 0x00d047c8,
+       0xa04, 0x80ff000c,
+       0xa08, 0x8c838300,
+       0xa0c, 0x2e68120f,
+       0xa10, 0x9500bb78,
+       0xa14, 0x11144028,
+       0xa18, 0x00881117,
+       0xa1c, 0x89140f00,
+       0xa20, 0x1a1b0000,
+       0xa24, 0x090e1317,
+       0xa28, 0x00000204,
+       0xa2c, 0x00d30000,
+       0xa70, 0x101fbf00,
+       0xa74, 0x00000007,
+       0xc00, 0x48071d40,
+       0xc04, 0x03a05611,
+       0xc08, 0x000000e4,
+       0xc0c, 0x6c6c6c6c,
+       0xc10, 0x08800000,
+       0xc14, 0x40000100,
+       0xc18, 0x08800000,
+       0xc1c, 0x40000100,
+       0xc20, 0x00000000,
+       0xc24, 0x00000000,
+       0xc28, 0x00000000,
+       0xc2c, 0x00000000,
+       0xc30, 0x69e9ac44,
+       0xc34, 0x469652cf,
+       0xc38, 0x49795994,
+       0xc3c, 0x0a97971c,
+       0xc40, 0x1f7c403f,
+       0xc44, 0x000100b7,
+       0xc48, 0xec020107,
+       0xc4c, 0x007f037f,
+       0xc50, 0x6954341e,
+       0xc54, 0x43bc0094,
+       0xc58, 0x6954341e,
+       0xc5c, 0x433c0094,
+       0xc60, 0x00000000,
+       0xc64, 0x5116848b,
+       0xc68, 0x47c00bff,
+       0xc6c, 0x00000036,
+       0xc70, 0x2c7f000d,
+       0xc74, 0x018610db,
+       0xc78, 0x0000001f,
+       0xc7c, 0x00b91612,
+       0xc80, 0x40000100,
+       0xc84, 0x20f60000,
+       0xc88, 0x40000100,
+       0xc8c, 0x20200000,
+       0xc90, 0x00121820,
+       0xc94, 0x00000000,
+       0xc98, 0x00121820,
+       0xc9c, 0x00007f7f,
+       0xca0, 0x00000000,
+       0xca4, 0x00000080,
+       0xca8, 0x00000000,
+       0xcac, 0x00000000,
+       0xcb0, 0x00000000,
+       0xcb4, 0x00000000,
+       0xcb8, 0x00000000,
+       0xcbc, 0x28000000,
+       0xcc0, 0x00000000,
+       0xcc4, 0x00000000,
+       0xcc8, 0x00000000,
+       0xccc, 0x00000000,
+       0xcd0, 0x00000000,
+       0xcd4, 0x00000000,
+       0xcd8, 0x64b22427,
+       0xcdc, 0x00766932,
+       0xce0, 0x00222222,
+       0xce4, 0x00000000,
+       0xce8, 0x37644302,
+       0xcec, 0x2f97d40c,
+       0xd00, 0x00080740,
+       0xd04, 0x00020401,
+       0xd08, 0x0000907f,
+       0xd0c, 0x20010201,
+       0xd10, 0xa0633333,
+       0xd14, 0x3333bc43,
+       0xd18, 0x7a8f5b6b,
+       0xd2c, 0xcc979975,
+       0xd30, 0x00000000,
+       0xd34, 0x80608000,
+       0xd38, 0x00000000,
+       0xd3c, 0x00027293,
+       0xd40, 0x00000000,
+       0xd44, 0x00000000,
+       0xd48, 0x00000000,
+       0xd4c, 0x00000000,
+       0xd50, 0x6437140a,
+       0xd54, 0x00000000,
+       0xd58, 0x00000000,
+       0xd5c, 0x30032064,
+       0xd60, 0x4653de68,
+       0xd64, 0x04518a3c,
+       0xd68, 0x00002101,
+       0xd6c, 0x2a201c16,
+       0xd70, 0x1812362e,
+       0xd74, 0x322c2220,
+       0xd78, 0x000e3c24,
+       0xe00, 0x2a2a2a2a,
+       0xe04, 0x2a2a2a2a,
+       0xe08, 0x03902a2a,
+       0xe10, 0x2a2a2a2a,
+       0xe14, 0x2a2a2a2a,
+       0xe18, 0x2a2a2a2a,
+       0xe1c, 0x2a2a2a2a,
+       0xe28, 0x00000000,
+       0xe30, 0x1000dc1f,
+       0xe34, 0x10008c1f,
+       0xe38, 0x02140102,
+       0xe3c, 0x681604c2,
+       0xe40, 0x01007c00,
+       0xe44, 0x01004800,
+       0xe48, 0xfb000000,
+       0xe4c, 0x000028d1,
+       0xe50, 0x1000dc1f,
+       0xe54, 0x10008c1f,
+       0xe58, 0x02140102,
+       0xe5c, 0x28160d05,
+       0xe60, 0x00000008,
+       0xe68, 0x001b25a4,
+       0xe6c, 0x631b25a0,
+       0xe70, 0x631b25a0,
+       0xe74, 0x081b25a0,
+       0xe78, 0x081b25a0,
+       0xe7c, 0x081b25a0,
+       0xe80, 0x081b25a0,
+       0xe84, 0x631b25a0,
+       0xe88, 0x081b25a0,
+       0xe8c, 0x631b25a0,
+       0xed0, 0x631b25a0,
+       0xed4, 0x631b25a0,
+       0xed8, 0x631b25a0,
+       0xedc, 0x001b25a0,
+       0xee0, 0x001b25a0,
+       0xeec, 0x6b1b25a0,
+       0xf14, 0x00000003,
+       0xf4c, 0x00000000,
+       0xf00, 0x00000300,
+};
+
+u32 RTL8192CUPHY_REG_ARRAY_PG[RTL8192CUPHY_REG_ARRAY_PGLENGTH] = {
+       0xe00, 0xffffffff, 0x07090c0c,
+       0xe04, 0xffffffff, 0x01020405,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x0b0c0c0e,
+       0xe14, 0xffffffff, 0x01030506,
+       0xe18, 0xffffffff, 0x0b0c0d0e,
+       0xe1c, 0xffffffff, 0x01030509,
+       0x830, 0xffffffff, 0x07090c0c,
+       0x834, 0xffffffff, 0x01020405,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x0b0c0d0e,
+       0x848, 0xffffffff, 0x01030509,
+       0x84c, 0xffffffff, 0x0b0c0d0e,
+       0x868, 0xffffffff, 0x01030509,
+       0xe00, 0xffffffff, 0x00000000,
+       0xe04, 0xffffffff, 0x00000000,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x00000000,
+       0x834, 0xffffffff, 0x00000000,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+       0xe00, 0xffffffff, 0x04040404,
+       0xe04, 0xffffffff, 0x00020204,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x06060606,
+       0xe14, 0xffffffff, 0x00020406,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x04040404,
+       0x834, 0xffffffff, 0x00020204,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x06060606,
+       0x848, 0xffffffff, 0x00020406,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+       0xe00, 0xffffffff, 0x00000000,
+       0xe04, 0xffffffff, 0x00000000,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x00000000,
+       0x834, 0xffffffff, 0x00000000,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+       0xe00, 0xffffffff, 0x00000000,
+       0xe04, 0xffffffff, 0x00000000,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x00000000,
+       0x834, 0xffffffff, 0x00000000,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+       0xe00, 0xffffffff, 0x04040404,
+       0xe04, 0xffffffff, 0x00020204,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x04040404,
+       0x834, 0xffffffff, 0x00020204,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+       0xe00, 0xffffffff, 0x00000000,
+       0xe04, 0xffffffff, 0x00000000,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x00000000,
+       0x834, 0xffffffff, 0x00000000,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+};
+
+u32 RTL8192CURADIOA_2TARRAY[RTL8192CURADIOA_2TARRAYLENGTH] = {
+       0x000, 0x00030159,
+       0x001, 0x00031284,
+       0x002, 0x00098000,
+       0x003, 0x00018c63,
+       0x004, 0x000210e7,
+       0x009, 0x0002044f,
+       0x00a, 0x0001adb1,
+       0x00b, 0x00054867,
+       0x00c, 0x0008992e,
+       0x00d, 0x0000e52c,
+       0x00e, 0x00039ce7,
+       0x00f, 0x00000451,
+       0x019, 0x00000000,
+       0x01a, 0x00010255,
+       0x01b, 0x00060a00,
+       0x01c, 0x000fc378,
+       0x01d, 0x000a1250,
+       0x01e, 0x0004445f,
+       0x01f, 0x00080001,
+       0x020, 0x0000b614,
+       0x021, 0x0006c000,
+       0x022, 0x00000000,
+       0x023, 0x00001558,
+       0x024, 0x00000060,
+       0x025, 0x00000483,
+       0x026, 0x0004f000,
+       0x027, 0x000ec7d9,
+       0x028, 0x000577c0,
+       0x029, 0x00004783,
+       0x02a, 0x00000001,
+       0x02b, 0x00021334,
+       0x02a, 0x00000000,
+       0x02b, 0x00000054,
+       0x02a, 0x00000001,
+       0x02b, 0x00000808,
+       0x02b, 0x00053333,
+       0x02c, 0x0000000c,
+       0x02a, 0x00000002,
+       0x02b, 0x00000808,
+       0x02b, 0x0005b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000003,
+       0x02b, 0x00000808,
+       0x02b, 0x00063333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000004,
+       0x02b, 0x00000808,
+       0x02b, 0x0006b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000005,
+       0x02b, 0x00000808,
+       0x02b, 0x00073333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000006,
+       0x02b, 0x00000709,
+       0x02b, 0x0005b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000007,
+       0x02b, 0x00000709,
+       0x02b, 0x00063333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000008,
+       0x02b, 0x0000060a,
+       0x02b, 0x0004b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000009,
+       0x02b, 0x0000060a,
+       0x02b, 0x00053333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000a,
+       0x02b, 0x0000060a,
+       0x02b, 0x0005b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000b,
+       0x02b, 0x0000060a,
+       0x02b, 0x00063333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000c,
+       0x02b, 0x0000060a,
+       0x02b, 0x0006b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000d,
+       0x02b, 0x0000060a,
+       0x02b, 0x00073333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000e,
+       0x02b, 0x0000050b,
+       0x02b, 0x00066666,
+       0x02c, 0x0000001a,
+       0x02a, 0x000e0000,
+       0x010, 0x0004000f,
+       0x011, 0x000e31fc,
+       0x010, 0x0006000f,
+       0x011, 0x000ff9f8,
+       0x010, 0x0002000f,
+       0x011, 0x000203f9,
+       0x010, 0x0003000f,
+       0x011, 0x000ff500,
+       0x010, 0x00000000,
+       0x011, 0x00000000,
+       0x010, 0x0008000f,
+       0x011, 0x0003f100,
+       0x010, 0x0009000f,
+       0x011, 0x00023100,
+       0x012, 0x00032000,
+       0x012, 0x00071000,
+       0x012, 0x000b0000,
+       0x012, 0x000fc000,
+       0x013, 0x000287af,
+       0x013, 0x000244b7,
+       0x013, 0x000204ab,
+       0x013, 0x0001c49f,
+       0x013, 0x00018493,
+       0x013, 0x00014297,
+       0x013, 0x00010295,
+       0x013, 0x0000c298,
+       0x013, 0x0000819c,
+       0x013, 0x000040a8,
+       0x013, 0x0000001c,
+       0x014, 0x0001944c,
+       0x014, 0x00059444,
+       0x014, 0x0009944c,
+       0x014, 0x000d9444,
+       0x015, 0x0000f424,
+       0x015, 0x0004f424,
+       0x015, 0x0008f424,
+       0x015, 0x000cf424,
+       0x016, 0x000e0330,
+       0x016, 0x000a0330,
+       0x016, 0x00060330,
+       0x016, 0x00020330,
+       0x000, 0x00010159,
+       0x018, 0x0000f401,
+       0x0fe, 0x00000000,
+       0x0fe, 0x00000000,
+       0x01f, 0x00080003,
+       0x0fe, 0x00000000,
+       0x0fe, 0x00000000,
+       0x01e, 0x00044457,
+       0x01f, 0x00080000,
+       0x000, 0x00030159,
+};
+
+u32 RTL8192CU_RADIOB_2TARRAY[RTL8192CURADIOB_2TARRAYLENGTH] = {
+       0x000, 0x00030159,
+       0x001, 0x00031284,
+       0x002, 0x00098000,
+       0x003, 0x00018c63,
+       0x004, 0x000210e7,
+       0x009, 0x0002044f,
+       0x00a, 0x0001adb1,
+       0x00b, 0x00054867,
+       0x00c, 0x0008992e,
+       0x00d, 0x0000e52c,
+       0x00e, 0x00039ce7,
+       0x00f, 0x00000451,
+       0x012, 0x00032000,
+       0x012, 0x00071000,
+       0x012, 0x000b0000,
+       0x012, 0x000fc000,
+       0x013, 0x000287af,
+       0x013, 0x000244b7,
+       0x013, 0x000204ab,
+       0x013, 0x0001c49f,
+       0x013, 0x00018493,
+       0x013, 0x00014297,
+       0x013, 0x00010295,
+       0x013, 0x0000c298,
+       0x013, 0x0000819c,
+       0x013, 0x000040a8,
+       0x013, 0x0000001c,
+       0x014, 0x0001944c,
+       0x014, 0x00059444,
+       0x014, 0x0009944c,
+       0x014, 0x000d9444,
+       0x015, 0x0000f424,
+       0x015, 0x0004f424,
+       0x015, 0x0008f424,
+       0x015, 0x000cf424,
+       0x016, 0x000e0330,
+       0x016, 0x000a0330,
+       0x016, 0x00060330,
+       0x016, 0x00020330,
+};
+
+u32 RTL8192CU_RADIOA_1TARRAY[RTL8192CURADIOA_1TARRAYLENGTH] = {
+       0x000, 0x00030159,
+       0x001, 0x00031284,
+       0x002, 0x00098000,
+       0x003, 0x00018c63,
+       0x004, 0x000210e7,
+       0x009, 0x0002044f,
+       0x00a, 0x0001adb1,
+       0x00b, 0x00054867,
+       0x00c, 0x0008992e,
+       0x00d, 0x0000e52c,
+       0x00e, 0x00039ce7,
+       0x00f, 0x00000451,
+       0x019, 0x00000000,
+       0x01a, 0x00010255,
+       0x01b, 0x00060a00,
+       0x01c, 0x000fc378,
+       0x01d, 0x000a1250,
+       0x01e, 0x0004445f,
+       0x01f, 0x00080001,
+       0x020, 0x0000b614,
+       0x021, 0x0006c000,
+       0x022, 0x00000000,
+       0x023, 0x00001558,
+       0x024, 0x00000060,
+       0x025, 0x00000483,
+       0x026, 0x0004f000,
+       0x027, 0x000ec7d9,
+       0x028, 0x000577c0,
+       0x029, 0x00004783,
+       0x02a, 0x00000001,
+       0x02b, 0x00021334,
+       0x02a, 0x00000000,
+       0x02b, 0x00000054,
+       0x02a, 0x00000001,
+       0x02b, 0x00000808,
+       0x02b, 0x00053333,
+       0x02c, 0x0000000c,
+       0x02a, 0x00000002,
+       0x02b, 0x00000808,
+       0x02b, 0x0005b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000003,
+       0x02b, 0x00000808,
+       0x02b, 0x00063333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000004,
+       0x02b, 0x00000808,
+       0x02b, 0x0006b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000005,
+       0x02b, 0x00000808,
+       0x02b, 0x00073333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000006,
+       0x02b, 0x00000709,
+       0x02b, 0x0005b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000007,
+       0x02b, 0x00000709,
+       0x02b, 0x00063333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000008,
+       0x02b, 0x0000060a,
+       0x02b, 0x0004b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000009,
+       0x02b, 0x0000060a,
+       0x02b, 0x00053333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000a,
+       0x02b, 0x0000060a,
+       0x02b, 0x0005b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000b,
+       0x02b, 0x0000060a,
+       0x02b, 0x00063333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000c,
+       0x02b, 0x0000060a,
+       0x02b, 0x0006b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000d,
+       0x02b, 0x0000060a,
+       0x02b, 0x00073333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000e,
+       0x02b, 0x0000050b,
+       0x02b, 0x00066666,
+       0x02c, 0x0000001a,
+       0x02a, 0x000e0000,
+       0x010, 0x0004000f,
+       0x011, 0x000e31fc,
+       0x010, 0x0006000f,
+       0x011, 0x000ff9f8,
+       0x010, 0x0002000f,
+       0x011, 0x000203f9,
+       0x010, 0x0003000f,
+       0x011, 0x000ff500,
+       0x010, 0x00000000,
+       0x011, 0x00000000,
+       0x010, 0x0008000f,
+       0x011, 0x0003f100,
+       0x010, 0x0009000f,
+       0x011, 0x00023100,
+       0x012, 0x00032000,
+       0x012, 0x00071000,
+       0x012, 0x000b0000,
+       0x012, 0x000fc000,
+       0x013, 0x000287b3,
+       0x013, 0x000244b7,
+       0x013, 0x000204ab,
+       0x013, 0x0001c49f,
+       0x013, 0x00018493,
+       0x013, 0x0001429b,
+       0x013, 0x00010299,
+       0x013, 0x0000c29c,
+       0x013, 0x000081a0,
+       0x013, 0x000040ac,
+       0x013, 0x00000020,
+       0x014, 0x0001944c,
+       0x014, 0x00059444,
+       0x014, 0x0009944c,
+       0x014, 0x000d9444,
+       0x015, 0x0000f405,
+       0x015, 0x0004f405,
+       0x015, 0x0008f405,
+       0x015, 0x000cf405,
+       0x016, 0x000e0330,
+       0x016, 0x000a0330,
+       0x016, 0x00060330,
+       0x016, 0x00020330,
+       0x000, 0x00010159,
+       0x018, 0x0000f401,
+       0x0fe, 0x00000000,
+       0x0fe, 0x00000000,
+       0x01f, 0x00080003,
+       0x0fe, 0x00000000,
+       0x0fe, 0x00000000,
+       0x01e, 0x00044457,
+       0x01f, 0x00080000,
+       0x000, 0x00030159,
+};
+
+u32 RTL8192CU_RADIOB_1TARRAY[RTL8192CURADIOB_1TARRAYLENGTH] = {
+       0x0,
+};
+
+u32 RTL8192CUMAC_2T_ARRAY[RTL8192CUMAC_2T_ARRAYLENGTH] = {
+       0x420, 0x00000080,
+       0x423, 0x00000000,
+       0x430, 0x00000000,
+       0x431, 0x00000000,
+       0x432, 0x00000000,
+       0x433, 0x00000001,
+       0x434, 0x00000004,
+       0x435, 0x00000005,
+       0x436, 0x00000006,
+       0x437, 0x00000007,
+       0x438, 0x00000000,
+       0x439, 0x00000000,
+       0x43a, 0x00000000,
+       0x43b, 0x00000001,
+       0x43c, 0x00000004,
+       0x43d, 0x00000005,
+       0x43e, 0x00000006,
+       0x43f, 0x00000007,
+       0x440, 0x0000005d,
+       0x441, 0x00000001,
+       0x442, 0x00000000,
+       0x444, 0x00000015,
+       0x445, 0x000000f0,
+       0x446, 0x0000000f,
+       0x447, 0x00000000,
+       0x458, 0x00000041,
+       0x459, 0x000000a8,
+       0x45a, 0x00000072,
+       0x45b, 0x000000b9,
+       0x460, 0x00000066,
+       0x461, 0x00000066,
+       0x462, 0x00000008,
+       0x463, 0x00000003,
+       0x4c8, 0x000000ff,
+       0x4c9, 0x00000008,
+       0x4cc, 0x000000ff,
+       0x4cd, 0x000000ff,
+       0x4ce, 0x00000001,
+       0x500, 0x00000026,
+       0x501, 0x000000a2,
+       0x502, 0x0000002f,
+       0x503, 0x00000000,
+       0x504, 0x00000028,
+       0x505, 0x000000a3,
+       0x506, 0x0000005e,
+       0x507, 0x00000000,
+       0x508, 0x0000002b,
+       0x509, 0x000000a4,
+       0x50a, 0x0000005e,
+       0x50b, 0x00000000,
+       0x50c, 0x0000004f,
+       0x50d, 0x000000a4,
+       0x50e, 0x00000000,
+       0x50f, 0x00000000,
+       0x512, 0x0000001c,
+       0x514, 0x0000000a,
+       0x515, 0x00000010,
+       0x516, 0x0000000a,
+       0x517, 0x00000010,
+       0x51a, 0x00000016,
+       0x524, 0x0000000f,
+       0x525, 0x0000004f,
+       0x546, 0x00000040,
+       0x547, 0x00000000,
+       0x550, 0x00000010,
+       0x551, 0x00000010,
+       0x559, 0x00000002,
+       0x55a, 0x00000002,
+       0x55d, 0x000000ff,
+       0x605, 0x00000030,
+       0x608, 0x0000000e,
+       0x609, 0x0000002a,
+       0x652, 0x00000020,
+       0x63c, 0x0000000a,
+       0x63d, 0x0000000e,
+       0x63e, 0x0000000a,
+       0x63f, 0x0000000e,
+       0x66e, 0x00000005,
+       0x700, 0x00000021,
+       0x701, 0x00000043,
+       0x702, 0x00000065,
+       0x703, 0x00000087,
+       0x708, 0x00000021,
+       0x709, 0x00000043,
+       0x70a, 0x00000065,
+       0x70b, 0x00000087,
+};
+
+u32 RTL8192CUAGCTAB_2TARRAY[RTL8192CUAGCTAB_2TARRAYLENGTH] = {
+       0xc78, 0x7b000001,
+       0xc78, 0x7b010001,
+       0xc78, 0x7b020001,
+       0xc78, 0x7b030001,
+       0xc78, 0x7b040001,
+       0xc78, 0x7b050001,
+       0xc78, 0x7a060001,
+       0xc78, 0x79070001,
+       0xc78, 0x78080001,
+       0xc78, 0x77090001,
+       0xc78, 0x760a0001,
+       0xc78, 0x750b0001,
+       0xc78, 0x740c0001,
+       0xc78, 0x730d0001,
+       0xc78, 0x720e0001,
+       0xc78, 0x710f0001,
+       0xc78, 0x70100001,
+       0xc78, 0x6f110001,
+       0xc78, 0x6e120001,
+       0xc78, 0x6d130001,
+       0xc78, 0x6c140001,
+       0xc78, 0x6b150001,
+       0xc78, 0x6a160001,
+       0xc78, 0x69170001,
+       0xc78, 0x68180001,
+       0xc78, 0x67190001,
+       0xc78, 0x661a0001,
+       0xc78, 0x651b0001,
+       0xc78, 0x641c0001,
+       0xc78, 0x631d0001,
+       0xc78, 0x621e0001,
+       0xc78, 0x611f0001,
+       0xc78, 0x60200001,
+       0xc78, 0x49210001,
+       0xc78, 0x48220001,
+       0xc78, 0x47230001,
+       0xc78, 0x46240001,
+       0xc78, 0x45250001,
+       0xc78, 0x44260001,
+       0xc78, 0x43270001,
+       0xc78, 0x42280001,
+       0xc78, 0x41290001,
+       0xc78, 0x402a0001,
+       0xc78, 0x262b0001,
+       0xc78, 0x252c0001,
+       0xc78, 0x242d0001,
+       0xc78, 0x232e0001,
+       0xc78, 0x222f0001,
+       0xc78, 0x21300001,
+       0xc78, 0x20310001,
+       0xc78, 0x06320001,
+       0xc78, 0x05330001,
+       0xc78, 0x04340001,
+       0xc78, 0x03350001,
+       0xc78, 0x02360001,
+       0xc78, 0x01370001,
+       0xc78, 0x00380001,
+       0xc78, 0x00390001,
+       0xc78, 0x003a0001,
+       0xc78, 0x003b0001,
+       0xc78, 0x003c0001,
+       0xc78, 0x003d0001,
+       0xc78, 0x003e0001,
+       0xc78, 0x003f0001,
+       0xc78, 0x7b400001,
+       0xc78, 0x7b410001,
+       0xc78, 0x7b420001,
+       0xc78, 0x7b430001,
+       0xc78, 0x7b440001,
+       0xc78, 0x7b450001,
+       0xc78, 0x7a460001,
+       0xc78, 0x79470001,
+       0xc78, 0x78480001,
+       0xc78, 0x77490001,
+       0xc78, 0x764a0001,
+       0xc78, 0x754b0001,
+       0xc78, 0x744c0001,
+       0xc78, 0x734d0001,
+       0xc78, 0x724e0001,
+       0xc78, 0x714f0001,
+       0xc78, 0x70500001,
+       0xc78, 0x6f510001,
+       0xc78, 0x6e520001,
+       0xc78, 0x6d530001,
+       0xc78, 0x6c540001,
+       0xc78, 0x6b550001,
+       0xc78, 0x6a560001,
+       0xc78, 0x69570001,
+       0xc78, 0x68580001,
+       0xc78, 0x67590001,
+       0xc78, 0x665a0001,
+       0xc78, 0x655b0001,
+       0xc78, 0x645c0001,
+       0xc78, 0x635d0001,
+       0xc78, 0x625e0001,
+       0xc78, 0x615f0001,
+       0xc78, 0x60600001,
+       0xc78, 0x49610001,
+       0xc78, 0x48620001,
+       0xc78, 0x47630001,
+       0xc78, 0x46640001,
+       0xc78, 0x45650001,
+       0xc78, 0x44660001,
+       0xc78, 0x43670001,
+       0xc78, 0x42680001,
+       0xc78, 0x41690001,
+       0xc78, 0x406a0001,
+       0xc78, 0x266b0001,
+       0xc78, 0x256c0001,
+       0xc78, 0x246d0001,
+       0xc78, 0x236e0001,
+       0xc78, 0x226f0001,
+       0xc78, 0x21700001,
+       0xc78, 0x20710001,
+       0xc78, 0x06720001,
+       0xc78, 0x05730001,
+       0xc78, 0x04740001,
+       0xc78, 0x03750001,
+       0xc78, 0x02760001,
+       0xc78, 0x01770001,
+       0xc78, 0x00780001,
+       0xc78, 0x00790001,
+       0xc78, 0x007a0001,
+       0xc78, 0x007b0001,
+       0xc78, 0x007c0001,
+       0xc78, 0x007d0001,
+       0xc78, 0x007e0001,
+       0xc78, 0x007f0001,
+       0xc78, 0x3800001e,
+       0xc78, 0x3801001e,
+       0xc78, 0x3802001e,
+       0xc78, 0x3803001e,
+       0xc78, 0x3804001e,
+       0xc78, 0x3805001e,
+       0xc78, 0x3806001e,
+       0xc78, 0x3807001e,
+       0xc78, 0x3808001e,
+       0xc78, 0x3c09001e,
+       0xc78, 0x3e0a001e,
+       0xc78, 0x400b001e,
+       0xc78, 0x440c001e,
+       0xc78, 0x480d001e,
+       0xc78, 0x4c0e001e,
+       0xc78, 0x500f001e,
+       0xc78, 0x5210001e,
+       0xc78, 0x5611001e,
+       0xc78, 0x5a12001e,
+       0xc78, 0x5e13001e,
+       0xc78, 0x6014001e,
+       0xc78, 0x6015001e,
+       0xc78, 0x6016001e,
+       0xc78, 0x6217001e,
+       0xc78, 0x6218001e,
+       0xc78, 0x6219001e,
+       0xc78, 0x621a001e,
+       0xc78, 0x621b001e,
+       0xc78, 0x621c001e,
+       0xc78, 0x621d001e,
+       0xc78, 0x621e001e,
+       0xc78, 0x621f001e,
+};
+
+u32 RTL8192CUAGCTAB_1TARRAY[RTL8192CUAGCTAB_1TARRAYLENGTH] = {
+       0xc78, 0x7b000001,
+       0xc78, 0x7b010001,
+       0xc78, 0x7b020001,
+       0xc78, 0x7b030001,
+       0xc78, 0x7b040001,
+       0xc78, 0x7b050001,
+       0xc78, 0x7a060001,
+       0xc78, 0x79070001,
+       0xc78, 0x78080001,
+       0xc78, 0x77090001,
+       0xc78, 0x760a0001,
+       0xc78, 0x750b0001,
+       0xc78, 0x740c0001,
+       0xc78, 0x730d0001,
+       0xc78, 0x720e0001,
+       0xc78, 0x710f0001,
+       0xc78, 0x70100001,
+       0xc78, 0x6f110001,
+       0xc78, 0x6e120001,
+       0xc78, 0x6d130001,
+       0xc78, 0x6c140001,
+       0xc78, 0x6b150001,
+       0xc78, 0x6a160001,
+       0xc78, 0x69170001,
+       0xc78, 0x68180001,
+       0xc78, 0x67190001,
+       0xc78, 0x661a0001,
+       0xc78, 0x651b0001,
+       0xc78, 0x641c0001,
+       0xc78, 0x631d0001,
+       0xc78, 0x621e0001,
+       0xc78, 0x611f0001,
+       0xc78, 0x60200001,
+       0xc78, 0x49210001,
+       0xc78, 0x48220001,
+       0xc78, 0x47230001,
+       0xc78, 0x46240001,
+       0xc78, 0x45250001,
+       0xc78, 0x44260001,
+       0xc78, 0x43270001,
+       0xc78, 0x42280001,
+       0xc78, 0x41290001,
+       0xc78, 0x402a0001,
+       0xc78, 0x262b0001,
+       0xc78, 0x252c0001,
+       0xc78, 0x242d0001,
+       0xc78, 0x232e0001,
+       0xc78, 0x222f0001,
+       0xc78, 0x21300001,
+       0xc78, 0x20310001,
+       0xc78, 0x06320001,
+       0xc78, 0x05330001,
+       0xc78, 0x04340001,
+       0xc78, 0x03350001,
+       0xc78, 0x02360001,
+       0xc78, 0x01370001,
+       0xc78, 0x00380001,
+       0xc78, 0x00390001,
+       0xc78, 0x003a0001,
+       0xc78, 0x003b0001,
+       0xc78, 0x003c0001,
+       0xc78, 0x003d0001,
+       0xc78, 0x003e0001,
+       0xc78, 0x003f0001,
+       0xc78, 0x7b400001,
+       0xc78, 0x7b410001,
+       0xc78, 0x7b420001,
+       0xc78, 0x7b430001,
+       0xc78, 0x7b440001,
+       0xc78, 0x7b450001,
+       0xc78, 0x7a460001,
+       0xc78, 0x79470001,
+       0xc78, 0x78480001,
+       0xc78, 0x77490001,
+       0xc78, 0x764a0001,
+       0xc78, 0x754b0001,
+       0xc78, 0x744c0001,
+       0xc78, 0x734d0001,
+       0xc78, 0x724e0001,
+       0xc78, 0x714f0001,
+       0xc78, 0x70500001,
+       0xc78, 0x6f510001,
+       0xc78, 0x6e520001,
+       0xc78, 0x6d530001,
+       0xc78, 0x6c540001,
+       0xc78, 0x6b550001,
+       0xc78, 0x6a560001,
+       0xc78, 0x69570001,
+       0xc78, 0x68580001,
+       0xc78, 0x67590001,
+       0xc78, 0x665a0001,
+       0xc78, 0x655b0001,
+       0xc78, 0x645c0001,
+       0xc78, 0x635d0001,
+       0xc78, 0x625e0001,
+       0xc78, 0x615f0001,
+       0xc78, 0x60600001,
+       0xc78, 0x49610001,
+       0xc78, 0x48620001,
+       0xc78, 0x47630001,
+       0xc78, 0x46640001,
+       0xc78, 0x45650001,
+       0xc78, 0x44660001,
+       0xc78, 0x43670001,
+       0xc78, 0x42680001,
+       0xc78, 0x41690001,
+       0xc78, 0x406a0001,
+       0xc78, 0x266b0001,
+       0xc78, 0x256c0001,
+       0xc78, 0x246d0001,
+       0xc78, 0x236e0001,
+       0xc78, 0x226f0001,
+       0xc78, 0x21700001,
+       0xc78, 0x20710001,
+       0xc78, 0x06720001,
+       0xc78, 0x05730001,
+       0xc78, 0x04740001,
+       0xc78, 0x03750001,
+       0xc78, 0x02760001,
+       0xc78, 0x01770001,
+       0xc78, 0x00780001,
+       0xc78, 0x00790001,
+       0xc78, 0x007a0001,
+       0xc78, 0x007b0001,
+       0xc78, 0x007c0001,
+       0xc78, 0x007d0001,
+       0xc78, 0x007e0001,
+       0xc78, 0x007f0001,
+       0xc78, 0x3800001e,
+       0xc78, 0x3801001e,
+       0xc78, 0x3802001e,
+       0xc78, 0x3803001e,
+       0xc78, 0x3804001e,
+       0xc78, 0x3805001e,
+       0xc78, 0x3806001e,
+       0xc78, 0x3807001e,
+       0xc78, 0x3808001e,
+       0xc78, 0x3c09001e,
+       0xc78, 0x3e0a001e,
+       0xc78, 0x400b001e,
+       0xc78, 0x440c001e,
+       0xc78, 0x480d001e,
+       0xc78, 0x4c0e001e,
+       0xc78, 0x500f001e,
+       0xc78, 0x5210001e,
+       0xc78, 0x5611001e,
+       0xc78, 0x5a12001e,
+       0xc78, 0x5e13001e,
+       0xc78, 0x6014001e,
+       0xc78, 0x6015001e,
+       0xc78, 0x6016001e,
+       0xc78, 0x6217001e,
+       0xc78, 0x6218001e,
+       0xc78, 0x6219001e,
+       0xc78, 0x621a001e,
+       0xc78, 0x621b001e,
+       0xc78, 0x621c001e,
+       0xc78, 0x621d001e,
+       0xc78, 0x621e001e,
+       0xc78, 0x621f001e,
+};
+
+u32 RTL8192CUPHY_REG_1T_HPArray[RTL8192CUPHY_REG_1T_HPArrayLength] = {
+       0x024, 0x0011800f,
+       0x028, 0x00ffdb83,
+       0x040, 0x000c0004,
+       0x800, 0x80040000,
+       0x804, 0x00000001,
+       0x808, 0x0000fc00,
+       0x80c, 0x0000000a,
+       0x810, 0x10005388,
+       0x814, 0x020c3d10,
+       0x818, 0x02200385,
+       0x81c, 0x00000000,
+       0x820, 0x01000100,
+       0x824, 0x00390204,
+       0x828, 0x00000000,
+       0x82c, 0x00000000,
+       0x830, 0x00000000,
+       0x834, 0x00000000,
+       0x838, 0x00000000,
+       0x83c, 0x00000000,
+       0x840, 0x00010000,
+       0x844, 0x00000000,
+       0x848, 0x00000000,
+       0x84c, 0x00000000,
+       0x850, 0x00000000,
+       0x854, 0x00000000,
+       0x858, 0x569a569a,
+       0x85c, 0x001b25a4,
+       0x860, 0x66e60230,
+       0x864, 0x061f0130,
+       0x868, 0x00000000,
+       0x86c, 0x20202000,
+       0x870, 0x03000300,
+       0x874, 0x22004000,
+       0x878, 0x00000808,
+       0x87c, 0x00ffc3f1,
+       0x880, 0xc0083070,
+       0x884, 0x000004d5,
+       0x888, 0x00000000,
+       0x88c, 0xccc000c0,
+       0x890, 0x00000800,
+       0x894, 0xfffffffe,
+       0x898, 0x40302010,
+       0x89c, 0x00706050,
+       0x900, 0x00000000,
+       0x904, 0x00000023,
+       0x908, 0x00000000,
+       0x90c, 0x81121111,
+       0xa00, 0x00d047c8,
+       0xa04, 0x80ff000c,
+       0xa08, 0x8c838300,
+       0xa0c, 0x2e68120f,
+       0xa10, 0x9500bb78,
+       0xa14, 0x11144028,
+       0xa18, 0x00881117,
+       0xa1c, 0x89140f00,
+       0xa20, 0x15160000,
+       0xa24, 0x070b0f12,
+       0xa28, 0x00000104,
+       0xa2c, 0x00d30000,
+       0xa70, 0x101fbf00,
+       0xa74, 0x00000007,
+       0xc00, 0x48071d40,
+       0xc04, 0x03a05611,
+       0xc08, 0x000000e4,
+       0xc0c, 0x6c6c6c6c,
+       0xc10, 0x08800000,
+       0xc14, 0x40000100,
+       0xc18, 0x08800000,
+       0xc1c, 0x40000100,
+       0xc20, 0x00000000,
+       0xc24, 0x00000000,
+       0xc28, 0x00000000,
+       0xc2c, 0x00000000,
+       0xc30, 0x69e9ac44,
+       0xc34, 0x469652cf,
+       0xc38, 0x49795994,
+       0xc3c, 0x0a97971c,
+       0xc40, 0x1f7c403f,
+       0xc44, 0x000100b7,
+       0xc48, 0xec020107,
+       0xc4c, 0x007f037f,
+       0xc50, 0x6954342e,
+       0xc54, 0x43bc0094,
+       0xc58, 0x6954342f,
+       0xc5c, 0x433c0094,
+       0xc60, 0x00000000,
+       0xc64, 0x5116848b,
+       0xc68, 0x47c00bff,
+       0xc6c, 0x00000036,
+       0xc70, 0x2c46000d,
+       0xc74, 0x018610db,
+       0xc78, 0x0000001f,
+       0xc7c, 0x00b91612,
+       0xc80, 0x24000090,
+       0xc84, 0x20f60000,
+       0xc88, 0x24000090,
+       0xc8c, 0x20200000,
+       0xc90, 0x00121820,
+       0xc94, 0x00000000,
+       0xc98, 0x00121820,
+       0xc9c, 0x00007f7f,
+       0xca0, 0x00000000,
+       0xca4, 0x00000080,
+       0xca8, 0x00000000,
+       0xcac, 0x00000000,
+       0xcb0, 0x00000000,
+       0xcb4, 0x00000000,
+       0xcb8, 0x00000000,
+       0xcbc, 0x28000000,
+       0xcc0, 0x00000000,
+       0xcc4, 0x00000000,
+       0xcc8, 0x00000000,
+       0xccc, 0x00000000,
+       0xcd0, 0x00000000,
+       0xcd4, 0x00000000,
+       0xcd8, 0x64b22427,
+       0xcdc, 0x00766932,
+       0xce0, 0x00222222,
+       0xce4, 0x00000000,
+       0xce8, 0x37644302,
+       0xcec, 0x2f97d40c,
+       0xd00, 0x00080740,
+       0xd04, 0x00020401,
+       0xd08, 0x0000907f,
+       0xd0c, 0x20010201,
+       0xd10, 0xa0633333,
+       0xd14, 0x3333bc43,
+       0xd18, 0x7a8f5b6b,
+       0xd2c, 0xcc979975,
+       0xd30, 0x00000000,
+       0xd34, 0x80608000,
+       0xd38, 0x00000000,
+       0xd3c, 0x00027293,
+       0xd40, 0x00000000,
+       0xd44, 0x00000000,
+       0xd48, 0x00000000,
+       0xd4c, 0x00000000,
+       0xd50, 0x6437140a,
+       0xd54, 0x00000000,
+       0xd58, 0x00000000,
+       0xd5c, 0x30032064,
+       0xd60, 0x4653de68,
+       0xd64, 0x04518a3c,
+       0xd68, 0x00002101,
+       0xd6c, 0x2a201c16,
+       0xd70, 0x1812362e,
+       0xd74, 0x322c2220,
+       0xd78, 0x000e3c24,
+       0xe00, 0x24242424,
+       0xe04, 0x24242424,
+       0xe08, 0x03902024,
+       0xe10, 0x24242424,
+       0xe14, 0x24242424,
+       0xe18, 0x24242424,
+       0xe1c, 0x24242424,
+       0xe28, 0x00000000,
+       0xe30, 0x1000dc1f,
+       0xe34, 0x10008c1f,
+       0xe38, 0x02140102,
+       0xe3c, 0x681604c2,
+       0xe40, 0x01007c00,
+       0xe44, 0x01004800,
+       0xe48, 0xfb000000,
+       0xe4c, 0x000028d1,
+       0xe50, 0x1000dc1f,
+       0xe54, 0x10008c1f,
+       0xe58, 0x02140102,
+       0xe5c, 0x28160d05,
+       0xe60, 0x00000008,
+       0xe68, 0x001b25a4,
+       0xe6c, 0x631b25a0,
+       0xe70, 0x631b25a0,
+       0xe74, 0x081b25a0,
+       0xe78, 0x081b25a0,
+       0xe7c, 0x081b25a0,
+       0xe80, 0x081b25a0,
+       0xe84, 0x631b25a0,
+       0xe88, 0x081b25a0,
+       0xe8c, 0x631b25a0,
+       0xed0, 0x631b25a0,
+       0xed4, 0x631b25a0,
+       0xed8, 0x631b25a0,
+       0xedc, 0x001b25a0,
+       0xee0, 0x001b25a0,
+       0xeec, 0x6b1b25a0,
+       0xee8, 0x31555448,
+       0xf14, 0x00000003,
+       0xf4c, 0x00000000,
+       0xf00, 0x00000300,
+};
+
+u32 RTL8192CUPHY_REG_Array_PG_HP[RTL8192CUPHY_REG_Array_PG_HPLength] = {
+       0xe00, 0xffffffff, 0x06080808,
+       0xe04, 0xffffffff, 0x00040406,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x04060608,
+       0xe14, 0xffffffff, 0x00020204,
+       0xe18, 0xffffffff, 0x04060608,
+       0xe1c, 0xffffffff, 0x00020204,
+       0x830, 0xffffffff, 0x06080808,
+       0x834, 0xffffffff, 0x00040406,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x04060608,
+       0x848, 0xffffffff, 0x00020204,
+       0x84c, 0xffffffff, 0x04060608,
+       0x868, 0xffffffff, 0x00020204,
+       0xe00, 0xffffffff, 0x00000000,
+       0xe04, 0xffffffff, 0x00000000,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x00000000,
+       0x834, 0xffffffff, 0x00000000,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+       0xe00, 0xffffffff, 0x00000000,
+       0xe04, 0xffffffff, 0x00000000,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x00000000,
+       0x834, 0xffffffff, 0x00000000,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+       0xe00, 0xffffffff, 0x00000000,
+       0xe04, 0xffffffff, 0x00000000,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x00000000,
+       0x834, 0xffffffff, 0x00000000,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+       0xe00, 0xffffffff, 0x00000000,
+       0xe04, 0xffffffff, 0x00000000,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x00000000,
+       0x834, 0xffffffff, 0x00000000,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+       0xe00, 0xffffffff, 0x00000000,
+       0xe04, 0xffffffff, 0x00000000,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x00000000,
+       0x834, 0xffffffff, 0x00000000,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+       0xe00, 0xffffffff, 0x00000000,
+       0xe04, 0xffffffff, 0x00000000,
+       0xe08, 0x0000ff00, 0x00000000,
+       0x86c, 0xffffff00, 0x00000000,
+       0xe10, 0xffffffff, 0x00000000,
+       0xe14, 0xffffffff, 0x00000000,
+       0xe18, 0xffffffff, 0x00000000,
+       0xe1c, 0xffffffff, 0x00000000,
+       0x830, 0xffffffff, 0x00000000,
+       0x834, 0xffffffff, 0x00000000,
+       0x838, 0xffffff00, 0x00000000,
+       0x86c, 0x000000ff, 0x00000000,
+       0x83c, 0xffffffff, 0x00000000,
+       0x848, 0xffffffff, 0x00000000,
+       0x84c, 0xffffffff, 0x00000000,
+       0x868, 0xffffffff, 0x00000000,
+};
+
+u32 RTL8192CURadioA_1T_HPArray[RTL8192CURadioA_1T_HPArrayLength] = {
+       0x000, 0x00030159,
+       0x001, 0x00031284,
+       0x002, 0x00098000,
+       0x003, 0x00018c63,
+       0x004, 0x000210e7,
+       0x009, 0x0002044f,
+       0x00a, 0x0001adb0,
+       0x00b, 0x00054867,
+       0x00c, 0x0008992e,
+       0x00d, 0x0000e529,
+       0x00e, 0x00039ce7,
+       0x00f, 0x00000451,
+       0x019, 0x00000000,
+       0x01a, 0x00000255,
+       0x01b, 0x00060a00,
+       0x01c, 0x000fc378,
+       0x01d, 0x000a1250,
+       0x01e, 0x0004445f,
+       0x01f, 0x00080001,
+       0x020, 0x0000b614,
+       0x021, 0x0006c000,
+       0x022, 0x0000083c,
+       0x023, 0x00001558,
+       0x024, 0x00000060,
+       0x025, 0x00000483,
+       0x026, 0x0004f000,
+       0x027, 0x000ec7d9,
+       0x028, 0x000977c0,
+       0x029, 0x00004783,
+       0x02a, 0x00000001,
+       0x02b, 0x00021334,
+       0x02a, 0x00000000,
+       0x02b, 0x00000054,
+       0x02a, 0x00000001,
+       0x02b, 0x00000808,
+       0x02b, 0x00053333,
+       0x02c, 0x0000000c,
+       0x02a, 0x00000002,
+       0x02b, 0x00000808,
+       0x02b, 0x0005b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000003,
+       0x02b, 0x00000808,
+       0x02b, 0x00063333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000004,
+       0x02b, 0x00000808,
+       0x02b, 0x0006b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000005,
+       0x02b, 0x00000808,
+       0x02b, 0x00073333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000006,
+       0x02b, 0x00000709,
+       0x02b, 0x0005b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000007,
+       0x02b, 0x00000709,
+       0x02b, 0x00063333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000008,
+       0x02b, 0x0000060a,
+       0x02b, 0x0004b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x00000009,
+       0x02b, 0x0000060a,
+       0x02b, 0x00053333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000a,
+       0x02b, 0x0000060a,
+       0x02b, 0x0005b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000b,
+       0x02b, 0x0000060a,
+       0x02b, 0x00063333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000c,
+       0x02b, 0x0000060a,
+       0x02b, 0x0006b333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000d,
+       0x02b, 0x0000060a,
+       0x02b, 0x00073333,
+       0x02c, 0x0000000d,
+       0x02a, 0x0000000e,
+       0x02b, 0x0000050b,
+       0x02b, 0x00066666,
+       0x02c, 0x0000001a,
+       0x02a, 0x000e0000,
+       0x010, 0x0004000f,
+       0x011, 0x000e31fc,
+       0x010, 0x0006000f,
+       0x011, 0x000ff9f8,
+       0x010, 0x0002000f,
+       0x011, 0x000203f9,
+       0x010, 0x0003000f,
+       0x011, 0x000ff500,
+       0x010, 0x00000000,
+       0x011, 0x00000000,
+       0x010, 0x0008000f,
+       0x011, 0x0003f100,
+       0x010, 0x0009000f,
+       0x011, 0x00023100,
+       0x012, 0x000d8000,
+       0x012, 0x00090000,
+       0x012, 0x00051000,
+       0x012, 0x00012000,
+       0x013, 0x00028fb4,
+       0x013, 0x00024fa8,
+       0x013, 0x000207a4,
+       0x013, 0x0001c798,
+       0x013, 0x000183a4,
+       0x013, 0x00014398,
+       0x013, 0x000101a4,
+       0x013, 0x0000c198,
+       0x013, 0x000080a4,
+       0x013, 0x00004098,
+       0x013, 0x00000000,
+       0x014, 0x0001944c,
+       0x014, 0x00059444,
+       0x014, 0x0009944c,
+       0x014, 0x000d9444,
+       0x015, 0x0000f405,
+       0x015, 0x0004f405,
+       0x015, 0x0008f405,
+       0x015, 0x000cf405,
+       0x016, 0x000e0330,
+       0x016, 0x000a0330,
+       0x016, 0x00060330,
+       0x016, 0x00020330,
+       0x000, 0x00010159,
+       0x018, 0x0000f401,
+       0x0fe, 0x00000000,
+       0x0fe, 0x00000000,
+       0x01f, 0x00080003,
+       0x0fe, 0x00000000,
+       0x0fe, 0x00000000,
+       0x01e, 0x00044457,
+       0x01f, 0x00080000,
+       0x000, 0x00030159,
+};
+
+u32 Rtl8192CUAGCTAB_1T_HPArray[RTL8192CUAGCTAB_1T_HPArrayLength] = {
+       0xc78, 0x7b000001,
+       0xc78, 0x7b010001,
+       0xc78, 0x7b020001,
+       0xc78, 0x7b030001,
+       0xc78, 0x7b040001,
+       0xc78, 0x7b050001,
+       0xc78, 0x7b060001,
+       0xc78, 0x7b070001,
+       0xc78, 0x7b080001,
+       0xc78, 0x7a090001,
+       0xc78, 0x790a0001,
+       0xc78, 0x780b0001,
+       0xc78, 0x770c0001,
+       0xc78, 0x760d0001,
+       0xc78, 0x750e0001,
+       0xc78, 0x740f0001,
+       0xc78, 0x73100001,
+       0xc78, 0x72110001,
+       0xc78, 0x71120001,
+       0xc78, 0x70130001,
+       0xc78, 0x6f140001,
+       0xc78, 0x6e150001,
+       0xc78, 0x6d160001,
+       0xc78, 0x6c170001,
+       0xc78, 0x6b180001,
+       0xc78, 0x6a190001,
+       0xc78, 0x691a0001,
+       0xc78, 0x681b0001,
+       0xc78, 0x671c0001,
+       0xc78, 0x661d0001,
+       0xc78, 0x651e0001,
+       0xc78, 0x641f0001,
+       0xc78, 0x63200001,
+       0xc78, 0x62210001,
+       0xc78, 0x61220001,
+       0xc78, 0x60230001,
+       0xc78, 0x46240001,
+       0xc78, 0x45250001,
+       0xc78, 0x44260001,
+       0xc78, 0x43270001,
+       0xc78, 0x42280001,
+       0xc78, 0x41290001,
+       0xc78, 0x402a0001,
+       0xc78, 0x262b0001,
+       0xc78, 0x252c0001,
+       0xc78, 0x242d0001,
+       0xc78, 0x232e0001,
+       0xc78, 0x222f0001,
+       0xc78, 0x21300001,
+       0xc78, 0x20310001,
+       0xc78, 0x06320001,
+       0xc78, 0x05330001,
+       0xc78, 0x04340001,
+       0xc78, 0x03350001,
+       0xc78, 0x02360001,
+       0xc78, 0x01370001,
+       0xc78, 0x00380001,
+       0xc78, 0x00390001,
+       0xc78, 0x003a0001,
+       0xc78, 0x003b0001,
+       0xc78, 0x003c0001,
+       0xc78, 0x003d0001,
+       0xc78, 0x003e0001,
+       0xc78, 0x003f0001,
+       0xc78, 0x7b400001,
+       0xc78, 0x7b410001,
+       0xc78, 0x7b420001,
+       0xc78, 0x7b430001,
+       0xc78, 0x7b440001,
+       0xc78, 0x7b450001,
+       0xc78, 0x7b460001,
+       0xc78, 0x7b470001,
+       0xc78, 0x7b480001,
+       0xc78, 0x7a490001,
+       0xc78, 0x794a0001,
+       0xc78, 0x784b0001,
+       0xc78, 0x774c0001,
+       0xc78, 0x764d0001,
+       0xc78, 0x754e0001,
+       0xc78, 0x744f0001,
+       0xc78, 0x73500001,
+       0xc78, 0x72510001,
+       0xc78, 0x71520001,
+       0xc78, 0x70530001,
+       0xc78, 0x6f540001,
+       0xc78, 0x6e550001,
+       0xc78, 0x6d560001,
+       0xc78, 0x6c570001,
+       0xc78, 0x6b580001,
+       0xc78, 0x6a590001,
+       0xc78, 0x695a0001,
+       0xc78, 0x685b0001,
+       0xc78, 0x675c0001,
+       0xc78, 0x665d0001,
+       0xc78, 0x655e0001,
+       0xc78, 0x645f0001,
+       0xc78, 0x63600001,
+       0xc78, 0x62610001,
+       0xc78, 0x61620001,
+       0xc78, 0x60630001,
+       0xc78, 0x46640001,
+       0xc78, 0x45650001,
+       0xc78, 0x44660001,
+       0xc78, 0x43670001,
+       0xc78, 0x42680001,
+       0xc78, 0x41690001,
+       0xc78, 0x406a0001,
+       0xc78, 0x266b0001,
+       0xc78, 0x256c0001,
+       0xc78, 0x246d0001,
+       0xc78, 0x236e0001,
+       0xc78, 0x226f0001,
+       0xc78, 0x21700001,
+       0xc78, 0x20710001,
+       0xc78, 0x06720001,
+       0xc78, 0x05730001,
+       0xc78, 0x04740001,
+       0xc78, 0x03750001,
+       0xc78, 0x02760001,
+       0xc78, 0x01770001,
+       0xc78, 0x00780001,
+       0xc78, 0x00790001,
+       0xc78, 0x007a0001,
+       0xc78, 0x007b0001,
+       0xc78, 0x007c0001,
+       0xc78, 0x007d0001,
+       0xc78, 0x007e0001,
+       0xc78, 0x007f0001,
+       0xc78, 0x3800001e,
+       0xc78, 0x3801001e,
+       0xc78, 0x3802001e,
+       0xc78, 0x3803001e,
+       0xc78, 0x3804001e,
+       0xc78, 0x3805001e,
+       0xc78, 0x3806001e,
+       0xc78, 0x3807001e,
+       0xc78, 0x3808001e,
+       0xc78, 0x3c09001e,
+       0xc78, 0x3e0a001e,
+       0xc78, 0x400b001e,
+       0xc78, 0x440c001e,
+       0xc78, 0x480d001e,
+       0xc78, 0x4c0e001e,
+       0xc78, 0x500f001e,
+       0xc78, 0x5210001e,
+       0xc78, 0x5611001e,
+       0xc78, 0x5a12001e,
+       0xc78, 0x5e13001e,
+       0xc78, 0x6014001e,
+       0xc78, 0x6015001e,
+       0xc78, 0x6016001e,
+       0xc78, 0x6217001e,
+       0xc78, 0x6218001e,
+       0xc78, 0x6219001e,
+       0xc78, 0x621a001e,
+       0xc78, 0x621b001e,
+       0xc78, 0x621c001e,
+       0xc78, 0x621d001e,
+       0xc78, 0x621e001e,
+       0xc78, 0x621f001e,
+};
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.h b/drivers/net/wireless/rtlwifi/rtl8192cu/table.h
new file mode 100644 (file)
index 0000000..c3d5cd8
--- /dev/null
@@ -0,0 +1,71 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_TABLE__H_
+#define __RTL92CU_TABLE__H_
+
+#include <linux/types.h>
+
+#define RTL8192CUPHY_REG_2TARRAY_LENGTH                374
+extern u32 RTL8192CUPHY_REG_2TARRAY[RTL8192CUPHY_REG_2TARRAY_LENGTH];
+#define RTL8192CUPHY_REG_1TARRAY_LENGTH                374
+extern u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH];
+
+#define RTL8192CUPHY_REG_ARRAY_PGLENGTH                336
+extern u32 RTL8192CUPHY_REG_ARRAY_PG[RTL8192CUPHY_REG_ARRAY_PGLENGTH];
+
+#define RTL8192CURADIOA_2TARRAYLENGTH  282
+extern u32 RTL8192CURADIOA_2TARRAY[RTL8192CURADIOA_2TARRAYLENGTH];
+#define RTL8192CURADIOB_2TARRAYLENGTH  78
+extern u32 RTL8192CU_RADIOB_2TARRAY[RTL8192CURADIOB_2TARRAYLENGTH];
+#define RTL8192CURADIOA_1TARRAYLENGTH  282
+extern u32 RTL8192CU_RADIOA_1TARRAY[RTL8192CURADIOA_1TARRAYLENGTH];
+#define RTL8192CURADIOB_1TARRAYLENGTH  1
+extern u32 RTL8192CU_RADIOB_1TARRAY[RTL8192CURADIOB_1TARRAYLENGTH];
+
+#define RTL8192CUMAC_2T_ARRAYLENGTH            172
+extern u32 RTL8192CUMAC_2T_ARRAY[RTL8192CUMAC_2T_ARRAYLENGTH];
+
+#define RTL8192CUAGCTAB_2TARRAYLENGTH  320
+extern u32 RTL8192CUAGCTAB_2TARRAY[RTL8192CUAGCTAB_2TARRAYLENGTH];
+#define RTL8192CUAGCTAB_1TARRAYLENGTH  320
+extern u32 RTL8192CUAGCTAB_1TARRAY[RTL8192CUAGCTAB_1TARRAYLENGTH];
+
+#define RTL8192CUPHY_REG_1T_HPArrayLength 378
+extern u32 RTL8192CUPHY_REG_1T_HPArray[RTL8192CUPHY_REG_1T_HPArrayLength];
+
+#define RTL8192CUPHY_REG_Array_PG_HPLength 336
+extern u32 RTL8192CUPHY_REG_Array_PG_HP[RTL8192CUPHY_REG_Array_PG_HPLength];
+
+#define RTL8192CURadioA_1T_HPArrayLength 282
+extern u32 RTL8192CURadioA_1T_HPArray[RTL8192CURadioA_1T_HPArrayLength];
+#define RTL8192CUAGCTAB_1T_HPArrayLength 320
+extern u32 Rtl8192CUAGCTAB_1T_HPArray[RTL8192CUAGCTAB_1T_HPArrayLength];
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
new file mode 100644 (file)
index 0000000..d0b0d43
--- /dev/null
@@ -0,0 +1,687 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../usb.h"
+#include "../ps.h"
+#include "../base.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+#include "mac.h"
+#include "trx.h"
+
+static int _ConfigVerTOutEP(struct ieee80211_hw *hw)
+{
+       u8 ep_cfg, txqsele;
+       u8 ep_nums = 0;
+
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+
+       rtlusb->out_queue_sel = 0;
+       ep_cfg = rtl_read_byte(rtlpriv, REG_TEST_SIE_OPTIONAL);
+       ep_cfg = (ep_cfg & USB_TEST_EP_MASK) >> USB_TEST_EP_SHIFT;
+       switch (ep_cfg) {
+       case 0:         /* 2 bulk OUT, 1 bulk IN */
+       case 3:
+               rtlusb->out_queue_sel  = TX_SELE_HQ | TX_SELE_LQ;
+               ep_nums = 2;
+               break;
+       case 1: /* 1 bulk IN/OUT => map all endpoints to Low queue */
+       case 2: /* 1 bulk IN, 1 bulk OUT => map all endpoints to High queue */
+               txqsele = rtl_read_byte(rtlpriv, REG_TEST_USB_TXQS);
+               if (txqsele & 0x0F) /* map all endpoints to High queue */
+                       rtlusb->out_queue_sel = TX_SELE_HQ;
+               else if (txqsele & 0xF0) /* map all endpoints to Low queue */
+                       rtlusb->out_queue_sel = TX_SELE_LQ;
+               ep_nums = 1;
+               break;
+       default:
+               break;
+       }
+       return (rtlusb->out_ep_nums == ep_nums) ? 0 : -EINVAL;
+}
+
+static int _ConfigVerNOutEP(struct ieee80211_hw *hw)
+{
+       u8 ep_cfg;
+       u8 ep_nums = 0;
+
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+
+       rtlusb->out_queue_sel = 0;
+       /* Normal and High queue */
+       ep_cfg =  rtl_read_byte(rtlpriv, (REG_NORMAL_SIE_EP + 1));
+       if (ep_cfg & USB_NORMAL_SIE_EP_MASK) {
+               rtlusb->out_queue_sel |= TX_SELE_HQ;
+               ep_nums++;
+       }
+       if ((ep_cfg >> USB_NORMAL_SIE_EP_SHIFT) & USB_NORMAL_SIE_EP_MASK) {
+               rtlusb->out_queue_sel |= TX_SELE_NQ;
+               ep_nums++;
+       }
+       /* Low queue */
+       ep_cfg =  rtl_read_byte(rtlpriv, (REG_NORMAL_SIE_EP + 2));
+       if (ep_cfg & USB_NORMAL_SIE_EP_MASK) {
+               rtlusb->out_queue_sel |= TX_SELE_LQ;
+               ep_nums++;
+       }
+       return (rtlusb->out_ep_nums == ep_nums) ? 0 : -EINVAL;
+}
+
+static void _TwoOutEpMapping(struct ieee80211_hw *hw, bool bIsChipB,
+                            bool  bwificfg, struct rtl_ep_map *ep_map)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       if (bwificfg) { /* for WMM */
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                        ("USB Chip-B & WMM Setting.....\n"));
+               ep_map->ep_mapping[RTL_TXQ_BE]  = 2;
+               ep_map->ep_mapping[RTL_TXQ_BK]  = 3;
+               ep_map->ep_mapping[RTL_TXQ_VI]  = 3;
+               ep_map->ep_mapping[RTL_TXQ_VO] = 2;
+               ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
+               ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
+               ep_map->ep_mapping[RTL_TXQ_HI]  = 2;
+       } else { /* typical setting */
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                        ("USB typical Setting.....\n"));
+               ep_map->ep_mapping[RTL_TXQ_BE]  = 3;
+               ep_map->ep_mapping[RTL_TXQ_BK]  = 3;
+               ep_map->ep_mapping[RTL_TXQ_VI]  = 2;
+               ep_map->ep_mapping[RTL_TXQ_VO]  = 2;
+               ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
+               ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
+               ep_map->ep_mapping[RTL_TXQ_HI]  = 2;
+       }
+}
+
+static void _ThreeOutEpMapping(struct ieee80211_hw *hw, bool  bwificfg,
+                              struct rtl_ep_map *ep_map)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       if (bwificfg) { /* for WMM */
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                        ("USB 3EP Setting for WMM.....\n"));
+               ep_map->ep_mapping[RTL_TXQ_BE]  = 5;
+               ep_map->ep_mapping[RTL_TXQ_BK]  = 3;
+               ep_map->ep_mapping[RTL_TXQ_VI]  = 3;
+               ep_map->ep_mapping[RTL_TXQ_VO]  = 2;
+               ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
+               ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
+               ep_map->ep_mapping[RTL_TXQ_HI]  = 2;
+       } else { /* typical setting */
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                        ("USB 3EP Setting for typical.....\n"));
+               ep_map->ep_mapping[RTL_TXQ_BE]  = 5;
+               ep_map->ep_mapping[RTL_TXQ_BK]  = 5;
+               ep_map->ep_mapping[RTL_TXQ_VI]  = 3;
+               ep_map->ep_mapping[RTL_TXQ_VO]  = 2;
+               ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
+               ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
+               ep_map->ep_mapping[RTL_TXQ_HI]  = 2;
+       }
+}
+
+static void _OneOutEpMapping(struct ieee80211_hw *hw, struct rtl_ep_map *ep_map)
+{
+       ep_map->ep_mapping[RTL_TXQ_BE]  = 2;
+       ep_map->ep_mapping[RTL_TXQ_BK]  = 2;
+       ep_map->ep_mapping[RTL_TXQ_VI]  = 2;
+       ep_map->ep_mapping[RTL_TXQ_VO] = 2;
+       ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
+       ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
+       ep_map->ep_mapping[RTL_TXQ_HI]  = 2;
+}
+static int _out_ep_mapping(struct ieee80211_hw *hw)
+{
+       int err = 0;
+       bool bIsChipN, bwificfg = false;
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+       struct rtl_ep_map *ep_map = &(rtlusb->ep_map);
+
+       bIsChipN = IS_NORMAL_CHIP(rtlhal->version);
+       switch (rtlusb->out_ep_nums) {
+       case 2:
+               _TwoOutEpMapping(hw, bIsChipN, bwificfg, ep_map);
+               break;
+       case 3:
+               /* Test chip doesn't support three out EPs. */
+               if (!bIsChipN) {
+                       err  =  -EINVAL;
+                       goto err_out;
+               }
+               _ThreeOutEpMapping(hw, bIsChipN, ep_map);
+               break;
+       case 1:
+               _OneOutEpMapping(hw, ep_map);
+               break;
+       default:
+               err  =  -EINVAL;
+               break;
+       }
+err_out:
+       return err;
+}
+/* endpoint mapping */
+int  rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw)
+{
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       int error = 0;
+       if (likely(IS_NORMAL_CHIP(rtlhal->version)))
+               error = _ConfigVerNOutEP(hw);
+       else
+               error = _ConfigVerTOutEP(hw);
+       if (error)
+               goto err_out;
+       error = _out_ep_mapping(hw);
+err_out:
+       return error;
+}
+
+u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index)
+{
+       u16 hw_queue_index;
+
+       if (unlikely(ieee80211_is_beacon(fc))) {
+               hw_queue_index = RTL_TXQ_BCN;
+               goto out;
+       }
+       if (ieee80211_is_mgmt(fc)) {
+               hw_queue_index = RTL_TXQ_MGT;
+               goto out;
+       }
+       switch (mac80211_queue_index) {
+       case 0:
+               hw_queue_index = RTL_TXQ_VO;
+               break;
+       case 1:
+               hw_queue_index = RTL_TXQ_VI;
+               break;
+       case 2:
+               hw_queue_index = RTL_TXQ_BE;
+               break;
+       case 3:
+               hw_queue_index = RTL_TXQ_BK;
+               break;
+       default:
+               hw_queue_index = RTL_TXQ_BE;
+               RT_ASSERT(false, ("QSLT_BE queue, skb_queue:%d\n",
+                         mac80211_queue_index));
+               break;
+       }
+out:
+       return hw_queue_index;
+}
+
+static enum rtl_desc_qsel _rtl8192cu_mq_to_descq(struct ieee80211_hw *hw,
+                                        __le16 fc, u16 mac80211_queue_index)
+{
+       enum rtl_desc_qsel qsel;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       if (unlikely(ieee80211_is_beacon(fc))) {
+               qsel = QSLT_BEACON;
+               goto out;
+       }
+       if (ieee80211_is_mgmt(fc)) {
+               qsel = QSLT_MGNT;
+               goto out;
+       }
+       switch (mac80211_queue_index) {
+       case 0: /* VO */
+               qsel = QSLT_VO;
+               RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
+                        ("VO queue, set qsel = 0x%x\n", QSLT_VO));
+               break;
+       case 1: /* VI */
+               qsel = QSLT_VI;
+               RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
+                        ("VI queue, set qsel = 0x%x\n", QSLT_VI));
+               break;
+       case 3: /* BK */
+               qsel = QSLT_BK;
+               RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
+                        ("BK queue, set qsel = 0x%x\n", QSLT_BK));
+               break;
+       case 2: /* BE */
+       default:
+               qsel = QSLT_BE;
+               RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
+                        ("BE queue, set qsel = 0x%x\n", QSLT_BE));
+               break;
+       }
+out:
+       return qsel;
+}
+
+/* =============================================================== */
+
+/*----------------------------------------------------------------------
+ *
+ *     Rx handler
+ *
+ *---------------------------------------------------------------------- */
+bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
+                          struct rtl_stats *stats,
+                          struct ieee80211_rx_status *rx_status,
+                          u8 *p_desc, struct sk_buff *skb)
+{
+       struct rx_fwinfo_92c *p_drvinfo;
+       struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
+       u32 phystatus = GET_RX_DESC_PHY_STATUS(pdesc);
+
+       stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
+       stats->rx_drvinfo_size = (u8)GET_RX_DESC_DRVINFO_SIZE(pdesc) *
+                                RX_DRV_INFO_SIZE_UNIT;
+       stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
+       stats->icv = (u16) GET_RX_DESC_ICV(pdesc);
+       stats->crc = (u16) GET_RX_DESC_CRC32(pdesc);
+       stats->hwerror = (stats->crc | stats->icv);
+       stats->decrypted = !GET_RX_DESC_SWDEC(pdesc);
+       stats->rate = (u8) GET_RX_DESC_RX_MCS(pdesc);
+       stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
+       stats->isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
+                                  && (GET_RX_DESC_FAGGR(pdesc) == 1));
+       stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
+       stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+       rx_status->freq = hw->conf.channel->center_freq;
+       rx_status->band = hw->conf.channel->band;
+       if (GET_RX_DESC_CRC32(pdesc))
+               rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+       if (!GET_RX_DESC_SWDEC(pdesc))
+               rx_status->flag |= RX_FLAG_DECRYPTED;
+       if (GET_RX_DESC_BW(pdesc))
+               rx_status->flag |= RX_FLAG_40MHZ;
+       if (GET_RX_DESC_RX_HT(pdesc))
+               rx_status->flag |= RX_FLAG_HT;
+       rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+       if (stats->decrypted)
+               rx_status->flag |= RX_FLAG_DECRYPTED;
+       rx_status->rate_idx = _rtl92c_rate_mapping(hw,
+                                               (bool)GET_RX_DESC_RX_HT(pdesc),
+                                               (u8)GET_RX_DESC_RX_MCS(pdesc),
+                                               (bool)GET_RX_DESC_PAGGR(pdesc));
+       rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
+       if (phystatus) {
+               p_drvinfo = (struct rx_fwinfo_92c *)(p_desc + RTL_RX_DESC_SIZE);
+               rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
+                                                p_drvinfo);
+       }
+       /*rx_status->qual = stats->signal; */
+       rx_status->signal = stats->rssi + 10;
+       /*rx_status->noise = -stats->noise; */
+       return true;
+}
+
+#define RTL_RX_DRV_INFO_UNIT           8
+
+static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       struct ieee80211_rx_status *rx_status =
+                (struct ieee80211_rx_status *)IEEE80211_SKB_RXCB(skb);
+       u32 skb_len, pkt_len, drvinfo_len;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 *rxdesc;
+       struct rtl_stats stats = {
+               .signal = 0,
+               .noise = -98,
+               .rate = 0,
+       };
+       struct rx_fwinfo_92c *p_drvinfo;
+       bool bv;
+       __le16 fc;
+       struct ieee80211_hdr *hdr;
+
+       memset(rx_status, 0, sizeof(*rx_status));
+       rxdesc  = skb->data;
+       skb_len = skb->len;
+       drvinfo_len = (GET_RX_DESC_DRVINFO_SIZE(rxdesc) * RTL_RX_DRV_INFO_UNIT);
+       pkt_len         = GET_RX_DESC_PKT_LEN(rxdesc);
+       /* TODO: Error recovery. drop this skb or something. */
+       WARN_ON(skb_len < (pkt_len + RTL_RX_DESC_SIZE + drvinfo_len));
+       stats.length = (u16) GET_RX_DESC_PKT_LEN(rxdesc);
+       stats.rx_drvinfo_size = (u8)GET_RX_DESC_DRVINFO_SIZE(rxdesc) *
+                               RX_DRV_INFO_SIZE_UNIT;
+       stats.rx_bufshift = (u8) (GET_RX_DESC_SHIFT(rxdesc) & 0x03);
+       stats.icv = (u16) GET_RX_DESC_ICV(rxdesc);
+       stats.crc = (u16) GET_RX_DESC_CRC32(rxdesc);
+       stats.hwerror = (stats.crc | stats.icv);
+       stats.decrypted = !GET_RX_DESC_SWDEC(rxdesc);
+       stats.rate = (u8) GET_RX_DESC_RX_MCS(rxdesc);
+       stats.shortpreamble = (u16) GET_RX_DESC_SPLCP(rxdesc);
+       stats.isampdu = (bool) ((GET_RX_DESC_PAGGR(rxdesc) == 1)
+                                  && (GET_RX_DESC_FAGGR(rxdesc) == 1));
+       stats.timestamp_low = GET_RX_DESC_TSFL(rxdesc);
+       stats.rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(rxdesc);
+       /* TODO: is center_freq changed when doing scan? */
+       /* TODO: Shall we add protection or just skip those two step? */
+       rx_status->freq = hw->conf.channel->center_freq;
+       rx_status->band = hw->conf.channel->band;
+       if (GET_RX_DESC_CRC32(rxdesc))
+               rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+       if (!GET_RX_DESC_SWDEC(rxdesc))
+               rx_status->flag |= RX_FLAG_DECRYPTED;
+       if (GET_RX_DESC_BW(rxdesc))
+               rx_status->flag |= RX_FLAG_40MHZ;
+       if (GET_RX_DESC_RX_HT(rxdesc))
+               rx_status->flag |= RX_FLAG_HT;
+       /* Data rate */
+       rx_status->rate_idx = _rtl92c_rate_mapping(hw,
+                                               (bool)GET_RX_DESC_RX_HT(rxdesc),
+                                               (u8)GET_RX_DESC_RX_MCS(rxdesc),
+                                               (bool)GET_RX_DESC_PAGGR(rxdesc)
+                                               );
+       /*  There is a phy status after this rx descriptor. */
+       if (GET_RX_DESC_PHY_STATUS(rxdesc)) {
+               p_drvinfo = (struct rx_fwinfo_92c *)(rxdesc + RTL_RX_DESC_SIZE);
+               rtl92c_translate_rx_signal_stuff(hw, skb, &stats,
+                                (struct rx_desc_92c *)rxdesc, p_drvinfo);
+       }
+       skb_pull(skb, (drvinfo_len + RTL_RX_DESC_SIZE));
+       hdr = (struct ieee80211_hdr *)(skb->data);
+       fc = hdr->frame_control;
+       bv = ieee80211_is_probe_resp(fc);
+       if (bv)
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                        ("Got probe response frame.\n"));
+       if (ieee80211_is_beacon(fc))
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                        ("Got beacon frame.\n"));
+       if (ieee80211_is_data(fc))
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Got data frame.\n"));
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                ("Frame: fc = 0x%X addr1 = 0x%02X:0x%02X:0x%02X:0x%02X:0x%02X:"
+                "0x%02X\n", fc, (u32)hdr->addr1[0], (u32)hdr->addr1[1],
+                (u32)hdr->addr1[2], (u32)hdr->addr1[3], (u32)hdr->addr1[4],
+                (u32)hdr->addr1[5]));
+       ieee80211_rx_irqsafe(hw, skb);
+}
+
+void  rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff * skb)
+{
+       _rtl_rx_process(hw, skb);
+}
+
+void rtl8192c_rx_segregate_hdl(
+       struct ieee80211_hw *hw,
+       struct sk_buff *skb,
+       struct sk_buff_head *skb_list)
+{
+}
+
+/*----------------------------------------------------------------------
+ *
+ *     Tx handler
+ *
+ *---------------------------------------------------------------------- */
+void rtl8192c_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff  *skb)
+{
+}
+
+int rtl8192c_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb,
+                        struct sk_buff *skb)
+{
+       return 0;
+}
+
+struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *hw,
+                                          struct sk_buff_head *list)
+{
+       return skb_dequeue(list);
+}
+
+/*======================================== trx ===============================*/
+
+static void _rtl_fill_usb_tx_desc(u8 *txdesc)
+{
+       SET_TX_DESC_OWN(txdesc, 1);
+       SET_TX_DESC_LAST_SEG(txdesc, 1);
+       SET_TX_DESC_FIRST_SEG(txdesc, 1);
+}
+/*
+ * Fill in the descriptor checksum the HW uses for recovery: the 16-bit XOR
+ * of the first 16 16-bit words of the descriptor, computed with the
+ * checksum field itself cleared.
+ */
+static void _rtl_tx_desc_checksum(u8 *txdesc)
+{
+       u16 *ptr = (u16 *)txdesc;
+       u16     checksum = 0;
+       u32 index;
+
+       /* Clear first */
+       SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
+       for (index = 0; index < 16; index++)
+               checksum = checksum ^ (*(ptr + index));
+       SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum);
+}
+
+void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
+                         struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+                         struct ieee80211_tx_info *info, struct sk_buff *skb,
+                         unsigned int queue_index)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       bool defaultadapter = true;
+       struct ieee80211_sta *sta;
+       struct rtl_tcb_desc tcb_desc;
+       u8 *qc = ieee80211_get_qos_ctl(hdr);
+       u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+       u16 seq_number;
+       __le16 fc = hdr->frame_control;
+       u8 rate_flag = info->control.rates[0].flags;
+       u16 pktlen = skb->len;
+       enum rtl_desc_qsel fw_qsel = _rtl8192cu_mq_to_descq(hw, fc,
+                                               skb_get_queue_mapping(skb));
+       u8 *txdesc;
+
+       seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+       rtl_get_tcb_desc(hw, info, skb, &tcb_desc);
+       txdesc = (u8 *)skb_push(skb, RTL_TX_HEADER_SIZE);
+       memset(txdesc, 0, RTL_TX_HEADER_SIZE);
+       SET_TX_DESC_PKT_SIZE(txdesc, pktlen);
+       SET_TX_DESC_LINIP(txdesc, 0);
+       SET_TX_DESC_PKT_OFFSET(txdesc, RTL_DUMMY_OFFSET);
+       SET_TX_DESC_OFFSET(txdesc, RTL_TX_HEADER_SIZE);
+       SET_TX_DESC_TX_RATE(txdesc, tcb_desc.hw_rate);
+       if (tcb_desc.use_shortgi || tcb_desc.use_shortpreamble)
+               SET_TX_DESC_DATA_SHORTGI(txdesc, 1);
+       if (mac->tids[tid].agg.agg_state == RTL_AGG_ON &&
+                   info->flags & IEEE80211_TX_CTL_AMPDU) {
+               SET_TX_DESC_AGG_ENABLE(txdesc, 1);
+               SET_TX_DESC_MAX_AGG_NUM(txdesc, 0x14);
+       } else {
+               SET_TX_DESC_AGG_BREAK(txdesc, 1);
+       }
+       SET_TX_DESC_SEQ(txdesc, seq_number);
+       SET_TX_DESC_RTS_ENABLE(txdesc, ((tcb_desc.rts_enable &&
+                              !tcb_desc.cts_enable) ? 1 : 0));
+       SET_TX_DESC_HW_RTS_ENABLE(txdesc, ((tcb_desc.rts_enable ||
+                                 tcb_desc.cts_enable) ? 1 : 0));
+       SET_TX_DESC_CTS2SELF(txdesc, ((tcb_desc.cts_enable) ? 1 : 0));
+       SET_TX_DESC_RTS_STBC(txdesc, ((tcb_desc.rts_stbc) ? 1 : 0));
+       SET_TX_DESC_RTS_RATE(txdesc, tcb_desc.rts_rate);
+       SET_TX_DESC_RTS_BW(txdesc, 0);
+       SET_TX_DESC_RTS_SC(txdesc, tcb_desc.rts_sc);
+       SET_TX_DESC_RTS_SHORT(txdesc,
+                             ((tcb_desc.rts_rate <= DESC92C_RATE54M) ?
+                              (tcb_desc.rts_use_shortpreamble ? 1 : 0)
+                              : (tcb_desc.rts_use_shortgi ? 1 : 0)));
+       if (mac->bw_40) {
+               if (tcb_desc.packet_bw) {
+                       SET_TX_DESC_DATA_BW(txdesc, 1);
+                       SET_TX_DESC_DATA_SC(txdesc, 3);
+               } else {
+                       SET_TX_DESC_DATA_BW(txdesc, 0);
+                       if (rate_flag & IEEE80211_TX_RC_DUP_DATA)
+                               SET_TX_DESC_DATA_SC(txdesc,
+                                                   mac->cur_40_prime_sc);
+               }
+       } else {
+               SET_TX_DESC_DATA_BW(txdesc, 0);
+               SET_TX_DESC_DATA_SC(txdesc, 0);
+       }
+       rcu_read_lock();
+       sta = ieee80211_find_sta(mac->vif, mac->bssid);
+       if (sta) {
+               u8 ampdu_density = sta->ht_cap.ampdu_density;
+               SET_TX_DESC_AMPDU_DENSITY(txdesc, ampdu_density);
+       }
+       rcu_read_unlock();
+       if (info->control.hw_key) {
+               struct ieee80211_key_conf *keyconf = info->control.hw_key;
+               switch (keyconf->cipher) {
+               case WLAN_CIPHER_SUITE_WEP40:
+               case WLAN_CIPHER_SUITE_WEP104:
+               case WLAN_CIPHER_SUITE_TKIP:
+                       SET_TX_DESC_SEC_TYPE(txdesc, 0x1);
+                       break;
+               case WLAN_CIPHER_SUITE_CCMP:
+                       SET_TX_DESC_SEC_TYPE(txdesc, 0x3);
+                       break;
+               default:
+                       SET_TX_DESC_SEC_TYPE(txdesc, 0x0);
+                       break;
+               }
+       }
+       SET_TX_DESC_PKT_ID(txdesc, 0);
+       SET_TX_DESC_QUEUE_SEL(txdesc, fw_qsel);
+       SET_TX_DESC_DATA_RATE_FB_LIMIT(txdesc, 0x1F);
+       SET_TX_DESC_RTS_RATE_FB_LIMIT(txdesc, 0xF);
+       SET_TX_DESC_DISABLE_FB(txdesc, 0);
+       SET_TX_DESC_USE_RATE(txdesc, tcb_desc.use_driver_rate ? 1 : 0);
+       if (ieee80211_is_data_qos(fc)) {
+               if (mac->rdg_en) {
+                       RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+                                ("Enable RDG function.\n"));
+                       SET_TX_DESC_RDG_ENABLE(txdesc, 1);
+                       SET_TX_DESC_HTC(txdesc, 1);
+               }
+       }
+       if (rtlpriv->dm.useramask) {
+               SET_TX_DESC_RATE_ID(txdesc, tcb_desc.ratr_index);
+               SET_TX_DESC_MACID(txdesc, tcb_desc.mac_id);
+       } else {
+               SET_TX_DESC_RATE_ID(txdesc, 0xC + tcb_desc.ratr_index);
+               SET_TX_DESC_MACID(txdesc, tcb_desc.ratr_index);
+       }
+       if ((!ieee80211_is_data_qos(fc)) && ppsc->leisure_ps &&
+             ppsc->fwctrl_lps) {
+               SET_TX_DESC_HWSEQ_EN(txdesc, 1);
+               SET_TX_DESC_PKT_ID(txdesc, 8);
+               if (!defaultadapter)
+                       SET_TX_DESC_QOS(txdesc, 1);
+       }
+       if (ieee80211_has_morefrags(fc))
+               SET_TX_DESC_MORE_FRAG(txdesc, 1);
+       if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
+           is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+               SET_TX_DESC_BMC(txdesc, 1);
+       _rtl_fill_usb_tx_desc(txdesc);
+       _rtl_tx_desc_checksum(txdesc);
+       RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, (" %s ==>\n", __func__));
+}
+
+void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
+                             u32 buffer_len, bool bIsPsPoll)
+{
+       /* Clear all status */
+       memset(pDesc, 0, RTL_TX_HEADER_SIZE);
+       SET_TX_DESC_FIRST_SEG(pDesc, 1); /* bFirstSeg; */
+       SET_TX_DESC_LAST_SEG(pDesc, 1); /* bLastSeg; */
+       SET_TX_DESC_OFFSET(pDesc, RTL_TX_HEADER_SIZE); /* Offset = 32 */
+       SET_TX_DESC_PKT_SIZE(pDesc, buffer_len); /* Buffer size + command hdr */
+       SET_TX_DESC_QUEUE_SEL(pDesc, QSLT_MGNT); /* Fixed queue of Mgnt queue */
+       /* Set NAVUSEHDR to prevent the PS-Poll AID field from being
+        * changed to a wrong value by the HW. */
+       if (bIsPsPoll) {
+               SET_TX_DESC_NAV_USE_HDR(pDesc, 1);
+       } else {
+               SET_TX_DESC_HWSEQ_EN(pDesc, 1); /* Hw set sequence number */
+               SET_TX_DESC_PKT_ID(pDesc, 0x100); /* set bit3 to 1. */
+       }
+       SET_TX_DESC_USE_RATE(pDesc, 1); /* use data rate which is set by Sw */
+       SET_TX_DESC_OWN(pDesc, 1);
+       SET_TX_DESC_TX_RATE(pDesc, DESC92C_RATE1M);
+       _rtl_tx_desc_checksum(pDesc);
+}
+
+void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
+                            u8 *pdesc, bool firstseg,
+                            bool lastseg, struct sk_buff *skb)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 fw_queue = QSLT_BEACON;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+       __le16 fc = hdr->frame_control;
+
+       memset((void *)pdesc, 0, RTL_TX_HEADER_SIZE);
+       if (firstseg)
+               SET_TX_DESC_OFFSET(pdesc, RTL_TX_HEADER_SIZE);
+       SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
+       SET_TX_DESC_SEQ(pdesc, 0);
+       SET_TX_DESC_LINIP(pdesc, 0);
+       SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
+       SET_TX_DESC_FIRST_SEG(pdesc, 1);
+       SET_TX_DESC_LAST_SEG(pdesc, 1);
+       SET_TX_DESC_RATE_ID(pdesc, 7);
+       SET_TX_DESC_MACID(pdesc, 0);
+       SET_TX_DESC_OWN(pdesc, 1);
+       SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len));
+       SET_TX_DESC_OFFSET(pdesc, 0x20);
+       SET_TX_DESC_USE_RATE(pdesc, 1);
+       if (!ieee80211_is_data_qos(fc)) {
+               SET_TX_DESC_HWSEQ_EN(pdesc, 1);
+               SET_TX_DESC_PKT_ID(pdesc, 8);
+       }
+       RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content\n",
+                     pdesc, RTL_TX_DESC_SIZE);
+}
+
+bool rtl92cu_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       return true;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
new file mode 100644 (file)
index 0000000..b396d46
--- /dev/null
@@ -0,0 +1,430 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_TRX_H__
+#define __RTL92CU_TRX_H__
+
+#define RTL92C_USB_BULK_IN_NUM                 1
+#define RTL92C_NUM_RX_URBS                     8
+#define RTL92C_NUM_TX_URBS                     32
+
+#define RTL92C_SIZE_MAX_RX_BUFFER              15360   /* 8192 */
+#define RX_DRV_INFO_SIZE_UNIT                  8
+
+enum usb_rx_agg_mode {
+       USB_RX_AGG_DISABLE,
+       USB_RX_AGG_DMA,
+       USB_RX_AGG_USB,
+       USB_RX_AGG_DMA_USB
+};
+
+#define TX_SELE_HQ                             BIT(0)  /* High Queue */
+#define TX_SELE_LQ                             BIT(1)  /* Low Queue */
+#define TX_SELE_NQ                             BIT(2)  /* Normal Queue */
+
+#define RTL_USB_TX_AGG_NUM_DESC                        5
+
+#define RTL_USB_RX_AGG_PAGE_NUM                        4
+#define RTL_USB_RX_AGG_PAGE_TIMEOUT            3
+
+#define RTL_USB_RX_AGG_BLOCK_NUM               5
+#define RTL_USB_RX_AGG_BLOCK_TIMEOUT           3
+
+/*======================== rx status =========================================*/
+
+struct rx_drv_info_92c {
+       /*
+        * Driver info contains the PHY status and other variable-size info.
+        * The PHY status content is laid out as below.
+        */
+
+       /* DWORD 0 */
+       u8 gain_trsw[4];
+
+       /* DWORD 1 */
+       u8 pwdb_all;
+       u8 cfosho[4];
+
+       /* DWORD 2 */
+       u8 cfotail[4];
+
+       /* DWORD 3 */
+       s8 rxevm[2];
+       s8 rxsnr[4];
+
+       /* DWORD 4 */
+       u8 pdsnr[2];
+
+       /* DWORD 5 */
+       u8 csi_current[2];
+       u8 csi_target[2];
+
+       /* DWORD 6 */
+       u8 sigevm;
+       u8 max_ex_pwr;
+       u8 ex_intf_flag:1;
+       u8 sgi_en:1;
+       u8 rxsc:2;
+       u8 reserve:4;
+} __packed;
+
+/* Define a macro that takes a le32 word, converts it to host ordering,
+ * right shifts by a specified count, creates a mask of the specified
+ * bit count, and extracts that number of bits.
+ */
+
+#define SHIFT_AND_MASK_LE(__pdesc, __shift, __bits)            \
+       ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
+       BIT_LEN_MASK_32(__bits))
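+
+/* Illustration only: reading the 14-bit packet length out of the first RX
+ * descriptor dword would be
+ *     pkt_len = SHIFT_AND_MASK_LE(rxdesc, 0, 14);
+ * which is exactly what the GET_RX_DESC_PKT_LEN() accessor below expands to.
+ */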
+
+/* Define a macro that clears a bit field in an le32 word and
+ * sets the specified value into that bit field. The resulting
+ * value remains in le32 ordering; however, it is properly converted
+ * to host ordering for the clear and set operations before conversion
+ * back to le32.
+ */
+
+#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val)     \
+       (*(__le32 *)(__pdesc) =                                 \
+       (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) &     \
+       (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) |                \
+       (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))))
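+
+/* Illustration only: setting the OWN bit in dword 0 of a TX descriptor
+ * would be
+ *     SET_BITS_OFFSET_LE(txdesc, 31, 1, 1);
+ * which is what the SET_TX_DESC_OWN() helper below expands to.
+ */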
+
+/* macros to read various fields in RX descriptor */
+
+/* DWORD 0 */
+#define GET_RX_DESC_PKT_LEN(__rxdesc)          \
+       SHIFT_AND_MASK_LE((__rxdesc), 0, 14)
+#define GET_RX_DESC_CRC32(__rxdesc)            \
+       SHIFT_AND_MASK_LE(__rxdesc, 14, 1)
+#define GET_RX_DESC_ICV(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc, 15, 1)
+#define GET_RX_DESC_DRVINFO_SIZE(__rxdesc)     \
+       SHIFT_AND_MASK_LE(__rxdesc, 16, 4)
+#define GET_RX_DESC_SECURITY(__rxdesc)         \
+       SHIFT_AND_MASK_LE(__rxdesc, 20, 3)
+#define GET_RX_DESC_QOS(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc, 23, 1)
+#define GET_RX_DESC_SHIFT(__rxdesc)            \
+       SHIFT_AND_MASK_LE(__rxdesc, 24, 2)
+#define GET_RX_DESC_PHY_STATUS(__rxdesc)       \
+       SHIFT_AND_MASK_LE(__rxdesc, 26, 1)
+#define GET_RX_DESC_SWDEC(__rxdesc)            \
+       SHIFT_AND_MASK_LE(__rxdesc, 27, 1)
+#define GET_RX_DESC_LAST_SEG(__rxdesc)         \
+       SHIFT_AND_MASK_LE(__rxdesc, 28, 1)
+#define GET_RX_DESC_FIRST_SEG(__rxdesc)                \
+       SHIFT_AND_MASK_LE(__rxdesc, 29, 1)
+#define GET_RX_DESC_EOR(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc, 30, 1)
+#define GET_RX_DESC_OWN(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc, 31, 1)
+
+/* DWORD 1 */
+#define GET_RX_DESC_MACID(__rxdesc)            \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 0, 5)
+#define GET_RX_DESC_TID(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 5, 4)
+#define GET_RX_DESC_PAGGR(__rxdesc)            \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 14, 1)
+#define GET_RX_DESC_FAGGR(__rxdesc)            \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 15, 1)
+#define GET_RX_DESC_A1_FIT(__rxdesc)           \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 16, 4)
+#define GET_RX_DESC_A2_FIT(__rxdesc)           \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 20, 4)
+#define GET_RX_DESC_PAM(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 24, 1)
+#define GET_RX_DESC_PWR(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 25, 1)
+#define GET_RX_DESC_MORE_DATA(__rxdesc)                \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 26, 1)
+#define GET_RX_DESC_MORE_FRAG(__rxdesc)                \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 27, 1)
+#define GET_RX_DESC_TYPE(__rxdesc)             \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 28, 2)
+#define GET_RX_DESC_MC(__rxdesc)               \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 30, 1)
+#define GET_RX_DESC_BC(__rxdesc)               \
+       SHIFT_AND_MASK_LE(__rxdesc+4, 31, 1)
+
+/* DWORD 2 */
+#define GET_RX_DESC_SEQ(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc+8, 0, 12)
+#define GET_RX_DESC_FRAG(__rxdesc)             \
+       SHIFT_AND_MASK_LE(__rxdesc+8, 12, 4)
+#define GET_RX_DESC_USB_AGG_PKTNUM(__rxdesc)   \
+       SHIFT_AND_MASK_LE(__rxdesc+8, 16, 8)
+#define GET_RX_DESC_NEXT_IND(__rxdesc)         \
+       SHIFT_AND_MASK_LE(__rxdesc+8, 30, 1)
+
+/* DWORD 3 */
+#define GET_RX_DESC_RX_MCS(__rxdesc)           \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 0, 6)
+#define GET_RX_DESC_RX_HT(__rxdesc)            \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 6, 1)
+#define GET_RX_DESC_AMSDU(__rxdesc)            \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 7, 1)
+#define GET_RX_DESC_SPLCP(__rxdesc)            \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 8, 1)
+#define GET_RX_DESC_BW(__rxdesc)               \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 9, 1)
+#define GET_RX_DESC_HTC(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 10, 1)
+#define GET_RX_DESC_TCP_CHK_RPT(__rxdesc)      \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 11, 1)
+#define GET_RX_DESC_IP_CHK_RPT(__rxdesc)       \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 12, 1)
+#define GET_RX_DESC_TCP_CHK_VALID(__rxdesc)    \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 13, 1)
+#define GET_RX_DESC_HWPC_ERR(__rxdesc)         \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 14, 1)
+#define GET_RX_DESC_HWPC_IND(__rxdesc)         \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 15, 1)
+#define GET_RX_DESC_IV0(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc+12, 16, 16)
+
+/* DWORD 4 */
+#define GET_RX_DESC_IV1(__rxdesc)              \
+       SHIFT_AND_MASK_LE(__rxdesc+16, 0, 32)
+
+/* DWORD 5 */
+#define GET_RX_DESC_TSFL(__rxdesc)             \
+       SHIFT_AND_MASK_LE(__rxdesc+20, 0, 32)
+
+/*======================= tx desc ============================================*/
+
+/* macros to set various fields in TX descriptor */
+
+/* Dword 0 */
+#define SET_TX_DESC_PKT_SIZE(__txdesc, __value)                \
+       SET_BITS_OFFSET_LE(__txdesc, 0, 16, __value)
+#define SET_TX_DESC_OFFSET(__txdesc, __value)          \
+       SET_BITS_OFFSET_LE(__txdesc, 16, 8, __value)
+#define SET_TX_DESC_BMC(__txdesc, __value)             \
+       SET_BITS_OFFSET_LE(__txdesc, 24, 1, __value)
+#define SET_TX_DESC_HTC(__txdesc, __value)             \
+       SET_BITS_OFFSET_LE(__txdesc, 25, 1, __value)
+#define SET_TX_DESC_LAST_SEG(__txdesc, __value)                \
+       SET_BITS_OFFSET_LE(__txdesc, 26, 1, __value)
+#define SET_TX_DESC_FIRST_SEG(__txdesc, __value)       \
+        SET_BITS_OFFSET_LE(__txdesc, 27, 1, __value)
+#define SET_TX_DESC_LINIP(__txdesc, __value)           \
+       SET_BITS_OFFSET_LE(__txdesc, 28, 1, __value)
+#define SET_TX_DESC_NO_ACM(__txdesc, __value)          \
+       SET_BITS_OFFSET_LE(__txdesc, 29, 1, __value)
+#define SET_TX_DESC_GF(__txdesc, __value)              \
+       SET_BITS_OFFSET_LE(__txdesc, 30, 1, __value)
+#define SET_TX_DESC_OWN(__txdesc, __value)             \
+       SET_BITS_OFFSET_LE(__txdesc, 31, 1, __value)
+
+
+/* Dword 1 */
+#define SET_TX_DESC_MACID(__txdesc, __value)           \
+       SET_BITS_OFFSET_LE(__txdesc+4, 0, 5, __value)
+#define SET_TX_DESC_AGG_ENABLE(__txdesc, __value)      \
+       SET_BITS_OFFSET_LE(__txdesc+4, 5, 1, __value)
+#define SET_TX_DESC_AGG_BREAK(__txdesc, __value)       \
+       SET_BITS_OFFSET_LE(__txdesc+4, 6, 1, __value)
+#define SET_TX_DESC_RDG_ENABLE(__txdesc, __value)      \
+       SET_BITS_OFFSET_LE(__txdesc+4, 7, 1, __value)
+#define SET_TX_DESC_QUEUE_SEL(__txdesc, __value)       \
+       SET_BITS_OFFSET_LE(__txdesc+4, 8, 5, __value)
+#define SET_TX_DESC_RDG_NAV_EXT(__txdesc, __value)     \
+       SET_BITS_OFFSET_LE(__txdesc+4, 13, 1, __value)
+#define SET_TX_DESC_LSIG_TXOP_EN(__txdesc, __value)    \
+       SET_BITS_OFFSET_LE(__txdesc+4, 14, 1, __value)
+#define SET_TX_DESC_PIFS(__txdesc, __value)            \
+       SET_BITS_OFFSET_LE(__txdesc+4, 15, 1, __value)
+#define SET_TX_DESC_RATE_ID(__txdesc, __value)         \
+       SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value)
+#define SET_TX_DESC_RA_BRSR_ID(__txdesc, __value)      \
+       SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value)
+#define SET_TX_DESC_NAV_USE_HDR(__txdesc, __value)     \
+       SET_BITS_OFFSET_LE(__txdesc+4, 20, 1, __value)
+#define SET_TX_DESC_EN_DESC_ID(__txdesc, __value)      \
+       SET_BITS_OFFSET_LE(__txdesc+4, 21, 1, __value)
+#define SET_TX_DESC_SEC_TYPE(__txdesc, __value)                \
+       SET_BITS_OFFSET_LE(__txdesc+4, 22, 2, __value)
+#define SET_TX_DESC_PKT_OFFSET(__txdesc, __value)      \
+       SET_BITS_OFFSET_LE(__txdesc+4, 26, 5, __value)
+
+/* Dword 2 */
+#define SET_TX_DESC_RTS_RC(__txdesc, __value)          \
+       SET_BITS_OFFSET_LE(__txdesc+8, 0, 6, __value)
+#define SET_TX_DESC_DATA_RC(__txdesc, __value)         \
+       SET_BITS_OFFSET_LE(__txdesc+8, 6, 6, __value)
+#define SET_TX_DESC_BAR_RTY_TH(__txdesc, __value)      \
+       SET_BITS_OFFSET_LE(__txdesc+8, 14, 2, __value)
+#define SET_TX_DESC_MORE_FRAG(__txdesc, __value)       \
+       SET_BITS_OFFSET_LE(__txdesc+8, 17, 1, __value)
+#define SET_TX_DESC_RAW(__txdesc, __value)             \
+       SET_BITS_OFFSET_LE(__txdesc+8, 18, 1, __value)
+#define SET_TX_DESC_CCX(__txdesc, __value)             \
+       SET_BITS_OFFSET_LE(__txdesc+8, 19, 1, __value)
+#define SET_TX_DESC_AMPDU_DENSITY(__txdesc, __value)   \
+       SET_BITS_OFFSET_LE(__txdesc+8, 20, 3, __value)
+#define SET_TX_DESC_ANTSEL_A(__txdesc, __value)                \
+       SET_BITS_OFFSET_LE(__txdesc+8, 24, 1, __value)
+#define SET_TX_DESC_ANTSEL_B(__txdesc, __value)                \
+       SET_BITS_OFFSET_LE(__txdesc+8, 25, 1, __value)
+#define SET_TX_DESC_TX_ANT_CCK(__txdesc, __value)      \
+       SET_BITS_OFFSET_LE(__txdesc+8, 26, 2, __value)
+#define SET_TX_DESC_TX_ANTL(__txdesc, __value)         \
+       SET_BITS_OFFSET_LE(__txdesc+8, 28, 2, __value)
+#define SET_TX_DESC_TX_ANT_HT(__txdesc, __value)       \
+       SET_BITS_OFFSET_LE(__txdesc+8, 30, 2, __value)
+
+/* Dword 3 */
+#define SET_TX_DESC_NEXT_HEAP_PAGE(__txdesc, __value)  \
+       SET_BITS_OFFSET_LE(__txdesc+12, 0, 8, __value)
+#define SET_TX_DESC_TAIL_PAGE(__txdesc, __value)       \
+       SET_BITS_OFFSET_LE(__txdesc+12, 8, 8, __value)
+#define SET_TX_DESC_SEQ(__txdesc, __value)             \
+       SET_BITS_OFFSET_LE(__txdesc+12, 16, 12, __value)
+#define SET_TX_DESC_PKT_ID(__txdesc, __value)          \
+       SET_BITS_OFFSET_LE(__txdesc+12, 28, 4, __value)
+
+/* Dword 4 */
+#define SET_TX_DESC_RTS_RATE(__txdesc, __value)                \
+       SET_BITS_OFFSET_LE(__txdesc+16, 0, 5, __value)
+#define SET_TX_DESC_AP_DCFE(__txdesc, __value)         \
+       SET_BITS_OFFSET_LE(__txdesc+16, 5, 1, __value)
+#define SET_TX_DESC_QOS(__txdesc, __value)             \
+       SET_BITS_OFFSET_LE(__txdesc+16, 6, 1, __value)
+#define SET_TX_DESC_HWSEQ_EN(__txdesc, __value)                \
+       SET_BITS_OFFSET_LE(__txdesc+16, 7, 1, __value)
+#define SET_TX_DESC_USE_RATE(__txdesc, __value)                \
+       SET_BITS_OFFSET_LE(__txdesc+16, 8, 1, __value)
+#define SET_TX_DESC_DISABLE_RTS_FB(__txdesc, __value)  \
+       SET_BITS_OFFSET_LE(__txdesc+16, 9, 1, __value)
+#define SET_TX_DESC_DISABLE_FB(__txdesc, __value)      \
+       SET_BITS_OFFSET_LE(__txdesc+16, 10, 1, __value)
+#define SET_TX_DESC_CTS2SELF(__txdesc, __value)                \
+       SET_BITS_OFFSET_LE(__txdesc+16, 11, 1, __value)
+#define SET_TX_DESC_RTS_ENABLE(__txdesc, __value)      \
+       SET_BITS_OFFSET_LE(__txdesc+16, 12, 1, __value)
+#define SET_TX_DESC_HW_RTS_ENABLE(__txdesc, __value)   \
+       SET_BITS_OFFSET_LE(__txdesc+16, 13, 1, __value)
+#define SET_TX_DESC_WAIT_DCTS(__txdesc, __value)       \
+       SET_BITS_OFFSET_LE(__txdesc+16, 18, 1, __value)
+#define SET_TX_DESC_CTS2AP_EN(__txdesc, __value)       \
+       SET_BITS_OFFSET_LE(__txdesc+16, 19, 1, __value)
+#define SET_TX_DESC_DATA_SC(__txdesc, __value)         \
+       SET_BITS_OFFSET_LE(__txdesc+16, 20, 2, __value)
+#define SET_TX_DESC_DATA_STBC(__txdesc, __value)       \
+       SET_BITS_OFFSET_LE(__txdesc+16, 22, 2, __value)
+#define SET_TX_DESC_DATA_SHORT(__txdesc, __value)      \
+       SET_BITS_OFFSET_LE(__txdesc+16, 24, 1, __value)
+#define SET_TX_DESC_DATA_BW(__txdesc, __value)         \
+       SET_BITS_OFFSET_LE(__txdesc+16, 25, 1, __value)
+#define SET_TX_DESC_RTS_SHORT(__txdesc, __value)       \
+       SET_BITS_OFFSET_LE(__txdesc+16, 26, 1, __value)
+#define SET_TX_DESC_RTS_BW(__txdesc, __value)          \
+       SET_BITS_OFFSET_LE(__txdesc+16, 27, 1, __value)
+#define SET_TX_DESC_RTS_SC(__txdesc, __value)          \
+       SET_BITS_OFFSET_LE(__txdesc+16, 28, 2, __value)
+#define SET_TX_DESC_RTS_STBC(__txdesc, __value)                \
+       SET_BITS_OFFSET_LE(__txdesc+16, 30, 2, __value)
+
+/* Dword 5 */
+#define SET_TX_DESC_TX_RATE(__pdesc, __val)            \
+       SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val)
+#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val)       \
+       SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val)
+#define SET_TX_DESC_CCX_TAG(__pdesc, __val)            \
+       SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val)
+#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__txdesc, __value) \
+       SET_BITS_OFFSET_LE(__txdesc+20, 8, 5, __value)
+#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__txdesc, __value) \
+       SET_BITS_OFFSET_LE(__txdesc+20, 13, 4, __value)
+#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__txdesc, __value) \
+       SET_BITS_OFFSET_LE(__txdesc+20, 17, 1, __value)
+#define SET_TX_DESC_DATA_RETRY_LIMIT(__txdesc, __value)        \
+       SET_BITS_OFFSET_LE(__txdesc+20, 18, 6, __value)
+#define SET_TX_DESC_USB_TXAGG_NUM(__txdesc, __value)   \
+       SET_BITS_OFFSET_LE(__txdesc+20, 24, 8, __value)
+
+/* Dword 6 */
+#define SET_TX_DESC_TXAGC_A(__txdesc, __value)         \
+       SET_BITS_OFFSET_LE(__txdesc+24, 0, 5, __value)
+#define SET_TX_DESC_TXAGC_B(__txdesc, __value)         \
+       SET_BITS_OFFSET_LE(__txdesc+24, 5, 5, __value)
+#define SET_TX_DESC_USB_MAX_LEN(__txdesc, __value)     \
+       SET_BITS_OFFSET_LE(__txdesc+24, 10, 1, __value)
+#define SET_TX_DESC_MAX_AGG_NUM(__txdesc, __value)     \
+       SET_BITS_OFFSET_LE(__txdesc+24, 11, 5, __value)
+#define SET_TX_DESC_MCSG1_MAX_LEN(__txdesc, __value)   \
+       SET_BITS_OFFSET_LE(__txdesc+24, 16, 4, __value)
+#define SET_TX_DESC_MCSG2_MAX_LEN(__txdesc, __value)   \
+       SET_BITS_OFFSET_LE(__txdesc+24, 20, 4, __value)
+#define SET_TX_DESC_MCSG3_MAX_LEN(__txdesc, __value)   \
+       SET_BITS_OFFSET_LE(__txdesc+24, 24, 4, __value)
+#define SET_TX_DESC_MCSG7_MAX_LEN(__txdesc, __value)   \
+       SET_BITS_OFFSET_LE(__txdesc+24, 28, 4, __value)
+
+/* Dword 7 */
+#define SET_TX_DESC_TX_DESC_CHECKSUM(__txdesc, __value) \
+       SET_BITS_OFFSET_LE(__txdesc+28, 0, 16, __value)
+#define SET_TX_DESC_MCSG4_MAX_LEN(__txdesc, __value)   \
+       SET_BITS_OFFSET_LE(__txdesc+28, 16, 4, __value)
+#define SET_TX_DESC_MCSG5_MAX_LEN(__txdesc, __value)   \
+       SET_BITS_OFFSET_LE(__txdesc+28, 20, 4, __value)
+#define SET_TX_DESC_MCSG6_MAX_LEN(__txdesc, __value)   \
+       SET_BITS_OFFSET_LE(__txdesc+28, 24, 4, __value)
+#define SET_TX_DESC_MCSG15_MAX_LEN(__txdesc, __value)  \
+       SET_BITS_OFFSET_LE(__txdesc+28, 28, 4, __value)
+
+
+int  rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw);
+u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index);
+bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
+                          struct rtl_stats *stats,
+                          struct ieee80211_rx_status *rx_status,
+                          u8 *p_desc, struct sk_buff *skb);
+void  rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff * skb);
+void rtl8192c_rx_segregate_hdl(struct ieee80211_hw *, struct sk_buff *,
+                              struct sk_buff_head *);
+void rtl8192c_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff  *skb);
+int rtl8192c_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb,
+                        struct sk_buff *skb);
+struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *,
+                                          struct sk_buff_head *);
+void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
+                         struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+                         struct ieee80211_tx_info *info, struct sk_buff *skb,
+                         unsigned int queue_index);
+void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
+                             u32 buffer_len, bool bIsPsPoll);
+void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
+                            u8 *pdesc, bool b_firstseg,
+                            bool b_lastseg, struct sk_buff *skb);
+bool rtl92cu_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
new file mode 100644 (file)
index 0000000..a4b2613
--- /dev/null
@@ -0,0 +1,1035 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2011  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ *****************************************************************************/
+#include <linux/usb.h>
+#include "core.h"
+#include "wifi.h"
+#include "usb.h"
+#include "base.h"
+#include "ps.h"
+
+#define        REALTEK_USB_VENQT_READ                  0xC0
+#define        REALTEK_USB_VENQT_WRITE                 0x40
+#define REALTEK_USB_VENQT_CMD_REQ              0x05
+#define        REALTEK_USB_VENQT_CMD_IDX               0x00
+
+#define REALTEK_USB_VENQT_MAX_BUF_SIZE         254
+
+static void usbctrl_async_callback(struct urb *urb)
+{
+       if (urb)
+               kfree(urb->context);
+}
+
+static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
+                                         u16 value, u16 index, void *pdata,
+                                         u16 len)
+{
+       int rc;
+       unsigned int pipe;
+       u8 reqtype;
+       struct usb_ctrlrequest *dr;
+       struct urb *urb;
+       struct rtl819x_async_write_data {
+               u8 data[REALTEK_USB_VENQT_MAX_BUF_SIZE];
+               struct usb_ctrlrequest dr;
+       } *buf;
+
+       pipe = usb_sndctrlpipe(udev, 0); /* write_out */
+       reqtype =  REALTEK_USB_VENQT_WRITE;
+
+       buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
+       if (!buf)
+               return -ENOMEM;
+
+       urb = usb_alloc_urb(0, GFP_ATOMIC);
+       if (!urb) {
+               kfree(buf);
+               return -ENOMEM;
+       }
+
+       dr = &buf->dr;
+
+       dr->bRequestType = reqtype;
+       dr->bRequest = request;
+       dr->wValue = cpu_to_le16(value);
+       dr->wIndex = cpu_to_le16(index);
+       dr->wLength = cpu_to_le16(len);
+       memcpy(buf, pdata, len);
+       usb_fill_control_urb(urb, udev, pipe,
+                            (unsigned char *)dr, buf, len,
+                            usbctrl_async_callback, buf);
+       rc = usb_submit_urb(urb, GFP_ATOMIC);
+       if (rc < 0)
+               kfree(buf);
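+       /* usb_free_urb() only drops our reference; on a successful submit
+        * the USB core holds its own reference until the URB completes. */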
+       usb_free_urb(urb);
+       return rc;
+}
+
+static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
+                                       u16 value, u16 index, void *pdata,
+                                       u16 len)
+{
+       unsigned int pipe;
+       int status;
+       u8 reqtype;
+
+       pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
+       reqtype =  REALTEK_USB_VENQT_READ;
+
+       status = usb_control_msg(udev, pipe, request, reqtype, value, index,
+                                pdata, len, 0); /* timeout 0: wait forever */
+
+       if (status < 0)
+               printk(KERN_ERR "reg 0x%x, usbctrl_vendorreq TimeOut! "
+                      "status:0x%x value=0x%x\n", value, status,
+                      *(u32 *)pdata);
+       return status;
+}
+
+static u32 _usb_read_sync(struct usb_device *udev, u32 addr, u16 len)
+{
+       u8 request;
+       u16 wvalue;
+       u16 index;
+       u32 *data;
+       u32 ret;
+
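+       /* usb_control_msg() needs a DMA-able buffer, so the 4-byte read value
+        * is kmalloc'ed rather than read into a stack variable. */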
+       data = kmalloc(sizeof(u32), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+       request = REALTEK_USB_VENQT_CMD_REQ;
+       index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
+
+       wvalue = (u16)addr;
+       _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
+       ret = *data;
+       kfree(data);
+       return ret;
+}
+
+static u8 _usb_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+       struct device *dev = rtlpriv->io.dev;
+
+       return (u8)_usb_read_sync(to_usb_device(dev), addr, 1);
+}
+
+static u16 _usb_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+       struct device *dev = rtlpriv->io.dev;
+
+       return (u16)_usb_read_sync(to_usb_device(dev), addr, 2);
+}
+
+static u32 _usb_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+       struct device *dev = rtlpriv->io.dev;
+
+       return _usb_read_sync(to_usb_device(dev), addr, 4);
+}
+
+static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val,
+                            u16 len)
+{
+       u8 request;
+       u16 wvalue;
+       u16 index;
+       u32 data;
+
+       request = REALTEK_USB_VENQT_CMD_REQ;
+       index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
+       wvalue = (u16)(addr&0x0000ffff);
+       data = val;
+       _usbctrl_vendorreq_async_write(udev, request, wvalue, index, &data,
+                                      len);
+}
+
+static void _usb_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
+{
+       struct device *dev = rtlpriv->io.dev;
+
+       _usb_write_async(to_usb_device(dev), addr, val, 1);
+}
+
+static void _usb_write16_async(struct rtl_priv *rtlpriv, u32 addr, u16 val)
+{
+       struct device *dev = rtlpriv->io.dev;
+
+       _usb_write_async(to_usb_device(dev), addr, val, 2);
+}
+
+static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val)
+{
+       struct device *dev = rtlpriv->io.dev;
+
+       _usb_write_async(to_usb_device(dev), addr, val, 4);
+}
+
+static int _usb_nbytes_read_write(struct usb_device *udev, bool read, u32 addr,
+                                 u16 len, u8 *pdata)
+{
+       int status;
+       u8 request;
+       u16 wvalue;
+       u16 index;
+
+       request = REALTEK_USB_VENQT_CMD_REQ;
+       index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
+       wvalue = (u16)addr;
+       if (read)
+               status = _usbctrl_vendorreq_sync_read(udev, request, wvalue,
+                                                     index, pdata, len);
+       else
+               status = _usbctrl_vendorreq_async_write(udev, request, wvalue,
+                                                       index, pdata, len);
+       return status;
+}
+
+static int _usb_readN_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len,
+                          u8 *pdata)
+{
+       struct device *dev = rtlpriv->io.dev;
+
+       return _usb_nbytes_read_write(to_usb_device(dev), true, addr, len,
+                                      pdata);
+}
+
+static int _usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, u16 len,
+                            u8 *pdata)
+{
+       struct device *dev = rtlpriv->io.dev;
+
+       return _usb_nbytes_read_write(to_usb_device(dev), false, addr, len,
+                                     pdata);
+}
+
+static void _rtl_usb_io_handler_init(struct device *dev,
+                                    struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtlpriv->io.dev = dev;
+       mutex_init(&rtlpriv->io.bb_mutex);
+       rtlpriv->io.write8_async        = _usb_write8_async;
+       rtlpriv->io.write16_async       = _usb_write16_async;
+       rtlpriv->io.write32_async       = _usb_write32_async;
+       rtlpriv->io.writeN_async        = _usb_writeN_async;
+       rtlpriv->io.read8_sync          = _usb_read8_sync;
+       rtlpriv->io.read16_sync         = _usb_read16_sync;
+       rtlpriv->io.read32_sync         = _usb_read32_sync;
+       rtlpriv->io.readN_sync          = _usb_readN_sync;
+}
+
+static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       mutex_destroy(&rtlpriv->io.bb_mutex);
+}
+
+/*
+ * Default aggregation handler: no aggregation is done, just dequeue and
+ * return the oldest skb.
+ */
+static struct sk_buff *_none_usb_tx_aggregate_hdl(struct ieee80211_hw *hw,
+                                                 struct sk_buff_head *list)
+{
+       return skb_dequeue(list);
+}
+
+#define IS_HIGH_SPEED_USB(udev) \
+               (USB_SPEED_HIGH == (udev)->speed)
+
+static int _rtl_usb_init_tx(struct ieee80211_hw *hw)
+{
+       u32 i;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       rtlusb->max_bulk_out_size = IS_HIGH_SPEED_USB(rtlusb->udev)
+                                                   ? USB_HIGH_SPEED_BULK_SIZE
+                                                   : USB_FULL_SPEED_BULK_SIZE;
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("USB Max Bulk-out Size=%d\n",
+                rtlusb->max_bulk_out_size));
+
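+       /* Every TX queue must already be mapped to a bulk-out endpoint. */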
+       for (i = 0; i < __RTL_TXQ_NUM; i++) {
+               u32 ep_num = rtlusb->ep_map.ep_mapping[i];
+               if (!ep_num) {
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                                ("Invalid endpoint map setting!\n"));
+                       return -EINVAL;
+               }
+       }
+
+       rtlusb->usb_tx_post_hdl =
+                rtlpriv->cfg->usb_interface_cfg->usb_tx_post_hdl;
+       rtlusb->usb_tx_cleanup  =
+                rtlpriv->cfg->usb_interface_cfg->usb_tx_cleanup;
+       rtlusb->usb_tx_aggregate_hdl =
+                (rtlpriv->cfg->usb_interface_cfg->usb_tx_aggregate_hdl)
+                ? rtlpriv->cfg->usb_interface_cfg->usb_tx_aggregate_hdl
+                : &_none_usb_tx_aggregate_hdl;
+
+       init_usb_anchor(&rtlusb->tx_submitted);
+       for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) {
+               skb_queue_head_init(&rtlusb->tx_skb_queue[i]);
+               init_usb_anchor(&rtlusb->tx_pending[i]);
+       }
+       return 0;
+}
+
+static int _rtl_usb_init_rx(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+
+       rtlusb->rx_max_size = rtlpriv->cfg->usb_interface_cfg->rx_max_size;
+       rtlusb->rx_urb_num = rtlpriv->cfg->usb_interface_cfg->rx_urb_num;
+       rtlusb->in_ep = rtlpriv->cfg->usb_interface_cfg->in_ep_num;
+       rtlusb->usb_rx_hdl = rtlpriv->cfg->usb_interface_cfg->usb_rx_hdl;
+       rtlusb->usb_rx_segregate_hdl =
+               rtlpriv->cfg->usb_interface_cfg->usb_rx_segregate_hdl;
+
+       printk(KERN_INFO "rtlwifi: rx_max_size %d, rx_urb_num %d, in_ep %d\n",
+              rtlusb->rx_max_size, rtlusb->rx_urb_num, rtlusb->in_ep);
+       init_usb_anchor(&rtlusb->rx_submitted);
+       return 0;
+}
+
+static int _rtl_usb_init(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+       int err;
+       u8 epidx;
+       struct usb_interface    *usb_intf = rtlusb->intf;
+       u8 epnums = usb_intf->cur_altsetting->desc.bNumEndpoints;
+
+       rtlusb->out_ep_nums = rtlusb->in_ep_nums = 0;
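+       /* Count the interface's IN/OUT endpoints for the sanity check below. */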
+       for (epidx = 0; epidx < epnums; epidx++) {
+               struct usb_endpoint_descriptor *pep_desc;
+               pep_desc = &usb_intf->cur_altsetting->endpoint[epidx].desc;
+
+               if (usb_endpoint_dir_in(pep_desc))
+                       rtlusb->in_ep_nums++;
+               else if (usb_endpoint_dir_out(pep_desc))
+                       rtlusb->out_ep_nums++;
+
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                        ("USB EP(0x%02x), MaxPacketSize=%d ,Interval=%d.\n",
+                        pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize,
+                        pep_desc->bInterval));
+       }
+       if (rtlusb->in_ep_nums < rtlpriv->cfg->usb_interface_cfg->in_ep_num)
+               return -EINVAL;
+
+       /* usb endpoint mapping */
+       err = rtlpriv->cfg->usb_interface_cfg->usb_endpoint_mapping(hw);
+       rtlusb->usb_mq_to_hwq =  rtlpriv->cfg->usb_interface_cfg->usb_mq_to_hwq;
+       _rtl_usb_init_tx(hw);
+       _rtl_usb_init_rx(hw);
+       return err;
+}
+
+static int _rtl_usb_init_sw(struct ieee80211_hw *hw)
+{
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       rtlhal->hw = hw;
+       ppsc->inactiveps = false;
+       ppsc->leisure_ps = false;
+       ppsc->fwctrl_lps = false;
+       ppsc->reg_fwctrl_lps = 3;
+       ppsc->reg_max_lps_awakeintvl = 5;
+       ppsc->fwctrl_psmode = FW_PS_DTIM_MODE;
+
+        /* IBSS */
+       mac->beacon_interval = 100;
+
+        /* AMPDU */
+       mac->min_space_cfg = 0;
+       mac->max_mss_density = 0;
+
+       /* set sane AMPDU defaults */
+       mac->current_ampdu_density = 7;
+       mac->current_ampdu_factor = 3;
+
+       /* QOS */
+       rtlusb->acm_method = eAcmWay2_SW;
+
+       /* IRQ */
+       /* HIMR - turn all on */
+       rtlusb->irq_mask[0] = 0xFFFFFFFF;
+       /* HIMR_EX - turn all on */
+       rtlusb->irq_mask[1] = 0xFFFFFFFF;
+       rtlusb->disableHWSM =  true;
+       return 0;
+}
+
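+/* Headroom reserved in every RX skb for mac80211's radiotap header. */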
+#define __RADIO_TAP_SIZE_RSV   32
+
+static void _rtl_rx_completed(struct urb *urb);
+
+static struct sk_buff *_rtl_prep_rx_urb(struct ieee80211_hw *hw,
+                                       struct rtl_usb *rtlusb,
+                                       struct urb *urb,
+                                       gfp_t gfp_mask)
+{
+       struct sk_buff *skb;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       skb = __dev_alloc_skb((rtlusb->rx_max_size + __RADIO_TAP_SIZE_RSV),
+                              gfp_mask);
+       if (!skb) {
+               RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+                        ("Failed to __dev_alloc_skb!!\n"));
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /* reserve some space for mac80211's radiotap */
+       skb_reserve(skb, __RADIO_TAP_SIZE_RSV);
+       usb_fill_bulk_urb(urb, rtlusb->udev,
+                         usb_rcvbulkpipe(rtlusb->udev, rtlusb->in_ep),
+                         skb->data, min(skb_tailroom(skb),
+                         (int)rtlusb->rx_max_size),
+                         _rtl_rx_completed, skb);
+
+       _rtl_install_trx_info(rtlusb, skb, rtlusb->in_ep);
+       return skb;
+}
+
+#undef __RADIO_TAP_SIZE_RSV
+
+static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
+                                   struct sk_buff *skb)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 *rxdesc = skb->data;
+       struct ieee80211_hdr *hdr;
+       bool unicast = false;
+       __le16 fc;
+       struct ieee80211_rx_status rx_status = {0};
+       struct rtl_stats stats = {
+               .signal = 0,
+               .noise = -98,
+               .rate = 0,
+       };
+
+       skb_pull(skb, RTL_RX_DESC_SIZE);
+       rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
+       skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
+       hdr = (struct ieee80211_hdr *)(skb->data);
+       fc = hdr->frame_control;
+       if (!stats.crc) {
+               memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+
+               if (is_broadcast_ether_addr(hdr->addr1)) {
+                       /*TODO*/;
+               } else if (is_multicast_ether_addr(hdr->addr1)) {
+                       /*TODO*/
+               } else {
+                       unicast = true;
+                       rtlpriv->stats.rxbytesunicast +=  skb->len;
+               }
+
+               rtl_is_special_data(hw, skb, false);
+
+               if (ieee80211_is_data(fc)) {
+                       rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
+
+                       if (unicast)
+                               rtlpriv->link_info.num_rx_inperiod++;
+               }
+       }
+}
+
+static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
+                                     struct sk_buff *skb)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 *rxdesc = skb->data;
+       struct ieee80211_hdr *hdr;
+       bool unicast = false;
+       __le16 fc;
+       struct ieee80211_rx_status rx_status = {0};
+       struct rtl_stats stats = {
+               .signal = 0,
+               .noise = -98,
+               .rate = 0,
+       };
+
+       skb_pull(skb, RTL_RX_DESC_SIZE);
+       rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
+       skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
+       hdr = (struct ieee80211_hdr *)(skb->data);
+       fc = hdr->frame_control;
+       if (!stats.crc) {
+               memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+
+               if (is_broadcast_ether_addr(hdr->addr1)) {
+                       /*TODO*/;
+               } else if (is_multicast_ether_addr(hdr->addr1)) {
+                       /*TODO*/
+               } else {
+                       unicast = true;
+                       rtlpriv->stats.rxbytesunicast +=  skb->len;
+               }
+
+               rtl_is_special_data(hw, skb, false);
+
+               if (ieee80211_is_data(fc)) {
+                       rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
+
+                       if (unicast)
+                               rtlpriv->link_info.num_rx_inperiod++;
+               }
+               if (likely(rtl_action_proc(hw, skb, false))) {
+                       struct sk_buff *uskb = NULL;
+                       u8 *pdata;
+
+                       /* Hand mac80211 a private copy of the frame; the
+                        * original skb is always released below, even if the
+                        * copy cannot be allocated.
+                        */
+                       uskb = dev_alloc_skb(skb->len + 128);
+                       if (likely(uskb)) {
+                               memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
+                                      sizeof(rx_status));
+                               pdata = (u8 *)skb_put(uskb, skb->len);
+                               memcpy(pdata, skb->data, skb->len);
+                               ieee80211_rx_irqsafe(hw, uskb);
+                       }
+                       dev_kfree_skb_any(skb);
+               } else {
+                       dev_kfree_skb_any(skb);
+               }
+       } else {
+               /* CRC/ICV error: no one else takes ownership of the skb,
+                * so free it here to avoid a leak.
+                */
+               dev_kfree_skb_any(skb);
+       }
+}
+
+static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       struct sk_buff *_skb;
+       struct sk_buff_head rx_queue;
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       skb_queue_head_init(&rx_queue);
+       if (rtlusb->usb_rx_segregate_hdl)
+               rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue);
+       WARN_ON(skb_queue_empty(&rx_queue));
+       while (!skb_queue_empty(&rx_queue)) {
+               _skb = skb_dequeue(&rx_queue);
+               _rtl_usb_rx_process_agg(hw, _skb);
+               ieee80211_rx_irqsafe(hw, _skb);
+       }
+}
+
+static void _rtl_rx_completed(struct urb *_urb)
+{
+       struct sk_buff *skb = (struct sk_buff *)_urb->context;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0];
+       struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       int err = 0;
+
+       if (unlikely(IS_USB_STOP(rtlusb)))
+               goto free;
+
+       if (likely(0 == _urb->status)) {
+               /* TODO: evaluate whether moving this processing to a work
+                * queue would reduce CPU load.  The completed skb is consumed
+                * by the RX processing below; a fresh one is allocated for
+                * the resubmitted URB.
+                */
+               skb_put(skb, _urb->actual_length);
+
+               if (likely(!rtlusb->usb_rx_segregate_hdl)) {
+                       struct sk_buff *_skb;
+                       _rtl_usb_rx_process_noagg(hw, skb);
+                       _skb = _rtl_prep_rx_urb(hw, rtlusb, _urb, GFP_ATOMIC);
+                       if (IS_ERR(_skb)) {
+                               err = PTR_ERR(_skb);
+                               RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+                                       ("Can't allocate skb for bulk IN!\n"));
+                               return;
+                       }
+                       skb = _skb;
+               } else{
+                       /* TO DO */
+                       _rtl_rx_pre_process(hw, skb);
+                       printk(KERN_ERR "rtlwifi: rx agg not supported\n");
+               }
+               goto resubmit;
+       }
+
+       switch (_urb->status) {
+       /* disconnect */
+       case -ENOENT:
+       case -ECONNRESET:
+       case -ENODEV:
+       case -ESHUTDOWN:
+               goto free;
+       default:
+               break;
+       }
+
+resubmit:
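+       /* Reset the skb and hand the URB back for the next bulk-in transfer. */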
+       skb_reset_tail_pointer(skb);
+       skb_trim(skb, 0);
+
+       usb_anchor_urb(_urb, &rtlusb->rx_submitted);
+       err = usb_submit_urb(_urb, GFP_ATOMIC);
+       if (unlikely(err)) {
+               usb_unanchor_urb(_urb);
+               goto free;
+       }
+       return;
+
+free:
+       dev_kfree_skb_irq(skb);
+}
+
+static int _rtl_usb_receive(struct ieee80211_hw *hw)
+{
+       struct urb *urb;
+       struct sk_buff *skb;
+       int err;
+       int i;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       WARN_ON(0 == rtlusb->rx_urb_num);
+       /* 1600 == 1514 + max WLAN header + rtk info */
+       WARN_ON(rtlusb->rx_max_size < 1600);
+
+       for (i = 0; i < rtlusb->rx_urb_num; i++) {
+               err = -ENOMEM;
+               urb = usb_alloc_urb(0, GFP_KERNEL);
+               if (!urb) {
+                       RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+                                ("Failed to alloc URB!!\n"));
+                       goto err_out;
+               }
+
+               skb = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
+               if (IS_ERR(skb)) {
+                       RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+                                ("Failed to prep_rx_urb!!\n"));
+                       err = PTR_ERR(skb);
+                       usb_free_urb(urb);
+                       goto err_out;
+               }
+
+               usb_anchor_urb(urb, &rtlusb->rx_submitted);
+               err = usb_submit_urb(urb, GFP_KERNEL);
+               if (err) {
+                       usb_unanchor_urb(urb);
+                       usb_free_urb(urb);
+                       kfree_skb(skb);
+                       goto err_out;
+               }
+               /* Drop our reference; the anchor and the USB core hold theirs. */
+               usb_free_urb(urb);
+       }
+       return 0;
+
+err_out:
+       usb_kill_anchored_urbs(&rtlusb->rx_submitted);
+       return err;
+}
+
+static int rtl_usb_start(struct ieee80211_hw *hw)
+{
+       int err;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       err = rtlpriv->cfg->ops->hw_init(hw);
+       rtl_init_rx_config(hw);
+
+       /* Enable software */
+       SET_USB_START(rtlusb);
+       /* should after adapter start and interrupt enable. */
+       set_hal_start(rtlhal);
+
+       /* Start bulk IN */
+       _rtl_usb_receive(hw);
+
+       return err;
+}
+
+/*=======================  tx =========================================*/
+static void rtl_usb_cleanup(struct ieee80211_hw *hw)
+{
+       u32 i;
+       struct sk_buff *_skb;
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+       struct ieee80211_tx_info *txinfo;
+
+       SET_USB_STOP(rtlusb);
+
+       /* clean up rx stuff. */
+       usb_kill_anchored_urbs(&rtlusb->rx_submitted);
+
+       /* clean up tx stuff */
+       for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) {
+               while ((_skb = skb_dequeue(&rtlusb->tx_skb_queue[i]))) {
+                       rtlusb->usb_tx_cleanup(hw, _skb);
+                       txinfo = IEEE80211_SKB_CB(_skb);
+                       ieee80211_tx_info_clear_status(txinfo);
+                       txinfo->flags |= IEEE80211_TX_STAT_ACK;
+                       ieee80211_tx_status_irqsafe(hw, _skb);
+               }
+               usb_kill_anchored_urbs(&rtlusb->tx_pending[i]);
+       }
+       usb_kill_anchored_urbs(&rtlusb->tx_submitted);
+}
+
+/*
+ * More members may be added to struct rtl_usb later; keep all of the
+ * USB-specific teardown in this helper.
+ */
+static void rtl_usb_deinit(struct ieee80211_hw *hw)
+{
+       rtl_usb_cleanup(hw);
+}
+
+static void rtl_usb_stop(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       /* should after adapter start and interrupt enable. */
+       set_hal_stop(rtlhal);
+       /* Enable software */
+       SET_USB_STOP(rtlusb);
+       rtl_usb_deinit(hw);
+       rtlpriv->cfg->ops->hw_disable(hw);
+}
+
+static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb)
+{
+       int err;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       usb_anchor_urb(_urb, &rtlusb->tx_submitted);
+       err = usb_submit_urb(_urb, GFP_ATOMIC);
+       if (err < 0) {
+               struct sk_buff *skb;
+
+               RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+                        ("Failed to submit urb.\n"));
+               usb_unanchor_urb(_urb);
+               skb = (struct sk_buff *)_urb->context;
+               kfree_skb(skb);
+       }
+       usb_free_urb(_urb);
+}
+
+static int _usb_tx_post(struct ieee80211_hw *hw, struct urb *urb,
+                       struct sk_buff *skb)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+       struct ieee80211_tx_info *txinfo;
+
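+       /* Let the HW-specific hook post-process the URB, then strip the
+        * driver's TX header before reporting status back to mac80211.
+        */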
+       rtlusb->usb_tx_post_hdl(hw, urb, skb);
+       skb_pull(skb, RTL_TX_HEADER_SIZE);
+       txinfo = IEEE80211_SKB_CB(skb);
+       ieee80211_tx_info_clear_status(txinfo);
+       txinfo->flags |= IEEE80211_TX_STAT_ACK;
+
+       if (urb->status) {
+               RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+                        ("Urb has error status 0x%X\n", urb->status));
+               goto out;
+       }
+       /*  TODO:       statistics */
+out:
+       ieee80211_tx_status_irqsafe(hw, skb);
+       return urb->status;
+}
+
+static void _rtl_tx_complete(struct urb *urb)
+{
+       struct sk_buff *skb = (struct sk_buff *)urb->context;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0];
+       struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
+       int err;
+
+       if (unlikely(IS_USB_STOP(rtlusb)))
+               return;
+       err = _usb_tx_post(hw, urb, skb);
+       if (err) {
+               /* Ignore the error and keep issuing other URBs */
+               return;
+       }
+}
+
+static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw,
+                               struct sk_buff *skb, u32 ep_num)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+       struct urb *_urb;
+
+       WARN_ON(NULL == skb);
+       _urb = usb_alloc_urb(0, GFP_ATOMIC);
+       if (!_urb) {
+               RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+                        ("Can't allocate URB for bulk out!\n"));
+               kfree_skb(skb);
+               return NULL;
+       }
+       _rtl_install_trx_info(rtlusb, skb, ep_num);
+       usb_fill_bulk_urb(_urb, rtlusb->udev, usb_sndbulkpipe(rtlusb->udev,
+                         ep_num), skb->data, skb->len, _rtl_tx_complete, skb);
+       _urb->transfer_flags |= URB_ZERO_PACKET;
+       return _urb;
+}
+
+static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
+                      enum rtl_txq qnum)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+       u32 ep_num;
+       struct urb *_urb = NULL;
+       struct sk_buff *_skb = NULL;
+       struct sk_buff_head *skb_list;
+       struct usb_anchor *urb_list;
+
+       WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl);
+       if (unlikely(IS_USB_STOP(rtlusb))) {
+               RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+                        ("USB device is stopping...\n"));
+               kfree_skb(skb);
+               return;
+       }
+       ep_num = rtlusb->ep_map.ep_mapping[qnum];
+       skb_list = &rtlusb->tx_skb_queue[ep_num];
+       _skb = skb;
+       _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num);
+       if (unlikely(!_urb)) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("Can't allocate urb. Drop skb!\n"));
+               return;
+       }
+       urb_list = &rtlusb->tx_pending[ep_num];
+       _rtl_submit_tx_urb(hw, _urb);
+}
+
+static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
+                           u16 hw_queue)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct rtl_tx_desc *pdesc = NULL;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+       __le16 fc = hdr->frame_control;
+       u8 *pda_addr = hdr->addr1;
+       /* ssn */
+       u8 *qc = NULL;
+       u8 tid = 0;
+       u16 seq_number = 0;
+
+       if (ieee80211_is_mgmt(fc))
+               rtl_tx_mgmt_proc(hw, skb);
+       rtl_action_proc(hw, skb, true);
+       if (is_multicast_ether_addr(pda_addr))
+               rtlpriv->stats.txbytesmulticast += skb->len;
+       else if (is_broadcast_ether_addr(pda_addr))
+               rtlpriv->stats.txbytesbroadcast += skb->len;
+       else
+               rtlpriv->stats.txbytesunicast += skb->len;
+       if (ieee80211_is_data_qos(fc)) {
+               qc = ieee80211_get_qos_ctl(hdr);
+               tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+               seq_number = (le16_to_cpu(hdr->seq_ctrl) &
+                            IEEE80211_SCTL_SEQ) >> 4;
+               seq_number += 1;
+               seq_number <<= 4;
+       }
+       rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, skb,
+                                       hw_queue);
+       if (!ieee80211_has_morefrags(hdr->frame_control)) {
+               if (qc)
+                       mac->tids[tid].seq_number = seq_number;
+       }
+       if (ieee80211_is_data(fc))
+               rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
+}
+
+static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+       __le16 fc = hdr->frame_control;
+       u16 hw_queue;
+
+       if (unlikely(is_hal_stop(rtlhal)))
+               goto err_free;
+       hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb));
+       _rtl_usb_tx_preprocess(hw, skb, hw_queue);
+       _rtl_usb_transmit(hw, skb, hw_queue);
+       return NETDEV_TX_OK;
+
+err_free:
+       dev_kfree_skb_any(skb);
+       return NETDEV_TX_OK;
+}
+
+static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw,
+                                       struct sk_buff *skb)
+{
+       return false;
+}
+
+static struct rtl_intf_ops rtl_usb_ops = {
+       .adapter_start = rtl_usb_start,
+       .adapter_stop = rtl_usb_stop,
+       .adapter_tx = rtl_usb_tx,
+       .waitq_insert = rtl_usb_tx_chk_waitq_insert,
+};
+
+int __devinit rtl_usb_probe(struct usb_interface *intf,
+                       const struct usb_device_id *id)
+{
+       int err;
+       struct ieee80211_hw *hw = NULL;
+       struct rtl_priv *rtlpriv = NULL;
+       struct usb_device       *udev;
+       struct rtl_usb_priv *usb_priv;
+
+       hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) +
+                               sizeof(struct rtl_usb_priv), &rtl_ops);
+       if (!hw) {
+               RT_ASSERT(false, ("%s : ieee80211 alloc failed\n", __func__));
+               return -ENOMEM;
+       }
+       rtlpriv = hw->priv;
+       SET_IEEE80211_DEV(hw, &intf->dev);
+       udev = interface_to_usbdev(intf);
+       usb_get_dev(udev);
+       usb_priv = rtl_usbpriv(hw);
+       memset(usb_priv, 0, sizeof(*usb_priv));
+       usb_priv->dev.intf = intf;
+       usb_priv->dev.udev = udev;
+       usb_set_intfdata(intf, hw);
+       /* init cfg & intf_ops */
+       rtlpriv->rtlhal.interface = INTF_USB;
+       rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_info);
+       rtlpriv->intf_ops = &rtl_usb_ops;
+       rtl_dbgp_flag_init(hw);
+       /* Init IO handler */
+       _rtl_usb_io_handler_init(&udev->dev, hw);
+       rtlpriv->cfg->ops->read_chip_version(hw);
+       /*like read eeprom and so on */
+       rtlpriv->cfg->ops->read_eeprom_info(hw);
+       if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("Can't init_sw_vars.\n"));
+               goto error_out;
+       }
+       rtlpriv->cfg->ops->init_sw_leds(hw);
+       err = _rtl_usb_init(hw);
+       if (err)
+               goto error_out;
+       err = _rtl_usb_init_sw(hw);
+       if (err)
+               goto error_out;
+       /* Init mac80211 sw */
+       err = rtl_init_core(hw);
+       if (err) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("Can't allocate sw for mac80211.\n"));
+               goto error_out;
+       }
+
+       /*init rfkill */
+       /* rtl_init_rfkill(hw); */
+
+       err = ieee80211_register_hw(hw);
+       if (err) {
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+                        ("Can't register mac80211 hw.\n"));
+               goto error_out;
+       } else {
+               rtlpriv->mac80211.mac80211_registered = 1;
+       }
+       set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
+       return 0;
+error_out:
+       rtl_deinit_core(hw);
+       _rtl_usb_io_handler_release(hw);
+       ieee80211_free_hw(hw);
+       usb_put_dev(udev);
+       return -ENODEV;
+}
+EXPORT_SYMBOL(rtl_usb_probe);
+
+void rtl_usb_disconnect(struct usb_interface *intf)
+{
+       struct ieee80211_hw *hw = usb_get_intfdata(intf);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
+       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+       if (unlikely(!rtlpriv))
+               return;
+       /*ieee80211_unregister_hw will call ops_stop */
+       if (rtlmac->mac80211_registered == 1) {
+               ieee80211_unregister_hw(hw);
+               rtlmac->mac80211_registered = 0;
+       } else {
+               rtl_deinit_deferred_work(hw);
+               rtlpriv->intf_ops->adapter_stop(hw);
+       }
+       /*deinit rfkill */
+       /* rtl_deinit_rfkill(hw); */
+       rtl_usb_deinit(hw);
+       rtl_deinit_core(hw);
+       rtlpriv->cfg->ops->deinit_sw_leds(hw);
+       rtlpriv->cfg->ops->deinit_sw_vars(hw);
+       _rtl_usb_io_handler_release(hw);
+       usb_put_dev(rtlusb->udev);
+       usb_set_intfdata(intf, NULL);
+       ieee80211_free_hw(hw);
+}
+EXPORT_SYMBOL(rtl_usb_disconnect);
+
+int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message)
+{
+       return 0;
+}
+EXPORT_SYMBOL(rtl_usb_suspend);
+
+int rtl_usb_resume(struct usb_interface *pusb_intf)
+{
+       return 0;
+}
+EXPORT_SYMBOL(rtl_usb_resume);
diff --git a/drivers/net/wireless/rtlwifi/usb.h b/drivers/net/wireless/rtlwifi/usb.h
new file mode 100644 (file)
index 0000000..abadfe9
--- /dev/null
@@ -0,0 +1,164 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2011  Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_USB_H__
+#define __RTL_USB_H__
+
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+
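+/*
+ * Match on VID/PID and stash the per-chip rtl_hal_cfg in driver_info so
+ * the probe routine can pick it up from the usb_device_id entry.
+ */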
+#define RTL_USB_DEVICE(vend, prod, cfg) \
+       .match_flags = USB_DEVICE_ID_MATCH_DEVICE, \
+       .idVendor = (vend), \
+       .idProduct = (prod), \
+       .driver_info = (kernel_ulong_t)&(cfg)
+
+#define USB_HIGH_SPEED_BULK_SIZE       512
+#define USB_FULL_SPEED_BULK_SIZE       64
+
+
+#define RTL_USB_MAX_TXQ_NUM            4               /* max tx queue */
+#define RTL_USB_MAX_EP_NUM             6               /* max ep number */
+#define RTL_USB_MAX_TX_URBS_NUM                8
+
+enum rtl_txq {
+       /* These definitions shall be consistent with value
+        * returned by skb_get_queue_mapping
+        *------------------------------------*/
+       RTL_TXQ_BK,
+       RTL_TXQ_BE,
+       RTL_TXQ_VI,
+       RTL_TXQ_VO,
+       /*------------------------------------*/
+       RTL_TXQ_BCN,
+       RTL_TXQ_MGT,
+       RTL_TXQ_HI,
+
+       /* Must be last */
+       __RTL_TXQ_NUM,
+};
+
+struct rtl_ep_map {
+       u32 ep_mapping[__RTL_TXQ_NUM];
+};
+
+struct _trx_info {
+       struct rtl_usb *rtlusb;
+       u32 ep_num;
+};
+
+static inline void _rtl_install_trx_info(struct rtl_usb *rtlusb,
+                                        struct sk_buff *skb,
+                                        u32 ep_num)
+{
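+       /* Remember the owning rtl_usb and endpoint number in the skb's
+        * driver data so the URB completion handlers can recover them.
+        */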
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       info->rate_driver_data[0] = rtlusb;
+       info->rate_driver_data[1] = (void *)(__kernel_size_t)ep_num;
+}
+
+
+/*  Add suspend/resume later */
+enum rtl_usb_state {
+       USB_STATE_STOP  = 0,
+       USB_STATE_START = 1,
+};
+
+#define IS_USB_STOP(rtlusb_ptr) (USB_STATE_STOP == (rtlusb_ptr)->state)
+#define IS_USB_START(rtlusb_ptr) (USB_STATE_START == (rtlusb_ptr)->state)
+#define SET_USB_STOP(rtlusb_ptr) \
+       do {                                                    \
+               (rtlusb_ptr)->state = USB_STATE_STOP;           \
+       } while (0)
+
+#define SET_USB_START(rtlusb_ptr)                              \
+       do { \
+               (rtlusb_ptr)->state = USB_STATE_START;          \
+       } while (0)
+
+struct rtl_usb {
+       struct usb_device *udev;
+       struct usb_interface *intf;
+       enum rtl_usb_state state;
+
+       /* Bcn control register setting */
+       u32 reg_bcn_ctrl_val;
+       /* for 88/92cu card disable */
+       u8      disableHWSM;
+       /*QOS & EDCA */
+       enum acm_method acm_method;
+       /* irq  . HIMR,HIMR_EX */
+       u32 irq_mask[2];
+       bool irq_enabled;
+
+       u16 (*usb_mq_to_hwq)(__le16 fc, u16 mac80211_queue_index);
+
+       /* Tx */
+       u8 out_ep_nums;
+       u8 out_queue_sel;
+       struct rtl_ep_map ep_map;
+
+       u32 max_bulk_out_size;
+       u32 tx_submitted_urbs;
+       struct sk_buff_head tx_skb_queue[RTL_USB_MAX_EP_NUM];
+
+       struct usb_anchor tx_pending[RTL_USB_MAX_EP_NUM];
+       struct usb_anchor tx_submitted;
+
+       struct sk_buff *(*usb_tx_aggregate_hdl)(struct ieee80211_hw *,
+                                               struct sk_buff_head *);
+       int (*usb_tx_post_hdl)(struct ieee80211_hw *,
+                              struct urb *, struct sk_buff *);
+       void (*usb_tx_cleanup)(struct ieee80211_hw *, struct sk_buff *);
+
+       /* Rx */
+       u8 in_ep_nums;
+       u32 in_ep;              /* Bulk IN endpoint number */
+       u32 rx_max_size;        /* Bulk IN max buffer size */
+       u32 rx_urb_num;         /* How many Bulk INs are submitted to host. */
+       struct usb_anchor       rx_submitted;
+       void (*usb_rx_segregate_hdl)(struct ieee80211_hw *, struct sk_buff *,
+                                    struct sk_buff_head *);
+       void (*usb_rx_hdl)(struct ieee80211_hw *, struct sk_buff *);
+};
+
+struct rtl_usb_priv {
+       struct rtl_usb dev;
+       struct rtl_led_ctl ledctl;
+};
+
+#define rtl_usbpriv(hw)         (((struct rtl_usb_priv *)(rtl_priv(hw))->priv))
+#define rtl_usbdev(usbpriv)    (&((usbpriv)->dev))
+
+
+
+int __devinit rtl_usb_probe(struct usb_interface *intf,
+                           const struct usb_device_id *id);
+void rtl_usb_disconnect(struct usb_interface *intf);
+int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message);
+int rtl_usb_resume(struct usb_interface *pusb_intf);
+
+#endif
index d44d79613d2da8183389a45748299d35b427a8af..01226f8e70f948b32d9d820b61cd033d25dd9ef7 100644 (file)
@@ -34,6 +34,8 @@
 #include <linux/firmware.h>
 #include <linux/version.h>
 #include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/usb.h>
 #include <net/mac80211.h>
 #include "debug.h"
 
 #define MAC80211_3ADDR_LEN                     24
 #define MAC80211_4ADDR_LEN                     30
 
+#define CHANNEL_MAX_NUMBER     (14 + 24 + 21)  /* 14 on 2.4G + 45 on 5G */
+#define CHANNEL_GROUP_MAX      (3 + 9) /* 2.4G: ch1~3/4~9/10~14 + 9 5G groups */
+#define MAX_PG_GROUP                   13
+#define        CHANNEL_GROUP_MAX_2G            3
+#define        CHANNEL_GROUP_IDX_5GL           3
+#define        CHANNEL_GROUP_IDX_5GM           6
+#define        CHANNEL_GROUP_IDX_5GH           9
+#define        CHANNEL_GROUP_MAX_5G            9
+#define CHANNEL_MAX_NUMBER_2G          14
+#define AVG_THERMAL_NUM                        8
+
+/* for early mode */
+#define EM_HDR_LEN                     8
 enum intf_type {
        INTF_PCI = 0,
        INTF_USB = 1,
@@ -113,11 +128,38 @@ enum hardware_type {
        HARDWARE_TYPE_RTL8192CU,
        HARDWARE_TYPE_RTL8192DE,
        HARDWARE_TYPE_RTL8192DU,
+       HARDWARE_TYPE_RTL8723E,
+       HARDWARE_TYPE_RTL8723U,
 
-       /*keep it last*/
+       /* keep it last */
        HARDWARE_TYPE_NUM
 };
 
+#define IS_HARDWARE_TYPE_8192SU(rtlhal)                        \
+       (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SU)
+#define IS_HARDWARE_TYPE_8192SE(rtlhal)                        \
+       (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
+#define IS_HARDWARE_TYPE_8192CE(rtlhal)                        \
+       (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE)
+#define IS_HARDWARE_TYPE_8192CU(rtlhal)                        \
+       (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CU)
+#define IS_HARDWARE_TYPE_8192DE(rtlhal)                        \
+       (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE)
+#define IS_HARDWARE_TYPE_8192DU(rtlhal)                        \
+       (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DU)
+#define IS_HARDWARE_TYPE_8723E(rtlhal)                 \
+       (rtlhal->hw_type == HARDWARE_TYPE_RTL8723E)
+#define IS_HARDWARE_TYPE_8723U(rtlhal)                 \
+       (rtlhal->hw_type == HARDWARE_TYPE_RTL8723U)
+#define        IS_HARDWARE_TYPE_8192S(rtlhal)                  \
+(IS_HARDWARE_TYPE_8192SE(rtlhal) || IS_HARDWARE_TYPE_8192SU(rtlhal))
+#define        IS_HARDWARE_TYPE_8192C(rtlhal)                  \
+(IS_HARDWARE_TYPE_8192CE(rtlhal) || IS_HARDWARE_TYPE_8192CU(rtlhal))
+#define        IS_HARDWARE_TYPE_8192D(rtlhal)                  \
+(IS_HARDWARE_TYPE_8192DE(rtlhal) || IS_HARDWARE_TYPE_8192DU(rtlhal))
+#define        IS_HARDWARE_TYPE_8723(rtlhal)                   \
+(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal))
+
 enum scan_operation_backup_opt {
        SCAN_OPT_BACKUP = 0,
        SCAN_OPT_RESTORE,
@@ -315,6 +357,7 @@ enum rf_type {
        RF_1T1R = 0,
        RF_1T2R = 1,
        RF_2T2R = 2,
+       RF_2T2R_GREEN = 3,
 };
 
 enum ht_channel_width {
@@ -359,6 +402,8 @@ enum rtl_var_map {
        EFUSE_LOADER_CLK_EN,
        EFUSE_ANA8M,
        EFUSE_HWSET_MAX_SIZE,
+       EFUSE_MAX_SECTION_MAP,
+       EFUSE_REAL_CONTENT_SIZE,
 
        /*CAM map */
        RWCAM,
@@ -397,6 +442,7 @@ enum rtl_var_map {
        RTL_IMR_ATIMEND,        /*For 92C,ATIM Window End Interrupt */
        RTL_IMR_BDOK,           /*Beacon Queue DMA OK Interrup */
        RTL_IMR_HIGHDOK,        /*High Queue DMA OK Interrupt */
+       RTL_IMR_COMDOK,         /*Command Queue DMA OK Interrupt*/
        RTL_IMR_TBDOK,          /*Transmit Beacon OK interrup */
        RTL_IMR_MGNTDOK,        /*Management Queue DMA OK Interrupt */
        RTL_IMR_TBDER,          /*For 92C,Transmit Beacon Error Interrupt */
@@ -405,7 +451,8 @@ enum rtl_var_map {
        RTL_IMR_VIDOK,          /*AC_VI DMA OK Interrupt */
        RTL_IMR_VODOK,          /*AC_VO DMA Interrupt */
        RTL_IMR_ROK,            /*Receive DMA OK Interrupt */
-       RTL_IBSS_INT_MASKS,     /*(RTL_IMR_BcnInt|RTL_IMR_TBDOK|RTL_IMR_TBDER)*/
+       RTL_IBSS_INT_MASKS,     /*(RTL_IMR_BcnInt | RTL_IMR_TBDOK |
+                                * RTL_IMR_TBDER) */
 
        /*CCK Rates, TxHT = 0 */
        RTL_RC_CCK_RATE1M,
@@ -481,6 +528,19 @@ enum acm_method {
        eAcmWay2_SW = 2,
 };
 
+enum macphy_mode {
+       SINGLEMAC_SINGLEPHY = 0,
+       DUALMAC_DUALPHY,
+       DUALMAC_SINGLEPHY,
+};
+
+enum band_type {
+       BAND_ON_2_4G = 0,
+       BAND_ON_5G,
+       BAND_ON_BOTH,
+       BANDMAX
+};
+
 /*aci/aifsn Field.
 Ref: WMM spec 2.2.2: WME Parameter Element, p.12.*/
 union aci_aifsn {
@@ -505,6 +565,17 @@ enum wireless_mode {
        WIRELESS_MODE_N_5G = 0x20
 };
 
+#define IS_WIRELESS_MODE_A(wirelessmode)       \
+       (wirelessmode == WIRELESS_MODE_A)
+#define IS_WIRELESS_MODE_B(wirelessmode)       \
+       (wirelessmode == WIRELESS_MODE_B)
+#define IS_WIRELESS_MODE_G(wirelessmode)       \
+       (wirelessmode == WIRELESS_MODE_G)
+#define IS_WIRELESS_MODE_N_24G(wirelessmode)   \
+       (wirelessmode == WIRELESS_MODE_N_24G)
+#define IS_WIRELESS_MODE_N_5G(wirelessmode)    \
+       (wirelessmode == WIRELESS_MODE_N_5G)
+
 enum ratr_table_mode {
        RATR_INX_WIRELESS_NGB = 0,
        RATR_INX_WIRELESS_NG = 1,
@@ -574,11 +645,11 @@ struct rtl_probe_rsp {
 struct rtl_led {
        void *hw;
        enum rtl_led_pin ledpin;
-       bool b_ledon;
+       bool ledon;
 };
 
 struct rtl_led_ctl {
-       bool bled_opendrain;
+       bool led_opendrain;
        struct rtl_led sw_led0;
        struct rtl_led sw_led1;
 };
@@ -603,6 +674,8 @@ struct false_alarm_statistics {
        u32 cnt_rate_illegal;
        u32 cnt_crc8_fail;
        u32 cnt_mcs_fail;
+       u32 cnt_fast_fsync_fail;
+       u32 cnt_sb_search_fail;
        u32 cnt_ofdm_fail;
        u32 cnt_cck_fail;
        u32 cnt_all;
@@ -690,6 +763,32 @@ struct rtl_rfkill {
        bool rfkill_state;      /*0 is off, 1 is on */
 };
 
+#define IQK_MATRIX_REG_NUM     8
+#define IQK_MATRIX_SETTINGS_NUM        (1 + 24 + 21)
+struct iqk_matrix_regs {
+       bool b_iqk_done;
+       long value[1][IQK_MATRIX_REG_NUM];
+};
+
+struct phy_parameters {
+       u16 length;
+       u32 *pdata;
+};
+
+enum hw_param_tab_index {
+       PHY_REG_2T,
+       PHY_REG_1T,
+       PHY_REG_PG,
+       RADIOA_2T,
+       RADIOB_2T,
+       RADIOA_1T,
+       RADIOB_1T,
+       MAC_REG,
+       AGCTAB_2T,
+       AGCTAB_1T,
+       MAX_TAB
+};
+
 struct rtl_phy {
        struct bb_reg_def phyreg_def[4];        /*Radio A/B/C/D */
        struct init_gain initgain_backup;
@@ -705,8 +804,9 @@ struct rtl_phy {
        u8 current_channel;
        u8 h2c_box_num;
        u8 set_io_inprogress;
+       u8 lck_inprogress;
 
-       /*record for power tracking*/
+       /* record for power tracking */
        s32 reg_e94;
        s32 reg_e9c;
        s32 reg_ea4;
@@ -723,26 +823,32 @@ struct rtl_phy {
        u32 iqk_mac_backup[IQK_MAC_REG_NUM];
        u32 iqk_bb_backup[10];
 
-       bool b_rfpi_enable;
+       /* Dual mac */
+       bool need_iqk;
+       struct iqk_matrix_regs iqk_matrix_regsetting[IQK_MATRIX_SETTINGS_NUM];
+
+       bool rfpi_enable;
 
        u8 pwrgroup_cnt;
-       u8 bcck_high_power;
-       /* 3 groups of pwr diff by rates*/
-       u32 mcs_txpwrlevel_origoffset[4][16];
+       u8 cck_high_power;
+       /* MAX_PG_GROUP groups of pwr diff by rates */
+       u32 mcs_txpwrlevel_origoffset[MAX_PG_GROUP][16];
        u8 default_initialgain[4];
 
-       /*the current Tx power level*/
+       /* the current Tx power level */
        u8 cur_cck_txpwridx;
        u8 cur_ofdm24g_txpwridx;
 
        u32 rfreg_chnlval[2];
-       bool b_apk_done;
+       bool apk_done;
+       u32 reg_rf3c[2];        /* pathA / pathB  */
 
-       /*fsync*/
        u8 framesync;
        u32 framesync_c34;
 
        u8 num_total_rfpath;
+       struct phy_parameters hwparam_tables[MAX_TAB];
+       u16 rf_pathmap;
 };
 
 #define MAX_TID_COUNT                          9
@@ -768,6 +874,7 @@ struct rtl_tid_data {
 struct rtl_priv;
 struct rtl_io {
        struct device *dev;
+       struct mutex bb_mutex;
 
        /*PCI MEM map */
        unsigned long pci_mem_end;      /*shared mem end        */
@@ -779,11 +886,14 @@ struct rtl_io {
        void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val);
        void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val);
        void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val);
-
-        u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr);
-        u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr);
-        u32(*read32_sync) (struct rtl_priv *rtlpriv, u32 addr);
-
+       int (*writeN_async) (struct rtl_priv *rtlpriv, u32 addr, u16 len,
+                            u8 *pdata);
+
+       u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr);
+       u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr);
+       u32(*read32_sync) (struct rtl_priv *rtlpriv, u32 addr);
+       int (*readN_sync) (struct rtl_priv *rtlpriv, u32 addr, u16 len,
+                           u8 *pdata);
 };
 
 struct rtl_mac {
@@ -815,16 +925,24 @@ struct rtl_mac {
        bool act_scanning;
        u8 cnt_after_linked;
 
-        /*RDG*/ bool rdg_en;
+       /* early mode */
+       /* skb wait queue */
+       struct sk_buff_head skb_waitq[MAX_TID_COUNT];
+       u8 earlymode_threshold;
+
+       /*RDG*/
+       bool rdg_en;
 
-        /*AP*/ u8 bssid[6];
-       u8 mcs[16];     /*16 bytes mcs for HT rates.*/
-       u32 basic_rates; /*b/g rates*/
+       /*AP*/
+       u8 bssid[6];
+       u32 vendor;
+       u8 mcs[16];     /* 16 bytes mcs for HT rates. */
+       u32 basic_rates; /* b/g rates */
        u8 ht_enable;
        u8 sgi_40;
        u8 sgi_20;
        u8 bw_40;
-       u8 mode;                /*wireless mode*/
+       u8 mode;                /* wireless mode */
        u8 slot_time;
        u8 short_preamble;
        u8 use_cts_protect;
@@ -835,9 +953,11 @@ struct rtl_mac {
        u8 retry_long;
        u16 assoc_id;
 
-        /*IBSS*/ int beacon_interval;
+       /*IBSS*/
+       int beacon_interval;
 
-        /*AMPDU*/ u8 min_space_cfg;    /*For Min spacing configurations */
+       /*AMPDU*/
+       u8 min_space_cfg;       /*For Min spacing configurations */
        u8 max_mss_density;
        u8 current_ampdu_factor;
        u8 current_ampdu_density;
@@ -852,17 +972,54 @@ struct rtl_hal {
 
        enum intf_type interface;
        u16 hw_type;            /*92c or 92d or 92s and so on */
+       u8 ic_class;
        u8 oem_id;
-       u8 version;             /*version of chip */
+       u32 version;            /*version of chip */
        u8 state;               /*stop 0, start 1 */
 
        /*firmware */
+       u32 fwsize;
        u8 *pfirmware;
-       bool b_h2c_setinprogress;
+       u16 fw_version;
+       u16 fw_subversion;
+       bool h2c_setinprogress;
        u8 last_hmeboxnum;
-       bool bfw_ready;
+       bool fw_ready;
        /*Reserve page start offset except beacon in TxQ. */
        u8 fw_rsvdpage_startoffset;
+       u8 h2c_txcmd_seq;
+
+       /* FW Cmd IO related */
+       u16 fwcmd_iomap;
+       u32 fwcmd_ioparam;
+       bool set_fwcmd_inprogress;
+       u8 current_fwcmd_io;
+
+       /**/
+       bool driver_going2unload;
+
+       /*AMPDU init min space*/
+       u8 minspace_cfg;        /*For Min spacing configurations */
+
+       /* Dual mac */
+       enum macphy_mode macphymode;
+       enum band_type current_bandtype;        /* 0:2.4G, 1:5G */
+       enum band_type current_bandtypebackup;
+       enum band_type bandset;
+       /* dual MAC 0--Mac0 1--Mac1 */
+       u32 interfaceindex;
+       /* just for DualMac S3S4 */
+       u8 macphyctl_reg;
+       bool earlymode_enable;
+       /* Dual mac*/
+       bool during_mac0init_radiob;
+       bool during_mac1init_radioa;
+       bool reloadtxpowerindex;
+       /* True if IMR or IQK  have done
+       for 2.4G in scan progress */
+       bool load_imrandiqk_setting_for2g;
+
+       bool disable_amsdu_8k;
 };
 
 struct rtl_security {
@@ -887,48 +1044,61 @@ struct rtl_security {
 };
 
 struct rtl_dm {
-       /*PHY status for DM */
+       /*PHY status for Dynamic Management */
        long entry_min_undecoratedsmoothed_pwdb;
        long undecorated_smoothed_pwdb; /*out dm */
        long entry_max_undecoratedsmoothed_pwdb;
-       bool b_dm_initialgain_enable;
-       bool bdynamic_txpower_enable;
-       bool bcurrent_turbo_edca;
-       bool bis_any_nonbepkts; /*out dm */
-       bool bis_cur_rdlstate;
-       bool btxpower_trackingInit;
-       bool b_disable_framebursting;
-       bool b_cck_inch14;
-       bool btxpower_tracking;
-       bool b_useramask;
-       bool brfpath_rxenable[4];
-
+       bool dm_initialgain_enable;
+       bool dynamic_txpower_enable;
+       bool current_turbo_edca;
+       bool is_any_nonbepkts;  /*out dm */
+       bool is_cur_rdlstate;
+       bool txpower_trackingInit;
+       bool disable_framebursting;
+       bool cck_inch14;
+       bool txpower_tracking;
+       bool useramask;
+       bool rfpath_rxenable[4];
+       bool inform_fw_driverctrldm;
+       bool current_mrc_switch;
+       u8 txpowercount;
+
+       u8 thermalvalue_rxgain;
        u8 thermalvalue_iqk;
        u8 thermalvalue_lck;
        u8 thermalvalue;
        u8 last_dtp_lvl;
+       u8 thermalvalue_avg[AVG_THERMAL_NUM];
+       u8 thermalvalue_avg_index;
+       bool done_txpower;
        u8 dynamic_txhighpower_lvl;     /*Tx high power level */
-       u8 dm_flag;     /*Indicate if each dynamic mechanism's status. */
+       u8 dm_flag;             /*Indicate each dynamic mechanism's status. */
        u8 dm_type;
        u8 txpower_track_control;
-
+       bool interrupt_migration;
+       bool disable_tx_int;
        char ofdm_index[2];
        char cck_index;
+       u8 power_index_backup[6];
 };
 
-#define        EFUSE_MAX_LOGICAL_SIZE                   128
+#define        EFUSE_MAX_LOGICAL_SIZE                  256
 
 struct rtl_efuse {
-       bool bautoLoad_ok;
+       bool autoLoad_ok;
        bool bootfromefuse;
        u16 max_physical_size;
-       u8 contents[EFUSE_MAX_LOGICAL_SIZE];
 
        u8 efuse_map[2][EFUSE_MAX_LOGICAL_SIZE];
        u16 efuse_usedbytes;
        u8 efuse_usedpercentage;
+#ifdef EFUSE_REPG_WORKAROUND
+       bool efuse_re_pg_sec1flag;
+       u8 efuse_re_pg_data[8];
+#endif
 
        u8 autoload_failflag;
+       u8 autoload_status;
 
        short epromtype;
        u16 eeprom_vid;
@@ -938,69 +1108,90 @@ struct rtl_efuse {
        u8 eeprom_oemid;
        u16 eeprom_channelplan;
        u8 eeprom_version;
+       u8 board_type;
+       u8 external_pa;
 
        u8 dev_addr[6];
 
-       bool b_txpwr_fromeprom;
+       bool txpwr_fromeprom;
+       u8 eeprom_crystalcap;
        u8 eeprom_tssi[2];
-       u8 eeprom_pwrlimit_ht20[3];
-       u8 eeprom_pwrlimit_ht40[3];
-       u8 eeprom_chnlarea_txpwr_cck[2][3];
-       u8 eeprom_chnlarea_txpwr_ht40_1s[2][3];
-       u8 eeprom_chnlarea_txpwr_ht40_2sdiif[2][3];
-       u8 txpwrlevel_cck[2][14];
-       u8 txpwrlevel_ht40_1s[2][14];   /*For HT 40MHZ pwr */
-       u8 txpwrlevel_ht40_2s[2][14];   /*For HT 40MHZ pwr */
+       u8 eeprom_tssi_5g[3][2]; /* for 5GL/5GM/5GH band. */
+       u8 eeprom_pwrlimit_ht20[CHANNEL_GROUP_MAX];
+       u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX];
+       u8 eeprom_chnlarea_txpwr_cck[2][CHANNEL_GROUP_MAX_2G];
+       u8 eeprom_chnlarea_txpwr_ht40_1s[2][CHANNEL_GROUP_MAX];
+       u8 eeprom_chnlarea_txpwr_ht40_2sdiif[2][CHANNEL_GROUP_MAX];
+       u8 txpwrlevel_cck[2][CHANNEL_MAX_NUMBER_2G];
+       u8 txpwrlevel_ht40_1s[2][CHANNEL_MAX_NUMBER];   /*For HT 40MHZ pwr */
+       u8 txpwrlevel_ht40_2s[2][CHANNEL_MAX_NUMBER];   /*For HT 40MHZ pwr */
+
+       u8 internal_pa_5g[2];   /* pathA / pathB */
+       u8 eeprom_c9;
+       u8 eeprom_cc;
 
        /*For power group */
-       u8 pwrgroup_ht20[2][14];
-       u8 pwrgroup_ht40[2][14];
-
-       char txpwr_ht20diff[2][14];     /*HT 20<->40 Pwr diff */
-       u8 txpwr_legacyhtdiff[2][14];   /*For HT<->legacy pwr diff */
+       u8 eeprom_pwrgroup[2][3];
+       u8 pwrgroup_ht20[2][CHANNEL_MAX_NUMBER];
+       u8 pwrgroup_ht40[2][CHANNEL_MAX_NUMBER];
+
+       char txpwr_ht20diff[2][CHANNEL_MAX_NUMBER]; /*HT 20<->40 Pwr diff */
+       /*For HT<->legacy pwr diff*/
+       u8 txpwr_legacyhtdiff[2][CHANNEL_MAX_NUMBER];
+       u8 txpwr_safetyflag;                    /* Band edge enable flag */
+       u16 eeprom_txpowerdiff;
+       u8 legacy_httxpowerdiff;        /* Legacy to HT rate power diff */
+       u8 antenna_txpwdiff[3];
 
        u8 eeprom_regulatory;
        u8 eeprom_thermalmeter;
-       /*ThermalMeter, index 0 for RFIC0, and 1 for RFIC1 */
-       u8 thermalmeter[2];
+       u8 thermalmeter[2]; /*ThermalMeter, index 0 for RFIC0, 1 for RFIC1 */
+       u16 tssi_13dbm;
+       u8 crystalcap;          /* CrystalCap. */
+       u8 delta_iqk;
+       u8 delta_lck;
 
        u8 legacy_ht_txpowerdiff;       /*Legacy to HT rate power diff */
-       bool b_apk_thermalmeterignore;
+       bool apk_thermalmeterignore;
+
+       bool b1x1_recvcombine;
+       bool b1ss_support;
+
+       /*channel plan */
+       u8 channel_plan;
 };
 
 struct rtl_ps_ctl {
+       bool pwrdomain_protect;
        bool set_rfpowerstate_inprogress;
-       bool b_in_powersavemode;
+       bool in_powersavemode;
        bool rfchange_inprogress;
-       bool b_swrf_processing;
-       bool b_hwradiooff;
-
-       u32 last_sleep_jiffies;
-       u32 last_awake_jiffies;
-       u32 last_delaylps_stamp_jiffies;
+       bool swrf_processing;
+       bool hwradiooff;
 
        /*
         * just for PCIE ASPM
         * If it supports ASPM, Offset[560h] = 0x40,
         * otherwise Offset[560h] = 0x00.
         * */
-       bool b_support_aspm;
-       bool b_support_backdoor;
+       bool support_aspm;
+       bool support_backdoor;
 
        /*for LPS */
        enum rt_psmode dot11_psmode;    /*Power save mode configured. */
-       bool b_leisure_ps;
-       bool b_fwctrl_lps;
+       bool swctrl_lps;
+       bool leisure_ps;
+       bool fwctrl_lps;
        u8 fwctrl_psmode;
        /*For Fw control LPS mode */
-       u8 b_reg_fwctrl_lps;
+       u8 reg_fwctrl_lps;
        /*Record Fw PS mode status. */
-       bool b_fw_current_inpsmode;
+       bool fw_current_inpsmode;
        u8 reg_max_lps_awakeintvl;
        bool report_linked;
 
        /*for IPS */
-       bool b_inactiveps;
+       bool inactiveps;
 
        u32 rfoff_reason;
 
@@ -1011,8 +1202,26 @@ struct rtl_ps_ctl {
        /*just for PCIE ASPM */
        u8 const_amdpci_aspm;
 
+       bool pwrdown_mode;
+
        enum rf_pwrstate inactive_pwrstate;
        enum rf_pwrstate rfpwr_state;   /*cur power state */
+
+       /* for SW LPS*/
+       bool sw_ps_enabled;
+       bool state;
+       bool state_inap;
+       bool multi_buffered;
+       u16 nullfunc_seq;
+       unsigned int dtim_counter;
+       unsigned int sleep_ms;
+       unsigned long last_sleep_jiffies;
+       unsigned long last_awake_jiffies;
+       unsigned long last_delaylps_stamp_jiffies;
+       unsigned long last_dtim;
+       unsigned long last_beacon;
+       unsigned long last_action;
+       unsigned long last_slept;
 };
 
 struct rtl_stats {
@@ -1038,10 +1247,10 @@ struct rtl_stats {
        s32 recvsignalpower;
        s8 rxpower;             /*in dBm Translate from PWdB */
        u8 signalstrength;      /*in 0-100 index. */
-       u16 b_hwerror:1;
-       u16 b_crc:1;
-       u16 b_icv:1;
-       u16 b_shortpreamble:1;
+       u16 hwerror:1;
+       u16 crc:1;
+       u16 icv:1;
+       u16 shortpreamble:1;
        u16 antenna:1;
        u16 decrypted:1;
        u16 wakeup:1;
@@ -1050,15 +1259,16 @@ struct rtl_stats {
 
        u8 rx_drvinfo_size;
        u8 rx_bufshift;
-       bool b_isampdu;
+       bool isampdu;
+       bool isfirst_ampdu;
        bool rx_is40Mhzpacket;
        u32 rx_pwdb_all;
        u8 rx_mimo_signalstrength[4];   /*in 0~100 index */
        s8 rx_mimo_signalquality[2];
-       bool b_packet_matchbssid;
-       bool b_is_cck;
-       bool b_packet_toself;
-       bool b_packet_beacon;   /*for rssi */
+       bool packet_matchbssid;
+       bool is_cck;
+       bool packet_toself;
+       bool packet_beacon;     /*for rssi */
        char cck_adc_pwdb[4];   /*for rx path selection */
 };
 
@@ -1069,23 +1279,23 @@ struct rt_link_detect {
        u32 num_tx_inperiod;
        u32 num_rx_inperiod;
 
-       bool b_busytraffic;
-       bool b_higher_busytraffic;
-       bool b_higher_busyrxtraffic;
+       bool busytraffic;
+       bool higher_busytraffic;
+       bool higher_busyrxtraffic;
 };
 
 struct rtl_tcb_desc {
-       u8 b_packet_bw:1;
-       u8 b_multicast:1;
-       u8 b_broadcast:1;
-
-       u8 b_rts_stbc:1;
-       u8 b_rts_enable:1;
-       u8 b_cts_enable:1;
-       u8 b_rts_use_shortpreamble:1;
-       u8 b_rts_use_shortgi:1;
+       u8 packet_bw:1;
+       u8 multicast:1;
+       u8 broadcast:1;
+
+       u8 rts_stbc:1;
+       u8 rts_enable:1;
+       u8 cts_enable:1;
+       u8 rts_use_shortpreamble:1;
+       u8 rts_use_shortgi:1;
        u8 rts_sc:1;
-       u8 b_rts_bw:1;
+       u8 rts_bw:1;
        u8 rts_rate;
 
        u8 use_shortgi:1;
@@ -1096,20 +1306,34 @@ struct rtl_tcb_desc {
        u8 ratr_index;
        u8 mac_id;
        u8 hw_rate;
+
+       u8 last_inipkt:1;
+       u8 cmd_or_init:1;
+       u8 queue_index;
+
+       /* early mode */
+       u8 empkt_num;
+       /* The maximum number of early-mode packets (5) is set by HW */
+       u32 empkt_len[5];
 };
 
 struct rtl_hal_ops {
        int (*init_sw_vars) (struct ieee80211_hw *hw);
        void (*deinit_sw_vars) (struct ieee80211_hw *hw);
+       void (*read_chip_version)(struct ieee80211_hw *hw);
        void (*read_eeprom_info) (struct ieee80211_hw *hw);
        void (*interrupt_recognized) (struct ieee80211_hw *hw,
                                      u32 *p_inta, u32 *p_intb);
        int (*hw_init) (struct ieee80211_hw *hw);
        void (*hw_disable) (struct ieee80211_hw *hw);
+       void (*hw_suspend) (struct ieee80211_hw *hw);
+       void (*hw_resume) (struct ieee80211_hw *hw);
        void (*enable_interrupt) (struct ieee80211_hw *hw);
        void (*disable_interrupt) (struct ieee80211_hw *hw);
        int (*set_network_type) (struct ieee80211_hw *hw,
                                 enum nl80211_iftype type);
+       void (*set_chk_bssid)(struct ieee80211_hw *hw,
+                               bool check_bssid);
        void (*set_bw_mode) (struct ieee80211_hw *hw,
                             enum nl80211_channel_type ch_type);
         u8(*switch_channel) (struct ieee80211_hw *hw);
@@ -1126,23 +1350,26 @@ struct rtl_hal_ops {
                              struct ieee80211_hdr *hdr, u8 *pdesc_tx,
                              struct ieee80211_tx_info *info,
                              struct sk_buff *skb, unsigned int queue_index);
+       void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 * pDesc,
+                                 u32 buffer_len, bool bIsPsPoll);
        void (*fill_tx_cmddesc) (struct ieee80211_hw *hw, u8 *pdesc,
-                                bool b_firstseg, bool b_lastseg,
+                                bool firstseg, bool lastseg,
                                 struct sk_buff *skb);
-        bool(*query_rx_desc) (struct ieee80211_hw *hw,
+       bool (*cmd_send_packet)(struct ieee80211_hw *hw, struct sk_buff *skb);
+       bool (*query_rx_desc) (struct ieee80211_hw *hw,
                               struct rtl_stats *stats,
                               struct ieee80211_rx_status *rx_status,
                               u8 *pdesc, struct sk_buff *skb);
        void (*set_channel_access) (struct ieee80211_hw *hw);
-        bool(*radio_onoff_checking) (struct ieee80211_hw *hw, u8 *valid);
+       bool (*radio_onoff_checking) (struct ieee80211_hw *hw, u8 *valid);
        void (*dm_watchdog) (struct ieee80211_hw *hw);
        void (*scan_operation_backup) (struct ieee80211_hw *hw, u8 operation);
-        bool(*set_rf_power_state) (struct ieee80211_hw *hw,
+       bool (*set_rf_power_state) (struct ieee80211_hw *hw,
                                    enum rf_pwrstate rfpwr_state);
        void (*led_control) (struct ieee80211_hw *hw,
                             enum led_ctl_mode ledaction);
        void (*set_desc) (u8 *pdesc, bool istx, u8 desc_name, u8 *val);
-        u32(*get_desc) (u8 *pdesc, bool istx, u8 desc_name);
+       u32 (*get_desc) (u8 *pdesc, bool istx, u8 desc_name);
        void (*tx_polling) (struct ieee80211_hw *hw, unsigned int hw_queue);
        void (*enable_hw_sec) (struct ieee80211_hw *hw);
        void (*set_key) (struct ieee80211_hw *hw, u32 key_index,
@@ -1150,22 +1377,36 @@ struct rtl_hal_ops {
                         bool is_wepkey, bool clear_all);
        void (*init_sw_leds) (struct ieee80211_hw *hw);
        void (*deinit_sw_leds) (struct ieee80211_hw *hw);
-        u32(*get_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
+       u32 (*get_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
        void (*set_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
                           u32 data);
-        u32(*get_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
+       u32 (*get_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
                          u32 regaddr, u32 bitmask);
        void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
                           u32 regaddr, u32 bitmask, u32 data);
+       bool (*phy_rf6052_config) (struct ieee80211_hw *hw);
+       void (*phy_rf6052_set_cck_txpower) (struct ieee80211_hw *hw,
+                                           u8 *powerlevel);
+       void (*phy_rf6052_set_ofdm_txpower) (struct ieee80211_hw *hw,
+                                            u8 *ppowerlevel, u8 channel);
+       bool (*config_bb_with_headerfile) (struct ieee80211_hw *hw,
+                                          u8 configtype);
+       bool (*config_bb_with_pgheaderfile) (struct ieee80211_hw *hw,
+                                            u8 configtype);
+       void (*phy_lc_calibrate) (struct ieee80211_hw *hw, bool is2t);
+       void (*phy_set_bw_mode_callback) (struct ieee80211_hw *hw);
+       void (*dm_dynamic_txpower) (struct ieee80211_hw *hw);
 };
 
 struct rtl_intf_ops {
        /*com */
+       void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
        int (*adapter_start) (struct ieee80211_hw *hw);
        void (*adapter_stop) (struct ieee80211_hw *hw);
 
        int (*adapter_tx) (struct ieee80211_hw *hw, struct sk_buff *skb);
        int (*reset_trx_ring) (struct ieee80211_hw *hw);
+       bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb);
 
        /*pci */
        void (*disable_aspm) (struct ieee80211_hw *hw);
@@ -1179,11 +1420,36 @@ struct rtl_mod_params {
        int sw_crypto;
 };
 
+struct rtl_hal_usbint_cfg {
+       /* data - rx */
+       u32 in_ep_num;
+       u32 rx_urb_num;
+       u32 rx_max_size;
+
+       /* op - rx */
+       void (*usb_rx_hdl)(struct ieee80211_hw *, struct sk_buff *);
+       void (*usb_rx_segregate_hdl)(struct ieee80211_hw *, struct sk_buff *,
+                                    struct sk_buff_head *);
+
+       /* tx */
+       void (*usb_tx_cleanup)(struct ieee80211_hw *, struct sk_buff *);
+       int (*usb_tx_post_hdl)(struct ieee80211_hw *, struct urb *,
+                              struct sk_buff *);
+       struct sk_buff *(*usb_tx_aggregate_hdl)(struct ieee80211_hw *,
+                                               struct sk_buff_head *);
+
+       /* endpoint mapping */
+       int (*usb_endpoint_mapping)(struct ieee80211_hw *hw);
+       u16 (*usb_mq_to_hwq)(__le16 fc, u16 mac80211_queue_index);
+};
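For orientation only, a sketch of how a chip-specific driver might populate these USB hooks. The field names come from the struct above; the rtl8192cu_* callback names and the numeric values are illustrative placeholders, not symbols added by this patch.

static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
        /* rx: one bulk-in endpoint served by a small pool of RX URBs */
        .in_ep_num = 1,
        .rx_urb_num = 8,
        .rx_max_size = 15360,
        .usb_rx_hdl = rtl8192cu_rx_hdl,
        .usb_rx_segregate_hdl = NULL,   /* no RX aggregation */

        /* tx */
        .usb_tx_cleanup = rtl8192cu_tx_cleanup,
        .usb_tx_post_hdl = rtl8192cu_tx_post_hdl,
        .usb_tx_aggregate_hdl = rtl8192cu_tx_aggregate_hdl,

        /* endpoint mapping */
        .usb_endpoint_mapping = rtl8192cu_endpoint_mapping,
        .usb_mq_to_hwq = rtl8192cu_mq_to_hwq,
};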
+
 struct rtl_hal_cfg {
+       u8 bar_id;
        char *name;
        char *fw_name;
        struct rtl_hal_ops *ops;
        struct rtl_mod_params *mod_params;
+       struct rtl_hal_usbint_cfg *usb_interface_cfg;
 
        /*this map used for some registers or vars
           defined int HAL but used in MAIN */
@@ -1202,6 +1468,11 @@ struct rtl_locks {
        spinlock_t rf_ps_lock;
        spinlock_t rf_lock;
        spinlock_t lps_lock;
+       spinlock_t waitq_lock;
+       spinlock_t tx_urb_lock;
+
+       /*Dual mac*/
+       spinlock_t cck_and_rw_pagea_lock;
 };
 
 struct rtl_works {
@@ -1218,12 +1489,20 @@ struct rtl_works {
        struct workqueue_struct *rtl_wq;
        struct delayed_work watchdog_wq;
        struct delayed_work ips_nic_off_wq;
+
+       /* For SW LPS */
+       struct delayed_work ps_work;
+       struct delayed_work ps_rfon_wq;
 };
 
 struct rtl_debug {
        u32 dbgp_type[DBGP_TYPE_MAX];
        u32 global_debuglevel;
        u64 global_debugcomponents;
+
+       /* add for proc debug */
+       struct proc_dir_entry *proc_dir;
+       char proc_name[20];
 };
 
 struct rtl_priv {
@@ -1274,6 +1553,91 @@ struct rtl_priv {
 #define rtl_efuse(rtlpriv)     (&((rtlpriv)->efuse))
 #define rtl_psc(rtlpriv)       (&((rtlpriv)->psc))
 
+
+/***************************************
+    Bluetooth Co-existence Related
+****************************************/
+
+enum bt_ant_num {
+       ANT_X2 = 0,
+       ANT_X1 = 1,
+};
+
+enum bt_co_type {
+       BT_2WIRE = 0,
+       BT_ISSC_3WIRE = 1,
+       BT_ACCEL = 2,
+       BT_CSR_BC4 = 3,
+       BT_CSR_BC8 = 4,
+       BT_RTL8756 = 5,
+};
+
+enum bt_cur_state {
+       BT_OFF = 0,
+       BT_ON = 1,
+};
+
+enum bt_service_type {
+       BT_SCO = 0,
+       BT_A2DP = 1,
+       BT_HID = 2,
+       BT_HID_IDLE = 3,
+       BT_SCAN = 4,
+       BT_IDLE = 5,
+       BT_OTHER_ACTION = 6,
+       BT_BUSY = 7,
+       BT_OTHERBUSY = 8,
+       BT_PAN = 9,
+};
+
+enum bt_radio_shared {
+       BT_RADIO_SHARED = 0,
+       BT_RADIO_INDIVIDUAL = 1,
+};
+
+struct bt_coexist_info {
+
+       /* EEPROM BT info. */
+       u8 eeprom_bt_coexist;
+       u8 eeprom_bt_type;
+       u8 eeprom_bt_ant_num;
+       u8 eeprom_bt_ant_isolation;
+       u8 eeprom_bt_radio_shared;
+
+       u8 bt_coexistence;
+       u8 bt_ant_num;
+       u8 bt_coexist_type;
+       u8 bt_state;
+       u8 bt_cur_state;        /* 0:on, 1:off */
+       u8 bt_ant_isolation;    /* 0:good, 1:bad */
+       u8 bt_pape_ctrl;        /* 0:SW, 1:SW/HW dynamic */
+       u8 bt_service;
+       u8 bt_radio_shared_type;
+       u8 bt_rfreg_origin_1e;
+       u8 bt_rfreg_origin_1f;
+       u8 bt_rssi_state;
+       u32 ratio_tx;
+       u32 ratio_pri;
+       u32 bt_edca_ul;
+       u32 bt_edca_dl;
+
+       bool b_init_set;
+       bool b_bt_busy_traffic;
+       bool b_bt_traffic_mode_set;
+       bool b_bt_non_traffic_mode_set;
+
+       bool b_fw_coexist_all_off;
+       bool b_sw_coexist_all_off;
+       u32 current_state;
+       u32 previous_state;
+       u8 bt_pre_rssi_state;
+
+       u8 b_reg_bt_iso;
+       u8 b_reg_bt_sco;
+
+};
+
+
 /****************************************
        mem access macro define start
        Call endian free function when
@@ -1281,7 +1645,7 @@ struct rtl_priv {
        2. Before write integer to IO.
        3. After read integer from IO.
 ****************************************/
-/* Convert little data endian to host */
+/* Convert little-endian data to host ordering */
 #define EF1BYTE(_val)          \
        ((u8)(_val))
 #define EF2BYTE(_val)          \
@@ -1289,27 +1653,21 @@ struct rtl_priv {
 #define EF4BYTE(_val)          \
        (le32_to_cpu(_val))
 
-/* Read data from memory */
-#define READEF1BYTE(_ptr)      \
-       EF1BYTE(*((u8 *)(_ptr)))
+/* Read le16 data from memory and convert to host ordering */
 #define READEF2BYTE(_ptr)      \
        EF2BYTE(*((u16 *)(_ptr)))
-#define READEF4BYTE(_ptr)      \
-       EF4BYTE(*((u32 *)(_ptr)))
 
-/* Write data to memory */
-#define WRITEEF1BYTE(_ptr, _val)       \
-       (*((u8 *)(_ptr))) = EF1BYTE(_val)
+/* Write le16 data to memory in host ordering */
 #define WRITEEF2BYTE(_ptr, _val)       \
        (*((u16 *)(_ptr))) = EF2BYTE(_val)
-#define WRITEEF4BYTE(_ptr, _val)       \
-       (*((u32 *)(_ptr))) = EF4BYTE(_val)
-
-/*Example:
-BIT_LEN_MASK_32(0) => 0x00000000
-BIT_LEN_MASK_32(1) => 0x00000001
-BIT_LEN_MASK_32(2) => 0x00000003
-BIT_LEN_MASK_32(32) => 0xFFFFFFFF*/
+
+/* Create a bit mask
+ * Examples:
+ * BIT_LEN_MASK_32(0) => 0x00000000
+ * BIT_LEN_MASK_32(1) => 0x00000001
+ * BIT_LEN_MASK_32(2) => 0x00000003
+ * BIT_LEN_MASK_32(32) => 0xFFFFFFFF
+ */
 #define BIT_LEN_MASK_32(__bitlen)       \
        (0xFFFFFFFF >> (32 - (__bitlen)))
 #define BIT_LEN_MASK_16(__bitlen)       \
@@ -1317,9 +1675,11 @@ BIT_LEN_MASK_32(32) => 0xFFFFFFFF*/
 #define BIT_LEN_MASK_8(__bitlen) \
        (0xFF >> (8 - (__bitlen)))
 
-/*Example:
-BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
-BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000*/
+/* Create an offset bit mask
+ * Examples:
+ * BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
+ * BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000
+ */
 #define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \
        (BIT_LEN_MASK_32(__bitlen) << (__bitoffset))
 #define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \
@@ -1328,8 +1688,9 @@ BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000*/
        (BIT_LEN_MASK_8(__bitlen) << (__bitoffset))
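As a standalone illustration (not part of the patch), the userspace snippet below shows the arithmetic these masks perform: BIT_LEN_MASK_32() builds a right-aligned mask of __bitlen bits and BIT_OFFSET_LEN_MASK_32() shifts it into place, which is exactly what the LE_BITS_CLEARED_TO_*() helpers further down rely on. The descriptor value is made up.

#include <stdio.h>
#include <stdint.h>

#define BIT_LEN_MASK_32(__bitlen)       (0xFFFFFFFF >> (32 - (__bitlen)))
#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \
        (BIT_LEN_MASK_32(__bitlen) << (__bitoffset))

int main(void)
{
        uint32_t desc = 0x00030000;     /* bits 16..17 set */

        /* read the 2-bit subfield at offset 16 */
        uint32_t field = (desc >> 16) & BIT_LEN_MASK_32(2);

        /* clear that subfield, leaving the rest of the word intact */
        uint32_t cleared = desc & ~BIT_OFFSET_LEN_MASK_32(16, 2);

        printf("field=%u cleared=0x%08x\n", field, cleared);
        /* prints: field=3 cleared=0x00000000 */
        return 0;
}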
 
 /*Description:
-Return 4-byte value in host byte ordering from
-4-byte pointer in little-endian system.*/
+ * Return 4-byte value in host byte ordering from
+ * 4-byte pointer in little-endian system.
+ */
 #define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
        (EF4BYTE(*((u32 *)(__pstart))))
 #define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
@@ -1337,28 +1698,10 @@ Return 4-byte value in host byte ordering from
 #define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
        (EF1BYTE(*((u8 *)(__pstart))))
 
-/*Description:
-Translate subfield (continuous bits in little-endian) of 4-byte
-value to host byte ordering.*/
-#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
-       ( \
-               (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset))  & \
-               BIT_LEN_MASK_32(__bitlen) \
-       )
-#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
-       ( \
-               (LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset)) & \
-               BIT_LEN_MASK_16(__bitlen) \
-       )
-#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
-       ( \
-               (LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \
-               BIT_LEN_MASK_8(__bitlen) \
-       )
-
-/*Description:
-Mask subfield (continuous bits in little-endian) of 4-byte value
-and return the result in 4-byte value in host byte ordering.*/
+/* Description:
+ * Mask subfield (continuous bits in little-endian) of 4-byte value
+ * and return the result in 4-byte value in host byte ordering.
+ */
 #define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
        ( \
                LE_P4BYTE_TO_HOST_4BYTE(__pstart)  & \
@@ -1375,20 +1718,9 @@ and return the result in 4-byte value in host byte ordering.*/
                (~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen)) \
        )
 
-/*Description:
-Set subfield of little-endian 4-byte value to specified value. */
-#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
-       *((u32 *)(__pstart)) = EF4BYTE \
-       ( \
-               LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
-               ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
-       );
-#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
-       *((u16 *)(__pstart)) = EF2BYTE \
-       ( \
-               LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
-               ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
-       );
+/* Description:
+ * Set subfield of little-endian 4-byte value to specified value.
+ */
 #define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \
        *((u8 *)(__pstart)) = EF1BYTE \
        ( \
@@ -1400,13 +1732,14 @@ Set subfield of little-endian 4-byte value to specified value.  */
        mem access macro define end
 ****************************************/
 
-#define packet_get_type(_packet) (EF1BYTE((_packet).octet[0]) & 0xFC)
+#define byte(x, n) ((x >> (8 * n)) & 0xff)
+
 #define RTL_WATCH_DOG_TIME     2000
 #define MSECS(t)               msecs_to_jiffies(t)
-#define WLAN_FC_GET_VERS(fc)   ((fc) & IEEE80211_FCTL_VERS)
-#define WLAN_FC_GET_TYPE(fc)   ((fc) & IEEE80211_FCTL_FTYPE)
-#define WLAN_FC_GET_STYPE(fc)  ((fc) & IEEE80211_FCTL_STYPE)
-#define WLAN_FC_MORE_DATA(fc)  ((fc) & IEEE80211_FCTL_MOREDATA)
+#define WLAN_FC_GET_VERS(fc)   (le16_to_cpu(fc) & IEEE80211_FCTL_VERS)
+#define WLAN_FC_GET_TYPE(fc)   (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE)
+#define WLAN_FC_GET_STYPE(fc)  (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE)
+#define WLAN_FC_MORE_DATA(fc)  (le16_to_cpu(fc) & IEEE80211_FCTL_MOREDATA)
 #define SEQ_TO_SN(seq)         (((seq) & IEEE80211_SCTL_SEQ) >> 4)
 #define SN_TO_SEQ(ssn)         (((ssn) << 4) & IEEE80211_SCTL_SEQ)
 #define MAX_SN                 ((IEEE80211_SCTL_SEQ) >> 4)
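A usage sketch, not code from this patch: with the helpers now taking the __le16 frame_control field directly, a beacon check could look like the fragment below. IEEE80211_FTYPE_MGMT and IEEE80211_STYPE_BEACON are the standard constants from <linux/ieee80211.h>; the rtl_is_beacon() name is hypothetical.

static inline bool rtl_is_beacon(struct ieee80211_hdr *hdr)
{
        __le16 fc = hdr->frame_control;

        /* the macros convert to host order internally */
        return WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_MGMT &&
               WLAN_FC_GET_STYPE(fc) == IEEE80211_STYPE_BEACON;
}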
@@ -1420,6 +1753,8 @@ Set subfield of little-endian 4-byte value to specified value.    */
 #define        RT_RF_OFF_LEVL_FW_32K           BIT(5)  /*FW in 32k */
 /*Always enable ASPM and Clock Req in initialization.*/
 #define        RT_RF_PS_LEVEL_ALWAYS_ASPM      BIT(6)
+/* Apply the ASPM PS level regardless of RFOFF or SLEEP */
+#define        RT_PS_LEVEL_ASPM                BIT(7)
 /*When LPS is on, disable 2R if no packet is received or transmittd.*/
 #define        RT_RF_LPS_DISALBE_2R            BIT(30)
 #define        RT_RF_LPS_LEVEL_ASPM            BIT(31) /*LPS with ASPM */
@@ -1433,15 +1768,6 @@ Set subfield of little-endian 4-byte value to specified value.   */
 #define container_of_dwork_rtl(x, y, z) \
        container_of(container_of(x, struct delayed_work, work), y, z)
 
-#define FILL_OCTET_STRING(_os, _octet, _len)   \
-               (_os).octet = (u8 *)(_octet);           \
-               (_os).length = (_len);
-
-#define CP_MACADDR(des, src)   \
-       ((des)[0] = (src)[0], (des)[1] = (src)[1],\
-       (des)[2] = (src)[2], (des)[3] = (src)[3],\
-       (des)[4] = (src)[4], (des)[5] = (src)[5])
-
 static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr)
 {
        return rtlpriv->io.read8_sync(rtlpriv, addr);
index 64a0214cfb29b2b23df56b988e882841e2743edb..ef8370edace709f8c1fb3437403fe4447f7d7d12 100644 (file)
@@ -776,6 +776,31 @@ out:
        return ret;
 }
 
+int wl1251_acx_low_rssi(struct wl1251 *wl, s8 threshold, u8 weight,
+                       u8 depth, enum wl1251_acx_low_rssi_type type)
+{
+       struct acx_low_rssi *rssi;
+       int ret;
+
+       wl1251_debug(DEBUG_ACX, "acx low rssi");
+
+       rssi = kzalloc(sizeof(*rssi), GFP_KERNEL);
+       if (!rssi)
+               return -ENOMEM;
+
+       rssi->threshold = threshold;
+       rssi->weight = weight;
+       rssi->depth = depth;
+       rssi->type = type;
+
+       ret = wl1251_cmd_configure(wl, ACX_LOW_RSSI, rssi, sizeof(*rssi));
+       if (ret < 0)
+               wl1251_warning("failed to set low rssi threshold: %d", ret);
+
+       kfree(rssi);
+       return ret;
+}
+
 int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble)
 {
        struct acx_preamble *acx;
@@ -978,6 +1003,34 @@ out:
        return ret;
 }
 
+int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
+                         u8 max_consecutive)
+{
+       struct wl1251_acx_bet_enable *acx;
+       int ret;
+
+       wl1251_debug(DEBUG_ACX, "acx bet enable");
+
+       acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+       if (!acx) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       acx->enable = mode;
+       acx->max_consecutive = max_consecutive;
+
+       ret = wl1251_cmd_configure(wl, ACX_BET_ENABLE, acx, sizeof(*acx));
+       if (ret < 0) {
+               wl1251_warning("wl1251 acx bet enable failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(acx);
+       return ret;
+}
+
 int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
                      u8 aifs, u16 txop)
 {
index efcc3aaca14f53d84675e5a4044d48691ac7c853..c2ba100f9b1ab3e659301f88328a4274beaf2047 100644 (file)
@@ -399,6 +399,49 @@ struct acx_rts_threshold {
        u8 pad[2];
 } __packed;
 
+enum wl1251_acx_low_rssi_type {
+       /*
+        * The event is a "Level" indication which keeps triggering
+        * as long as the average RSSI is below the threshold.
+        */
+       WL1251_ACX_LOW_RSSI_TYPE_LEVEL = 0,
+
+       /*
+        * The event is an "Edge" indication which triggers
+        * only when the RSSI threshold is crossed from above.
+        */
+       WL1251_ACX_LOW_RSSI_TYPE_EDGE = 1,
+};
+
+struct acx_low_rssi {
+       struct acx_header header;
+
+       /*
+        * The threshold (in dBm) below (or above after low rssi
+        * indication) which the firmware generates an interrupt to the
+        * host. This parameter is signed.
+        */
+       s8 threshold;
+
+       /*
+        * The weight of the current RSSI sample, before adding the new
+        * sample, that is used to calculate the average RSSI.
+        */
+       u8 weight;
+
+       /*
+        * The number of Beacons/Probe response frames that will be
+        * received before issuing the Low or Regained RSSI event.
+        */
+       u8 depth;
+
+       /*
+        * Configures how the Low RSSI Event is triggered. Refer to
+        * enum wl1251_acx_low_rssi_type for more.
+        */
+       u8 type;
+} __packed;
+
 struct acx_beacon_filter_option {
        struct acx_header header;
 
@@ -1164,6 +1207,31 @@ struct wl1251_acx_wr_tbtt_and_dtim {
        u8  padding;
 } __packed;
 
+enum wl1251_acx_bet_mode {
+       WL1251_ACX_BET_DISABLE = 0,
+       WL1251_ACX_BET_ENABLE = 1,
+};
+
+struct wl1251_acx_bet_enable {
+       struct acx_header header;
+
+       /*
+        * Specifies if beacon early termination procedure is enabled or
+        * disabled, see enum wl1251_acx_bet_mode.
+        */
+       u8 enable;
+
+       /*
+        * Specifies the maximum number of consecutive beacons that may be
+        * early terminated. After this number is reached at least one full
+        * beacon must be correctly received in FW before beacon ET
+        * resumes. Range 0 - 255.
+        */
+       u8 max_consecutive;
+
+       u8 padding[2];
+} __packed;
+
 struct wl1251_acx_ac_cfg {
        struct acx_header header;
 
@@ -1393,6 +1461,8 @@ int wl1251_acx_cca_threshold(struct wl1251 *wl);
 int wl1251_acx_bcn_dtim_options(struct wl1251 *wl);
 int wl1251_acx_aid(struct wl1251 *wl, u16 aid);
 int wl1251_acx_event_mbox_mask(struct wl1251 *wl, u32 event_mask);
+int wl1251_acx_low_rssi(struct wl1251 *wl, s8 threshold, u8 weight,
+                       u8 depth, enum wl1251_acx_low_rssi_type type);
 int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble);
 int wl1251_acx_cts_protect(struct wl1251 *wl,
                            enum acx_ctsprotect_type ctsprotect);
@@ -1401,6 +1471,8 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime);
 int wl1251_acx_rate_policies(struct wl1251 *wl);
 int wl1251_acx_mem_cfg(struct wl1251 *wl);
 int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim);
+int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
+                         u8 max_consecutive);
 int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
                      u8 aifs, u16 txop);
 int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
index 712372e50a8794ae550f91709a31589d83e4b90a..dfc4579acb06f6682395e3f2b55dcae3a8b1d081 100644 (file)
@@ -90,6 +90,24 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
                }
        }
 
+       if (wl->vif && wl->rssi_thold) {
+               if (vector & ROAMING_TRIGGER_LOW_RSSI_EVENT_ID) {
+                       wl1251_debug(DEBUG_EVENT,
+                                    "ROAMING_TRIGGER_LOW_RSSI_EVENT");
+                       ieee80211_cqm_rssi_notify(wl->vif,
+                               NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+                               GFP_KERNEL);
+               }
+
+               if (vector & ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID) {
+                       wl1251_debug(DEBUG_EVENT,
+                                    "ROAMING_TRIGGER_REGAINED_RSSI_EVENT");
+                       ieee80211_cqm_rssi_notify(wl->vif,
+                               NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+                               GFP_KERNEL);
+               }
+       }
+
        return 0;
 }
 
index 40372bac948291106197a62475b6e5faf2ba1464..12c9e635a6d6d7bf62b543fb6e14b95ad371b1a3 100644 (file)
@@ -375,7 +375,7 @@ out:
        mutex_unlock(&wl->mutex);
 }
 
-static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct wl1251 *wl = hw->priv;
        unsigned long flags;
@@ -401,8 +401,6 @@ static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                wl->tx_queue_stopped = true;
                spin_unlock_irqrestore(&wl->wl_lock, flags);
        }
-
-       return NETDEV_TX_OK;
 }
 
 static int wl1251_op_start(struct ieee80211_hw *hw)
@@ -502,6 +500,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
        wl->psm = 0;
        wl->tx_queue_stopped = false;
        wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
+       wl->rssi_thold = 0;
        wl->channel = WL1251_DEFAULT_CHANNEL;
 
        wl1251_debugfs_reset(wl);
@@ -959,6 +958,16 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
        if (ret < 0)
                goto out;
 
+       if (changed & BSS_CHANGED_CQM) {
+               ret = wl1251_acx_low_rssi(wl, bss_conf->cqm_rssi_thold,
+                                         WL1251_DEFAULT_LOW_RSSI_WEIGHT,
+                                         WL1251_DEFAULT_LOW_RSSI_DEPTH,
+                                         WL1251_ACX_LOW_RSSI_TYPE_EDGE);
+               if (ret < 0)
+                       goto out;
+               wl->rssi_thold = bss_conf->cqm_rssi_thold;
+       }
+
        if (changed & BSS_CHANGED_BSSID) {
                memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
 
@@ -1313,9 +1322,11 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
        wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
                IEEE80211_HW_SUPPORTS_PS |
                IEEE80211_HW_BEACON_FILTER |
-               IEEE80211_HW_SUPPORTS_UAPSD;
+               IEEE80211_HW_SUPPORTS_UAPSD |
+               IEEE80211_HW_SUPPORTS_CQM_RSSI;
 
-       wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+       wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+                                        BIT(NL80211_IFTYPE_ADHOC);
        wl->hw->wiphy->max_scan_ssids = 1;
        wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz;
 
@@ -1377,6 +1388,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
        wl->psm_requested = false;
        wl->tx_queue_stopped = false;
        wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
+       wl->rssi_thold = 0;
        wl->beacon_int = WL1251_DEFAULT_BEACON_INT;
        wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD;
        wl->vif = NULL;
index 5ed47c8373d2ae846db307b9d7306781f69564a6..9ba23ede51bd964c26412ad3a79b48b12654c4bb 100644 (file)
@@ -153,6 +153,11 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
                if (ret < 0)
                        return ret;
 
+               ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_ENABLE,
+                                           WL1251_DEFAULT_BET_CONSECUTIVE);
+               if (ret < 0)
+                       return ret;
+
                ret = wl1251_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
                if (ret < 0)
                        return ret;
@@ -170,6 +175,12 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
                if (ret < 0)
                        return ret;
 
+               /* disable BET */
+               ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_DISABLE,
+                                           WL1251_DEFAULT_BET_CONSECUTIVE);
+               if (ret < 0)
+                       return ret;
+
                /* disable beacon filtering */
                ret = wl1251_acx_beacon_filter_opt(wl, false);
                if (ret < 0)
index efa53607d5c9ac9e5ef2f572739f46b562334117..c1b3b3f03da257ca567c79a0dfcdc4af47fd387c 100644 (file)
@@ -78,9 +78,10 @@ static void wl1251_rx_status(struct wl1251 *wl,
         */
        wl->noise = desc->rssi - desc->snr / 2;
 
-       status->freq = ieee80211_channel_to_frequency(desc->channel);
+       status->freq = ieee80211_channel_to_frequency(desc->channel,
+                                                     status->band);
 
-       status->flag |= RX_FLAG_TSFT;
+       status->flag |= RX_FLAG_MACTIME_MPDU;
 
        if (desc->flags & RX_DESC_ENCRYPTION_MASK) {
                status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
@@ -95,8 +96,52 @@ static void wl1251_rx_status(struct wl1251 *wl,
        if (unlikely(!(desc->flags & RX_DESC_VALID_FCS)))
                status->flag |= RX_FLAG_FAILED_FCS_CRC;
 
+       switch (desc->rate) {
+               /* skip 1 and 12 Mbps here: they share the same code 0x0a */
+       case RATE_2MBPS:
+               status->rate_idx = 1;
+               break;
+       case RATE_5_5MBPS:
+               status->rate_idx = 2;
+               break;
+       case RATE_11MBPS:
+               status->rate_idx = 3;
+               break;
+       case RATE_6MBPS:
+               status->rate_idx = 4;
+               break;
+       case RATE_9MBPS:
+               status->rate_idx = 5;
+               break;
+       case RATE_18MBPS:
+               status->rate_idx = 7;
+               break;
+       case RATE_24MBPS:
+               status->rate_idx = 8;
+               break;
+       case RATE_36MBPS:
+               status->rate_idx = 9;
+               break;
+       case RATE_48MBPS:
+               status->rate_idx = 10;
+               break;
+       case RATE_54MBPS:
+               status->rate_idx = 11;
+               break;
+       }
+
+       /* for 1 and 12 Mbps we have to check the modulation */
+       if (desc->rate == RATE_1MBPS) {
+               if (!(desc->mod_pre & OFDM_RATE_BIT))
+                       /* CCK -> RATE_1MBPS */
+                       status->rate_idx = 0;
+               else
+                       /* OFDM -> RATE_12MBPS */
+                       status->rate_idx = 6;
+       }
 
-       /* FIXME: set status->rate_idx */
+       if (desc->mod_pre & SHORT_PREAMBLE_BIT)
+               status->flag |= RX_FLAG_SHORTPRE;
 }
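The switch above maps the firmware rate code onto the index of the 2.4 GHz bitrate table registered with mac80211 (0 = 1 Mbps CCK through 11 = 54 Mbps OFDM), using the modulation bit only for the ambiguous 0x0a code shared by 1 and 12 Mbps. For illustration, the same mapping in table-driven form; the RATE_* codes and OFDM_RATE_BIT come from the driver headers, while the table and wl1251_rate_to_idx() are hypothetical.

struct wl1251_rate_map {
        u8 fw_rate;
        u8 rate_idx;
};

static const struct wl1251_rate_map wl1251_rate_table[] = {
        { RATE_2MBPS,   1 }, { RATE_5_5MBPS, 2 }, { RATE_11MBPS,  3 },
        { RATE_6MBPS,   4 }, { RATE_9MBPS,   5 }, { RATE_18MBPS,  7 },
        { RATE_24MBPS,  8 }, { RATE_36MBPS,  9 }, { RATE_48MBPS, 10 },
        { RATE_54MBPS, 11 },
};

static u8 wl1251_rate_to_idx(u8 fw_rate, u8 mod_pre)
{
        int i;

        /* 1 Mbps (CCK) and 12 Mbps (OFDM) share the code 0x0a */
        if (fw_rate == RATE_1MBPS)
                return (mod_pre & OFDM_RATE_BIT) ? 6 : 0;

        for (i = 0; i < ARRAY_SIZE(wl1251_rate_table); i++)
                if (wl1251_rate_table[i].fw_rate == fw_rate)
                        return wl1251_rate_table[i].rate_idx;

        return 0;       /* fall back to 1 Mbps for unknown codes */
}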
 
 static void wl1251_rx_body(struct wl1251 *wl,
index 554b4f9a3d3ecf3141ebdec2e0773fb967fcefa6..28121c590a2b1a62effa4c17b335b83264c99acf 100644 (file)
@@ -213,16 +213,30 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
                wl1251_debug(DEBUG_TX, "skb offset %d", offset);
 
                /* check whether the current skb can be used */
-               if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
-                       unsigned char *src = skb->data;
+               if (skb_cloned(skb) || (skb_tailroom(skb) < offset)) {
+                       struct sk_buff *newskb = skb_copy_expand(skb, 0, 3,
+                                                                GFP_KERNEL);
+
+                       if (unlikely(newskb == NULL)) {
+                               wl1251_error("Can't allocate skb!");
+                               return -EINVAL;
+                       }
 
-                       /* align the buffer on a 4-byte boundary */
+                       tx_hdr = (struct tx_double_buffer_desc *) newskb->data;
+
+                       dev_kfree_skb_any(skb);
+                       wl->tx_frames[tx_hdr->id] = skb = newskb;
+
+                       offset = (4 - (long)skb->data) & 0x03;
+                       wl1251_debug(DEBUG_TX, "new skb offset %d", offset);
+               }
+
+               /* align the buffer on a 4-byte boundary */
+               if (offset) {
+                       unsigned char *src = skb->data;
                        skb_reserve(skb, offset);
                        memmove(skb->data, src, skb->len);
                        tx_hdr = (struct tx_double_buffer_desc *) skb->data;
-               } else {
-                       wl1251_info("No handler, fixme!");
-                       return -EINVAL;
                }
        }
 
@@ -368,7 +382,7 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
 {
        struct ieee80211_tx_info *info;
        struct sk_buff *skb;
-       int hdrlen, ret;
+       int hdrlen;
        u8 *frame;
 
        skb = wl->tx_frames[result->id];
@@ -407,40 +421,12 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
        ieee80211_tx_status(wl->hw, skb);
 
        wl->tx_frames[result->id] = NULL;
-
-       if (wl->tx_queue_stopped) {
-               wl1251_debug(DEBUG_TX, "cb: queue was stopped");
-
-               skb = skb_dequeue(&wl->tx_queue);
-
-               /* The skb can be NULL because tx_work might have been
-                  scheduled before the queue was stopped making the
-                  queue empty */
-
-               if (skb) {
-                       ret = wl1251_tx_frame(wl, skb);
-                       if (ret == -EBUSY) {
-                               /* firmware buffer is still full */
-                               wl1251_debug(DEBUG_TX, "cb: fw buffer "
-                                            "still full");
-                               skb_queue_head(&wl->tx_queue, skb);
-                               return;
-                       } else if (ret < 0) {
-                               dev_kfree_skb(skb);
-                               return;
-                       }
-               }
-
-               wl1251_debug(DEBUG_TX, "cb: waking queues");
-               ieee80211_wake_queues(wl->hw);
-               wl->tx_queue_stopped = false;
-       }
 }
 
 /* Called upon reception of a TX complete interrupt */
 void wl1251_tx_complete(struct wl1251 *wl)
 {
-       int i, result_index, num_complete = 0;
+       int i, result_index, num_complete = 0, queue_len;
        struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;
        unsigned long flags;
 
@@ -471,18 +457,22 @@ void wl1251_tx_complete(struct wl1251 *wl)
                }
        }
 
-       if (wl->tx_queue_stopped
-           &&
-           skb_queue_len(&wl->tx_queue) <= WL1251_TX_QUEUE_LOW_WATERMARK){
+       queue_len = skb_queue_len(&wl->tx_queue);
 
-               /* firmware buffer has space, restart queues */
+       if ((num_complete > 0) && (queue_len > 0)) {
+               /* firmware buffer has space, reschedule tx_work */
+               wl1251_debug(DEBUG_TX, "tx_complete: reschedule tx_work");
+               ieee80211_queue_work(wl->hw, &wl->tx_work);
+       }
+
+       if (wl->tx_queue_stopped &&
+           queue_len <= WL1251_TX_QUEUE_LOW_WATERMARK) {
+               /* tx_queue has space, restart queues */
                wl1251_debug(DEBUG_TX, "tx_complete: waking queues");
                spin_lock_irqsave(&wl->wl_lock, flags);
                ieee80211_wake_queues(wl->hw);
                wl->tx_queue_stopped = false;
                spin_unlock_irqrestore(&wl->wl_lock, flags);
-               ieee80211_queue_work(wl->hw, &wl->tx_work);
-
        }
 
        /* Every completed frame needs to be acknowledged */
index c0ce2c8b43b8715fbb2c502f4c5bce710e901da7..bb23cd522b22daa9da053969afa98adfcf0bd960 100644 (file)
@@ -370,6 +370,8 @@ struct wl1251 {
        /* in dBm */
        int power_level;
 
+       int rssi_thold;
+
        struct wl1251_stats stats;
        struct wl1251_debugfs debugfs;
 
@@ -410,6 +412,8 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
 
 #define WL1251_DEFAULT_CHANNEL 0
 
+#define WL1251_DEFAULT_BET_CONSECUTIVE 10
+
 #define CHIP_ID_1251_PG10                 (0x7010101)
 #define CHIP_ID_1251_PG11                 (0x7020101)
 #define CHIP_ID_1251_PG12                 (0x7030101)
@@ -431,4 +435,7 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
 #define WL1251_PART_WORK_REG_START     REGISTERS_BASE
 #define WL1251_PART_WORK_REG_SIZE      REGISTERS_WORK_SIZE
 
+#define WL1251_DEFAULT_LOW_RSSI_WEIGHT          10
+#define WL1251_DEFAULT_LOW_RSSI_DEPTH           10
+
 #endif
index 0e65bce457d608784fb7676fc8d4a90e7b87a9e3..692ebff38fc89ceb67e342a2f08c20f8f0284532 100644 (file)
@@ -54,7 +54,7 @@ config WL12XX_SDIO
 
 config WL12XX_SDIO_TEST
        tristate "TI wl12xx SDIO testing support"
-       depends on WL12XX && MMC
+       depends on WL12XX && MMC && WL12XX_SDIO
        default n
        ---help---
          This module adds support for the SDIO bus testing with the
index cc4068d2b4a8443ca814bd25cf9c246f2095bf3c..3badc6bb7866027d2415f3318afb9e95958ce280 100644 (file)
@@ -751,10 +751,10 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
        return 0;
 }
 
-int wl1271_acx_rate_policies(struct wl1271 *wl)
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
 {
-       struct acx_rate_policy *acx;
-       struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf;
+       struct acx_sta_rate_policy *acx;
+       struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf;
        int idx = 0;
        int ret = 0;
 
@@ -783,6 +783,10 @@ int wl1271_acx_rate_policies(struct wl1271 *wl)
 
        acx->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT);
 
+       wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
+               acx->rate_class[ACX_TX_BASIC_RATE].enabled_rates,
+               acx->rate_class[ACX_TX_AP_FULL_RATE].enabled_rates);
+
        ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
        if (ret < 0) {
                wl1271_warning("Setting of rate policies failed: %d", ret);
@@ -794,6 +798,38 @@ out:
        return ret;
 }
 
+int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
+                     u8 idx)
+{
+       struct acx_ap_rate_policy *acx;
+       int ret = 0;
+
+       wl1271_debug(DEBUG_ACX, "acx ap rate policy");
+
+       acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+       if (!acx) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       acx->rate_policy.enabled_rates = cpu_to_le32(c->enabled_rates);
+       acx->rate_policy.short_retry_limit = c->short_retry_limit;
+       acx->rate_policy.long_retry_limit = c->long_retry_limit;
+       acx->rate_policy.aflags = c->aflags;
+
+       acx->rate_policy_idx = cpu_to_le32(idx);
+
+       ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
+       if (ret < 0) {
+               wl1271_warning("Setting of ap rate policy failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(acx);
+       return ret;
+}
+
 int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
                      u8 aifsn, u16 txop)
 {
@@ -915,9 +951,9 @@ out:
        return ret;
 }
 
-int wl1271_acx_mem_cfg(struct wl1271 *wl)
+int wl1271_acx_ap_mem_cfg(struct wl1271 *wl)
 {
-       struct wl1271_acx_config_memory *mem_conf;
+       struct wl1271_acx_ap_config_memory *mem_conf;
        int ret;
 
        wl1271_debug(DEBUG_ACX, "wl1271 mem cfg");
@@ -929,10 +965,10 @@ int wl1271_acx_mem_cfg(struct wl1271 *wl)
        }
 
        /* memory config */
-       mem_conf->num_stations = DEFAULT_NUM_STATIONS;
-       mem_conf->rx_mem_block_num = ACX_RX_MEM_BLOCKS;
-       mem_conf->tx_min_mem_block_num = ACX_TX_MIN_MEM_BLOCKS;
-       mem_conf->num_ssid_profiles = ACX_NUM_SSID_PROFILES;
+       mem_conf->num_stations = wl->conf.mem.num_stations;
+       mem_conf->rx_mem_block_num = wl->conf.mem.rx_block_num;
+       mem_conf->tx_min_mem_block_num = wl->conf.mem.tx_min_block_num;
+       mem_conf->num_ssid_profiles = wl->conf.mem.ssid_profiles;
        mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
 
        ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
@@ -947,13 +983,45 @@ out:
        return ret;
 }
 
-int wl1271_acx_init_mem_config(struct wl1271 *wl)
+int wl1271_acx_sta_mem_cfg(struct wl1271 *wl)
 {
+       struct wl1271_acx_sta_config_memory *mem_conf;
        int ret;
 
-       ret = wl1271_acx_mem_cfg(wl);
-       if (ret < 0)
-               return ret;
+       wl1271_debug(DEBUG_ACX, "wl1271 mem cfg");
+
+       mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL);
+       if (!mem_conf) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       /* memory config */
+       mem_conf->num_stations = wl->conf.mem.num_stations;
+       mem_conf->rx_mem_block_num = wl->conf.mem.rx_block_num;
+       mem_conf->tx_min_mem_block_num = wl->conf.mem.tx_min_block_num;
+       mem_conf->num_ssid_profiles = wl->conf.mem.ssid_profiles;
+       mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
+       mem_conf->dyn_mem_enable = wl->conf.mem.dynamic_memory;
+       mem_conf->tx_free_req = wl->conf.mem.min_req_tx_blocks;
+       mem_conf->rx_free_req = wl->conf.mem.min_req_rx_blocks;
+       mem_conf->tx_min = wl->conf.mem.tx_min;
+
+       ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
+                                  sizeof(*mem_conf));
+       if (ret < 0) {
+               wl1271_warning("wl1271 mem config failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(mem_conf);
+       return ret;
+}
+
+int wl1271_acx_init_mem_config(struct wl1271 *wl)
+{
+       int ret;
 
        wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map),
                                     GFP_KERNEL);
@@ -1233,6 +1301,7 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
        struct wl1271_acx_ht_capabilities *acx;
        u8 mac_address[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
        int ret = 0;
+       u32 ht_capabilites = 0;
 
        wl1271_debug(DEBUG_ACX, "acx ht capabilities setting");
 
@@ -1244,27 +1313,26 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
 
        /* Allow HT Operation ? */
        if (allow_ht_operation) {
-               acx->ht_capabilites =
+               ht_capabilites =
                        WL1271_ACX_FW_CAP_HT_OPERATION;
                if (ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD)
-                       acx->ht_capabilites |=
+                       ht_capabilites |=
                                WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT;
                if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
-                       acx->ht_capabilites |=
+                       ht_capabilites |=
                                WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS;
                if (ht_cap->cap & IEEE80211_HT_CAP_LSIG_TXOP_PROT)
-                       acx->ht_capabilites |=
+                       ht_capabilites |=
                                WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION;
 
                /* get data from A-MPDU parameters field */
                acx->ampdu_max_length = ht_cap->ampdu_factor;
                acx->ampdu_min_spacing = ht_cap->ampdu_density;
-
-               memcpy(acx->mac_address, mac_address, ETH_ALEN);
-       } else { /* HT operations are not allowed */
-               acx->ht_capabilites = 0;
        }
 
+       memcpy(acx->mac_address, mac_address, ETH_ALEN);
+       acx->ht_capabilites = cpu_to_le32(ht_capabilites);
+
        ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx));
        if (ret < 0) {
                wl1271_warning("acx ht capabilities setting failed: %d", ret);
@@ -1309,6 +1377,91 @@ out:
        return ret;
 }
 
+/* Configure BA session initiator/receiver parameters setting in the FW. */
+int wl1271_acx_set_ba_session(struct wl1271 *wl,
+                              enum ieee80211_back_parties direction,
+                              u8 tid_index, u8 policy)
+{
+       struct wl1271_acx_ba_session_policy *acx;
+       int ret;
+
+       wl1271_debug(DEBUG_ACX, "acx ba session setting");
+
+       acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+       if (!acx) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       /* ANY role */
+       acx->role_id = 0xff;
+       acx->tid = tid_index;
+       acx->enable = policy;
+       acx->ba_direction = direction;
+
+       switch (direction) {
+       case WLAN_BACK_INITIATOR:
+               acx->win_size = wl->conf.ht.tx_ba_win_size;
+               acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
+               break;
+       case WLAN_BACK_RECIPIENT:
+               acx->win_size = RX_BA_WIN_SIZE;
+               acx->inactivity_timeout = 0;
+               break;
+       default:
+               wl1271_error("Incorrect acx command id=%x\n", direction);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ret = wl1271_cmd_configure(wl,
+                                  ACX_BA_SESSION_POLICY_CFG,
+                                  acx,
+                                  sizeof(*acx));
+       if (ret < 0) {
+               wl1271_warning("acx ba session setting failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(acx);
+       return ret;
+}
+
+/* setup BA session receiver setting in the FW. */
+int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
+                                       bool enable)
+{
+       struct wl1271_acx_ba_receiver_setup *acx;
+       int ret;
+
+       wl1271_debug(DEBUG_ACX, "acx ba receiver session setting");
+
+       acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+       if (!acx) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       /* Single link for now */
+       acx->link_id = 1;
+       acx->tid = tid_index;
+       acx->enable = enable;
+       acx->win_size = 0;
+       acx->ssn = ssn;
+
+       ret = wl1271_cmd_configure(wl, ACX_BA_SESSION_RX_SETUP, acx,
+                                  sizeof(*acx));
+       if (ret < 0) {
+               wl1271_warning("acx ba receiver session failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(acx);
+       return ret;
+}
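A hedged sketch of how these two helpers might be combined on the RX BlockAck start path; the wl1271_ht_rx_ba_start() wrapper is hypothetical, while the wl1271_acx_*() signatures and WLAN_BACK_RECIPIENT are taken from this patch and mac80211.

static int wl1271_ht_rx_ba_start(struct wl1271 *wl, u8 tid, u16 ssn)
{
        int ret;

        /* configure the receiver-side BA policy for this TID */
        ret = wl1271_acx_set_ba_session(wl, WLAN_BACK_RECIPIENT, tid, 1);
        if (ret < 0)
                return ret;

        /* arm the RX BA session in the FW, starting from ssn */
        return wl1271_acx_set_ba_receiver_session(wl, tid, ssn, true);
}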
+
 int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime)
 {
        struct wl1271_acx_fw_tsf_information *tsf_info;
@@ -1334,3 +1487,82 @@ out:
        kfree(tsf_info);
        return ret;
 }
+
+int wl1271_acx_max_tx_retry(struct wl1271 *wl)
+{
+       struct wl1271_acx_max_tx_retry *acx = NULL;
+       int ret;
+
+       wl1271_debug(DEBUG_ACX, "acx max tx retry");
+
+       acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+       if (!acx)
+               return -ENOMEM;
+
+       acx->max_tx_retry = cpu_to_le16(wl->conf.tx.ap_max_tx_retries);
+
+       ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
+       if (ret < 0) {
+               wl1271_warning("acx max tx retry failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(acx);
+       return ret;
+}
+
+int wl1271_acx_config_ps(struct wl1271 *wl)
+{
+       struct wl1271_acx_config_ps *config_ps;
+       int ret;
+
+       wl1271_debug(DEBUG_ACX, "acx config ps");
+
+       config_ps = kzalloc(sizeof(*config_ps), GFP_KERNEL);
+       if (!config_ps) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       config_ps->exit_retries = wl->conf.conn.psm_exit_retries;
+       config_ps->enter_retries = wl->conf.conn.psm_entry_retries;
+       config_ps->null_data_rate = cpu_to_le32(wl->basic_rate);
+
+       ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps,
+                                  sizeof(*config_ps));
+
+       if (ret < 0) {
+               wl1271_warning("acx config ps failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(config_ps);
+       return ret;
+}
+
+int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr)
+{
+       struct wl1271_acx_inconnection_sta *acx = NULL;
+       int ret;
+
+       wl1271_debug(DEBUG_ACX, "acx set inconnection sta %pM", addr);
+
+       acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+       if (!acx)
+               return -ENOMEM;
+
+       memcpy(acx->addr, addr, ETH_ALEN);
+
+       ret = wl1271_cmd_configure(wl, ACX_UPDATE_INCONNECTION_STA_LIST,
+                                  acx, sizeof(*acx));
+       if (ret < 0) {
+               wl1271_warning("acx set inconnection sta failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(acx);
+       return ret;
+}
index 7bd8e4db4a71368f0fb756888124b9d28ea6a68c..dd19b01d807b021233cc02dca6d9224ab6f6c4c7 100644 (file)
@@ -133,7 +133,6 @@ enum {
 
 #define DEFAULT_UCAST_PRIORITY          0
 #define DEFAULT_RX_Q_PRIORITY           0
-#define DEFAULT_NUM_STATIONS            1
 #define DEFAULT_RXQ_PRIORITY            0 /* low 0 .. 15 high  */
 #define DEFAULT_RXQ_TYPE                0x07    /* All frames, Data/Ctrl/Mgmt */
 #define TRACE_BUFFER_MAX_SIZE           256
@@ -747,13 +746,23 @@ struct acx_rate_class {
 #define ACX_TX_BASIC_RATE      0
 #define ACX_TX_AP_FULL_RATE    1
 #define ACX_TX_RATE_POLICY_CNT 2
-struct acx_rate_policy {
+struct acx_sta_rate_policy {
        struct acx_header header;
 
        __le32 rate_class_cnt;
        struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES];
 } __packed;
 
+
+#define ACX_TX_AP_MODE_MGMT_RATE 4
+#define ACX_TX_AP_MODE_BCST_RATE 5
+struct acx_ap_rate_policy {
+       struct acx_header header;
+
+       __le32 rate_policy_idx;
+       struct acx_rate_class rate_policy;
+} __packed;
+
 struct acx_ac_cfg {
        struct acx_header header;
        u8 ac;
@@ -787,12 +796,9 @@ struct acx_tx_config_options {
        __le16 tx_compl_threshold;   /* number of packets */
 } __packed;
 
-#define ACX_RX_MEM_BLOCKS     70
-#define ACX_TX_MIN_MEM_BLOCKS 40
 #define ACX_TX_DESCRIPTORS    32
-#define ACX_NUM_SSID_PROFILES 1
 
-struct wl1271_acx_config_memory {
+struct wl1271_acx_ap_config_memory {
        struct acx_header header;
 
        u8 rx_mem_block_num;
@@ -802,6 +808,20 @@ struct wl1271_acx_config_memory {
        __le32 total_tx_descriptors;
 } __packed;
 
+struct wl1271_acx_sta_config_memory {
+       struct acx_header header;
+
+       u8 rx_mem_block_num;
+       u8 tx_min_mem_block_num;
+       u8 num_stations;
+       u8 num_ssid_profiles;
+       __le32 total_tx_descriptors;
+       u8 dyn_mem_enable;
+       u8 tx_free_req;
+       u8 rx_free_req;
+       u8 tx_min;
+} __packed;
+
 struct wl1271_acx_mem_map {
        struct acx_header header;
 
@@ -1051,6 +1071,59 @@ struct wl1271_acx_ht_information {
        u8 padding[3];
 } __packed;
 
+#define RX_BA_WIN_SIZE 8
+
+struct wl1271_acx_ba_session_policy {
+       struct acx_header header;
+       /*
+        * Specifies role Id, Range 0-7, 0xFF means ANY role.
+        * Future use. For now this field is irrelevant
+        */
+       u8 role_id;
+       /*
+        * Specifies Link Id, Range 0-31, 0xFF means ANY  Link Id.
+        * Not applicable if Role Id is set to ANY.
+        */
+       u8 link_id;
+
+       u8 tid;
+
+       u8 enable;
+
+       /* Window size in number of packets */
+       u16 win_size;
+
+       /*
+        * As initiator inactivity timeout in time units(TU) of 1024us.
+        * As receiver reserved
+        */
+       u16 inactivity_timeout;
+
+       /* Initiator = 1/Receiver = 0 */
+       u8 ba_direction;
+
+       u8 padding[3];
+} __packed;
+
+struct wl1271_acx_ba_receiver_setup {
+       struct acx_header header;
+
+       /* Specifies Link Id, Range 0-31, 0xFF means ANY  Link Id */
+       u8 link_id;
+
+       u8 tid;
+
+       u8 enable;
+
+       u8 padding[1];
+
+       /* Window size in number of packets */
+       u16 win_size;
+
+       /* BA session starting sequence number.  RANGE 0-FFF */
+       u16 ssn;
+} __packed;
+
 struct wl1271_acx_fw_tsf_information {
        struct acx_header header;
 
@@ -1062,6 +1135,33 @@ struct wl1271_acx_fw_tsf_information {
        u8 padding[3];
 } __packed;
 
+struct wl1271_acx_max_tx_retry {
+       struct acx_header header;
+
+       /*
+        * The number of frame transmission failures before
+        * the aging event is issued.
+        */
+       __le16 max_tx_retry;
+       u8 padding_1[2];
+} __packed;
+
+struct wl1271_acx_config_ps {
+       struct acx_header header;
+
+       u8 exit_retries;
+       u8 enter_retries;
+       u8 padding[2];
+       __le32 null_data_rate;
+} __packed;
+
+struct wl1271_acx_inconnection_sta {
+       struct acx_header header;
+
+       u8 addr[ETH_ALEN];
+       u8 padding1[2];
+} __packed;
+
 enum {
        ACX_WAKE_UP_CONDITIONS      = 0x0002,
        ACX_MEM_CFG                 = 0x0003,
@@ -1113,22 +1213,24 @@ enum {
        ACX_RSSI_SNR_WEIGHTS        = 0x0052,
        ACX_KEEP_ALIVE_MODE         = 0x0053,
        ACX_SET_KEEP_ALIVE_CONFIG   = 0x0054,
-       ACX_BA_SESSION_RESPONDER_POLICY = 0x0055,
-       ACX_BA_SESSION_INITIATOR_POLICY = 0x0056,
+       ACX_BA_SESSION_POLICY_CFG   = 0x0055,
+       ACX_BA_SESSION_RX_SETUP     = 0x0056,
        ACX_PEER_HT_CAP             = 0x0057,
        ACX_HT_BSS_OPERATION        = 0x0058,
        ACX_COEX_ACTIVITY           = 0x0059,
        ACX_SET_DCO_ITRIM_PARAMS    = 0x0061,
+       ACX_GEN_FW_CMD              = 0x0070,
+       ACX_HOST_IF_CFG_BITMAP      = 0x0071,
+       ACX_MAX_TX_FAILURE          = 0x0072,
+       ACX_UPDATE_INCONNECTION_STA_LIST = 0x0073,
        DOT11_RX_MSDU_LIFE_TIME     = 0x1004,
        DOT11_CUR_TX_PWR            = 0x100D,
        DOT11_RX_DOT11_MODE         = 0x1012,
        DOT11_RTS_THRESHOLD         = 0x1013,
        DOT11_GROUP_ADDRESS_TBL     = 0x1014,
        ACX_PM_CONFIG               = 0x1016,
-
-       MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL,
-
-       MAX_IE = 0xFFFF
+       ACX_CONFIG_PS               = 0x1017,
+       ACX_CONFIG_HANGOVER         = 0x1018,
 };
 
 
@@ -1160,7 +1262,9 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
 int wl1271_acx_cts_protect(struct wl1271 *wl,
                           enum acx_ctsprotect_type ctsprotect);
 int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
-int wl1271_acx_rate_policies(struct wl1271 *wl);
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl);
+int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
+                     u8 idx);
 int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
                      u8 aifsn, u16 txop);
 int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
@@ -1168,7 +1272,8 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
                       u32 apsd_conf0, u32 apsd_conf1);
 int wl1271_acx_frag_threshold(struct wl1271 *wl, u16 frag_threshold);
 int wl1271_acx_tx_config_options(struct wl1271 *wl);
-int wl1271_acx_mem_cfg(struct wl1271 *wl);
+int wl1271_acx_ap_mem_cfg(struct wl1271 *wl);
+int wl1271_acx_sta_mem_cfg(struct wl1271 *wl);
 int wl1271_acx_init_mem_config(struct wl1271 *wl);
 int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
 int wl1271_acx_smart_reflex(struct wl1271 *wl);
@@ -1185,6 +1290,14 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
                                    bool allow_ht_operation);
 int wl1271_acx_set_ht_information(struct wl1271 *wl,
                                   u16 ht_operation_mode);
+int wl1271_acx_set_ba_session(struct wl1271 *wl,
+                             enum ieee80211_back_parties direction,
+                             u8 tid_index, u8 policy);
+int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
+                                      bool enable);
 int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
+int wl1271_acx_max_tx_retry(struct wl1271 *wl);
+int wl1271_acx_config_ps(struct wl1271 *wl);
+int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
 
 #endif /* __WL1271_ACX_H__ */
index 4df04f84d7f106a71fd87741d2e857f48bf01cc0..1ffbad67d2d8475a94714f51d5c33248a39d71cb 100644 (file)
@@ -28,6 +28,7 @@
 #include "boot.h"
 #include "io.h"
 #include "event.h"
+#include "rx.h"
 
 static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
        [PART_DOWN] = {
@@ -100,6 +101,22 @@ static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
        wl1271_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
 }
 
+static void wl1271_parse_fw_ver(struct wl1271 *wl)
+{
+       int ret;
+
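+       /* parse the five dot-separated version fields after the 4-char prefix */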
+       ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u",
+                    &wl->chip.fw_ver[0], &wl->chip.fw_ver[1],
+                    &wl->chip.fw_ver[2], &wl->chip.fw_ver[3],
+                    &wl->chip.fw_ver[4]);
+
+       if (ret != 5) {
+               wl1271_warning("fw version incorrect value");
+               memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver));
+               return;
+       }
+}
+
 static void wl1271_boot_fw_version(struct wl1271 *wl)
 {
        struct wl1271_static_data static_data;
@@ -107,11 +124,13 @@ static void wl1271_boot_fw_version(struct wl1271 *wl)
        wl1271_read(wl, wl->cmd_box_addr, &static_data, sizeof(static_data),
                    false);
 
-       strncpy(wl->chip.fw_ver, static_data.fw_version,
-               sizeof(wl->chip.fw_ver));
+       strncpy(wl->chip.fw_ver_str, static_data.fw_version,
+               sizeof(wl->chip.fw_ver_str));
 
        /* make sure the string is NULL-terminated */
-       wl->chip.fw_ver[sizeof(wl->chip.fw_ver) - 1] = '\0';
+       wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';
+
+       wl1271_parse_fw_ver(wl);
 }
 
 static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
@@ -231,7 +250,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
         */
        if (wl->nvs_len == sizeof(struct wl1271_nvs_file) ||
            wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) {
-               if (wl->nvs->general_params.dual_mode_select)
+               /* for now 11a is unsupported in AP mode */
+               if (wl->bss_type != BSS_TYPE_AP_BSS &&
+                   wl->nvs->general_params.dual_mode_select)
                        wl->enable_11a = true;
        }
 
@@ -431,6 +452,9 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
                PSPOLL_DELIVERY_FAILURE_EVENT_ID |
                SOFT_GEMINI_SENSE_EVENT_ID;
 
+       if (wl->bss_type == BSS_TYPE_AP_BSS)
+               wl->event_mask |= STA_REMOVE_COMPLETE_EVENT_ID;
+
        ret = wl1271_event_unmask(wl);
        if (ret < 0) {
                wl1271_error("EVENT mask setting failed");
@@ -595,8 +619,7 @@ int wl1271_boot(struct wl1271 *wl)
        wl1271_boot_enable_interrupts(wl);
 
        /* set the wl1271 default filters */
-       wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
-       wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
+       wl1271_set_default_filters(wl);
 
        wl1271_event_mbox_config(wl);
 
index 0106628aa5a2057c52da81386f2a471d769dc6a5..97ffd7aa57a81e54509d5fcffab7a589ccbe2462 100644
@@ -36,6 +36,7 @@
 #include "wl12xx_80211.h"
 #include "cmd.h"
 #include "event.h"
+#include "tx.h"
 
 #define WL1271_CMD_FAST_POLL_COUNT       50
 
@@ -221,7 +222,7 @@ int wl1271_cmd_ext_radio_parms(struct wl1271 *wl)
  * Poll the mailbox event field until any of the bits in the mask is set or a
  * timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
  */
-static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
+static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask)
 {
        u32 events_vector, event;
        unsigned long timeout;
@@ -230,7 +231,8 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
 
        do {
                if (time_after(jiffies, timeout)) {
-                       ieee80211_queue_work(wl->hw, &wl->recovery_work);
+                       wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
+                                    (int)mask);
                        return -ETIMEDOUT;
                }
 
@@ -248,6 +250,19 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
        return 0;
 }
 
+static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
+{
+       int ret;
+
+       ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask);
+       if (ret != 0) {
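+               /* the event never arrived - trigger driver recovery */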
+               ieee80211_queue_work(wl->hw, &wl->recovery_work);
+               return ret;
+       }
+
+       return 0;
+}
+
 int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
 {
        struct wl1271_cmd_join *join;
@@ -271,6 +286,7 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
        join->rx_filter_options = cpu_to_le32(wl->rx_filter);
        join->bss_type = bss_type;
        join->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
+       join->supported_rate_set = cpu_to_le32(wl->rate_set);
 
        if (wl->band == IEEE80211_BAND_5GHZ)
                join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ;
@@ -288,6 +304,9 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
        wl->tx_security_last_seq = 0;
        wl->tx_security_seq = 0;
 
+       wl1271_debug(DEBUG_CMD, "cmd join: basic_rate_set=0x%x, rate_set=0x%x",
+               join->basic_rate_set, join->supported_rate_set);
+
        ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0);
        if (ret < 0) {
                wl1271_error("failed to initiate cmd join");
@@ -439,7 +458,7 @@ out:
        return ret;
 }
 
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send)
+int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
 {
        struct wl1271_cmd_ps_params *ps_params = NULL;
        int ret = 0;
@@ -453,10 +472,6 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send)
        }
 
        ps_params->ps_mode = ps_mode;
-       ps_params->send_null_data = send;
-       ps_params->retries = wl->conf.conn.psm_entry_nullfunc_retries;
-       ps_params->hang_over_period = wl->conf.conn.psm_entry_hangover_period;
-       ps_params->null_data_rate = cpu_to_le32(rates);
 
        ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
                              sizeof(*ps_params), 0);
@@ -490,8 +505,8 @@ int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
        cmd->len = cpu_to_le16(buf_len);
        cmd->template_type = template_id;
        cmd->enabled_rates = cpu_to_le32(rates);
-       cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit;
-       cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit;
+       cmd->short_retry_limit = wl->conf.tx.tmpl_short_retry_limit;
+       cmd->long_retry_limit = wl->conf.tx.tmpl_long_retry_limit;
        cmd->index = index;
 
        if (buf)
@@ -659,15 +674,15 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
 
        /* llc layer */
        memcpy(tmpl.llc_hdr, rfc1042_header, sizeof(rfc1042_header));
-       tmpl.llc_type = htons(ETH_P_ARP);
+       tmpl.llc_type = cpu_to_be16(ETH_P_ARP);
 
        /* arp header */
        arp_hdr = &tmpl.arp_hdr;
-       arp_hdr->ar_hrd = htons(ARPHRD_ETHER);
-       arp_hdr->ar_pro = htons(ETH_P_IP);
+       arp_hdr->ar_hrd = cpu_to_be16(ARPHRD_ETHER);
+       arp_hdr->ar_pro = cpu_to_be16(ETH_P_IP);
        arp_hdr->ar_hln = ETH_ALEN;
        arp_hdr->ar_pln = 4;
-       arp_hdr->ar_op = htons(ARPOP_REPLY);
+       arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);
 
        /* arp payload */
        memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN);
@@ -702,9 +717,9 @@ int wl1271_build_qos_null_data(struct wl1271 *wl)
                                       wl->basic_rate);
 }
 
-int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id)
+int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id)
 {
-       struct wl1271_cmd_set_keys *cmd;
+       struct wl1271_cmd_set_sta_keys *cmd;
        int ret = 0;
 
        wl1271_debug(DEBUG_CMD, "cmd set_default_wep_key %d", id);
@@ -731,11 +746,42 @@ out:
        return ret;
 }
 
-int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id)
+{
+       struct wl1271_cmd_set_ap_keys *cmd;
+       int ret = 0;
+
+       wl1271_debug(DEBUG_CMD, "cmd set_ap_default_wep_key %d", id);
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       cmd->hlid = WL1271_AP_BROADCAST_HLID;
+       cmd->key_id = id;
+       cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
+       cmd->key_action = cpu_to_le16(KEY_SET_ID);
+       cmd->key_type = KEY_WEP;
+
+       ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
+       if (ret < 0) {
+               wl1271_warning("cmd set_ap_default_wep_key failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(cmd);
+
+       return ret;
+}
+
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
                       u8 key_size, const u8 *key, const u8 *addr,
                       u32 tx_seq_32, u16 tx_seq_16)
 {
-       struct wl1271_cmd_set_keys *cmd;
+       struct wl1271_cmd_set_sta_keys *cmd;
        int ret = 0;
 
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -788,6 +834,67 @@ out:
        return ret;
 }
 
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+                       u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+                       u16 tx_seq_16)
+{
+       struct wl1271_cmd_set_ap_keys *cmd;
+       int ret = 0;
+       u8 lid_type;
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd)
+               return -ENOMEM;
+
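+       /* derive the link ID key type from the target HLID and key type */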
+       if (hlid == WL1271_AP_BROADCAST_HLID) {
+               if (key_type == KEY_WEP)
+                       lid_type = WEP_DEFAULT_LID_TYPE;
+               else
+                       lid_type = BROADCAST_LID_TYPE;
+       } else {
+               lid_type = UNICAST_LID_TYPE;
+       }
+
+       wl1271_debug(DEBUG_CRYPT, "ap key action: %d id: %d lid: %d type: %d"
+                    " hlid: %d", (int)action, (int)id, (int)lid_type,
+                    (int)key_type, (int)hlid);
+
+       cmd->lid_key_type = lid_type;
+       cmd->hlid = hlid;
+       cmd->key_action = cpu_to_le16(action);
+       cmd->key_size = key_size;
+       cmd->key_type = key_type;
+       cmd->key_id = id;
+       cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16);
+       cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32);
+
+       if (key_type == KEY_TKIP) {
+               /*
+                * We get the key in the following form:
+                * TKIP (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes)
+                * but the target is expecting:
+                * TKIP - RX MIC - TX MIC
+                */
+               memcpy(cmd->key, key, 16);
+               memcpy(cmd->key + 16, key + 24, 8);
+               memcpy(cmd->key + 24, key + 16, 8);
+       } else {
+               memcpy(cmd->key, key, key_size);
+       }
+
+       wl1271_dump(DEBUG_CRYPT, "TARGET AP KEY: ", cmd, sizeof(*cmd));
+
+       ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
+       if (ret < 0) {
+               wl1271_warning("could not set ap keys");
+               goto out;
+       }
+
+out:
+       kfree(cmd);
+       return ret;
+}
+
 int wl1271_cmd_disconnect(struct wl1271 *wl)
 {
        struct wl1271_cmd_disconnect *cmd;
@@ -850,3 +957,180 @@ out_free:
 out:
        return ret;
 }
+
+int wl1271_cmd_start_bss(struct wl1271 *wl)
+{
+       struct wl1271_cmd_bss_start *cmd;
+       struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+       int ret;
+
+       wl1271_debug(DEBUG_CMD, "cmd start bss");
+
+       /*
+        * FIXME: We currently do not support hidden SSID. The real SSID
+        * should be fetched from mac80211 first.
+        */
+       if (wl->ssid_len == 0) {
+               wl1271_warning("Hidden SSID currently not supported for AP");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       memcpy(cmd->bssid, bss_conf->bssid, ETH_ALEN);
+
+       cmd->aging_period = cpu_to_le16(WL1271_AP_DEF_INACTIV_SEC);
+       cmd->bss_index = WL1271_AP_BSS_INDEX;
+       cmd->global_hlid = WL1271_AP_GLOBAL_HLID;
+       cmd->broadcast_hlid = WL1271_AP_BROADCAST_HLID;
+       cmd->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
+       cmd->beacon_interval = cpu_to_le16(wl->beacon_int);
+       cmd->dtim_interval = bss_conf->dtim_period;
+       cmd->beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
+       cmd->channel = wl->channel;
+       cmd->ssid_len = wl->ssid_len;
+       cmd->ssid_type = SSID_TYPE_PUBLIC;
+       memcpy(cmd->ssid, wl->ssid, wl->ssid_len);
+
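+       /* map the mac80211 band to the FW radio band value */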
+       switch (wl->band) {
+       case IEEE80211_BAND_2GHZ:
+               cmd->band = RADIO_BAND_2_4GHZ;
+               break;
+       case IEEE80211_BAND_5GHZ:
+               cmd->band = RADIO_BAND_5GHZ;
+               break;
+       default:
+               wl1271_warning("bss start - unknown band: %d", (int)wl->band);
+               cmd->band = RADIO_BAND_2_4GHZ;
+               break;
+       }
+
+       ret = wl1271_cmd_send(wl, CMD_BSS_START, cmd, sizeof(*cmd), 0);
+       if (ret < 0) {
+               wl1271_error("failed to initiate cmd start bss");
+               goto out_free;
+       }
+
+out_free:
+       kfree(cmd);
+
+out:
+       return ret;
+}
+
+int wl1271_cmd_stop_bss(struct wl1271 *wl)
+{
+       struct wl1271_cmd_bss_start *cmd;
+       int ret;
+
+       wl1271_debug(DEBUG_CMD, "cmd stop bss");
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       cmd->bss_index = WL1271_AP_BSS_INDEX;
+
+       ret = wl1271_cmd_send(wl, CMD_BSS_STOP, cmd, sizeof(*cmd), 0);
+       if (ret < 0) {
+               wl1271_error("failed to initiate cmd stop bss");
+               goto out_free;
+       }
+
+out_free:
+       kfree(cmd);
+
+out:
+       return ret;
+}
+
+int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
+{
+       struct wl1271_cmd_add_sta *cmd;
+       int ret;
+
+       wl1271_debug(DEBUG_CMD, "cmd add sta %d", (int)hlid);
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       /* currently we don't support UAPSD */
+       cmd->sp_len = 0;
+
+       memcpy(cmd->addr, sta->addr, ETH_ALEN);
+       cmd->bss_index = WL1271_AP_BSS_INDEX;
+       cmd->aid = sta->aid;
+       cmd->hlid = hlid;
+
+       /*
+        * FIXME: Does STA support QOS? We need to propagate this info from
+        * hostapd. Currently not that important since this is only used for
+        * sending the correct flavor of null-data packet in response to a
+        * trigger.
+        */
+       cmd->wmm = 0;
+
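+       /* convert the station's supported rates to the FW rate bitmap */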
+       cmd->supported_rates = cpu_to_le32(wl1271_tx_enabled_rates_get(wl,
+                                               sta->supp_rates[wl->band]));
+
+       wl1271_debug(DEBUG_CMD, "new sta rates: 0x%x", cmd->supported_rates);
+
+       ret = wl1271_cmd_send(wl, CMD_ADD_STA, cmd, sizeof(*cmd), 0);
+       if (ret < 0) {
+               wl1271_error("failed to initiate cmd add sta");
+               goto out_free;
+       }
+
+out_free:
+       kfree(cmd);
+
+out:
+       return ret;
+}
+
+int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid)
+{
+       struct wl1271_cmd_remove_sta *cmd;
+       int ret;
+
+       wl1271_debug(DEBUG_CMD, "cmd remove sta %d", (int)hlid);
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       cmd->hlid = hlid;
+       /* We never send a deauth; mac80211 is in charge of this */
+       cmd->reason_opcode = 0;
+       cmd->send_deauth_flag = 0;
+
+       ret = wl1271_cmd_send(wl, CMD_REMOVE_STA, cmd, sizeof(*cmd), 0);
+       if (ret < 0) {
+               wl1271_error("failed to initiate cmd remove sta");
+               goto out_free;
+       }
+
+       /*
+        * We are ok with a timeout here. The event is sometimes not sent
+        * due to a firmware bug.
+        */
+       wl1271_cmd_wait_for_event_or_timeout(wl, STA_REMOVE_COMPLETE_EVENT_ID);
+
+out_free:
+       kfree(cmd);
+
+out:
+       return ret;
+}
index 2a1d9db7ceb88e661d527f3d721b34dfb93189d1..54c12e71417e9ce50f60c6213bae219784371de6 100644
@@ -39,7 +39,7 @@ int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
 int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
 int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
 int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send);
+int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
 int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
                           size_t len);
 int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
@@ -54,12 +54,20 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
 int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr);
 int wl1271_build_qos_null_data(struct wl1271 *wl);
 int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
-int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id);
-int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
-                      u8 key_size, const u8 *key, const u8 *addr,
-                      u32 tx_seq_32, u16 tx_seq_16);
+int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id);
+int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id);
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+                          u8 key_size, const u8 *key, const u8 *addr,
+                          u32 tx_seq_32, u16 tx_seq_16);
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+                         u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+                         u16 tx_seq_16);
 int wl1271_cmd_disconnect(struct wl1271 *wl);
 int wl1271_cmd_set_sta_state(struct wl1271 *wl);
+int wl1271_cmd_start_bss(struct wl1271 *wl);
+int wl1271_cmd_stop_bss(struct wl1271 *wl);
+int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid);
+int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid);
 
 enum wl1271_commands {
        CMD_INTERROGATE     = 1,    /*use this to read information elements*/
@@ -98,6 +106,12 @@ enum wl1271_commands {
        CMD_STOP_PERIODIC_SCAN       = 51,
        CMD_SET_STA_STATE            = 52,
 
+       /* AP mode commands */
+       CMD_BSS_START                = 60,
+       CMD_BSS_STOP                 = 61,
+       CMD_ADD_STA                  = 62,
+       CMD_REMOVE_STA               = 63,
+
        NUM_COMMANDS,
        MAX_COMMAND_ID = 0xFFFF,
 };
@@ -126,6 +140,14 @@ enum cmd_templ {
                                  * For CTS-to-self (FastCTS) mechanism
                                  * for BT/WLAN coexistence (SoftGemini). */
        CMD_TEMPL_ARP_RSP,
+       CMD_TEMPL_LINK_MEASUREMENT_REPORT,
+
+       /* AP-mode specific */
+       CMD_TEMPL_AP_BEACON = 13,
+       CMD_TEMPL_AP_PROBE_RESPONSE,
+       CMD_TEMPL_AP_ARP_RSP,
+       CMD_TEMPL_DEAUTH_AP,
+
        CMD_TEMPL_MAX = 0xff
 };
 
@@ -195,6 +217,7 @@ struct wl1271_cmd_join {
         * ACK or CTS frames).
         */
        __le32 basic_rate_set;
+       __le32 supported_rate_set;
        u8 dtim_interval;
        /*
         * bits 0-2: This bitwise field specifies the type
@@ -257,20 +280,11 @@ struct wl1271_cmd_ps_params {
        struct wl1271_cmd_header header;
 
        u8 ps_mode; /* STATION_* */
-       u8 send_null_data; /* Do we have to send NULL data packet ? */
-       u8 retries; /* Number of retires for the initial NULL data packet */
-
-        /*
-         * TUs during which the target stays awake after switching
-         * to power save mode.
-         */
-       u8 hang_over_period;
-       __le32 null_data_rate;
+       u8 padding[3];
 } __packed;
 
 /* HW encryption keys */
 #define NUM_ACCESS_CATEGORIES_COPY 4
-#define MAX_KEY_SIZE 32
 
 enum wl1271_cmd_key_action {
        KEY_ADD_OR_REPLACE = 1,
@@ -289,7 +303,7 @@ enum wl1271_cmd_key_type {
 
 /* FIXME: Add description for key-types */
 
-struct wl1271_cmd_set_keys {
+struct wl1271_cmd_set_sta_keys {
        struct wl1271_cmd_header header;
 
        /* Ignored for default WEP key */
@@ -318,6 +332,57 @@ struct wl1271_cmd_set_keys {
        __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
 } __packed;
 
+enum wl1271_cmd_lid_key_type {
+       UNICAST_LID_TYPE     = 0,
+       BROADCAST_LID_TYPE   = 1,
+       WEP_DEFAULT_LID_TYPE = 2
+};
+
+struct wl1271_cmd_set_ap_keys {
+       struct wl1271_cmd_header header;
+
+       /*
+        * Indicates whether the HLID is a unicast key set
+        * or broadcast key set. A special value 0xFF is
+        * used to indicate that the HLID is on WEP-default
+        * (multi-hlids); values are of type wl1271_cmd_lid_key_type.
+        */
+       u8 hlid;
+
+       /*
+        * In WEP-default network (hlid == 0xFF) used to
+        * indicate which network STA/IBSS/AP role should be
+        * changed
+        */
+       u8 lid_key_type;
+
+       /*
+        * Key ID - For TKIP and AES key types, this field
+        * indicates the value that should be inserted into
+        * the KeyID field of frames transmitted using this
+        * key entry. For broadcast keys the index is used as a
+        * marker for the TX/RX key.
+        * For WEP default network (HLID=0xFF), this field
+        * indicates the ID of the key to add or remove.
+        */
+       u8 key_id;
+       u8 reserved_1;
+
+       /* key_action_e */
+       __le16 key_action;
+
+       /* key size in bytes */
+       u8 key_size;
+
+       /* key_type_e */
+       u8 key_type;
+
+       /* This field holds the security key data to add to the STA table */
+       u8 key[MAX_KEY_SIZE];
+       __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
+       __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
+} __packed;
+
 struct wl1271_cmd_test_header {
        u8 id;
        u8 padding[3];
@@ -412,4 +477,68 @@ struct wl1271_cmd_set_sta_state {
        u8 padding[3];
 } __packed;
 
+enum wl1271_ssid_type {
+       SSID_TYPE_PUBLIC = 0,
+       SSID_TYPE_HIDDEN = 1
+};
+
+struct wl1271_cmd_bss_start {
+       struct wl1271_cmd_header header;
+
+       /* wl1271_ssid_type */
+       u8 ssid_type;
+       u8 ssid_len;
+       u8 ssid[IW_ESSID_MAX_SIZE];
+       u8 padding_1[2];
+
+       /* Basic rate set */
+       __le32 basic_rate_set;
+       /* Aging period in seconds */
+       __le16 aging_period;
+
+       /*
+        * This field specifies the time between target beacon
+        * transmission times (TBTTs), in time units (TUs).
+        * Valid values are 1 to 1024.
+        */
+       __le16 beacon_interval;
+       u8 bssid[ETH_ALEN];
+       u8 bss_index;
+       /* Radio band */
+       u8 band;
+       u8 channel;
+       /* The host link id for the AP's global queue */
+       u8 global_hlid;
+       /* The host link id for the AP's broadcast queue */
+       u8 broadcast_hlid;
+       /* DTIM count */
+       u8 dtim_interval;
+       /* Beacon expiry time in ms */
+       u8 beacon_expiry;
+       u8 padding_2[3];
+} __packed;
+
+struct wl1271_cmd_add_sta {
+       struct wl1271_cmd_header header;
+
+       u8 addr[ETH_ALEN];
+       u8 hlid;
+       u8 aid;
+       u8 psd_type[NUM_ACCESS_CATEGORIES_COPY];
+       __le32 supported_rates;
+       u8 bss_index;
+       u8 sp_len;
+       u8 wmm;
+       u8 padding1;
+} __packed;
+
+struct wl1271_cmd_remove_sta {
+       struct wl1271_cmd_header header;
+
+       u8 hlid;
+       u8 reason_opcode;
+       u8 send_deauth_flag;
+       u8 padding1;
+} __packed;
+
 #endif /* __WL1271_CMD_H__ */
index a16b3616e4308fa95a1ebe37419e46a97db12a63..856a8a2fff4f5fa7b35fcd295b1367d427b95245 100644
@@ -496,6 +496,26 @@ struct conf_rx_settings {
                                        CONF_HW_BIT_RATE_2MBPS)
 #define CONF_TX_RATE_RETRY_LIMIT       10
 
+/*
+ * Rates supported for data packets when operating as AP. Note the absence
+ * of the 22Mbps rate. There is a FW limitation of 12 rates so we must drop
+ * one. The rate dropped is not mandatory under any operating mode.
+ */
+#define CONF_TX_AP_ENABLED_RATES       (CONF_HW_BIT_RATE_1MBPS | \
+       CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS |      \
+       CONF_HW_BIT_RATE_6MBPS | CONF_HW_BIT_RATE_9MBPS |        \
+       CONF_HW_BIT_RATE_11MBPS | CONF_HW_BIT_RATE_12MBPS |      \
+       CONF_HW_BIT_RATE_18MBPS | CONF_HW_BIT_RATE_24MBPS |      \
+       CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS |      \
+       CONF_HW_BIT_RATE_54MBPS)
+
+/*
+ * Default rates for management traffic when operating in AP mode. This
+ * should be configured according to the basic rate set of the AP
+ */
+#define CONF_TX_AP_DEFAULT_MGMT_RATES  (CONF_HW_BIT_RATE_1MBPS | \
+       CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS)
+
 struct conf_tx_rate_class {
 
        /*
@@ -636,9 +656,9 @@ struct conf_tx_settings {
 
        /*
         * Configuration for rate classes for TX (currently only one
-        * rate class supported.)
+        * rate class supported). Used in non-AP mode.
         */
-       struct conf_tx_rate_class rc_conf;
+       struct conf_tx_rate_class sta_rc_conf;
 
        /*
         * Configuration for access categories for TX rate control.
@@ -646,6 +666,28 @@ struct conf_tx_settings {
        u8 ac_conf_count;
        struct conf_tx_ac_category ac_conf[CONF_TX_MAX_AC_COUNT];
 
+       /*
+        * Configuration for rate classes in AP-mode. These rate classes
+        * are for the AC TX queues
+        */
+       struct conf_tx_rate_class ap_rc_conf[CONF_TX_MAX_AC_COUNT];
+
+       /*
+        * Management TX rate class for AP-mode.
+        */
+       struct conf_tx_rate_class ap_mgmt_conf;
+
+       /*
+        * Broadcast TX rate class for AP-mode.
+        */
+       struct conf_tx_rate_class ap_bcst_conf;
+
+       /*
+        * AP-mode - allow this number of TX retries to a station before an
+        * event is triggered from FW.
+        */
+       u16 ap_max_tx_retries;
+
        /*
         * Configuration for TID parameters.
         */
@@ -687,6 +729,12 @@ struct conf_tx_settings {
         * Range: CONF_HW_BIT_RATE_* bit mask
         */
        u32 basic_rate_5;
+
+       /*
+        * TX retry limits for templates
+        */
+       u8 tmpl_short_retry_limit;
+       u8 tmpl_long_retry_limit;
 };
 
 enum {
@@ -911,6 +959,14 @@ struct conf_conn_settings {
         */
        u8 psm_entry_retries;
 
+       /*
+        * Specifies the maximum number of times to try PSM exit if it fails
+        * (if sending the appropriate null-func message fails).
+        *
+        * Range 0 - 255
+        */
+       u8 psm_exit_retries;
+
        /*
         * Specifies the maximum number of times to try transmit the PSM entry
         * null-func frame for each PSM entry attempt
@@ -1036,30 +1092,30 @@ struct conf_scan_settings {
        /*
         * The minimum time to wait on each channel for active scans
         *
-        * Range: 0 - 65536 tu
+        * Range: u32 tu/1000
         */
-       u16 min_dwell_time_active;
+       u32 min_dwell_time_active;
 
        /*
         * The maximum time to wait on each channel for active scans
         *
-        * Range: 0 - 65536 tu
+        * Range: u32 tu/1000
         */
-       u16 max_dwell_time_active;
+       u32 max_dwell_time_active;
 
        /*
-        * The maximum time to wait on each channel for passive scans
+        * The minimum time to wait on each channel for passive scans
         *
-        * Range: 0 - 65536 tu
+        * Range: u32 tu/1000
         */
-       u16 min_dwell_time_passive;
+       u32 min_dwell_time_passive;
 
        /*
         * The maximum time to wait on each channel for passive scans
         *
-        * Range: 0 - 65536 tu
+        * Range: u32 tu/1000
         */
-       u16 max_dwell_time_passive;
+       u32 max_dwell_time_passive;
 
        /*
         * Number of probe requests to transmit on each active scan channel
@@ -1090,6 +1146,51 @@ struct conf_rf_settings {
        u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
 };
 
+struct conf_ht_setting {
+       u16 tx_ba_win_size;
+       u16 inactivity_timeout;
+};
+
+struct conf_memory_settings {
+       /* Number of stations supported in IBSS mode */
+       u8 num_stations;
+
+       /* Number of ssid profiles used in IBSS mode */
+       u8 ssid_profiles;
+
+       /* Number of memory buffers allocated to rx pool */
+       u8 rx_block_num;
+
+       /* Minimum number of blocks allocated to tx pool */
+       u8 tx_min_block_num;
+
+       /* Disable/Enable dynamic memory */
+       u8 dynamic_memory;
+
+       /*
+        * Minimum required free tx memory blocks in order to ensure optimum
+        * performance
+        *
+        * Range: 0-120
+        */
+       u8 min_req_tx_blocks;
+
+       /*
+        * Minimum required free rx memory blocks in order to ensure optimum
+        * performance
+        *
+        * Range: 0-120
+        */
+       u8 min_req_rx_blocks;
+
+       /*
+        * Minimum number of mem blocks (free+used) guaranteed for TX
+        *
+        * Range: 0-120
+        */
+       u8 tx_min;
+};
+
 struct conf_drv_settings {
        struct conf_sg_settings sg;
        struct conf_rx_settings rx;
@@ -1100,6 +1201,8 @@ struct conf_drv_settings {
        struct conf_roam_trigger_settings roam_trigger;
        struct conf_scan_settings scan;
        struct conf_rf_settings rf;
+       struct conf_ht_setting ht;
+       struct conf_memory_settings mem;
 };
 
 #endif
index ec6077760157bc7e6ddc2a6d4dffbcce99a371f4..bebfa28a171abbe598263bfd4af74afb94594382 100644
@@ -261,27 +261,25 @@ static ssize_t gpio_power_write(struct file *file,
        unsigned long value;
        int ret;
 
-       mutex_lock(&wl->mutex);
-
        len = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, len)) {
-               ret = -EFAULT;
-               goto out;
+               return -EFAULT;
        }
        buf[len] = '\0';
 
        ret = strict_strtoul(buf, 0, &value);
        if (ret < 0) {
                wl1271_warning("illegal value in gpio_power");
-               goto out;
+               return -EINVAL;
        }
 
+       mutex_lock(&wl->mutex);
+
        if (value)
                wl1271_power_on(wl);
        else
                wl1271_power_off(wl);
 
-out:
        mutex_unlock(&wl->mutex);
        return count;
 }
@@ -293,12 +291,13 @@ static const struct file_operations gpio_power_ops = {
        .llseek = default_llseek,
 };
 
-static int wl1271_debugfs_add_files(struct wl1271 *wl)
+static int wl1271_debugfs_add_files(struct wl1271 *wl,
+                                    struct dentry *rootdir)
 {
        int ret = 0;
        struct dentry *entry, *stats;
 
-       stats = debugfs_create_dir("fw-statistics", wl->rootdir);
+       stats = debugfs_create_dir("fw-statistics", rootdir);
        if (!stats || IS_ERR(stats)) {
                entry = stats;
                goto err;
@@ -395,16 +394,11 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl)
        DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data);
        DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
 
-       DEBUGFS_ADD(tx_queue_len, wl->rootdir);
-       DEBUGFS_ADD(retry_count, wl->rootdir);
-       DEBUGFS_ADD(excessive_retries, wl->rootdir);
-
-       DEBUGFS_ADD(gpio_power, wl->rootdir);
+       DEBUGFS_ADD(tx_queue_len, rootdir);
+       DEBUGFS_ADD(retry_count, rootdir);
+       DEBUGFS_ADD(excessive_retries, rootdir);
 
-       entry = debugfs_create_x32("debug_level", 0600, wl->rootdir,
-                                  &wl12xx_debug_level);
-       if (!entry || IS_ERR(entry))
-               goto err;
+       DEBUGFS_ADD(gpio_power, rootdir);
 
        return 0;
 
@@ -419,7 +413,7 @@ err:
 
 void wl1271_debugfs_reset(struct wl1271 *wl)
 {
-       if (!wl->rootdir)
+       if (!wl->stats.fw_stats)
                return;
 
        memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
@@ -430,13 +424,13 @@ void wl1271_debugfs_reset(struct wl1271 *wl)
 int wl1271_debugfs_init(struct wl1271 *wl)
 {
        int ret;
+       struct dentry *rootdir;
 
-       wl->rootdir = debugfs_create_dir(KBUILD_MODNAME,
-                                        wl->hw->wiphy->debugfsdir);
+       rootdir = debugfs_create_dir(KBUILD_MODNAME,
+                                    wl->hw->wiphy->debugfsdir);
 
-       if (IS_ERR(wl->rootdir)) {
-               ret = PTR_ERR(wl->rootdir);
-               wl->rootdir = NULL;
+       if (IS_ERR(rootdir)) {
+               ret = PTR_ERR(rootdir);
                goto err;
        }
 
@@ -450,7 +444,7 @@ int wl1271_debugfs_init(struct wl1271 *wl)
 
        wl->stats.fw_stats_update = jiffies;
 
-       ret = wl1271_debugfs_add_files(wl);
+       ret = wl1271_debugfs_add_files(wl, rootdir);
 
        if (ret < 0)
                goto err_file;
@@ -462,8 +456,7 @@ err_file:
        wl->stats.fw_stats = NULL;
 
 err_fw:
-       debugfs_remove_recursive(wl->rootdir);
-       wl->rootdir = NULL;
+       debugfs_remove_recursive(rootdir);
 
 err:
        return ret;
@@ -473,8 +466,4 @@ void wl1271_debugfs_exit(struct wl1271 *wl)
 {
        kfree(wl->stats.fw_stats);
        wl->stats.fw_stats = NULL;
-
-       debugfs_remove_recursive(wl->rootdir);
-       wl->rootdir = NULL;
-
 }
index f9146f5242fb93dce4e4e0b8398f834043fa1744..1b170c5cc595f22dde0b1c494b7276059e278960 100644
@@ -135,20 +135,6 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
                /* go to extremely low power mode */
                wl1271_ps_elp_sleep(wl);
                break;
-       case EVENT_EXIT_POWER_SAVE_FAIL:
-               wl1271_debug(DEBUG_PSM, "PSM exit failed");
-
-               if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
-                       wl->psm_entry_retry = 0;
-                       break;
-               }
-
-               /* make sure the firmware goes to active mode - the frame to
-                  be sent next will indicate to the AP, that we are active. */
-               ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
-                                        wl->basic_rate, false);
-               break;
-       case EVENT_EXIT_POWER_SAVE_SUCCESS:
        default:
                break;
        }
@@ -186,6 +172,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
        int ret;
        u32 vector;
        bool beacon_loss = false;
+       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
 
        wl1271_event_mbox_dump(mbox);
 
@@ -218,21 +205,21 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
         * BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
         *
         */
-       if (vector & BSS_LOSE_EVENT_ID) {
+       if ((vector & BSS_LOSE_EVENT_ID) && !is_ap) {
                wl1271_info("Beacon loss detected.");
 
                /* indicate to the stack, that beacons have been lost */
                beacon_loss = true;
        }
 
-       if (vector & PS_REPORT_EVENT_ID) {
+       if ((vector & PS_REPORT_EVENT_ID) && !is_ap) {
                wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
                ret = wl1271_event_ps_report(wl, mbox, &beacon_loss);
                if (ret < 0)
                        return ret;
        }
 
-       if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID)
+       if ((vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) && !is_ap)
                wl1271_event_pspoll_delivery_fail(wl);
 
        if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
index 6cce0143adb581ca2d0f6b2577d70fd494e64f48..0e80886f3031fe740fa74b1ff4faaa4294f4e387 100644
@@ -59,6 +59,7 @@ enum {
        BSS_LOSE_EVENT_ID                        = BIT(18),
        REGAINED_BSS_EVENT_ID                    = BIT(19),
        ROAMING_TRIGGER_MAX_TX_RETRY_EVENT_ID    = BIT(20),
+       STA_REMOVE_COMPLETE_EVENT_ID             = BIT(21), /* AP */
        SOFT_GEMINI_SENSE_EVENT_ID               = BIT(22),
        SOFT_GEMINI_PREDICTION_EVENT_ID          = BIT(23),
        SOFT_GEMINI_AVALANCHE_EVENT_ID           = BIT(24),
@@ -74,8 +75,6 @@ enum {
 enum {
        EVENT_ENTER_POWER_SAVE_FAIL = 0,
        EVENT_ENTER_POWER_SAVE_SUCCESS,
-       EVENT_EXIT_POWER_SAVE_FAIL,
-       EVENT_EXIT_POWER_SAVE_SUCCESS,
 };
 
 struct event_debug_report {
@@ -115,7 +114,12 @@ struct event_mailbox {
        u8 scheduled_scan_status;
        u8 ps_status;
 
-       u8 reserved_5[29];
+       /* AP FW only */
+       u8 hlid_removed;
+       __le16 sta_aging_status;
+       __le16 sta_tx_retry_exceeded;
+
+       u8 reserved_5[24];
 } __packed;
 
 int wl1271_event_unmask(struct wl1271 *wl);
index 785a5304bfc4435c592791589b19b504978b32dc..6072fe457135ab4a4e4b2715d8ddf499491fe73b 100644
 #include "acx.h"
 #include "cmd.h"
 #include "reg.h"
+#include "tx.h"
 
-static int wl1271_init_hwenc_config(struct wl1271 *wl)
-{
-       int ret;
-
-       ret = wl1271_acx_feature_cfg(wl);
-       if (ret < 0) {
-               wl1271_warning("couldn't set feature config");
-               return ret;
-       }
-
-       ret = wl1271_cmd_set_default_wep_key(wl, wl->default_key);
-       if (ret < 0) {
-               wl1271_warning("couldn't set default key");
-               return ret;
-       }
-
-       return 0;
-}
-
-int wl1271_init_templates_config(struct wl1271 *wl)
+int wl1271_sta_init_templates_config(struct wl1271 *wl)
 {
        int ret, i;
 
@@ -118,6 +100,132 @@ int wl1271_init_templates_config(struct wl1271 *wl)
        return 0;
 }
 
+static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
+{
+       struct wl12xx_disconn_template *tmpl;
+       int ret;
+
+       tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+       if (!tmpl) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+                                            IEEE80211_STYPE_DEAUTH);
+
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP,
+                                     tmpl, sizeof(*tmpl), 0,
+                                     wl1271_tx_min_rate_get(wl));
+
+out:
+       kfree(tmpl);
+       return ret;
+}
+
+static int wl1271_ap_init_null_template(struct wl1271 *wl)
+{
+       struct ieee80211_hdr_3addr *nullfunc;
+       int ret;
+
+       nullfunc = kzalloc(sizeof(*nullfunc), GFP_KERNEL);
+       if (!nullfunc) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+                                             IEEE80211_STYPE_NULLFUNC |
+                                             IEEE80211_FCTL_FROMDS);
+
+       /* nullfunc->addr1 is filled by FW */
+
+       memcpy(nullfunc->addr2, wl->mac_addr, ETH_ALEN);
+       memcpy(nullfunc->addr3, wl->mac_addr, ETH_ALEN);
+
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc,
+                                     sizeof(*nullfunc), 0,
+                                     wl1271_tx_min_rate_get(wl));
+
+out:
+       kfree(nullfunc);
+       return ret;
+}
+
+static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
+{
+       struct ieee80211_qos_hdr *qosnull;
+       int ret;
+
+       qosnull = kzalloc(sizeof(*qosnull), GFP_KERNEL);
+       if (!qosnull) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       qosnull->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+                                            IEEE80211_STYPE_QOS_NULLFUNC |
+                                            IEEE80211_FCTL_FROMDS);
+
+       /* qosnull->addr1 is filled by FW */
+
+       memcpy(qosnull->addr2, wl->mac_addr, ETH_ALEN);
+       memcpy(qosnull->addr3, wl->mac_addr, ETH_ALEN);
+
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull,
+                                     sizeof(*qosnull), 0,
+                                     wl1271_tx_min_rate_get(wl));
+
+out:
+       kfree(qosnull);
+       return ret;
+}
+
+static int wl1271_ap_init_templates_config(struct wl1271 *wl)
+{
+       int ret;
+
+       /*
+        * Put very large empty placeholders for all templates. These
+        * reserve memory for later.
+        */
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
+                                     sizeof
+                                     (struct wl12xx_probe_resp_template),
+                                     0, WL1271_RATE_AUTOMATIC);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
+                                     sizeof
+                                     (struct wl12xx_beacon_template),
+                                     0, WL1271_RATE_AUTOMATIC);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
+                                     sizeof
+                                     (struct wl12xx_disconn_template),
+                                     0, WL1271_RATE_AUTOMATIC);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
+                                     sizeof(struct wl12xx_null_data_template),
+                                     0, WL1271_RATE_AUTOMATIC);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
+                                     sizeof
+                                     (struct wl12xx_qos_null_data_template),
+                                     0, WL1271_RATE_AUTOMATIC);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
 static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter)
 {
        int ret;
@@ -145,10 +253,6 @@ int wl1271_init_phy_config(struct wl1271 *wl)
        if (ret < 0)
                return ret;
 
-       ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
-       if (ret < 0)
-               return ret;
-
        ret = wl1271_acx_service_period_timeout(wl);
        if (ret < 0)
                return ret;
@@ -213,11 +317,199 @@ static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
        return 0;
 }
 
+static int wl1271_sta_hw_init(struct wl1271 *wl)
+{
+       int ret;
+
+       ret = wl1271_cmd_ext_radio_parms(wl);
+       if (ret < 0)
+               return ret;
+
+       /* PS config */
+       ret = wl1271_acx_config_ps(wl);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_sta_init_templates_config(wl);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
+       if (ret < 0)
+               return ret;
+
+       /* Initialize connection monitoring thresholds */
+       ret = wl1271_acx_conn_monit_params(wl, false);
+       if (ret < 0)
+               return ret;
+
+       /* Beacon filtering */
+       ret = wl1271_init_beacon_filter(wl);
+       if (ret < 0)
+               return ret;
+
+       /* Bluetooth WLAN coexistence */
+       ret = wl1271_init_pta(wl);
+       if (ret < 0)
+               return ret;
+
+       /* Beacons and broadcast settings */
+       ret = wl1271_init_beacon_broadcast(wl);
+       if (ret < 0)
+               return ret;
+
+       /* Configure for ELP power saving */
+       ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
+       if (ret < 0)
+               return ret;
+
+       /* Configure rssi/snr averaging weights */
+       ret = wl1271_acx_rssi_snr_avg_weights(wl);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_acx_sta_rate_policies(wl);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_acx_sta_mem_cfg(wl);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl)
+{
+       int ret, i;
+
+       ret = wl1271_cmd_set_sta_default_wep_key(wl, wl->default_key);
+       if (ret < 0) {
+               wl1271_warning("couldn't set default key");
+               return ret;
+       }
+
+       /* disable all keep-alive templates */
+       for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
+               ret = wl1271_acx_keep_alive_config(wl, i,
+                                                  ACX_KEEP_ALIVE_TPL_INVALID);
+               if (ret < 0)
+                       return ret;
+       }
+
+       /* disable the keep-alive feature */
+       ret = wl1271_acx_keep_alive_mode(wl, false);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static int wl1271_ap_hw_init(struct wl1271 *wl)
+{
+       int ret, i;
+
+       ret = wl1271_ap_init_templates_config(wl);
+       if (ret < 0)
+               return ret;
+
+       /* Configure for power always on */
+       ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+       if (ret < 0)
+               return ret;
+
+       /* Configure initial TX rate classes */
+       for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
+               ret = wl1271_acx_ap_rate_policy(wl,
+                               &wl->conf.tx.ap_rc_conf[i], i);
+               if (ret < 0)
+                       return ret;
+       }
+
+       ret = wl1271_acx_ap_rate_policy(wl,
+                                       &wl->conf.tx.ap_mgmt_conf,
+                                       ACX_TX_AP_MODE_MGMT_RATE);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_acx_ap_rate_policy(wl,
+                                       &wl->conf.tx.ap_bcst_conf,
+                                       ACX_TX_AP_MODE_BCST_RATE);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_acx_max_tx_retry(wl);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_acx_ap_mem_cfg(wl);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl)
+{
+       int ret;
+
+       ret = wl1271_ap_init_deauth_template(wl);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_ap_init_null_template(wl);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_ap_init_qos_null_template(wl);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static void wl1271_check_ba_support(struct wl1271 *wl)
+{
+       /* validate FW cost ver x.x.x.50-60.x */
+       if ((wl->chip.fw_ver[3] >= WL12XX_BA_SUPPORT_FW_COST_VER2_START) &&
+           (wl->chip.fw_ver[3] < WL12XX_BA_SUPPORT_FW_COST_VER2_END)) {
+               wl->ba_support = true;
+               return;
+       }
+
+       wl->ba_support = false;
+}
+
+static int wl1271_set_ba_policies(struct wl1271 *wl)
+{
+       u8 tid_index;
+       int ret = 0;
+
+       /* Reset the BA RX indicators */
+       wl->ba_rx_bitmap = 0;
+
+       /* validate that the FW supports BA */
+       wl1271_check_ba_support(wl);
+
+       if (wl->ba_support)
+               /* 802.11n initiator BA session setting */
+               for (tid_index = 0; tid_index < CONF_TX_MAX_TID_COUNT;
+                    ++tid_index) {
+                       ret = wl1271_acx_set_ba_session(wl, WLAN_BACK_INITIATOR,
+                                                       tid_index, true);
+                       if (ret < 0)
+                               break;
+               }
+
+       return ret;
+}
+
 int wl1271_hw_init(struct wl1271 *wl)
 {
        struct conf_tx_ac_category *conf_ac;
        struct conf_tx_tid *conf_tid;
        int ret, i;
+       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
 
        ret = wl1271_cmd_general_parms(wl);
        if (ret < 0)
@@ -227,12 +519,12 @@ int wl1271_hw_init(struct wl1271 *wl)
        if (ret < 0)
                return ret;
 
-       ret = wl1271_cmd_ext_radio_parms(wl);
-       if (ret < 0)
-               return ret;
+       /* Mode specific init */
+       if (is_ap)
+               ret = wl1271_ap_hw_init(wl);
+       else
+               ret = wl1271_sta_hw_init(wl);
 
-       /* Template settings */
-       ret = wl1271_init_templates_config(wl);
        if (ret < 0)
                return ret;
 
@@ -259,16 +551,6 @@ int wl1271_hw_init(struct wl1271 *wl)
        if (ret < 0)
                goto out_free_memmap;
 
-       /* Initialize connection monitoring thresholds */
-       ret = wl1271_acx_conn_monit_params(wl, false);
-       if (ret < 0)
-               goto out_free_memmap;
-
-       /* Beacon filtering */
-       ret = wl1271_init_beacon_filter(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
        /* Configure TX patch complete interrupt behavior */
        ret = wl1271_acx_tx_config_options(wl);
        if (ret < 0)
@@ -279,21 +561,11 @@ int wl1271_hw_init(struct wl1271 *wl)
        if (ret < 0)
                goto out_free_memmap;
 
-       /* Bluetooth WLAN coexistence */
-       ret = wl1271_init_pta(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
        /* Energy detection */
        ret = wl1271_init_energy_detection(wl);
        if (ret < 0)
                goto out_free_memmap;
 
-       /* Beacons and boradcast settings */
-       ret = wl1271_init_beacon_broadcast(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
        /* Default fragmentation threshold */
        ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
        if (ret < 0)
@@ -321,23 +593,13 @@ int wl1271_hw_init(struct wl1271 *wl)
                        goto out_free_memmap;
        }
 
-       /* Configure TX rate classes */
-       ret = wl1271_acx_rate_policies(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
        /* Enable data path */
        ret = wl1271_cmd_data_path(wl, 1);
        if (ret < 0)
                goto out_free_memmap;
 
-       /* Configure for ELP power saving */
-       ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
-       if (ret < 0)
-               goto out_free_memmap;
-
        /* Configure HW encryption */
-       ret = wl1271_init_hwenc_config(wl);
+       ret = wl1271_acx_feature_cfg(wl);
        if (ret < 0)
                goto out_free_memmap;
 
@@ -346,21 +608,17 @@ int wl1271_hw_init(struct wl1271 *wl)
        if (ret < 0)
                goto out_free_memmap;
 
-       /* disable all keep-alive templates */
-       for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
-               ret = wl1271_acx_keep_alive_config(wl, i,
-                                                  ACX_KEEP_ALIVE_TPL_INVALID);
-               if (ret < 0)
-                       goto out_free_memmap;
-       }
+       /* Mode specific init - post mem init */
+       if (is_ap)
+               ret = wl1271_ap_hw_init_post_mem(wl);
+       else
+               ret = wl1271_sta_hw_init_post_mem(wl);
 
-       /* disable the keep-alive feature */
-       ret = wl1271_acx_keep_alive_mode(wl, false);
        if (ret < 0)
                goto out_free_memmap;
 
-       /* Configure rssi/snr averaging weights */
-       ret = wl1271_acx_rssi_snr_avg_weights(wl);
+       /* Configure initiator BA sessions policies */
+       ret = wl1271_set_ba_policies(wl);
        if (ret < 0)
                goto out_free_memmap;
 
index 7762421f86021ac7c283df660c0640eb70141c28..3a8bd3f426d287f0749cfcb68706c4b9eab5fbd6 100644
@@ -27,7 +27,7 @@
 #include "wl12xx.h"
 
 int wl1271_hw_init_power_auth(struct wl1271 *wl);
-int wl1271_init_templates_config(struct wl1271 *wl);
+int wl1271_sta_init_templates_config(struct wl1271 *wl);
 int wl1271_init_phy_config(struct wl1271 *wl);
 int wl1271_init_pta(struct wl1271 *wl);
 int wl1271_init_energy_detection(struct wl1271 *wl);
index 062247ef3ad2742378c7993649eef5780842c665..947491a1d9cc718fbad166353106bfabfd8e270b 100644
@@ -116,11 +116,11 @@ static struct conf_drv_settings default_conf = {
        },
        .tx = {
                .tx_energy_detection         = 0,
-               .rc_conf                     = {
+               .sta_rc_conf                 = {
                        .enabled_rates       = 0,
                        .short_retry_limit   = 10,
                        .long_retry_limit    = 10,
-                       .aflags              = 0
+                       .aflags              = 0,
                },
                .ac_conf_count               = 4,
                .ac_conf                     = {
@@ -153,6 +153,45 @@ static struct conf_drv_settings default_conf = {
                                .tx_op_limit = 1504,
                        },
                },
+               .ap_rc_conf                  = {
+                       [0] = {
+                               .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+                               .short_retry_limit = 10,
+                               .long_retry_limit = 10,
+                               .aflags      = 0,
+                       },
+                       [1] = {
+                               .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+                               .short_retry_limit = 10,
+                               .long_retry_limit = 10,
+                               .aflags      = 0,
+                       },
+                       [2] = {
+                               .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+                               .short_retry_limit = 10,
+                               .long_retry_limit = 10,
+                               .aflags      = 0,
+                       },
+                       [3] = {
+                               .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+                               .short_retry_limit = 10,
+                               .long_retry_limit = 10,
+                               .aflags      = 0,
+                       },
+               },
+               .ap_mgmt_conf = {
+                       .enabled_rates       = CONF_TX_AP_DEFAULT_MGMT_RATES,
+                       .short_retry_limit   = 10,
+                       .long_retry_limit    = 10,
+                       .aflags              = 0,
+               },
+               .ap_bcst_conf = {
+                       .enabled_rates       = CONF_HW_BIT_RATE_1MBPS,
+                       .short_retry_limit   = 10,
+                       .long_retry_limit    = 10,
+                       .aflags              = 0,
+               },
+               .ap_max_tx_retries = 100,
                .tid_conf_count = 4,
                .tid_conf = {
                        [CONF_TX_AC_BE] = {
@@ -193,6 +232,8 @@ static struct conf_drv_settings default_conf = {
                .tx_compl_threshold          = 4,
                .basic_rate                  = CONF_HW_BIT_RATE_1MBPS,
                .basic_rate_5                = CONF_HW_BIT_RATE_6MBPS,
+               .tmpl_short_retry_limit      = 10,
+               .tmpl_long_retry_limit       = 10,
        },
        .conn = {
                .wake_up_event               = CONF_WAKE_UP_EVENT_DTIM,
@@ -215,6 +256,7 @@ static struct conf_drv_settings default_conf = {
                .bet_enable                  = CONF_BET_MODE_ENABLE,
                .bet_max_consecutive         = 10,
                .psm_entry_retries           = 5,
+               .psm_exit_retries            = 255,
                .psm_entry_nullfunc_retries  = 3,
                .psm_entry_hangover_period   = 1,
                .keep_alive_interval         = 55000,
@@ -233,13 +275,13 @@ static struct conf_drv_settings default_conf = {
                .avg_weight_rssi_beacon       = 20,
                .avg_weight_rssi_data         = 10,
                .avg_weight_snr_beacon        = 20,
-               .avg_weight_snr_data          = 10
+               .avg_weight_snr_data          = 10,
        },
        .scan = {
                .min_dwell_time_active        = 7500,
                .max_dwell_time_active        = 30000,
-               .min_dwell_time_passive       = 30000,
-               .max_dwell_time_passive       = 60000,
+               .min_dwell_time_passive       = 100000,
+               .max_dwell_time_passive       = 100000,
                .num_probe_reqs               = 2,
        },
        .rf = {
@@ -252,9 +294,24 @@ static struct conf_drv_settings default_conf = {
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                },
        },
+       .ht = {
+               .tx_ba_win_size = 64,
+               .inactivity_timeout = 10000,
+       },
+       .mem = {
+               .num_stations                 = 1,
+               .ssid_profiles                = 1,
+               .rx_block_num                 = 70,
+               .tx_min_block_num             = 40,
+               .dynamic_memory               = 0,
+               .min_req_tx_blocks            = 104,
+               .min_req_rx_blocks            = 22,
+               .tx_min                       = 27,
+       }
 };
 
 static void __wl1271_op_remove_interface(struct wl1271 *wl);
+static void wl1271_free_ap_keys(struct wl1271 *wl);
 
 
 static void wl1271_device_release(struct device *dev)
@@ -393,7 +450,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
        if (ret < 0)
                return ret;
 
-       ret = wl1271_init_templates_config(wl);
+       ret = wl1271_sta_init_templates_config(wl);
        if (ret < 0)
                return ret;
 
@@ -425,6 +482,10 @@ static int wl1271_plt_init(struct wl1271 *wl)
        if (ret < 0)
                goto out_free_memmap;
 
+       ret = wl1271_acx_sta_mem_cfg(wl);
+       if (ret < 0)
+               goto out_free_memmap;
+
        /* Default fragmentation threshold */
        ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
        if (ret < 0)
@@ -476,14 +537,71 @@ static int wl1271_plt_init(struct wl1271 *wl)
        return ret;
 }
 
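+/*
+ * Start or stop per-link power save handling for a station link, based on
+ * how many TX blocks it still has allocated in the firmware.
+ */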
+static void wl1271_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_blks)
+{
+       bool fw_ps;
+
+       /* only regulate station links */
+       if (hlid < WL1271_AP_STA_HLID_START)
+               return;
+
+       fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+
+       /*
+        * Wake up from high-level PS if the STA is asleep with too few
+        * blocks in FW or if the STA is awake.
+        */
+       if (!fw_ps || tx_blks < WL1271_PS_STA_MAX_BLOCKS)
+               wl1271_ps_link_end(wl, hlid);
+
+       /* Start high-level PS if the STA is asleep with enough blocks in FW */
+       else if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
+               wl1271_ps_link_start(wl, hlid, true);
+}
+
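+/*
+ * Update the per-link count of allocated TX blocks from the AP firmware
+ * status and adjust per-link power save handling accordingly.
+ */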
+static void wl1271_irq_update_links_status(struct wl1271 *wl,
+                                      struct wl1271_fw_ap_status *status)
+{
+       u32 cur_fw_ps_map;
+       u8 hlid;
+
+       cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
+       if (wl->ap_fw_ps_map != cur_fw_ps_map) {
+               wl1271_debug(DEBUG_PSM,
+                            "link ps prev 0x%x cur 0x%x changed 0x%x",
+                            wl->ap_fw_ps_map, cur_fw_ps_map,
+                            wl->ap_fw_ps_map ^ cur_fw_ps_map);
+
+               wl->ap_fw_ps_map = cur_fw_ps_map;
+       }
+
+       for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) {
+               u8 cnt = status->tx_lnk_free_blks[hlid] -
+                       wl->links[hlid].prev_freed_blks;
+
+               wl->links[hlid].prev_freed_blks =
+                       status->tx_lnk_free_blks[hlid];
+               wl->links[hlid].allocated_blks -= cnt;
+
+               wl1271_irq_ps_regulate_link(wl, hlid,
+                                           wl->links[hlid].allocated_blks);
+       }
+}
+
 static void wl1271_fw_status(struct wl1271 *wl,
-                            struct wl1271_fw_status *status)
+                            struct wl1271_fw_full_status *full_status)
 {
+       struct wl1271_fw_common_status *status = &full_status->common;
        struct timespec ts;
        u32 total = 0;
        int i;
 
-       wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);
+       if (wl->bss_type == BSS_TYPE_AP_BSS)
+               wl1271_raw_read(wl, FW_STATUS_ADDR, status,
+                               sizeof(struct wl1271_fw_ap_status), false);
+       else
+               wl1271_raw_read(wl, FW_STATUS_ADDR, status,
+                               sizeof(struct wl1271_fw_sta_status), false);
 
        wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
                     "drv_rx_counter = %d, tx_results_counter = %d)",
@@ -507,6 +625,10 @@ static void wl1271_fw_status(struct wl1271 *wl,
        if (total)
                clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
 
+       /* for AP, update per-link allocated TX blocks and PS status */
+       if (wl->bss_type == BSS_TYPE_AP_BSS)
+               wl1271_irq_update_links_status(wl, &full_status->ap);
+
        /* update the host-chipset time offset */
        getnstimeofday(&ts);
        wl->time_offset = (timespec_to_ns(&ts) >> 10) -
@@ -542,7 +664,7 @@ static void wl1271_irq_work(struct work_struct *work)
                loopcount--;
 
                wl1271_fw_status(wl, wl->fw_status);
-               intr = le32_to_cpu(wl->fw_status->intr);
+               intr = le32_to_cpu(wl->fw_status->common.intr);
                if (!intr) {
                        wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
                        spin_lock_irqsave(&wl->wl_lock, flags);
@@ -564,7 +686,7 @@ static void wl1271_irq_work(struct work_struct *work)
                        wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
 
                        /* check for tx results */
-                       if (wl->fw_status->tx_results_counter !=
+                       if (wl->fw_status->common.tx_results_counter !=
                            (wl->tx_results_count & 0xff))
                                wl1271_tx_complete(wl);
 
@@ -578,7 +700,7 @@ static void wl1271_irq_work(struct work_struct *work)
                                wl1271_tx_work_locked(wl);
                        }
 
-                       wl1271_rx(wl, wl->fw_status);
+                       wl1271_rx(wl, &wl->fw_status->common);
                }
 
                if (intr & WL1271_ACX_INTR_EVENT_A) {
@@ -616,9 +738,26 @@ out:
 static int wl1271_fetch_firmware(struct wl1271 *wl)
 {
        const struct firmware *fw;
+       const char *fw_name;
        int ret;
 
-       ret = request_firmware(&fw, WL1271_FW_NAME, wl1271_wl_to_dev(wl));
+       switch (wl->bss_type) {
+       case BSS_TYPE_AP_BSS:
+               fw_name = WL1271_AP_FW_NAME;
+               break;
+       case BSS_TYPE_IBSS:
+       case BSS_TYPE_STA_BSS:
+               fw_name = WL1271_FW_NAME;
+               break;
+       default:
+               wl1271_error("no compatible firmware for bss_type %d",
+                            wl->bss_type);
+               return -EINVAL;
+       }
+
+       wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
+
+       ret = request_firmware(&fw, fw_name, wl1271_wl_to_dev(wl));
 
        if (ret < 0) {
                wl1271_error("could not get firmware: %d", ret);
@@ -632,6 +771,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
                goto out;
        }
 
+       vfree(wl->fw);
        wl->fw_len = fw->size;
        wl->fw = vmalloc(wl->fw_len);
 
@@ -642,7 +782,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
        }
 
        memcpy(wl->fw, fw->data, wl->fw_len);
-
+       wl->fw_bss_type = wl->bss_type;
        ret = 0;
 
 out:
@@ -778,7 +918,8 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
                goto out;
        }
 
-       if (wl->fw == NULL) {
+       /* Make sure the firmware type matches the BSS type */
+       if (wl->fw == NULL || wl->fw_bss_type != wl->bss_type) {
                ret = wl1271_fetch_firmware(wl);
                if (ret < 0)
                        goto out;
@@ -811,6 +952,8 @@ int wl1271_plt_start(struct wl1271 *wl)
                goto out;
        }
 
+       wl->bss_type = BSS_TYPE_STA_BSS;
+
        while (retries) {
                retries--;
                ret = wl1271_chip_wakeup(wl);
@@ -827,7 +970,7 @@ int wl1271_plt_start(struct wl1271 *wl)
 
                wl->state = WL1271_STATE_PLT;
                wl1271_notice("firmware booted in PLT mode (%s)",
-                             wl->chip.fw_ver);
+                             wl->chip.fw_ver_str);
                goto out;
 
 irq_disable:
@@ -854,12 +997,10 @@ out:
        return ret;
 }
 
-int wl1271_plt_stop(struct wl1271 *wl)
+int __wl1271_plt_stop(struct wl1271 *wl)
 {
        int ret = 0;
 
-       mutex_lock(&wl->mutex);
-
        wl1271_notice("power down");
 
        if (wl->state != WL1271_STATE_PLT) {
@@ -875,56 +1016,55 @@ int wl1271_plt_stop(struct wl1271 *wl)
        wl->state = WL1271_STATE_OFF;
        wl->rx_counter = 0;
 
-out:
        mutex_unlock(&wl->mutex);
-
        cancel_work_sync(&wl->irq_work);
        cancel_work_sync(&wl->recovery_work);
+       mutex_lock(&wl->mutex);
+out:
+       return ret;
+}
 
+int wl1271_plt_stop(struct wl1271 *wl)
+{
+       int ret;
+
+       mutex_lock(&wl->mutex);
+       ret = __wl1271_plt_stop(wl);
+       mutex_unlock(&wl->mutex);
        return ret;
 }
 
-static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct wl1271 *wl = hw->priv;
-       struct ieee80211_conf *conf = &hw->conf;
-       struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
-       struct ieee80211_sta *sta = txinfo->control.sta;
        unsigned long flags;
        int q;
+       u8 hlid = 0;
+
+       spin_lock_irqsave(&wl->wl_lock, flags);
+       wl->tx_queue_count++;
 
        /*
-        * peek into the rates configured in the STA entry.
-        * The rates set after connection stage, The first block only BG sets:
-        * the compare is for bit 0-16 of sta_rate_set. The second block add
-        * HT rates in case of HT supported.
+        * The workqueue is slow to process the tx_queue and we need to stop
+        * the queue here, otherwise the queue will get too long.
         */
-       spin_lock_irqsave(&wl->wl_lock, flags);
-       if (sta &&
-           (sta->supp_rates[conf->channel->band] !=
-           (wl->sta_rate_set & HW_BG_RATES_MASK))) {
-               wl->sta_rate_set = sta->supp_rates[conf->channel->band];
-               set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
+       if (wl->tx_queue_count >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
+               wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
+               ieee80211_stop_queues(wl->hw);
+               set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
        }
 
-#ifdef CONFIG_WL12XX_HT
-       if (sta &&
-           sta->ht_cap.ht_supported &&
-           ((wl->sta_rate_set >> HW_HT_RATES_OFFSET) !=
-             sta->ht_cap.mcs.rx_mask[0])) {
-               /* Clean MCS bits before setting them */
-               wl->sta_rate_set &= HW_BG_RATES_MASK;
-               wl->sta_rate_set |=
-                       (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
-               set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
-       }
-#endif
-       wl->tx_queue_count++;
        spin_unlock_irqrestore(&wl->wl_lock, flags);
 
        /* queue the packet */
        q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
-       skb_queue_tail(&wl->tx_queue[q], skb);
+       if (wl->bss_type == BSS_TYPE_AP_BSS) {
+               hlid = wl1271_tx_get_hlid(skb);
+               wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
+               skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
+       } else {
+               skb_queue_tail(&wl->tx_queue[q], skb);
+       }
 
        /*
         * The chip specific setup must run before the first TX packet -
@@ -933,21 +1073,6 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 
        if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
                ieee80211_queue_work(wl->hw, &wl->tx_work);
-
-       /*
-        * The workqueue is slow to process the tx_queue and we need stop
-        * the queue here, otherwise the queue will get too long.
-        */
-       if (wl->tx_queue_count >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
-               wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
-
-               spin_lock_irqsave(&wl->wl_lock, flags);
-               ieee80211_stop_queues(wl->hw);
-               set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
-               spin_unlock_irqrestore(&wl->wl_lock, flags);
-       }
-
-       return NETDEV_TX_OK;
 }
 
 static struct notifier_block wl1271_dev_notifier = {
@@ -967,6 +1092,9 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
         *
         * The MAC address is first known when the corresponding interface
         * is added. That is where we will initialize the hardware.
+        *
+        * In addition, we currently have different firmwares for AP and managed
+        * operation. We will know which to boot according to interface type.
         */
 
        return 0;
@@ -1006,6 +1134,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
                wl->bss_type = BSS_TYPE_IBSS;
                wl->set_bss_type = BSS_TYPE_STA_BSS;
                break;
+       case NL80211_IFTYPE_AP:
+               wl->bss_type = BSS_TYPE_AP_BSS;
+               break;
        default:
                ret = -EOPNOTSUPP;
                goto out;
@@ -1061,11 +1192,11 @@ power_off:
 
        wl->vif = vif;
        wl->state = WL1271_STATE_ON;
-       wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
+       wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
 
        /* update hw/fw version info in wiphy struct */
        wiphy->hw_version = wl->chip.id;
-       strncpy(wiphy->fw_version, wl->chip.fw_ver,
+       strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
                sizeof(wiphy->fw_version));
 
        /*
@@ -1147,10 +1278,13 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
        wl->time_offset = 0;
        wl->session_counter = 0;
        wl->rate_set = CONF_TX_RATE_MASK_BASIC;
-       wl->sta_rate_set = 0;
        wl->flags = 0;
        wl->vif = NULL;
        wl->filters = 0;
+       wl1271_free_ap_keys(wl);
+       memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
+       wl->ap_fw_ps_map = 0;
+       wl->ap_ps_map = 0;
 
        for (i = 0; i < NUM_TX_QUEUES; i++)
                wl->tx_blocks_freed[i] = 0;
@@ -1186,8 +1320,7 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
 
 static void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters)
 {
-       wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
-       wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
+       wl1271_set_default_filters(wl);
 
        /* combine requested filters with current filter config */
        filters = wl->filters | filters;
@@ -1322,25 +1455,7 @@ static void wl1271_set_band_rate(struct wl1271 *wl)
                wl->basic_rate_set = wl->conf.tx.basic_rate_5;
 }
 
-static u32 wl1271_min_rate_get(struct wl1271 *wl)
-{
-       int i;
-       u32 rate = 0;
-
-       if (!wl->basic_rate_set) {
-               WARN_ON(1);
-               wl->basic_rate_set = wl->conf.tx.basic_rate;
-       }
-
-       for (i = 0; !rate; i++) {
-               if ((wl->basic_rate_set >> i) & 0x1)
-                       rate = 1 << i;
-       }
-
-       return rate;
-}
-
-static int wl1271_handle_idle(struct wl1271 *wl, bool idle)
+static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
 {
        int ret;
 
@@ -1350,9 +1465,8 @@ static int wl1271_handle_idle(struct wl1271 *wl, bool idle)
                        if (ret < 0)
                                goto out;
                }
-               wl->rate_set = wl1271_min_rate_get(wl);
-               wl->sta_rate_set = 0;
-               ret = wl1271_acx_rate_policies(wl);
+               wl->rate_set = wl1271_tx_min_rate_get(wl);
+               ret = wl1271_acx_sta_rate_policies(wl);
                if (ret < 0)
                        goto out;
                ret = wl1271_acx_keep_alive_config(
@@ -1381,14 +1495,17 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
        struct wl1271 *wl = hw->priv;
        struct ieee80211_conf *conf = &hw->conf;
        int channel, ret = 0;
+       bool is_ap;
 
        channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
 
-       wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s",
+       wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
+                    " changed 0x%x",
                     channel,
                     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
                     conf->power_level,
-                    conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use");
+                    conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
+                        changed);
 
        /*
         * mac80211 will go to idle nearly immediately after transmitting some
@@ -1406,6 +1523,8 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
                goto out;
        }
 
+       is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+
        ret = wl1271_ps_elp_wakeup(wl, false);
        if (ret < 0)
                goto out;
@@ -1417,31 +1536,34 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
                wl->band = conf->channel->band;
                wl->channel = channel;
 
-               /*
-                * FIXME: the mac80211 should really provide a fixed rate
-                * to use here. for now, just use the smallest possible rate
-                * for the band as a fixed rate for association frames and
-                * other control messages.
-                */
-               if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
-                       wl1271_set_band_rate(wl);
-
-               wl->basic_rate = wl1271_min_rate_get(wl);
-               ret = wl1271_acx_rate_policies(wl);
-               if (ret < 0)
-                       wl1271_warning("rate policy for update channel "
-                                      "failed %d", ret);
+               if (!is_ap) {
+                       /*
+                        * FIXME: the mac80211 should really provide a fixed
+                        * rate to use here. for now, just use the smallest
+                        * possible rate for the band as a fixed rate for
+                        * association frames and other control messages.
+                        */
+                       if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+                               wl1271_set_band_rate(wl);
 
-               if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
-                       ret = wl1271_join(wl, false);
+                       wl->basic_rate = wl1271_tx_min_rate_get(wl);
+                       ret = wl1271_acx_sta_rate_policies(wl);
                        if (ret < 0)
-                               wl1271_warning("cmd join to update channel "
+                               wl1271_warning("rate policy for channel "
                                               "failed %d", ret);
+
+                       if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
+                               ret = wl1271_join(wl, false);
+                               if (ret < 0)
+                                       wl1271_warning("cmd join on channel "
+                                                      "failed %d", ret);
+                       }
                }
        }
 
-       if (changed & IEEE80211_CONF_CHANGE_IDLE) {
-               ret = wl1271_handle_idle(wl, conf->flags & IEEE80211_CONF_IDLE);
+       if (changed & IEEE80211_CONF_CHANGE_IDLE && !is_ap) {
+               ret = wl1271_sta_handle_idle(wl,
+                                       conf->flags & IEEE80211_CONF_IDLE);
                if (ret < 0)
                        wl1271_warning("idle mode change failed %d", ret);
        }
@@ -1548,7 +1670,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
        struct wl1271 *wl = hw->priv;
        int ret;
 
-       wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter");
+       wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
+                    " total %x", changed, *total);
 
        mutex_lock(&wl->mutex);
 
@@ -1562,15 +1685,16 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
        if (ret < 0)
                goto out;
 
-
-       if (*total & FIF_ALLMULTI)
-               ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
-       else if (fp)
-               ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
-                                                  fp->mc_list,
-                                                  fp->mc_list_length);
-       if (ret < 0)
-               goto out_sleep;
+       if (wl->bss_type != BSS_TYPE_AP_BSS) {
+               if (*total & FIF_ALLMULTI)
+                       ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
+               else if (fp)
+                       ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
+                                                          fp->mc_list,
+                                                          fp->mc_list_length);
+               if (ret < 0)
+                       goto out_sleep;
+       }
 
        /* determine, whether supported filter values have changed */
        if (changed == 0)
@@ -1593,38 +1717,192 @@ out:
        kfree(fp);
 }
 
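+/*
+ * Record an AP key set by mac80211 before the AP has started, so that it can
+ * be programmed into the firmware once the BSS is up.
+ */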
+static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
+                       u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+                       u16 tx_seq_16)
+{
+       struct wl1271_ap_key *ap_key;
+       int i;
+
+       wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
+
+       if (key_size > MAX_KEY_SIZE)
+               return -EINVAL;
+
+       /*
+        * Find the next free entry in recorded_ap_keys, and check that we
+        * are not replacing an existing key.
+        */
+       for (i = 0; i < MAX_NUM_KEYS; i++) {
+               if (wl->recorded_ap_keys[i] == NULL)
+                       break;
+
+               if (wl->recorded_ap_keys[i]->id == id) {
+                       wl1271_warning("trying to record key replacement");
+                       return -EINVAL;
+               }
+       }
+
+       if (i == MAX_NUM_KEYS)
+               return -EBUSY;
+
+       ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
+       if (!ap_key)
+               return -ENOMEM;
+
+       ap_key->id = id;
+       ap_key->key_type = key_type;
+       ap_key->key_size = key_size;
+       memcpy(ap_key->key, key, key_size);
+       ap_key->hlid = hlid;
+       ap_key->tx_seq_32 = tx_seq_32;
+       ap_key->tx_seq_16 = tx_seq_16;
+
+       wl->recorded_ap_keys[i] = ap_key;
+       return 0;
+}
+
+static void wl1271_free_ap_keys(struct wl1271 *wl)
+{
+       int i;
+
+       for (i = 0; i < MAX_NUM_KEYS; i++) {
+               kfree(wl->recorded_ap_keys[i]);
+               wl->recorded_ap_keys[i] = NULL;
+       }
+}
+
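+/*
+ * Program all recorded AP keys into the firmware after the AP has started,
+ * then free the recorded entries.
+ */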
+static int wl1271_ap_init_hwenc(struct wl1271 *wl)
+{
+       int i, ret = 0;
+       struct wl1271_ap_key *key;
+       bool wep_key_added = false;
+
+       for (i = 0; i < MAX_NUM_KEYS; i++) {
+               if (wl->recorded_ap_keys[i] == NULL)
+                       break;
+
+               key = wl->recorded_ap_keys[i];
+               ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE,
+                                           key->id, key->key_type,
+                                           key->key_size, key->key,
+                                           key->hlid, key->tx_seq_32,
+                                           key->tx_seq_16);
+               if (ret < 0)
+                       goto out;
+
+               if (key->key_type == KEY_WEP)
+                       wep_key_added = true;
+       }
+
+       if (wep_key_added) {
+               ret = wl1271_cmd_set_ap_default_wep_key(wl, wl->default_key);
+               if (ret < 0)
+                       goto out;
+       }
+
+out:
+       wl1271_free_ap_keys(wl);
+       return ret;
+}
+
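+/*
+ * Common key handling: use the per-HLID AP key commands in AP mode and the
+ * per-address STA key commands otherwise.
+ */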
+static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+                      u8 key_size, const u8 *key, u32 tx_seq_32,
+                      u16 tx_seq_16, struct ieee80211_sta *sta)
+{
+       int ret;
+       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+
+       if (is_ap) {
+               struct wl1271_station *wl_sta;
+               u8 hlid;
+
+               if (sta) {
+                       wl_sta = (struct wl1271_station *)sta->drv_priv;
+                       hlid = wl_sta->hlid;
+               } else {
+                       hlid = WL1271_AP_BROADCAST_HLID;
+               }
+
+               if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+                       /*
+                        * We do not support removing keys after AP shutdown.
+                        * Pretend we do to make mac80211 happy.
+                        */
+                       if (action != KEY_ADD_OR_REPLACE)
+                               return 0;
+
+                       ret = wl1271_record_ap_key(wl, id,
+                                            key_type, key_size,
+                                            key, hlid, tx_seq_32,
+                                            tx_seq_16);
+               } else {
+                       ret = wl1271_cmd_set_ap_key(wl, action,
+                                            id, key_type, key_size,
+                                            key, hlid, tx_seq_32,
+                                            tx_seq_16);
+               }
+
+               if (ret < 0)
+                       return ret;
+       } else {
+               const u8 *addr;
+               static const u8 bcast_addr[ETH_ALEN] = {
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+               };
+
+               addr = sta ? sta->addr : bcast_addr;
+
+               if (is_zero_ether_addr(addr)) {
+                       /* We don't support TX-only encryption */
+                       return -EOPNOTSUPP;
+               }
+
+               /* The wl1271 does not allow removing unicast keys - they
+                  will be cleared automatically on the next CMD_JOIN. Ignore
+                  the request silently, as we don't want mac80211 to emit
+                  an error message. */
+               if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
+                       return 0;
+
+               ret = wl1271_cmd_set_sta_key(wl, action,
+                                            id, key_type, key_size,
+                                            key, addr, tx_seq_32,
+                                            tx_seq_16);
+               if (ret < 0)
+                       return ret;
+
+               /* the default WEP key needs to be configured at least once */
+               if (key_type == KEY_WEP) {
+                       ret = wl1271_cmd_set_sta_default_wep_key(wl,
+                                                       wl->default_key);
+                       if (ret < 0)
+                               return ret;
+               }
+       }
+
+       return 0;
+}
+
 static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                             struct ieee80211_vif *vif,
                             struct ieee80211_sta *sta,
                             struct ieee80211_key_conf *key_conf)
 {
        struct wl1271 *wl = hw->priv;
-       const u8 *addr;
        int ret;
        u32 tx_seq_32 = 0;
        u16 tx_seq_16 = 0;
        u8 key_type;
 
-       static const u8 bcast_addr[ETH_ALEN] =
-               { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-
        wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
 
-       addr = sta ? sta->addr : bcast_addr;
-
-       wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd);
-       wl1271_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN);
+       wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
        wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
                     key_conf->cipher, key_conf->keyidx,
                     key_conf->keylen, key_conf->flags);
        wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
 
-       if (is_zero_ether_addr(addr)) {
-               /* We dont support TX only encryption */
-               ret = -EOPNOTSUPP;
-               goto out;
-       }
-
        mutex_lock(&wl->mutex);
 
        if (unlikely(wl->state == WL1271_STATE_OFF)) {
@@ -1671,36 +1949,21 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
        switch (cmd) {
        case SET_KEY:
-               ret = wl1271_cmd_set_key(wl, KEY_ADD_OR_REPLACE,
-                                        key_conf->keyidx, key_type,
-                                        key_conf->keylen, key_conf->key,
-                                        addr, tx_seq_32, tx_seq_16);
+               ret = wl1271_set_key(wl, KEY_ADD_OR_REPLACE,
+                                key_conf->keyidx, key_type,
+                                key_conf->keylen, key_conf->key,
+                                tx_seq_32, tx_seq_16, sta);
                if (ret < 0) {
                        wl1271_error("Could not add or replace key");
                        goto out_sleep;
                }
-
-               /* the default WEP key needs to be configured at least once */
-               if (key_type == KEY_WEP) {
-                       ret = wl1271_cmd_set_default_wep_key(wl,
-                                                            wl->default_key);
-                       if (ret < 0)
-                               goto out_sleep;
-               }
                break;
 
        case DISABLE_KEY:
-               /* The wl1271 does not allow to remove unicast keys - they
-                  will be cleared automatically on next CMD_JOIN. Ignore the
-                  request silently, as we dont want the mac80211 to emit
-                  an error message. */
-               if (!is_broadcast_ether_addr(addr))
-                       break;
-
-               ret = wl1271_cmd_set_key(wl, KEY_REMOVE,
-                                        key_conf->keyidx, key_type,
-                                        key_conf->keylen, key_conf->key,
-                                        addr, 0, 0);
+               ret = wl1271_set_key(wl, KEY_REMOVE,
+                                    key_conf->keyidx, key_type,
+                                    key_conf->keylen, key_conf->key,
+                                    0, 0, sta);
                if (ret < 0) {
                        wl1271_error("Could not remove key");
                        goto out_sleep;
@@ -1719,7 +1982,6 @@ out_sleep:
 out_unlock:
        mutex_unlock(&wl->mutex);
 
-out:
        return ret;
 }
 
@@ -1821,7 +2083,7 @@ out:
        return ret;
 }
 
-static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
+static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
                            int offset)
 {
        u8 *ptr = skb->data + offset;
@@ -1831,89 +2093,213 @@ static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
                if (ptr[0] == WLAN_EID_SSID) {
                        wl->ssid_len = ptr[1];
                        memcpy(wl->ssid, ptr+2, wl->ssid_len);
-                       return;
+                       return 0;
                }
                ptr += (ptr[1] + 2);
        }
+
        wl1271_error("No SSID in IEs!\n");
+       return -ENOENT;
 }
 
-static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
-                                      struct ieee80211_vif *vif,
+static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
                                       struct ieee80211_bss_conf *bss_conf,
                                       u32 changed)
 {
-       enum wl1271_cmd_ps_mode mode;
-       struct wl1271 *wl = hw->priv;
-       struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid);
-       bool do_join = false;
-       bool set_assoc = false;
-       int ret;
-
-       wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed");
-
-       mutex_lock(&wl->mutex);
-
-       if (unlikely(wl->state == WL1271_STATE_OFF))
-               goto out;
-
-       ret = wl1271_ps_elp_wakeup(wl, false);
-       if (ret < 0)
-               goto out;
-
-       if ((changed & BSS_CHANGED_BEACON_INT) &&
-           (wl->bss_type == BSS_TYPE_IBSS)) {
-               wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon interval updated: %d",
-                       bss_conf->beacon_int);
+       int ret = 0;
 
-               wl->beacon_int = bss_conf->beacon_int;
-               do_join = true;
+       if (changed & BSS_CHANGED_ERP_SLOT) {
+               if (bss_conf->use_short_slot)
+                       ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
+               else
+                       ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
+               if (ret < 0) {
+                       wl1271_warning("Set slot time failed %d", ret);
+                       goto out;
+               }
        }
 
-       if ((changed & BSS_CHANGED_BEACON) &&
-           (wl->bss_type == BSS_TYPE_IBSS)) {
-               struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
-
-               wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon updated");
-
-               if (beacon) {
-                       struct ieee80211_hdr *hdr;
-                       int ieoffset = offsetof(struct ieee80211_mgmt,
-                                               u.beacon.variable);
-
-                       wl1271_ssid_set(wl, beacon, ieoffset);
+       if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+               if (bss_conf->use_short_preamble)
+                       wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
+               else
+                       wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
+       }
 
-                       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
-                                                     beacon->data,
-                                                     beacon->len, 0,
-                                                     wl1271_min_rate_get(wl));
+       if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+               if (bss_conf->use_cts_prot)
+                       ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
+               else
+                       ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
+               if (ret < 0) {
+                       wl1271_warning("Set ctsprotect failed %d", ret);
+                       goto out;
+               }
+       }
 
-                       if (ret < 0) {
-                               dev_kfree_skb(beacon);
-                               goto out_sleep;
-                       }
+out:
+       return ret;
+}
 
-                       hdr = (struct ieee80211_hdr *) beacon->data;
-                       hdr->frame_control = cpu_to_le16(
-                               IEEE80211_FTYPE_MGMT |
-                               IEEE80211_STYPE_PROBE_RESP);
+static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
+                                         struct ieee80211_vif *vif,
+                                         struct ieee80211_bss_conf *bss_conf,
+                                         u32 changed)
+{
+       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+       int ret = 0;
+
+       if ((changed & BSS_CHANGED_BEACON_INT)) {
+               wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
+                       bss_conf->beacon_int);
+
+               wl->beacon_int = bss_conf->beacon_int;
+       }
 
-                       ret = wl1271_cmd_template_set(wl,
-                                                     CMD_TEMPL_PROBE_RESPONSE,
-                                                     beacon->data,
-                                                     beacon->len, 0,
-                                                     wl1271_min_rate_get(wl));
+       if ((changed & BSS_CHANGED_BEACON)) {
+               struct ieee80211_hdr *hdr;
+               int ieoffset = offsetof(struct ieee80211_mgmt,
+                                       u.beacon.variable);
+               struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
+               u16 tmpl_id;
+
+               if (!beacon)
+                       goto out;
+
+               wl1271_debug(DEBUG_MASTER, "beacon updated");
+
+               ret = wl1271_ssid_set(wl, beacon, ieoffset);
+               if (ret < 0) {
                        dev_kfree_skb(beacon);
-                       if (ret < 0)
-                               goto out_sleep;
+                       goto out;
+               }
+               tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
+                                 CMD_TEMPL_BEACON;
+               ret = wl1271_cmd_template_set(wl, tmpl_id,
+                                             beacon->data,
+                                             beacon->len, 0,
+                                             wl1271_tx_min_rate_get(wl));
+               if (ret < 0) {
+                       dev_kfree_skb(beacon);
+                       goto out;
+               }
 
-                       /* Need to update the SSID (for filtering etc) */
-                       do_join = true;
+               hdr = (struct ieee80211_hdr *) beacon->data;
+               hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+                                                IEEE80211_STYPE_PROBE_RESP);
+
+               tmpl_id = is_ap ? CMD_TEMPL_AP_PROBE_RESPONSE :
+                                 CMD_TEMPL_PROBE_RESPONSE;
+               ret = wl1271_cmd_template_set(wl,
+                                             tmpl_id,
+                                             beacon->data,
+                                             beacon->len, 0,
+                                             wl1271_tx_min_rate_get(wl));
+               dev_kfree_skb(beacon);
+               if (ret < 0)
+                       goto out;
+       }
+
+out:
+       return ret;
+}
+
+/* AP mode changes */
+static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
+                                      struct ieee80211_vif *vif,
+                                      struct ieee80211_bss_conf *bss_conf,
+                                      u32 changed)
+{
+       int ret = 0;
+
+       if ((changed & BSS_CHANGED_BASIC_RATES)) {
+               u32 rates = bss_conf->basic_rates;
+               struct conf_tx_rate_class mgmt_rc;
+
+               wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates);
+               wl->basic_rate = wl1271_tx_min_rate_get(wl);
+               wl1271_debug(DEBUG_AP, "basic rates: 0x%x",
+                            wl->basic_rate_set);
+
+               /* update the AP management rate policy with the new rates */
+               mgmt_rc.enabled_rates = wl->basic_rate_set;
+               mgmt_rc.long_retry_limit = 10;
+               mgmt_rc.short_retry_limit = 10;
+               mgmt_rc.aflags = 0;
+               ret = wl1271_acx_ap_rate_policy(wl, &mgmt_rc,
+                                               ACX_TX_AP_MODE_MGMT_RATE);
+               if (ret < 0) {
+                       wl1271_error("AP mgmt policy change failed %d", ret);
+                       goto out;
+               }
+       }
+
+       ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
+       if (ret < 0)
+               goto out;
+
+       if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
+               if (bss_conf->enable_beacon) {
+                       if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+                               ret = wl1271_cmd_start_bss(wl);
+                               if (ret < 0)
+                                       goto out;
+
+                               set_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+                               wl1271_debug(DEBUG_AP, "started AP");
+
+                               ret = wl1271_ap_init_hwenc(wl);
+                               if (ret < 0)
+                                       goto out;
+                       }
+               } else {
+                       if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+                               ret = wl1271_cmd_stop_bss(wl);
+                               if (ret < 0)
+                                       goto out;
+
+                               clear_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+                               wl1271_debug(DEBUG_AP, "stopped AP");
+                       }
                }
        }
 
-       if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
-           (wl->bss_type == BSS_TYPE_IBSS)) {
+       ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+       if (ret < 0)
+               goto out;
+out:
+       return;
+}
+
+/* STA/IBSS mode changes */
+static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
+                                       struct ieee80211_vif *vif,
+                                       struct ieee80211_bss_conf *bss_conf,
+                                       u32 changed)
+{
+       bool do_join = false, set_assoc = false;
+       bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+       u32 sta_rate_set = 0;
+       int ret;
+       struct ieee80211_sta *sta;
+       bool sta_exists = false;
+       struct ieee80211_sta_ht_cap sta_ht_cap;
+
+       if (is_ibss) {
+               ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
+                                                    changed);
+               if (ret < 0)
+                       goto out;
+       }
+
+       if ((changed & BSS_CHANGED_BEACON_INT)  && is_ibss)
+               do_join = true;
+
+       /* Need to update the SSID (for filtering etc) */
+       if ((changed & BSS_CHANGED_BEACON) && is_ibss)
+               do_join = true;
+
+       if ((changed & BSS_CHANGED_BEACON_ENABLED) && is_ibss) {
                wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
                             bss_conf->enable_beacon ? "enabled" : "disabled");
 
@@ -1924,7 +2310,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
                do_join = true;
        }
 
-       if (changed & BSS_CHANGED_CQM) {
+       if ((changed & BSS_CHANGED_CQM)) {
                bool enable = false;
                if (bss_conf->cqm_rssi_thold)
                        enable = true;
@@ -1942,24 +2328,70 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
             * and enable the BSSID filter
             */
            memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
-                       memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
+               memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
 
+               if (!is_zero_ether_addr(wl->bssid)) {
                        ret = wl1271_cmd_build_null_data(wl);
                        if (ret < 0)
-                               goto out_sleep;
+                               goto out;
 
                        ret = wl1271_build_qos_null_data(wl);
                        if (ret < 0)
-                               goto out_sleep;
+                               goto out;
 
                        /* filter out all packets not from this BSSID */
                        wl1271_configure_filters(wl, 0);
 
                        /* Need to update the BSSID (for filtering etc) */
                        do_join = true;
+               }
        }
 
-       if (changed & BSS_CHANGED_ASSOC) {
+       rcu_read_lock();
+       sta = ieee80211_find_sta(vif, bss_conf->bssid);
+       if (sta)  {
+               /* save the supp_rates of the ap */
+               sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
+               if (sta->ht_cap.ht_supported)
+                       sta_rate_set |=
+                           (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
+               sta_ht_cap = sta->ht_cap;
+               sta_exists = true;
+       }
+       rcu_read_unlock();
+
+       if (sta_exists) {
+               /* handle new association with HT and HT information change */
+               if ((changed & BSS_CHANGED_HT) &&
+                   (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
+                       ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap,
+                                                            true);
+                       if (ret < 0) {
+                               wl1271_warning("Set ht cap true failed %d",
+                                              ret);
+                               goto out;
+                       }
+                       ret = wl1271_acx_set_ht_information(wl,
+                                               bss_conf->ht_operation_mode);
+                       if (ret < 0) {
+                               wl1271_warning("Set ht information failed %d",
+                                              ret);
+                               goto out;
+                       }
+               }
+               /* handle new association without HT and disassociation */
+               else if (changed & BSS_CHANGED_ASSOC) {
+                       ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap,
+                                                            false);
+                       if (ret < 0) {
+                               wl1271_warning("Set ht cap false failed %d",
+                                              ret);
+                               goto out;
+                       }
+               }
+       }
+
+       if ((changed & BSS_CHANGED_ASSOC)) {
                if (bss_conf->assoc) {
                        u32 rates;
                        int ieoffset;
@@ -1975,10 +2407,13 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
                        rates = bss_conf->basic_rates;
                        wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl,
                                                                         rates);
-                       wl->basic_rate = wl1271_min_rate_get(wl);
-                       ret = wl1271_acx_rate_policies(wl);
+                       wl->basic_rate = wl1271_tx_min_rate_get(wl);
+                       if (sta_rate_set)
+                               wl->rate_set = wl1271_tx_enabled_rates_get(wl,
+                                                               sta_rate_set);
+                       ret = wl1271_acx_sta_rate_policies(wl);
                        if (ret < 0)
-                               goto out_sleep;
+                               goto out;
 
                        /*
                         * with wl1271, we don't need to update the
@@ -1988,7 +2423,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
                         */
                        ret = wl1271_cmd_build_ps_poll(wl, wl->aid);
                        if (ret < 0)
-                               goto out_sleep;
+                               goto out;
 
                        /*
                         * Get a template for hardware connection maintenance
@@ -2002,17 +2437,19 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
                        /* enable the connection monitoring feature */
                        ret = wl1271_acx_conn_monit_params(wl, true);
                        if (ret < 0)
-                               goto out_sleep;
+                               goto out;
 
                        /* If we want to go in PSM but we're not there yet */
                        if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
                            !test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+                               enum wl1271_cmd_ps_mode mode;
+
                                mode = STATION_POWER_SAVE_MODE;
                                ret = wl1271_ps_set_mode(wl, mode,
                                                         wl->basic_rate,
                                                         true);
                                if (ret < 0)
-                                       goto out_sleep;
+                                       goto out;
                        }
                } else {
                        /* use defaults when not associated */
@@ -2029,10 +2466,10 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
 
                        /* revert back to minimum rates for the current band */
                        wl1271_set_band_rate(wl);
-                       wl->basic_rate = wl1271_min_rate_get(wl);
-                       ret = wl1271_acx_rate_policies(wl);
+                       wl->basic_rate = wl1271_tx_min_rate_get(wl);
+                       ret = wl1271_acx_sta_rate_policies(wl);
                        if (ret < 0)
-                               goto out_sleep;
+                               goto out;
 
                        /* disable connection monitor features */
                        ret = wl1271_acx_conn_monit_params(wl, false);
@@ -2040,74 +2477,17 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
                        /* Disable the keep-alive feature */
                        ret = wl1271_acx_keep_alive_mode(wl, false);
                        if (ret < 0)
-                               goto out_sleep;
+                               goto out;
 
                        /* restore the bssid filter and go to dummy bssid */
                        wl1271_unjoin(wl);
                        wl1271_dummy_join(wl);
                }
-
-       }
-
-       if (changed & BSS_CHANGED_ERP_SLOT) {
-               if (bss_conf->use_short_slot)
-                       ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
-               else
-                       ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
-               if (ret < 0) {
-                       wl1271_warning("Set slot time failed %d", ret);
-                       goto out_sleep;
-               }
-       }
-
-       if (changed & BSS_CHANGED_ERP_PREAMBLE) {
-               if (bss_conf->use_short_preamble)
-                       wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
-               else
-                       wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
-       }
-
-       if (changed & BSS_CHANGED_ERP_CTS_PROT) {
-               if (bss_conf->use_cts_prot)
-                       ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
-               else
-                       ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
-               if (ret < 0) {
-                       wl1271_warning("Set ctsprotect failed %d", ret);
-                       goto out_sleep;
-               }
        }
 
-       /*
-        * Takes care of: New association with HT enable,
-        *                HT information change in beacon.
-        */
-       if (sta &&
-           (changed & BSS_CHANGED_HT) &&
-           (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
-               ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true);
-               if (ret < 0) {
-                       wl1271_warning("Set ht cap true failed %d", ret);
-                       goto out_sleep;
-               }
-                       ret = wl1271_acx_set_ht_information(wl,
-                               bss_conf->ht_operation_mode);
-               if (ret < 0) {
-                       wl1271_warning("Set ht information failed %d", ret);
-                       goto out_sleep;
-               }
-       }
-       /*
-        * Takes care of: New association without HT,
-        *                Disassociation.
-        */
-       else if (sta && (changed & BSS_CHANGED_ASSOC)) {
-               ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, false);
-               if (ret < 0) {
-                       wl1271_warning("Set ht cap false failed %d", ret);
-                       goto out_sleep;
-               }
-       }
+       ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+       if (ret < 0)
+               goto out;
 
        if (changed & BSS_CHANGED_ARP_FILTER) {
                __be32 addr = bss_conf->arp_addr_list[0];
@@ -2124,76 +2504,128 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
                        ret = wl1271_cmd_build_arp_rsp(wl, addr);
                        if (ret < 0) {
                                wl1271_warning("build arp rsp failed: %d", ret);
-                               goto out_sleep;
+                               goto out;
                        }
 
                        ret = wl1271_acx_arp_ip_filter(wl,
-                               (ACX_ARP_FILTER_ARP_FILTERING |
-                                ACX_ARP_FILTER_AUTO_ARP),
+                               ACX_ARP_FILTER_ARP_FILTERING,
                                addr);
                } else
                        ret = wl1271_acx_arp_ip_filter(wl, 0, addr);
 
                if (ret < 0)
-                       goto out_sleep;
+                       goto out;
        }
 
        if (do_join) {
                ret = wl1271_join(wl, set_assoc);
                if (ret < 0) {
                        wl1271_warning("cmd join failed %d", ret);
-                       goto out_sleep;
+                       goto out;
                }
        }
 
-out_sleep:
-       wl1271_ps_elp_sleep(wl);
-
 out:
-       mutex_unlock(&wl->mutex);
+       return;
 }
 
-static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
-                            const struct ieee80211_tx_queue_params *params)
+static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
+                                      struct ieee80211_vif *vif,
+                                      struct ieee80211_bss_conf *bss_conf,
+                                      u32 changed)
 {
        struct wl1271 *wl = hw->priv;
-       u8 ps_scheme;
+       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
        int ret;
 
-       mutex_lock(&wl->mutex);
+       wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
+                    (int)changed);
 
-       wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
+       mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF)) {
-               ret = -EAGAIN;
+       if (unlikely(wl->state == WL1271_STATE_OFF))
                goto out;
-       }
 
        ret = wl1271_ps_elp_wakeup(wl, false);
        if (ret < 0)
                goto out;
 
-       /* the txop is confed in units of 32us by the mac80211, we need us */
-       ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
-                               params->cw_min, params->cw_max,
-                               params->aifs, params->txop << 5);
-       if (ret < 0)
-               goto out_sleep;
+       if (is_ap)
+               wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
+       else
+               wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
+
+       wl1271_ps_elp_sleep(wl);
+
+out:
+       mutex_unlock(&wl->mutex);
+}
+
+static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
+                            const struct ieee80211_tx_queue_params *params)
+{
+       struct wl1271 *wl = hw->priv;
+       u8 ps_scheme;
+       int ret = 0;
+
+       mutex_lock(&wl->mutex);
+
+       wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
 
        if (params->uapsd)
                ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
        else
                ps_scheme = CONF_PS_SCHEME_LEGACY;
 
-       ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
-                                CONF_CHANNEL_TYPE_EDCF,
-                                wl1271_tx_get_queue(queue),
-                                ps_scheme, CONF_ACK_POLICY_LEGACY, 0, 0);
-       if (ret < 0)
-               goto out_sleep;
+       if (wl->state == WL1271_STATE_OFF) {
+               /*
+                * If the state is off, the parameters will be recorded and
+                * configured on init. This happens in AP mode.
+                */
+               struct conf_tx_ac_category *conf_ac =
+                       &wl->conf.tx.ac_conf[wl1271_tx_get_queue(queue)];
+               struct conf_tx_tid *conf_tid =
+                       &wl->conf.tx.tid_conf[wl1271_tx_get_queue(queue)];
+
+               conf_ac->ac = wl1271_tx_get_queue(queue);
+               conf_ac->cw_min = (u8)params->cw_min;
+               conf_ac->cw_max = params->cw_max;
+               conf_ac->aifsn = params->aifs;
+               conf_ac->tx_op_limit = params->txop << 5;
+
+               conf_tid->queue_id = wl1271_tx_get_queue(queue);
+               conf_tid->channel_type = CONF_CHANNEL_TYPE_EDCF;
+               conf_tid->tsid = wl1271_tx_get_queue(queue);
+               conf_tid->ps_scheme = ps_scheme;
+               conf_tid->ack_policy = CONF_ACK_POLICY_LEGACY;
+               conf_tid->apsd_conf[0] = 0;
+               conf_tid->apsd_conf[1] = 0;
+       } else {
+               ret = wl1271_ps_elp_wakeup(wl, false);
+               if (ret < 0)
+                       goto out;
+
+               /*
+                * the txop is given by mac80211 in units of 32us;
+                * the firmware wants microseconds
+                */
+               ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
+                                       params->cw_min, params->cw_max,
+                                       params->aifs, params->txop << 5);
+               if (ret < 0)
+                       goto out_sleep;
+
+               ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
+                                        CONF_CHANNEL_TYPE_EDCF,
+                                        wl1271_tx_get_queue(queue),
+                                        ps_scheme, CONF_ACK_POLICY_LEGACY,
+                                        0, 0);
+               if (ret < 0)
+                       goto out_sleep;
 
 out_sleep:
-       wl1271_ps_elp_sleep(wl);
+               wl1271_ps_elp_sleep(wl);
+       }
 
 out:
        mutex_unlock(&wl->mutex);
@@ -2247,6 +2679,184 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
        return 0;
 }
 
+static int wl1271_allocate_sta(struct wl1271 *wl,
+                            struct ieee80211_sta *sta,
+                            u8 *hlid)
+{
+       struct wl1271_station *wl_sta;
+       int id;
+
+       id = find_first_zero_bit(wl->ap_hlid_map, AP_MAX_STATIONS);
+       if (id >= AP_MAX_STATIONS) {
+               wl1271_warning("could not allocate HLID - too many stations");
+               return -EBUSY;
+       }
+
+       wl_sta = (struct wl1271_station *)sta->drv_priv;
+       __set_bit(id, wl->ap_hlid_map);
+       wl_sta->hlid = WL1271_AP_STA_HLID_START + id;
+       *hlid = wl_sta->hlid;
+       memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
+       return 0;
+}
+
+static void wl1271_free_sta(struct wl1271 *wl, u8 hlid)
+{
+       int id = hlid - WL1271_AP_STA_HLID_START;
+
+       if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
+               return;
+
+       __clear_bit(id, wl->ap_hlid_map);
+       memset(wl->links[hlid].addr, 0, ETH_ALEN);
+       wl1271_tx_reset_link_queues(wl, hlid);
+       __clear_bit(hlid, &wl->ap_ps_map);
+       __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+}
+
+static int wl1271_op_sta_add(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif,
+                            struct ieee80211_sta *sta)
+{
+       struct wl1271 *wl = hw->priv;
+       int ret = 0;
+       u8 hlid;
+
+       mutex_lock(&wl->mutex);
+
+       if (unlikely(wl->state == WL1271_STATE_OFF))
+               goto out;
+
+       if (wl->bss_type != BSS_TYPE_AP_BSS)
+               goto out;
+
+       wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
+
+       ret = wl1271_allocate_sta(wl, sta, &hlid);
+       if (ret < 0)
+               goto out;
+
+       ret = wl1271_ps_elp_wakeup(wl, false);
+       if (ret < 0)
+               goto out_free_sta;
+
+       ret = wl1271_cmd_add_sta(wl, sta, hlid);
+       if (ret < 0)
+               goto out_sleep;
+
+out_sleep:
+       wl1271_ps_elp_sleep(wl);
+
+out_free_sta:
+       if (ret < 0)
+               wl1271_free_sta(wl, hlid);
+
+out:
+       mutex_unlock(&wl->mutex);
+       return ret;
+}
+
+static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif,
+                               struct ieee80211_sta *sta)
+{
+       struct wl1271 *wl = hw->priv;
+       struct wl1271_station *wl_sta;
+       int ret = 0, id;
+
+       mutex_lock(&wl->mutex);
+
+       if (unlikely(wl->state == WL1271_STATE_OFF))
+               goto out;
+
+       if (wl->bss_type != BSS_TYPE_AP_BSS)
+               goto out;
+
+       wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
+
+       wl_sta = (struct wl1271_station *)sta->drv_priv;
+       id = wl_sta->hlid - WL1271_AP_STA_HLID_START;
+       if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
+               goto out;
+
+       ret = wl1271_ps_elp_wakeup(wl, false);
+       if (ret < 0)
+               goto out;
+
+       ret = wl1271_cmd_remove_sta(wl, wl_sta->hlid);
+       if (ret < 0)
+               goto out_sleep;
+
+       wl1271_free_sta(wl, wl_sta->hlid);
+
+out_sleep:
+       wl1271_ps_elp_sleep(wl);
+
+out:
+       mutex_unlock(&wl->mutex);
+       return ret;
+}
+
+int wl1271_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                          enum ieee80211_ampdu_mlme_action action,
+                          struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                          u8 buf_size)
+{
+       struct wl1271 *wl = hw->priv;
+       int ret;
+
+       mutex_lock(&wl->mutex);
+
+       if (unlikely(wl->state == WL1271_STATE_OFF)) {
+               ret = -EAGAIN;
+               goto out;
+       }
+
+       ret = wl1271_ps_elp_wakeup(wl, false);
+       if (ret < 0)
+               goto out;
+
+       switch (action) {
+       case IEEE80211_AMPDU_RX_START:
+               if (wl->ba_support) {
+                       ret = wl1271_acx_set_ba_receiver_session(wl, tid, *ssn,
+                                                                true);
+                       if (!ret)
+                               wl->ba_rx_bitmap |= BIT(tid);
+               } else {
+                       ret = -ENOTSUPP;
+               }
+               break;
+
+       case IEEE80211_AMPDU_RX_STOP:
+               ret = wl1271_acx_set_ba_receiver_session(wl, tid, 0, false);
+               if (!ret)
+                       wl->ba_rx_bitmap &= ~BIT(tid);
+               break;
+
+       /*
+        * The BA initiator session management is handled independently in
+        * the FW. Falling through here on purpose for all TX AMPDU commands.
+        */
+       case IEEE80211_AMPDU_TX_START:
+       case IEEE80211_AMPDU_TX_STOP:
+       case IEEE80211_AMPDU_TX_OPERATIONAL:
+               ret = -EINVAL;
+               break;
+
+       default:
+               wl1271_error("Incorrect ampdu action id=%x\n", action);
+               ret = -EINVAL;
+       }
+
+       wl1271_ps_elp_sleep(wl);
+
+out:
+       mutex_unlock(&wl->mutex);
+
+       return ret;
+}
+
 /* can't be const, mac80211 writes to this */
 static struct ieee80211_rate wl1271_rates[] = {
        { .bitrate = 10,
@@ -2305,6 +2915,7 @@ static struct ieee80211_channel wl1271_channels[] = {
        { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
        { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
        { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
+       { .hw_value = 14, .center_freq = 2484, .max_power = 25 },
 };
 
 /* mapping to indexes for wl1271_rates */
@@ -2493,6 +3104,9 @@ static const struct ieee80211_ops wl1271_ops = {
        .conf_tx = wl1271_op_conf_tx,
        .get_tsf = wl1271_op_get_tsf,
        .get_survey = wl1271_op_get_survey,
+       .sta_add = wl1271_op_sta_add,
+       .sta_remove = wl1271_op_sta_remove,
+       .ampdu_action = wl1271_op_ampdu_action,
        CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
 };
 
@@ -2607,6 +3221,18 @@ int wl1271_register_hw(struct wl1271 *wl)
        if (wl->mac80211_registered)
                return 0;
 
+       ret = wl1271_fetch_nvs(wl);
+       if (ret == 0) {
+               u8 *nvs_ptr = (u8 *)wl->nvs->nvs;
+
+               wl->mac_addr[0] = nvs_ptr[11];
+               wl->mac_addr[1] = nvs_ptr[10];
+               wl->mac_addr[2] = nvs_ptr[6];
+               wl->mac_addr[3] = nvs_ptr[5];
+               wl->mac_addr[4] = nvs_ptr[4];
+               wl->mac_addr[5] = nvs_ptr[3];
+       }
+
        SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
 
        ret = ieee80211_register_hw(wl->hw);
@@ -2629,6 +3255,9 @@ EXPORT_SYMBOL_GPL(wl1271_register_hw);
 
 void wl1271_unregister_hw(struct wl1271 *wl)
 {
+       if (wl->state == WL1271_STATE_PLT)
+               __wl1271_plt_stop(wl);
+
        unregister_netdevice_notifier(&wl1271_dev_notifier);
        ieee80211_unregister_hw(wl->hw);
        wl->mac80211_registered = false;
@@ -2661,13 +3290,15 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
                IEEE80211_HW_SUPPORTS_UAPSD |
                IEEE80211_HW_HAS_RATE_CONTROL |
                IEEE80211_HW_CONNECTION_MONITOR |
-               IEEE80211_HW_SUPPORTS_CQM_RSSI;
+               IEEE80211_HW_SUPPORTS_CQM_RSSI |
+               IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+               IEEE80211_HW_AP_LINK_PS;
 
        wl->hw->wiphy->cipher_suites = cipher_suites;
        wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
 
        wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
-               BIT(NL80211_IFTYPE_ADHOC);
+               BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
        wl->hw->wiphy->max_scan_ssids = 1;
        /*
         * Maximum length of elements in scanning probe request templates
@@ -2676,8 +3307,20 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
         */
        wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
                        sizeof(struct ieee80211_header);
-       wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz;
-       wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz;
+
+       /*
+        * We keep local copies of the band structs because we need to
+        * modify them on a per-device basis.
+        */
+       memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
+              sizeof(wl1271_band_2ghz));
+       memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
+              sizeof(wl1271_band_5ghz));
+
+       wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+               &wl->bands[IEEE80211_BAND_2GHZ];
+       wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+               &wl->bands[IEEE80211_BAND_5GHZ];
 
        wl->hw->queues = 4;
        wl->hw->max_rates = 1;
@@ -2686,6 +3329,10 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
 
        SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
 
+       wl->hw->sta_data_size = sizeof(struct wl1271_station);
+
+       wl->hw->max_rx_aggregation_subframes = 8;
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(wl1271_init_ieee80211);
@@ -2697,7 +3344,7 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
        struct ieee80211_hw *hw;
        struct platform_device *plat_dev = NULL;
        struct wl1271 *wl;
-       int i, ret;
+       int i, j, ret;
        unsigned int order;
 
        hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
@@ -2725,6 +3372,10 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
        for (i = 0; i < NUM_TX_QUEUES; i++)
                skb_queue_head_init(&wl->tx_queue[i]);
 
+       for (i = 0; i < NUM_TX_QUEUES; i++)
+               for (j = 0; j < AP_MAX_LINKS; j++)
+                       skb_queue_head_init(&wl->links[j].tx_queue[i]);
+
        INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
        INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
        INIT_WORK(&wl->irq_work, wl1271_irq_work);
@@ -2735,19 +3386,24 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
        wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
        wl->default_key = 0;
        wl->rx_counter = 0;
-       wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
-       wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
+       wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
+       wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
        wl->psm_entry_retry = 0;
        wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
        wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
        wl->basic_rate = CONF_TX_RATE_MASK_BASIC;
        wl->rate_set = CONF_TX_RATE_MASK_BASIC;
-       wl->sta_rate_set = 0;
        wl->band = IEEE80211_BAND_2GHZ;
        wl->vif = NULL;
        wl->flags = 0;
        wl->sg_enabled = true;
        wl->hw_pg_ver = -1;
+       wl->bss_type = MAX_BSS_TYPE;
+       wl->set_bss_type = MAX_BSS_TYPE;
+       wl->fw_bss_type = MAX_BSS_TYPE;
+       wl->last_tx_hlid = 0;
+       wl->ap_ps_map = 0;
+       wl->ap_fw_ps_map = 0;
 
        memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
        for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
@@ -2837,11 +3493,11 @@ int wl1271_free_hw(struct wl1271 *wl)
 }
 EXPORT_SYMBOL_GPL(wl1271_free_hw);
 
-u32 wl12xx_debug_level;
+u32 wl12xx_debug_level = DEBUG_NONE;
 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
-module_param_named(debug_level, wl12xx_debug_level, uint, DEBUG_NONE);
+module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
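
The conf_tx path in main.c above converts mac80211's txop value, given in 32-microsecond quanta, into microseconds before handing it to the firmware; the `params->txop << 5` shift is simply a multiply by 32. A minimal standalone sketch of that conversion (plain C, no driver types assumed; 94 quanta is the common 802.11 default TXOP for AC_VI):

#include <stdint.h>
#include <stdio.h>

/* mac80211 reports TXOP in units of 32us; the firmware wants microseconds. */
static uint32_t txop_to_us(uint16_t txop_in_32us_units)
{
	return (uint32_t)txop_in_32us_units << 5;	/* x32 */
}

int main(void)
{
	printf("%u us\n", (unsigned)txop_to_us(94));	/* 3008 us */
	return 0;
}
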
index 60a3738eadb0c2e6cf99bbce22be10722cec5633..5c347b1bd17faae15f9150fb3a50ecfec370e264 100644 (file)
@@ -24,6 +24,7 @@
 #include "reg.h"
 #include "ps.h"
 #include "io.h"
+#include "tx.h"
 
 #define WL1271_WAKEUP_TIMEOUT 500
 
@@ -139,8 +140,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
                        return ret;
                }
 
-               ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE,
-                                        rates, send);
+               ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
                if (ret < 0)
                        return ret;
 
@@ -163,8 +163,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
                if (ret < 0)
                        return ret;
 
-               ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE,
-                                        rates, send);
+               ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
                if (ret < 0)
                        return ret;
 
@@ -175,4 +174,81 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
        return ret;
 }
 
+static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
+{
+       int i, filtered = 0;
+       struct sk_buff *skb;
+       struct ieee80211_tx_info *info;
+       unsigned long flags;
+
+       /* filter all frames currently in the low-level queues for this hlid */
+       for (i = 0; i < NUM_TX_QUEUES; i++) {
+               while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
+                       info = IEEE80211_SKB_CB(skb);
+                       info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+                       info->status.rates[0].idx = -1;
+                       ieee80211_tx_status(wl->hw, skb);
+                       filtered++;
+               }
+       }
+
+       spin_lock_irqsave(&wl->wl_lock, flags);
+       wl->tx_queue_count -= filtered;
+       spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+       wl1271_handle_tx_low_watermark(wl);
+}
+
+void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
+{
+       struct ieee80211_sta *sta;
+
+       if (test_bit(hlid, &wl->ap_ps_map))
+               return;
+
+       wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d blks %d "
+                    "clean_queues %d", hlid, wl->links[hlid].allocated_blks,
+                    clean_queues);
+
+       rcu_read_lock();
+       sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+       if (!sta) {
+               wl1271_error("could not find sta %pM for starting ps",
+                            wl->links[hlid].addr);
+               rcu_read_unlock();
+               return;
+       }
 
+       ieee80211_sta_ps_transition_ni(sta, true);
+       rcu_read_unlock();
+
+       /* do we want to filter all frames from this link's queues? */
+       if (clean_queues)
+               wl1271_ps_filter_frames(wl, hlid);
+
+       __set_bit(hlid, &wl->ap_ps_map);
+}
+
+void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid)
+{
+       struct ieee80211_sta *sta;
+
+       if (!test_bit(hlid, &wl->ap_ps_map))
+               return;
+
+       wl1271_debug(DEBUG_PSM, "end mac80211 PSM on hlid %d", hlid);
+
+       __clear_bit(hlid, &wl->ap_ps_map);
+
+       rcu_read_lock();
+       sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+       if (!sta) {
+               wl1271_error("could not find sta %pM for ending ps",
+                            wl->links[hlid].addr);
+               goto end;
+       }
+
+       ieee80211_sta_ps_transition_ni(sta, false);
+end:
+       rcu_read_unlock();
+}
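
wl1271_ps_link_start()/wl1271_ps_link_end() above gate their work on a per-link bitmap (ap_ps_map), so each PS transition is forwarded to mac80211 only once per state change. A minimal standalone sketch of that idempotent gating, using plain bit operations instead of the kernel's test_bit/__set_bit helpers:

#include <stdbool.h>
#include <stdio.h>

static unsigned long ap_ps_map;	/* one bit per HLID */

/* returns true if this call actually changed the PS state of the link */
static bool link_ps_start(unsigned hlid)
{
	if (ap_ps_map & (1ul << hlid))
		return false;		/* already in PS - nothing to do */
	ap_ps_map |= 1ul << hlid;
	return true;
}

static bool link_ps_end(unsigned hlid)
{
	if (!(ap_ps_map & (1ul << hlid)))
		return false;		/* not in PS */
	ap_ps_map &= ~(1ul << hlid);
	return true;
}

int main(void)
{
	printf("%d %d %d\n", link_ps_start(3), link_ps_start(3), link_ps_end(3));
	/* prints "1 0 1": the second start is ignored */
	return 0;
}
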
index 8415060f08e5023e1233a131ba46e65df962c7aa..fc1f4c193593ee955267bb985b08765ca248a5b2 100644 (file)
@@ -32,5 +32,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
 void wl1271_ps_elp_sleep(struct wl1271 *wl);
 int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake);
 void wl1271_elp_work(struct work_struct *work);
+void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues);
+void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid);
 
 #endif /* __WL1271_PS_H__ */
index 682304c30b81e208b15cc3dda2b6b8f43cf4a4c8..3d13d7a83ea1eeeacc776429e48b8daa0902ab55 100644 (file)
 #include "rx.h"
 #include "io.h"
 
-static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status,
+static u8 wl1271_rx_get_mem_block(struct wl1271_fw_common_status *status,
                                  u32 drv_rx_counter)
 {
        return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
                RX_MEM_BLOCK_MASK;
 }
 
-static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status,
+static u32 wl1271_rx_get_buf_size(struct wl1271_fw_common_status *status,
                                 u32 drv_rx_counter)
 {
        return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
@@ -76,7 +76,7 @@ static void wl1271_rx_status(struct wl1271 *wl,
         */
        wl->noise = desc->rssi - (desc->snr >> 1);
 
-       status->freq = ieee80211_channel_to_frequency(desc->channel);
+       status->freq = ieee80211_channel_to_frequency(desc->channel, desc_band);
 
        if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
                status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
@@ -92,7 +92,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
 {
        struct wl1271_rx_descriptor *desc;
        struct sk_buff *skb;
-       u16 *fc;
+       struct ieee80211_hdr *hdr;
        u8 *buf;
        u8 beacon = 0;
 
@@ -118,8 +118,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
        /* now we pull the descriptor out of the buffer */
        skb_pull(skb, sizeof(*desc));
 
-       fc = (u16 *)skb->data;
-       if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON)
+       hdr = (struct ieee80211_hdr *)skb->data;
+       if (ieee80211_is_beacon(hdr->frame_control))
                beacon = 1;
 
        wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
@@ -134,7 +134,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
        return 0;
 }
 
-void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
+void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
 {
        struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
        u32 buf_size;
@@ -198,6 +198,16 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
                        pkt_offset += pkt_length;
                }
        }
-       wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS,
-                       cpu_to_le32(wl->rx_counter));
+       wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
+}
+
+void wl1271_set_default_filters(struct wl1271 *wl)
+{
+       if (wl->bss_type == BSS_TYPE_AP_BSS) {
+               wl->rx_config = WL1271_DEFAULT_AP_RX_CONFIG;
+               wl->rx_filter = WL1271_DEFAULT_AP_RX_FILTER;
+       } else {
+               wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
+               wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
+       }
 }
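
The rx path above switches from peeking at a raw u16 frame-control word to ieee80211_is_beacon(), which masks the type and subtype bits. A standalone sketch of the same test on a host-order frame-control value (mask and values per IEEE 802.11: type in bits 2-3, subtype in bits 4-7, beacon = management subtype 8; the real helper works on the little-endian on-air __le16):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FCTL_FTYPE	0x000c
#define FCTL_STYPE	0x00f0
#define FTYPE_MGMT	0x0000
#define STYPE_BEACON	0x0080

static bool is_beacon(uint16_t fc)
{
	return (fc & (FCTL_FTYPE | FCTL_STYPE)) == (FTYPE_MGMT | STYPE_BEACON);
}

int main(void)
{
	printf("%d %d\n", is_beacon(0x0080), is_beacon(0x0040));	/* 1 0 */
	return 0;
}
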
index 3abb26fe0364f3d5e66c76d2ec7000a8b056f4ea..75fabf83649137f959201aa0117be827c5fa54bd 100644 (file)
 #define WL1271_RX_MAX_RSSI -30
 #define WL1271_RX_MIN_RSSI -95
 
-#define WL1271_RX_ALIGN_TO 4
-#define WL1271_RX_ALIGN(len) (((len) + WL1271_RX_ALIGN_TO - 1) & \
-                            ~(WL1271_RX_ALIGN_TO - 1))
-
 #define SHORT_PREAMBLE_BIT   BIT(0)
 #define OFDM_RATE_BIT        BIT(6)
 #define PBCC_RATE_BIT        BIT(7)
@@ -86,8 +82,9 @@
 /*
  * RX Descriptor status
  *
- * Bits 0-2 - status
- * Bits 3-7 - reserved
+ * Bits 0-2 - error code
+ * Bits 3-5 - process_id tag (AP mode FW)
+ * Bits 6-7 - reserved
  */
 #define WL1271_RX_DESC_STATUS_MASK      0x07
 
@@ -110,12 +107,16 @@ struct wl1271_rx_descriptor {
        u8  snr;
        __le32 timestamp;
        u8  packet_class;
-       u8  process_id;
+       union {
+               u8  process_id; /* STA FW */
+               u8  hlid; /* AP FW */
+       } __packed;
        u8  pad_len;
        u8  reserved;
 } __packed;
 
-void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status);
+void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status);
 u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
+void wl1271_set_default_filters(struct wl1271 *wl);
 
 #endif
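
The updated rx.h comment above splits the descriptor status byte into an error code (bits 0-2) and an AP-firmware process_id tag (bits 3-5). A small sketch of pulling those fields out of a status byte; the tag mask and shift names are illustrative labels, not driver API:

#include <stdint.h>
#include <stdio.h>

#define RX_DESC_STATUS_MASK	0x07	/* bits 0-2: error code */
#define RX_DESC_TAG_MASK	0x38	/* bits 3-5: process_id tag (AP FW) */
#define RX_DESC_TAG_SHIFT	3

int main(void)
{
	uint8_t status = 0x1a;	/* tag 3, error code 2 */

	printf("err=%u tag=%u\n",
	       (unsigned)(status & RX_DESC_STATUS_MASK),
	       (unsigned)((status & RX_DESC_TAG_MASK) >> RX_DESC_TAG_SHIFT));
	return 0;
}
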
index 93cbb8d5aba9f7bcbc6630dd1784f7363b99f7c9..d5e87482506901b131743cb2db57c030d58ecdbc 100644 (file)
@@ -345,3 +345,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
 MODULE_FIRMWARE(WL1271_FW_NAME);
+MODULE_FIRMWARE(WL1271_AP_FW_NAME);
index 7145ea5437832f06746063fba80909c2191d5d35..0132dad756c4c1bd73eb95738d505f9ba35febc4 100644 (file)
@@ -110,6 +110,7 @@ static void wl1271_spi_reset(struct wl1271 *wl)
        spi_message_add_tail(&t, &m);
 
        spi_sync(wl_to_spi(wl), &m);
+
        wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
        kfree(cmd);
 }
@@ -494,4 +495,5 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
 MODULE_FIRMWARE(WL1271_FW_NAME);
+MODULE_FIRMWARE(WL1271_AP_FW_NAME);
 MODULE_ALIAS("spi:wl1271");
index b44c75cd8c1e714d8e4f2cf83c8099475fca795b..ac60d577319f95887e019d4bd06731a7c1ede6c7 100644 (file)
@@ -23,6 +23,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/etherdevice.h>
 
 #include "wl12xx.h"
 #include "io.h"
 #include "ps.h"
 #include "tx.h"
 
+static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
+{
+       int ret;
+       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+
+       if (is_ap)
+               ret = wl1271_cmd_set_ap_default_wep_key(wl, id);
+       else
+               ret = wl1271_cmd_set_sta_default_wep_key(wl, id);
+
+       if (ret < 0)
+               return ret;
+
+       wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
+       return 0;
+}
+
 static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
 {
        int id;
@@ -52,8 +70,65 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id)
        }
 }
 
+static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
+                                                struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr;
+
+       /*
+        * Add the station to the FW's known list before transmitting the
+        * authentication response, so that it won't get de-authed by the
+        * FW when it transmits too soon.
+        */
+       hdr = (struct ieee80211_hdr *)(skb->data +
+                                      sizeof(struct wl1271_tx_hw_descr));
+       if (ieee80211_is_auth(hdr->frame_control))
+               wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+}
+
+static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
+{
+       bool fw_ps;
+       u8 tx_blks;
+
+       /* only regulate station links */
+       if (hlid < WL1271_AP_STA_HLID_START)
+               return;
+
+       fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+       tx_blks = wl->links[hlid].allocated_blks;
+
+       /*
+        * If the link is in FW PS and enough of its data is already buffered
+        * in the FW, we can put it into high-level PS and clean out its
+        * TX queues.
+        */
+       if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
+               wl1271_ps_link_start(wl, hlid, true);
+}
+
+u8 wl1271_tx_get_hlid(struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
+
+       if (control->control.sta) {
+               struct wl1271_station *wl_sta;
+
+               wl_sta = (struct wl1271_station *)
+                               control->control.sta->drv_priv;
+               return wl_sta->hlid;
+       } else {
+               struct ieee80211_hdr *hdr;
+
+               hdr = (struct ieee80211_hdr *)skb->data;
+               if (ieee80211_is_mgmt(hdr->frame_control))
+                       return WL1271_AP_GLOBAL_HLID;
+               else
+                       return WL1271_AP_BROADCAST_HLID;
+       }
+}
+
 static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
-                               u32 buf_offset)
+                               u32 buf_offset, u8 hlid)
 {
        struct wl1271_tx_hw_descr *desc;
        u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
@@ -82,6 +157,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
 
                wl->tx_blocks_available -= total_blocks;
 
+               if (wl->bss_type == BSS_TYPE_AP_BSS)
+                       wl->links[hlid].allocated_blks += total_blocks;
+
                ret = 0;
 
                wl1271_debug(DEBUG_TX,
@@ -95,11 +173,12 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
 }
 
 static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
-                             u32 extra, struct ieee80211_tx_info *control)
+                             u32 extra, struct ieee80211_tx_info *control,
+                             u8 hlid)
 {
        struct timespec ts;
        struct wl1271_tx_hw_descr *desc;
-       int pad, ac;
+       int pad, ac, rate_idx;
        s64 hosttime;
        u16 tx_attr;
 
@@ -117,7 +196,11 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
        getnstimeofday(&ts);
        hosttime = (timespec_to_ns(&ts) >> 10);
        desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
-       desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
+
+       if (wl->bss_type != BSS_TYPE_AP_BSS)
+               desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
+       else
+               desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
 
        /* configure the tx attributes */
        tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
@@ -125,25 +208,49 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
        /* queue (we use same identifiers for tid's and ac's */
        ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
        desc->tid = ac;
-       desc->aid = TX_HW_DEFAULT_AID;
+
+       if (wl->bss_type != BSS_TYPE_AP_BSS) {
+               desc->aid = hlid;
+
+               /* if the packets are destined for AP (have a STA entry)
+                  send them with AP rate policies, otherwise use default
+                  basic rates */
+               if (control->control.sta)
+                       rate_idx = ACX_TX_AP_FULL_RATE;
+               else
+                       rate_idx = ACX_TX_BASIC_RATE;
+       } else {
+               desc->hlid = hlid;
+               switch (hlid) {
+               case WL1271_AP_GLOBAL_HLID:
+                       rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
+                       break;
+               case WL1271_AP_BROADCAST_HLID:
+                       rate_idx = ACX_TX_AP_MODE_BCST_RATE;
+                       break;
+               default:
+                       rate_idx = ac;
+                       break;
+               }
+       }
+
+       tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
        desc->reserved = 0;
 
        /* align the length (and store in terms of words) */
-       pad = WL1271_TX_ALIGN(skb->len);
+       pad = ALIGN(skb->len, WL1271_TX_ALIGN_TO);
        desc->length = cpu_to_le16(pad >> 2);
 
        /* calculate number of padding bytes */
        pad = pad - skb->len;
        tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
 
-       /* if the packets are destined for AP (have a STA entry) send them
-          with AP rate policies, otherwise use default basic rates */
-       if (control->control.sta)
-               tx_attr |= ACX_TX_AP_FULL_RATE << TX_HW_ATTR_OFST_RATE_POLICY;
-
        desc->tx_attr = cpu_to_le16(tx_attr);
 
-       wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
+       wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d hlid: %d "
+               "tx_attr: 0x%x len: %d life: %d mem: %d", pad, desc->hlid,
+               le16_to_cpu(desc->tx_attr), le16_to_cpu(desc->length),
+               le16_to_cpu(desc->life_time), desc->total_mem_blocks);
 }
 
 /* caller must hold wl->mutex */
@@ -153,8 +260,8 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
        struct ieee80211_tx_info *info;
        u32 extra = 0;
        int ret = 0;
-       u8 idx;
        u32 total_len;
+       u8 hlid;
 
        if (!skb)
                return -EINVAL;
@@ -166,29 +273,43 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
                extra = WL1271_TKIP_IV_SPACE;
 
        if (info->control.hw_key) {
-               idx = info->control.hw_key->hw_key_idx;
+               bool is_wep;
+               u8 idx = info->control.hw_key->hw_key_idx;
+               u32 cipher = info->control.hw_key->cipher;
+
+               is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
+                        (cipher == WLAN_CIPHER_SUITE_WEP104);
 
-               /* FIXME: do we have to do this if we're not using WEP? */
-               if (unlikely(wl->default_key != idx)) {
-                       ret = wl1271_cmd_set_default_wep_key(wl, idx);
+               if (unlikely(is_wep && wl->default_key != idx)) {
+                       ret = wl1271_set_default_wep_key(wl, idx);
                        if (ret < 0)
                                return ret;
                        wl->default_key = idx;
                }
        }
 
-       ret = wl1271_tx_allocate(wl, skb, extra, buf_offset);
+       if (wl->bss_type == BSS_TYPE_AP_BSS)
+               hlid = wl1271_tx_get_hlid(skb);
+       else
+               hlid = TX_HW_DEFAULT_AID;
+
+       ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
        if (ret < 0)
                return ret;
 
-       wl1271_tx_fill_hdr(wl, skb, extra, info);
+       if (wl->bss_type == BSS_TYPE_AP_BSS) {
+               wl1271_tx_ap_update_inconnection_sta(wl, skb);
+               wl1271_tx_regulate_link(wl, hlid);
+       }
+
+       wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);
 
        /*
         * The length of each packet is stored in terms of words. Thus, we must
         * pad the skb data to make sure its length is aligned.
         * The number of padding bytes is computed and set in wl1271_tx_fill_hdr
         */
-       total_len = WL1271_TX_ALIGN(skb->len);
+       total_len = ALIGN(skb->len, WL1271_TX_ALIGN_TO);
        memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
        memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
 
@@ -222,7 +343,7 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
        return enabled_rates;
 }
 
-static void handle_tx_low_watermark(struct wl1271 *wl)
+void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
 {
        unsigned long flags;
 
@@ -236,7 +357,7 @@ static void handle_tx_low_watermark(struct wl1271 *wl)
        }
 }
 
-static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
 {
        struct sk_buff *skb = NULL;
        unsigned long flags;
@@ -262,12 +383,69 @@ out:
        return skb;
 }
 
+static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
+{
+       struct sk_buff *skb = NULL;
+       unsigned long flags;
+       int i, h, start_hlid;
+
+       /* start from the link after the last one */
+       start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;
+
+       /* dequeue according to AC, round robin on each link */
+       for (i = 0; i < AP_MAX_LINKS; i++) {
+               h = (start_hlid + i) % AP_MAX_LINKS;
+
+               skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VO]);
+               if (skb)
+                       goto out;
+               skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VI]);
+               if (skb)
+                       goto out;
+               skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BE]);
+               if (skb)
+                       goto out;
+               skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BK]);
+               if (skb)
+                       goto out;
+       }
+
+out:
+       if (skb) {
+               wl->last_tx_hlid = h;
+               spin_lock_irqsave(&wl->wl_lock, flags);
+               wl->tx_queue_count--;
+               spin_unlock_irqrestore(&wl->wl_lock, flags);
+       } else {
+               wl->last_tx_hlid = 0;
+       }
+
+       return skb;
+}
+
+static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
+{
+       if (wl->bss_type == BSS_TYPE_AP_BSS)
+               return wl1271_ap_skb_dequeue(wl);
+
+       return wl1271_sta_skb_dequeue(wl);
+}
+
 static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
 {
        unsigned long flags;
        int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 
-       skb_queue_head(&wl->tx_queue[q], skb);
+       if (wl->bss_type == BSS_TYPE_AP_BSS) {
+               u8 hlid = wl1271_tx_get_hlid(skb);
+               skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
+
+               /* make sure we dequeue the same packet next time */
+               wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS;
+       } else {
+               skb_queue_head(&wl->tx_queue[q], skb);
+       }
+
        spin_lock_irqsave(&wl->wl_lock, flags);
        wl->tx_queue_count++;
        spin_unlock_irqrestore(&wl->wl_lock, flags);
@@ -277,35 +455,13 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
 {
        struct sk_buff *skb;
        bool woken_up = false;
-       u32 sta_rates = 0;
        u32 buf_offset = 0;
        bool sent_packets = false;
        int ret;
 
-       /* check if the rates supported by the AP have changed */
-       if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED,
-                                       &wl->flags))) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&wl->wl_lock, flags);
-               sta_rates = wl->sta_rate_set;
-               spin_unlock_irqrestore(&wl->wl_lock, flags);
-       }
-
        if (unlikely(wl->state == WL1271_STATE_OFF))
                goto out;
 
-       /* if rates have changed, re-configure the rate policy */
-       if (unlikely(sta_rates)) {
-               ret = wl1271_ps_elp_wakeup(wl, false);
-               if (ret < 0)
-                       goto out;
-               woken_up = true;
-
-               wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
-               wl1271_acx_rate_policies(wl);
-       }
-
        while ((skb = wl1271_skb_dequeue(wl))) {
                if (!woken_up) {
                        ret = wl1271_ps_elp_wakeup(wl, false);
@@ -352,7 +508,7 @@ out_ack:
        if (sent_packets) {
                /* interrupt the firmware with the new packets */
                wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
-               handle_tx_low_watermark(wl);
+               wl1271_handle_tx_low_watermark(wl);
        }
 
 out:
@@ -469,32 +625,76 @@ void wl1271_tx_complete(struct wl1271 *wl)
        }
 }
 
+void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
+{
+       struct sk_buff *skb;
+       int i, total = 0;
+       unsigned long flags;
+       struct ieee80211_tx_info *info;
+
+       for (i = 0; i < NUM_TX_QUEUES; i++) {
+               while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
+                       wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
+                       info = IEEE80211_SKB_CB(skb);
+                       info->status.rates[0].idx = -1;
+                       info->status.rates[0].count = 0;
+                       ieee80211_tx_status(wl->hw, skb);
+                       total++;
+               }
+       }
+
+       spin_lock_irqsave(&wl->wl_lock, flags);
+       wl->tx_queue_count -= total;
+       spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+       wl1271_handle_tx_low_watermark(wl);
+}
+
 /* caller must hold wl->mutex */
 void wl1271_tx_reset(struct wl1271 *wl)
 {
        int i;
        struct sk_buff *skb;
+       struct ieee80211_tx_info *info;
 
        /* TX failure */
-       for (i = 0; i < NUM_TX_QUEUES; i++) {
-               while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
-                       wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
-                       ieee80211_tx_status(wl->hw, skb);
+       if (wl->bss_type == BSS_TYPE_AP_BSS) {
+               for (i = 0; i < AP_MAX_LINKS; i++) {
+                       wl1271_tx_reset_link_queues(wl, i);
+                       wl->links[i].allocated_blks = 0;
+                       wl->links[i].prev_freed_blks = 0;
+               }
+
+               wl->last_tx_hlid = 0;
+       } else {
+               for (i = 0; i < NUM_TX_QUEUES; i++) {
+                       while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
+                               wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
+                                            skb);
+                               info = IEEE80211_SKB_CB(skb);
+                               info->status.rates[0].idx = -1;
+                               info->status.rates[0].count = 0;
+                               ieee80211_tx_status(wl->hw, skb);
+                       }
                }
        }
+
        wl->tx_queue_count = 0;
 
        /*
         * Make sure the driver is at a consistent state, in case this
         * function is called from a context other than interface removal.
         */
-       handle_tx_low_watermark(wl);
+       wl1271_handle_tx_low_watermark(wl);
 
        for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
                if (wl->tx_frames[i] != NULL) {
                        skb = wl->tx_frames[i];
                        wl1271_free_tx_id(wl, i);
                        wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
+                       info = IEEE80211_SKB_CB(skb);
+                       info->status.rates[0].idx = -1;
+                       info->status.rates[0].count = 0;
                        ieee80211_tx_status(wl->hw, skb);
                }
 }
@@ -509,8 +709,8 @@ void wl1271_tx_flush(struct wl1271 *wl)
 
        while (!time_after(jiffies, timeout)) {
                mutex_lock(&wl->mutex);
-               wl1271_debug(DEBUG_TX, "flushing tx buffer: %d",
-                            wl->tx_frames_cnt);
+               wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
+                            wl->tx_frames_cnt, wl->tx_queue_count);
                if ((wl->tx_frames_cnt == 0) && (wl->tx_queue_count == 0)) {
                        mutex_unlock(&wl->mutex);
                        return;
@@ -521,3 +721,21 @@ void wl1271_tx_flush(struct wl1271 *wl)
 
        wl1271_warning("Unable to flush all TX buffers, timed out.");
 }
+
+u32 wl1271_tx_min_rate_get(struct wl1271 *wl)
+{
+       int i;
+       u32 rate = 0;
+
+       if (!wl->basic_rate_set) {
+               WARN_ON(1);
+               wl->basic_rate_set = wl->conf.tx.basic_rate;
+       }
+
+       for (i = 0; !rate; i++) {
+               if ((wl->basic_rate_set >> i) & 0x1)
+                       rate = 1 << i;
+       }
+
+       return rate;
+}
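
wl1271_tx_min_rate_get() above walks the basic-rate bitmap and returns the lowest set bit, i.e. the slowest enabled rate. A standalone sketch of that lookup: the loop variant mirrors the driver (with an explicit bound added), and the one-liner uses the usual two's-complement trick to isolate the lowest set bit:

#include <stdint.h>
#include <stdio.h>

static uint32_t min_rate_loop(uint32_t rate_set)
{
	uint32_t rate = 0;
	int i;

	for (i = 0; !rate && i < 32; i++)
		if ((rate_set >> i) & 0x1)
			rate = 1u << i;
	return rate;
}

static uint32_t min_rate_isolate(uint32_t rate_set)
{
	return rate_set & -rate_set;	/* lowest set bit */
}

int main(void)
{
	uint32_t set = 0x150;	/* bits 4, 6 and 8 set */

	printf("0x%x 0x%x\n", (unsigned)min_rate_loop(set),
	       (unsigned)min_rate_isolate(set));	/* both 0x10 */
	return 0;
}
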
index 903e5dc69b7a2d0dd62b2c5f488b31f3071114a8..02f07fa66e820f12c0c8790f7ee34f42a2b10a71 100644 (file)
@@ -29,6 +29,7 @@
 #define TX_HW_BLOCK_SIZE                 252
 
 #define TX_HW_MGMT_PKT_LIFETIME_TU       2000
+#define TX_HW_AP_MODE_PKT_LIFETIME_TU    8000
 /* The chipset reference driver states, that the "aid" value 1
  * is for infra-BSS, but is still always used */
 #define TX_HW_DEFAULT_AID                1
@@ -52,8 +53,6 @@
 #define TX_HW_RESULT_QUEUE_LEN_MASK      0xf
 
 #define WL1271_TX_ALIGN_TO 4
-#define WL1271_TX_ALIGN(len) (((len) + WL1271_TX_ALIGN_TO - 1) & \
-                            ~(WL1271_TX_ALIGN_TO - 1))
 #define WL1271_TKIP_IV_SPACE 4
 
 struct wl1271_tx_hw_descr {
@@ -77,8 +76,12 @@ struct wl1271_tx_hw_descr {
        u8 id;
        /* The packet TID value (as User-Priority) */
        u8 tid;
-       /* Identifier of the remote STA in IBSS, 1 in infra-BSS */
-       u8 aid;
+       union {
+               /* STA - Identifier of the remote STA in IBSS, 1 in infra-BSS */
+               u8 aid;
+               /* AP - host link ID (HLID) */
+               u8 hlid;
+       } __packed;
        u8 reserved;
 } __packed;
 
@@ -146,5 +149,9 @@ void wl1271_tx_reset(struct wl1271 *wl);
 void wl1271_tx_flush(struct wl1271 *wl);
 u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
+u32 wl1271_tx_min_rate_get(struct wl1271 *wl);
+u8 wl1271_tx_get_hlid(struct sk_buff *skb);
+void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
+void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
 
 #endif
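
The tx path now uses the kernel's generic ALIGN() against WL1271_TX_ALIGN_TO instead of the private WL1271_TX_ALIGN macro; the descriptor stores the padded length in 32-bit words and the pad byte count goes into the tx attributes. A standalone sketch of that arithmetic (the frame length is just an example value):

#include <stdio.h>

#define TX_ALIGN_TO	4
#define ALIGN_UP(len, a)	(((len) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned skb_len = 1398;				/* example frame length */
	unsigned padded  = ALIGN_UP(skb_len, TX_ALIGN_TO);	/* 1400 */
	unsigned pad     = padded - skb_len;			/* 2 pad bytes */
	unsigned words   = padded >> 2;				/* length field, in words */

	printf("padded=%u pad=%u words=%u\n", padded, pad, words);
	return 0;
}
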
index 9050dd9b62d2e9ec46d3860a54b7f6781249e78e..338acc9f60b305d48d1a07d0d7b2b54f7374937e 100644 (file)
 #define DRIVER_NAME "wl1271"
 #define DRIVER_PREFIX DRIVER_NAME ": "
 
+/*
+ * FW versions that support 11n BA are marked
+ * x.x.x.50-60.x
+ */
+#define WL12XX_BA_SUPPORT_FW_COST_VER2_START    50
+#define WL12XX_BA_SUPPORT_FW_COST_VER2_END      60
+
 enum {
        DEBUG_NONE      = 0,
        DEBUG_IRQ       = BIT(0),
@@ -57,6 +64,8 @@ enum {
        DEBUG_SDIO      = BIT(14),
        DEBUG_FILTERS   = BIT(15),
        DEBUG_ADHOC     = BIT(16),
+       DEBUG_AP        = BIT(17),
+       DEBUG_MASTER    = (DEBUG_ADHOC | DEBUG_AP),
        DEBUG_ALL       = ~0,
 };
 
@@ -103,16 +112,27 @@ extern u32 wl12xx_debug_level;
                                       true);                           \
        } while (0)
 
-#define WL1271_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN |  \
+#define WL1271_DEFAULT_STA_RX_CONFIG (CFG_UNI_FILTER_EN |      \
                                  CFG_BSSID_FILTER_EN | \
                                  CFG_MC_FILTER_EN)
 
-#define WL1271_DEFAULT_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN |  \
+#define WL1271_DEFAULT_STA_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN |  \
                                  CFG_RX_MGMT_EN | CFG_RX_DATA_EN |   \
                                  CFG_RX_CTL_EN | CFG_RX_BCN_EN |     \
                                  CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
 
-#define WL1271_FW_NAME "wl1271-fw.bin"
+#define WL1271_DEFAULT_AP_RX_CONFIG  0
+
+#define WL1271_DEFAULT_AP_RX_FILTER  (CFG_RX_RCTS_ACK | CFG_RX_PREQ_EN | \
+                                 CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
+                                 CFG_RX_CTL_EN | CFG_RX_AUTH_EN | \
+                                 CFG_RX_ASSOC_EN)
+
+#define WL1271_FW_NAME "wl1271-fw-2.bin"
+#define WL1271_AP_FW_NAME "wl1271-fw-ap.bin"
+
 #define WL1271_NVS_NAME "wl1271-nvs.bin"
 
 #define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
@@ -129,6 +149,25 @@ extern u32 wl12xx_debug_level;
 #define WL1271_DEFAULT_BEACON_INT  100
 #define WL1271_DEFAULT_DTIM_PERIOD 1
 
+#define WL1271_AP_GLOBAL_HLID      0
+#define WL1271_AP_BROADCAST_HLID   1
+#define WL1271_AP_STA_HLID_START   2
+
+/*
+ * When in AP-mode, we allow (at least) this number of mem-blocks
+ * to be transmitted to the FW for a STA in PS-mode. The FW wakes a
+ * sleeping STA only when packets are present in its buffers. We want to
+ * buffer enough packets for the driver to transmit all of its queued data
+ * before the STA goes to sleep again, but we don't want to take too many
+ * mem-blocks, as that might hurt the throughput of active STAs.
+ * The number of blocks (18) is enough for 2 large packets.
+#define WL1271_PS_STA_MAX_BLOCKS  (2 * 9)
+
+#define WL1271_AP_BSS_INDEX        0
+#define WL1271_AP_DEF_INACTIV_SEC  300
+#define WL1271_AP_DEF_BEACON_EXP   20
+
 #define ACX_TX_DESCRIPTORS         32
 
 #define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
@@ -161,10 +200,13 @@ struct wl1271_partition_set {
 
 struct wl1271;
 
+#define WL12XX_NUM_FW_VER 5
+
 /* FIXME: I'm not sure about this structure name */
 struct wl1271_chip {
        u32 id;
-       char fw_ver[21];
+       char fw_ver_str[ETHTOOL_BUSINFO_LEN];
+       unsigned int fw_ver[WL12XX_NUM_FW_VER];
 };
 
 struct wl1271_stats {
@@ -178,8 +220,13 @@ struct wl1271_stats {
 #define NUM_TX_QUEUES              4
 #define NUM_RX_PKT_DESC            8
 
-/* FW status registers */
-struct wl1271_fw_status {
+#define AP_MAX_STATIONS            5
+
+/* Broadcast and Global links + links to stations */
+#define AP_MAX_LINKS               (AP_MAX_STATIONS + 2)
+
+/* FW status registers common for AP/STA */
+struct wl1271_fw_common_status {
        __le32 intr;
        u8  fw_rx_counter;
        u8  drv_rx_counter;
@@ -188,9 +235,43 @@ struct wl1271_fw_status {
        __le32 rx_pkt_descs[NUM_RX_PKT_DESC];
        __le32 tx_released_blks[NUM_TX_QUEUES];
        __le32 fw_localtime;
-       __le32 padding[2];
 } __packed;
 
+/* FW status registers for AP */
+struct wl1271_fw_ap_status {
+       struct wl1271_fw_common_status common;
+
+       /* Next fields valid only in AP FW */
+
+       /*
+        * A bitmap (where each bit represents a single HLID)
+        * to indicate if the station is in PS mode.
+        */
+       __le32 link_ps_bitmap;
+
+       /* Number of freed MBs per HLID */
+       u8 tx_lnk_free_blks[AP_MAX_LINKS];
+       u8 padding_1[1];
+} __packed;
+
+/* FW status registers for STA */
+struct wl1271_fw_sta_status {
+       struct wl1271_fw_common_status common;
+
+       u8  tx_total;
+       u8  reserved1;
+       __le16 reserved2;
+} __packed;
+
+struct wl1271_fw_full_status {
+       union {
+               struct wl1271_fw_common_status common;
+               struct wl1271_fw_sta_status sta;
+               struct wl1271_fw_ap_status ap;
+       };
+} __packed;
+
+
 struct wl1271_rx_mem_pool_addr {
        u32 addr;
        u32 addr_extra;
@@ -218,6 +299,48 @@ struct wl1271_if_operations {
        void (*disable_irq)(struct wl1271 *wl);
 };
 
+#define MAX_NUM_KEYS 14
+#define MAX_KEY_SIZE 32
+
+struct wl1271_ap_key {
+       u8 id;
+       u8 key_type;
+       u8 key_size;
+       u8 key[MAX_KEY_SIZE];
+       u8 hlid;
+       u32 tx_seq_32;
+       u16 tx_seq_16;
+};
+
+enum wl12xx_flags {
+       WL1271_FLAG_STA_ASSOCIATED,
+       WL1271_FLAG_JOINED,
+       WL1271_FLAG_GPIO_POWER,
+       WL1271_FLAG_TX_QUEUE_STOPPED,
+       WL1271_FLAG_IN_ELP,
+       WL1271_FLAG_PSM,
+       WL1271_FLAG_PSM_REQUESTED,
+       WL1271_FLAG_IRQ_PENDING,
+       WL1271_FLAG_IRQ_RUNNING,
+       WL1271_FLAG_IDLE,
+       WL1271_FLAG_IDLE_REQUESTED,
+       WL1271_FLAG_PSPOLL_FAILURE,
+       WL1271_FLAG_STA_STATE_SENT,
+       WL1271_FLAG_FW_TX_BUSY,
+       WL1271_FLAG_AP_STARTED
+};
+
+struct wl1271_link {
+       /* AP-mode - TX queue per AC in link */
+       struct sk_buff_head tx_queue[NUM_TX_QUEUES];
+
+       /* accounting for allocated / available TX blocks in FW */
+       u8 allocated_blks;
+       u8 prev_freed_blks;
+
+       u8 addr[ETH_ALEN];
+};
+
 struct wl1271 {
        struct platform_device *plat_dev;
        struct ieee80211_hw *hw;
@@ -236,21 +359,6 @@ struct wl1271 {
        enum wl1271_state state;
        struct mutex mutex;
 
-#define WL1271_FLAG_STA_RATES_CHANGED  (0)
-#define WL1271_FLAG_STA_ASSOCIATED     (1)
-#define WL1271_FLAG_JOINED             (2)
-#define WL1271_FLAG_GPIO_POWER         (3)
-#define WL1271_FLAG_TX_QUEUE_STOPPED   (4)
-#define WL1271_FLAG_IN_ELP             (5)
-#define WL1271_FLAG_PSM                (6)
-#define WL1271_FLAG_PSM_REQUESTED      (7)
-#define WL1271_FLAG_IRQ_PENDING        (8)
-#define WL1271_FLAG_IRQ_RUNNING        (9)
-#define WL1271_FLAG_IDLE              (10)
-#define WL1271_FLAG_IDLE_REQUESTED    (11)
-#define WL1271_FLAG_PSPOLL_FAILURE    (12)
-#define WL1271_FLAG_STA_STATE_SENT    (13)
-#define WL1271_FLAG_FW_TX_BUSY        (14)
        unsigned long flags;
 
        struct wl1271_partition_set part;
@@ -262,6 +370,7 @@ struct wl1271 {
 
        u8 *fw;
        size_t fw_len;
+       u8 fw_bss_type;
        struct wl1271_nvs_file *nvs;
        size_t nvs_len;
 
@@ -343,7 +452,6 @@ struct wl1271 {
         *      bits 16-23 - 802.11n   MCS index mask
         * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
         */
-       u32 sta_rate_set;
        u32 basic_rate_set;
        u32 basic_rate;
        u32 rate_set;
@@ -378,13 +486,12 @@ struct wl1271 {
        int last_rssi_event;
 
        struct wl1271_stats stats;
-       struct dentry *rootdir;
 
        __le32 buffer_32;
        u32 buffer_cmd;
        u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
 
-       struct wl1271_fw_status *fw_status;
+       struct wl1271_fw_full_status *fw_status;
        struct wl1271_tx_hw_res_if *tx_res_if;
 
        struct ieee80211_vif *vif;
@@ -400,6 +507,38 @@ struct wl1271 {
 
        /* Most recently reported noise in dBm */
        s8 noise;
+
+       /* map for HLIDs of associated stations - when operating in AP mode */
+       unsigned long ap_hlid_map[BITS_TO_LONGS(AP_MAX_STATIONS)];
+
+       /* recorded keys for AP-mode - set here before AP startup */
+       struct wl1271_ap_key *recorded_ap_keys[MAX_NUM_KEYS];
+
+       /* bands supported by this instance of wl12xx */
+       struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+
+       /* RX BA constraint value */
+       bool ba_support;
+       u8 ba_rx_bitmap;
+
+       /*
+        * AP-mode - links indexed by HLID. The global and broadcast links
+        * are always active.
+        */
+       struct wl1271_link links[AP_MAX_LINKS];
+
+       /* the hlid of the link where the last transmitted skb came from */
+       int last_tx_hlid;
+
+       /* AP-mode - a bitmap of links currently in PS mode according to FW */
+       u32 ap_fw_ps_map;
+
+       /* AP-mode - a bitmap of links currently in PS mode in mac80211 */
+       unsigned long ap_ps_map;
+};
+
+struct wl1271_station {
+       u8 hlid;
 };
 
 int wl1271_plt_start(struct wl1271 *wl);
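
wl12xx.h above reserves HLIDs 0 and 1 for the global and broadcast links and starts station HLIDs at 2, with ap_hlid_map tracking which of the AP_MAX_STATIONS slots are in use. A minimal standalone sketch of that allocation scheme, using a plain bitmap instead of the kernel's find_first_zero_bit:

#include <stdio.h>

#define AP_MAX_STATIONS		5
#define AP_STA_HLID_START	2	/* 0 = global link, 1 = broadcast link */

static unsigned long hlid_map;

/* returns the allocated HLID, or -1 if all station slots are taken */
static int alloc_hlid(void)
{
	int id;

	for (id = 0; id < AP_MAX_STATIONS; id++) {
		if (!(hlid_map & (1ul << id))) {
			hlid_map |= 1ul << id;
			return AP_STA_HLID_START + id;
		}
	}
	return -1;
}

static void free_hlid(int hlid)
{
	hlid_map &= ~(1ul << (hlid - AP_STA_HLID_START));
}

int main(void)
{
	printf("%d %d\n", alloc_hlid(), alloc_hlid());	/* 2 3 */
	free_hlid(2);
	printf("%d\n", alloc_hlid());			/* 2 again */
	return 0;
}
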
index be21032f4dc16047fa71dd8757d4d1bbb4db5825..67dcf8f28cd3fee77924366b0f49dc920f6cee03 100644 (file)
@@ -138,13 +138,13 @@ struct wl12xx_arp_rsp_template {
        struct ieee80211_hdr_3addr hdr;
 
        u8 llc_hdr[sizeof(rfc1042_header)];
-       u16 llc_type;
+       __be16 llc_type;
 
        struct arphdr arp_hdr;
        u8 sender_hw[ETH_ALEN];
-       u32 sender_ip;
+       __be32 sender_ip;
        u8 target_hw[ETH_ALEN];
-       u32 target_ip;
+       __be32 target_ip;
 } __packed;
 
 
@@ -160,4 +160,9 @@ struct wl12xx_probe_resp_template {
        struct wl12xx_ie_country country;
 } __packed;
 
+struct wl12xx_disconn_template {
+       struct ieee80211_header header;
+       __le16 disconn_reason;
+} __packed;
+
 #endif
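
The ARP response template above changes llc_type and the IP fields to __be16/__be32, i.e. explicit network byte order. A small userspace sketch of filling such big-endian fields with htons()/htonl(); the struct, the ARP ethertype 0x0806 and the dotted-quad address are only illustrative:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct arp_tmpl_tail {
	uint16_t llc_type;	/* big-endian on the wire */
	uint32_t sender_ip;	/* big-endian on the wire */
};

int main(void)
{
	struct arp_tmpl_tail t;

	t.llc_type = htons(0x0806);		/* ETH_P_ARP */
	t.sender_ip = htonl(0xc0a80001);	/* 192.168.0.1 */
	printf("type=%04x ip=%08x (host order)\n",
	       (unsigned)ntohs(t.llc_type), (unsigned)ntohl(t.sender_ip));
	return 0;
}
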
index 6a9b66051cf7de38000dcc20f3a2d944cef4ceae..a73a305d3cba3afe954b2fbbbbd4336e2c18f0ad 100644 (file)
@@ -108,25 +108,17 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
 {
        int r;
        int i;
-       zd_addr_t *a16;
-       u16 *v16;
+       zd_addr_t a16[USB_MAX_IOREAD32_COUNT * 2];
+       u16 v16[USB_MAX_IOREAD32_COUNT * 2];
        unsigned int count16;
 
        if (count > USB_MAX_IOREAD32_COUNT)
                return -EINVAL;
 
-       /* Allocate a single memory block for values and addresses. */
-       count16 = 2*count;
-       /* zd_addr_t is __nocast, so the kmalloc needs an explicit cast */
-       a16 = (zd_addr_t *) kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)),
-                                  GFP_KERNEL);
-       if (!a16) {
-               dev_dbg_f(zd_chip_dev(chip),
-                         "error ENOMEM in allocation of a16\n");
-               r = -ENOMEM;
-               goto out;
-       }
-       v16 = (u16 *)(a16 + count16);
+       /* Use stack for values and addresses. */
+       count16 = 2 * count;
+       BUG_ON(count16 * sizeof(zd_addr_t) > sizeof(a16));
+       BUG_ON(count16 * sizeof(u16) > sizeof(v16));
 
        for (i = 0; i < count; i++) {
                int j = 2*i;
@@ -139,7 +131,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
        if (r) {
                dev_dbg_f(zd_chip_dev(chip),
                          "error: zd_ioread16v_locked. Error number %d\n", r);
-               goto out;
+               return r;
        }
 
        for (i = 0; i < count; i++) {
@@ -147,18 +139,19 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
                values[i] = (v16[j] << 16) | v16[j+1];
        }
 
-out:
-       kfree((void *)a16);
-       return r;
+       return 0;
 }
 
-int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
-                  unsigned int count)
+static int _zd_iowrite32v_async_locked(struct zd_chip *chip,
+                                      const struct zd_ioreq32 *ioreqs,
+                                      unsigned int count)
 {
        int i, j, r;
-       struct zd_ioreq16 *ioreqs16;
+       struct zd_ioreq16 ioreqs16[USB_MAX_IOWRITE32_COUNT * 2];
        unsigned int count16;
 
+       /* Use stack for values and addresses. */
+
        ZD_ASSERT(mutex_is_locked(&chip->mutex));
 
        if (count == 0)
@@ -166,15 +159,8 @@ int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
        if (count > USB_MAX_IOWRITE32_COUNT)
                return -EINVAL;
 
-       /* Allocate a single memory block for values and addresses. */
-       count16 = 2*count;
-       ioreqs16 = kmalloc(count16 * sizeof(struct zd_ioreq16), GFP_KERNEL);
-       if (!ioreqs16) {
-               r = -ENOMEM;
-               dev_dbg_f(zd_chip_dev(chip),
-                         "error %d in ioreqs16 allocation\n", r);
-               goto out;
-       }
+       count16 = 2 * count;
+       BUG_ON(count16 * sizeof(struct zd_ioreq16) > sizeof(ioreqs16));
 
        for (i = 0; i < count; i++) {
                j = 2*i;
@@ -185,18 +171,30 @@ int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
                ioreqs16[j+1].addr  = ioreqs[i].addr;
        }
 
-       r = zd_usb_iowrite16v(&chip->usb, ioreqs16, count16);
+       r = zd_usb_iowrite16v_async(&chip->usb, ioreqs16, count16);
 #ifdef DEBUG
        if (r) {
                dev_dbg_f(zd_chip_dev(chip),
                          "error %d in zd_usb_write16v\n", r);
        }
 #endif /* DEBUG */
-out:
-       kfree(ioreqs16);
        return r;
 }
 
+int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
+                         unsigned int count)
+{
+       int r;
+
+       zd_usb_iowrite16v_async_start(&chip->usb);
+       r = _zd_iowrite32v_async_locked(chip, ioreqs, count);
+       if (r) {
+               zd_usb_iowrite16v_async_end(&chip->usb, 0);
+               return r;
+       }
+       return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
+}
+
 int zd_iowrite16a_locked(struct zd_chip *chip,
                   const struct zd_ioreq16 *ioreqs, unsigned int count)
 {
@@ -204,6 +202,8 @@ int zd_iowrite16a_locked(struct zd_chip *chip,
        unsigned int i, j, t, max;
 
        ZD_ASSERT(mutex_is_locked(&chip->mutex));
+       zd_usb_iowrite16v_async_start(&chip->usb);
+
        for (i = 0; i < count; i += j + t) {
                t = 0;
                max = count-i;
@@ -216,8 +216,9 @@ int zd_iowrite16a_locked(struct zd_chip *chip,
                        }
                }
 
-               r = zd_usb_iowrite16v(&chip->usb, &ioreqs[i], j);
+               r = zd_usb_iowrite16v_async(&chip->usb, &ioreqs[i], j);
                if (r) {
+                       zd_usb_iowrite16v_async_end(&chip->usb, 0);
                        dev_dbg_f(zd_chip_dev(chip),
                                  "error zd_usb_iowrite16v. Error number %d\n",
                                  r);
@@ -225,7 +226,7 @@ int zd_iowrite16a_locked(struct zd_chip *chip,
                }
        }
 
-       return 0;
+       return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
 }
 
 /* Writes a variable number of 32 bit registers. The functions will split
@@ -238,6 +239,8 @@ int zd_iowrite32a_locked(struct zd_chip *chip,
        int r;
        unsigned int i, j, t, max;
 
+       zd_usb_iowrite16v_async_start(&chip->usb);
+
        for (i = 0; i < count; i += j + t) {
                t = 0;
                max = count-i;
@@ -250,8 +253,9 @@ int zd_iowrite32a_locked(struct zd_chip *chip,
                        }
                }
 
-               r = _zd_iowrite32v_locked(chip, &ioreqs[i], j);
+               r = _zd_iowrite32v_async_locked(chip, &ioreqs[i], j);
                if (r) {
+                       zd_usb_iowrite16v_async_end(&chip->usb, 0);
                        dev_dbg_f(zd_chip_dev(chip),
                                "error _zd_iowrite32v_locked."
                                " Error number %d\n", r);
@@ -259,7 +263,7 @@ int zd_iowrite32a_locked(struct zd_chip *chip,
                }
        }
 
-       return 0;
+       return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
 }
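
The comment above this hunk describes the batching contract these helpers now follow: a run of register writes is bracketed by zd_usb_iowrite16v_async_start() and zd_usb_iowrite16v_async_end(), and an error in the middle still ends the batch with a zero timeout so nothing is left pending. A minimal sketch of that pattern, assuming the helpers behave as introduced by this patch:

/* Sketch of the start/queue/end batching used by the *_locked helpers.
 * Error paths end the batch with a zero timeout; the success path waits
 * up to 50 ms for the queued USB commands to complete.
 */
static int write_batch(struct zd_chip *chip,
                       const struct zd_ioreq16 *reqs, unsigned int count)
{
        int r;

        zd_usb_iowrite16v_async_start(&chip->usb);
        r = zd_usb_iowrite16v_async(&chip->usb, reqs, count);
        if (r) {
                zd_usb_iowrite16v_async_end(&chip->usb, 0);
                return r;
        }
        return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
}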
 
 int zd_ioread16(struct zd_chip *chip, zd_addr_t addr, u16 *value)
@@ -370,16 +374,12 @@ error:
        return r;
 }
 
-/* MAC address: if custom mac addresses are to be used CR_MAC_ADDR_P1 and
- *              CR_MAC_ADDR_P2 must be overwritten
- */
-int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
+static int zd_write_mac_addr_common(struct zd_chip *chip, const u8 *mac_addr,
+                                   const struct zd_ioreq32 *in_reqs,
+                                   const char *type)
 {
        int r;
-       struct zd_ioreq32 reqs[2] = {
-               [0] = { .addr = CR_MAC_ADDR_P1 },
-               [1] = { .addr = CR_MAC_ADDR_P2 },
-       };
+       struct zd_ioreq32 reqs[2] = {in_reqs[0], in_reqs[1]};
 
        if (mac_addr) {
                reqs[0].value = (mac_addr[3] << 24)
@@ -388,9 +388,9 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
                              |  mac_addr[0];
                reqs[1].value = (mac_addr[5] <<  8)
                              |  mac_addr[4];
-               dev_dbg_f(zd_chip_dev(chip), "mac addr %pM\n", mac_addr);
+               dev_dbg_f(zd_chip_dev(chip), "%s addr %pM\n", type, mac_addr);
        } else {
-               dev_dbg_f(zd_chip_dev(chip), "set NULL mac\n");
+               dev_dbg_f(zd_chip_dev(chip), "set NULL %s\n", type);
        }
 
        mutex_lock(&chip->mutex);
@@ -399,6 +399,29 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
        return r;
 }
 
+/* MAC address: if custom mac addresses are to be used CR_MAC_ADDR_P1 and
+ *              CR_MAC_ADDR_P2 must be overwritten
+ */
+int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
+{
+       static const struct zd_ioreq32 reqs[2] = {
+               [0] = { .addr = CR_MAC_ADDR_P1 },
+               [1] = { .addr = CR_MAC_ADDR_P2 },
+       };
+
+       return zd_write_mac_addr_common(chip, mac_addr, reqs, "mac");
+}
+
+int zd_write_bssid(struct zd_chip *chip, const u8 *bssid)
+{
+       static const struct zd_ioreq32 reqs[2] = {
+               [0] = { .addr = CR_BSSID_P1 },
+               [1] = { .addr = CR_BSSID_P2 },
+       };
+
+       return zd_write_mac_addr_common(chip, bssid, reqs, "bssid");
+}
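
zd_write_mac_addr_common() packs the six address octets little-endian into the two 32-bit registers: octets 0-3 fill the first register and octets 4-5 the low half of the second. A small host-side illustration for the address 00:11:22:33:44:55, with the resulting values worked out in the comment:

#include <stdint.h>

/* addr = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}
 * p1   = (0x33 << 24) | (0x22 << 16) | (0x11 << 8) | 0x00 = 0x33221100
 * p2   = (0x55 << 8)  |  0x44                             = 0x00005544
 */
static void pack_mac(const uint8_t addr[6], uint32_t *p1, uint32_t *p2)
{
        *p1 = ((uint32_t)addr[3] << 24) | ((uint32_t)addr[2] << 16) |
              ((uint32_t)addr[1] << 8)  |  addr[0];
        *p2 = ((uint32_t)addr[5] << 8)  |  addr[4];
}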
+
 int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain)
 {
        int r;
@@ -849,11 +872,12 @@ static int get_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
 static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
 {
        struct zd_ioreq32 reqs[3];
+       u16 b_interval = s->beacon_interval & 0xffff;
 
-       if (s->beacon_interval <= 5)
-               s->beacon_interval = 5;
-       if (s->pre_tbtt < 4 || s->pre_tbtt >= s->beacon_interval)
-               s->pre_tbtt = s->beacon_interval - 1;
+       if (b_interval <= 5)
+               b_interval = 5;
+       if (s->pre_tbtt < 4 || s->pre_tbtt >= b_interval)
+               s->pre_tbtt = b_interval - 1;
        if (s->atim_wnd_period >= s->pre_tbtt)
                s->atim_wnd_period = s->pre_tbtt - 1;
 
@@ -862,31 +886,57 @@ static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
        reqs[1].addr = CR_PRE_TBTT;
        reqs[1].value = s->pre_tbtt;
        reqs[2].addr = CR_BCN_INTERVAL;
-       reqs[2].value = s->beacon_interval;
+       reqs[2].value = (s->beacon_interval & ~0xffff) | b_interval;
 
        return zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs));
 }
 
 
-static int set_beacon_interval(struct zd_chip *chip, u32 interval)
+static int set_beacon_interval(struct zd_chip *chip, u16 interval,
+                              u8 dtim_period, int type)
 {
        int r;
        struct aw_pt_bi s;
+       u32 b_interval, mode_flag;
 
        ZD_ASSERT(mutex_is_locked(&chip->mutex));
+
+       if (interval > 0) {
+               switch (type) {
+               case NL80211_IFTYPE_ADHOC:
+               case NL80211_IFTYPE_MESH_POINT:
+                       mode_flag = BCN_MODE_IBSS;
+                       break;
+               case NL80211_IFTYPE_AP:
+                       mode_flag = BCN_MODE_AP;
+                       break;
+               default:
+                       mode_flag = 0;
+                       break;
+               }
+       } else {
+               dtim_period = 0;
+               mode_flag = 0;
+       }
+
+       b_interval = mode_flag | (dtim_period << 16) | interval;
+
+       r = zd_iowrite32_locked(chip, b_interval, CR_BCN_INTERVAL);
+       if (r)
+               return r;
        r = get_aw_pt_bi(chip, &s);
        if (r)
                return r;
-       s.beacon_interval = interval;
        return set_aw_pt_bi(chip, &s);
 }
 
-int zd_set_beacon_interval(struct zd_chip *chip, u32 interval)
+int zd_set_beacon_interval(struct zd_chip *chip, u16 interval, u8 dtim_period,
+                          int type)
 {
        int r;
 
        mutex_lock(&chip->mutex);
-       r = set_beacon_interval(chip, interval);
+       r = set_beacon_interval(chip, interval, dtim_period, type);
        mutex_unlock(&chip->mutex);
        return r;
 }
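
set_beacon_interval() now packs three fields into the single CR_BCN_INTERVAL register: the beacon interval in bits 15:0, the DTIM period in bits 23:16, and a mode flag above that (BCN_MODE_AP and BCN_MODE_IBSS, defined later in this patch). A minimal sketch of the encoding:

#include <stdint.h>

#define BCN_MODE_AP   0x1000000   /* values added to zd_chip.h by this patch */
#define BCN_MODE_IBSS 0x2000000

/* e.g. interval=100, dtim=2, AP: 0x1000000 | (2 << 16) | 100 = 0x01020064 */
static uint32_t bcn_interval_reg(uint16_t interval, uint8_t dtim, uint32_t mode)
{
        return mode | ((uint32_t)dtim << 16) | interval;
}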
@@ -905,7 +955,7 @@ static int hw_init(struct zd_chip *chip)
        if (r)
                return r;
 
-       return set_beacon_interval(chip, 100);
+       return set_beacon_interval(chip, 100, 0, NL80211_IFTYPE_UNSPECIFIED);
 }
 
 static zd_addr_t fw_reg_addr(struct zd_chip *chip, u16 offset)
@@ -1407,6 +1457,9 @@ void zd_chip_disable_int(struct zd_chip *chip)
        mutex_lock(&chip->mutex);
        zd_usb_disable_int(&chip->usb);
        mutex_unlock(&chip->mutex);
+
+       /* cancel pending interrupt work */
+       cancel_work_sync(&zd_chip_to_mac(chip)->process_intr);
 }
 
 int zd_chip_enable_rxtx(struct zd_chip *chip)
@@ -1416,6 +1469,7 @@ int zd_chip_enable_rxtx(struct zd_chip *chip)
        mutex_lock(&chip->mutex);
        zd_usb_enable_tx(&chip->usb);
        r = zd_usb_enable_rx(&chip->usb);
+       zd_tx_watchdog_enable(&chip->usb);
        mutex_unlock(&chip->mutex);
        return r;
 }
@@ -1423,6 +1477,7 @@ int zd_chip_enable_rxtx(struct zd_chip *chip)
 void zd_chip_disable_rxtx(struct zd_chip *chip)
 {
        mutex_lock(&chip->mutex);
+       zd_tx_watchdog_disable(&chip->usb);
        zd_usb_disable_rx(&chip->usb);
        zd_usb_disable_tx(&chip->usb);
        mutex_unlock(&chip->mutex);
index f8bbf7d302ae6758b3d98ea69acf3dbeb4ad41e2..14e4402a6111bdb5b70745f7d4784693eb30dfa9 100644 (file)
@@ -546,6 +546,7 @@ enum {
 #define RX_FILTER_CTRL (RX_FILTER_RTS | RX_FILTER_CTS | \
        RX_FILTER_CFEND | RX_FILTER_CFACK)
 
+#define BCN_MODE_AP                    0x1000000
 #define BCN_MODE_IBSS                  0x2000000
 
 /* Monitor mode sets filter to 0xfffff */
@@ -881,6 +882,7 @@ static inline u8 _zd_chip_get_channel(struct zd_chip *chip)
 u8  zd_chip_get_channel(struct zd_chip *chip);
 int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain);
 int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr);
+int zd_write_bssid(struct zd_chip *chip, const u8 *bssid);
 int zd_chip_switch_radio_on(struct zd_chip *chip);
 int zd_chip_switch_radio_off(struct zd_chip *chip);
 int zd_chip_enable_int(struct zd_chip *chip);
@@ -920,7 +922,8 @@ enum led_status {
 
 int zd_chip_control_leds(struct zd_chip *chip, enum led_status status);
 
-int zd_set_beacon_interval(struct zd_chip *chip, u32 interval);
+int zd_set_beacon_interval(struct zd_chip *chip, u16 interval, u8 dtim_period,
+                          int type);
 
 static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval)
 {
index 6ac597ffd3b9031e73b8229ef91b5e1f4520cc44..5463ca9ebc01296f221f9487b2e8a7cbf137dac6 100644 (file)
@@ -45,7 +45,7 @@ typedef u16 __nocast zd_addr_t;
 #ifdef DEBUG
 #  define ZD_ASSERT(x) \
 do { \
-       if (!(x)) { \
+       if (unlikely(!(x))) { \
                pr_debug("%s:%d ASSERT %s VIOLATED!\n", \
                        __FILE__, __LINE__, __stringify(x)); \
                dump_stack(); \
index 6107304cb94c5e7050cda334d6eaa142218159d3..5037c8b2b41598548e80738f43c5a1ad2f06c9d6 100644 (file)
@@ -138,6 +138,12 @@ static const struct ieee80211_channel zd_channels[] = {
 static void housekeeping_init(struct zd_mac *mac);
 static void housekeeping_enable(struct zd_mac *mac);
 static void housekeeping_disable(struct zd_mac *mac);
+static void beacon_init(struct zd_mac *mac);
+static void beacon_enable(struct zd_mac *mac);
+static void beacon_disable(struct zd_mac *mac);
+static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble);
+static int zd_mac_config_beacon(struct ieee80211_hw *hw,
+                               struct sk_buff *beacon);
 
 static int zd_reg2alpha2(u8 regdomain, char *alpha2)
 {
@@ -231,6 +237,26 @@ static int set_rx_filter(struct zd_mac *mac)
        return zd_iowrite32(&mac->chip, CR_RX_FILTER, filter);
 }
 
+static int set_mac_and_bssid(struct zd_mac *mac)
+{
+       int r;
+
+       if (!mac->vif)
+               return -1;
+
+       r = zd_write_mac_addr(&mac->chip, mac->vif->addr);
+       if (r)
+               return r;
+
+       /* Vendor driver after setting MAC either sets BSSID for AP or
+        * filter for other modes.
+        */
+       if (mac->type != NL80211_IFTYPE_AP)
+               return set_rx_filter(mac);
+       else
+               return zd_write_bssid(&mac->chip, mac->vif->addr);
+}
+
 static int set_mc_hash(struct zd_mac *mac)
 {
        struct zd_mc_hash hash;
@@ -238,7 +264,7 @@ static int set_mc_hash(struct zd_mac *mac)
        return zd_chip_set_multicast_hash(&mac->chip, &hash);
 }
 
-static int zd_op_start(struct ieee80211_hw *hw)
+int zd_op_start(struct ieee80211_hw *hw)
 {
        struct zd_mac *mac = zd_hw_mac(hw);
        struct zd_chip *chip = &mac->chip;
@@ -275,6 +301,8 @@ static int zd_op_start(struct ieee80211_hw *hw)
                goto disable_rxtx;
 
        housekeeping_enable(mac);
+       beacon_enable(mac);
+       set_bit(ZD_DEVICE_RUNNING, &mac->flags);
        return 0;
 disable_rxtx:
        zd_chip_disable_rxtx(chip);
@@ -286,19 +314,22 @@ out:
        return r;
 }
 
-static void zd_op_stop(struct ieee80211_hw *hw)
+void zd_op_stop(struct ieee80211_hw *hw)
 {
        struct zd_mac *mac = zd_hw_mac(hw);
        struct zd_chip *chip = &mac->chip;
        struct sk_buff *skb;
        struct sk_buff_head *ack_wait_queue = &mac->ack_wait_queue;
 
+       clear_bit(ZD_DEVICE_RUNNING, &mac->flags);
+
        /* The order here deliberately is a little different from the open()
         * method, since we need to make sure there is no opportunity for RX
         * frames to be processed by mac80211 after we have stopped it.
         */
 
        zd_chip_disable_rxtx(chip);
+       beacon_disable(mac);
        housekeeping_disable(mac);
        flush_workqueue(zd_workqueue);
 
@@ -311,6 +342,68 @@ static void zd_op_stop(struct ieee80211_hw *hw)
                dev_kfree_skb_any(skb);
 }
 
+int zd_restore_settings(struct zd_mac *mac)
+{
+       struct sk_buff *beacon;
+       struct zd_mc_hash multicast_hash;
+       unsigned int short_preamble;
+       int r, beacon_interval, beacon_period;
+       u8 channel;
+
+       dev_dbg_f(zd_mac_dev(mac), "\n");
+
+       spin_lock_irq(&mac->lock);
+       multicast_hash = mac->multicast_hash;
+       short_preamble = mac->short_preamble;
+       beacon_interval = mac->beacon.interval;
+       beacon_period = mac->beacon.period;
+       channel = mac->channel;
+       spin_unlock_irq(&mac->lock);
+
+       r = set_mac_and_bssid(mac);
+       if (r < 0) {
+               dev_dbg_f(zd_mac_dev(mac), "set_mac_and_bssid failed, %d\n", r);
+               return r;
+       }
+
+       r = zd_chip_set_channel(&mac->chip, channel);
+       if (r < 0) {
+               dev_dbg_f(zd_mac_dev(mac), "zd_chip_set_channel failed, %d\n",
+                         r);
+               return r;
+       }
+
+       set_rts_cts(mac, short_preamble);
+
+       r = zd_chip_set_multicast_hash(&mac->chip, &multicast_hash);
+       if (r < 0) {
+               dev_dbg_f(zd_mac_dev(mac),
+                         "zd_chip_set_multicast_hash failed, %d\n", r);
+               return r;
+       }
+
+       if (mac->type == NL80211_IFTYPE_MESH_POINT ||
+           mac->type == NL80211_IFTYPE_ADHOC ||
+           mac->type == NL80211_IFTYPE_AP) {
+               if (mac->vif != NULL) {
+                       beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+                       if (beacon) {
+                               zd_mac_config_beacon(mac->hw, beacon);
+                               kfree_skb(beacon);
+                       }
+               }
+
+               zd_set_beacon_interval(&mac->chip, beacon_interval,
+                                       beacon_period, mac->type);
+
+               spin_lock_irq(&mac->lock);
+               mac->beacon.last_update = jiffies;
+               spin_unlock_irq(&mac->lock);
+       }
+
+       return 0;
+}
+
 /**
  * zd_mac_tx_status - reports tx status of a packet if required
  * @hw - a &struct ieee80211_hw pointer
@@ -574,64 +667,120 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
 static int zd_mac_config_beacon(struct ieee80211_hw *hw, struct sk_buff *beacon)
 {
        struct zd_mac *mac = zd_hw_mac(hw);
-       int r;
+       int r, ret, num_cmds, req_pos = 0;
        u32 tmp, j = 0;
        /* 4 more bytes for tail CRC */
        u32 full_len = beacon->len + 4;
+       unsigned long end_jiffies, message_jiffies;
+       struct zd_ioreq32 *ioreqs;
 
-       r = zd_iowrite32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, 0);
+       /* Allocate memory for the full beacon write at once. */
+       num_cmds = 1 + zd_chip_is_zd1211b(&mac->chip) + full_len;
+       ioreqs = kmalloc(num_cmds * sizeof(struct zd_ioreq32), GFP_KERNEL);
+       if (!ioreqs)
+               return -ENOMEM;
+
+       mutex_lock(&mac->chip.mutex);
+
+       r = zd_iowrite32_locked(&mac->chip, 0, CR_BCN_FIFO_SEMAPHORE);
        if (r < 0)
-               return r;
-       r = zd_ioread32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, &tmp);
+               goto out;
+       r = zd_ioread32_locked(&mac->chip, &tmp, CR_BCN_FIFO_SEMAPHORE);
        if (r < 0)
-               return r;
+               goto release_sema;
 
+       end_jiffies = jiffies + HZ / 2; /*~500ms*/
+       message_jiffies = jiffies + HZ / 10; /*~100ms*/
        while (tmp & 0x2) {
-               r = zd_ioread32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, &tmp);
+               r = zd_ioread32_locked(&mac->chip, &tmp, CR_BCN_FIFO_SEMAPHORE);
                if (r < 0)
-                       return r;
-               if ((++j % 100) == 0) {
-                       printk(KERN_ERR "CR_BCN_FIFO_SEMAPHORE not ready\n");
-                       if (j >= 500)  {
-                               printk(KERN_ERR "Giving up beacon config.\n");
-                               return -ETIMEDOUT;
+                       goto release_sema;
+               if (time_is_before_eq_jiffies(message_jiffies)) {
+                       message_jiffies = jiffies + HZ / 10;
+                       dev_err(zd_mac_dev(mac),
+                                       "CR_BCN_FIFO_SEMAPHORE not ready\n");
+                       if (time_is_before_eq_jiffies(end_jiffies))  {
+                               dev_err(zd_mac_dev(mac),
+                                               "Giving up beacon config.\n");
+                               r = -ETIMEDOUT;
+                               goto reset_device;
                        }
                }
-               msleep(1);
+               msleep(20);
        }
 
-       r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, full_len - 1);
-       if (r < 0)
-               return r;
+       ioreqs[req_pos].addr = CR_BCN_FIFO;
+       ioreqs[req_pos].value = full_len - 1;
+       req_pos++;
        if (zd_chip_is_zd1211b(&mac->chip)) {
-               r = zd_iowrite32(&mac->chip, CR_BCN_LENGTH, full_len - 1);
-               if (r < 0)
-                       return r;
+               ioreqs[req_pos].addr = CR_BCN_LENGTH;
+               ioreqs[req_pos].value = full_len - 1;
+               req_pos++;
        }
 
        for (j = 0 ; j < beacon->len; j++) {
-               r = zd_iowrite32(&mac->chip, CR_BCN_FIFO,
-                               *((u8 *)(beacon->data + j)));
-               if (r < 0)
-                       return r;
+               ioreqs[req_pos].addr = CR_BCN_FIFO;
+               ioreqs[req_pos].value = *((u8 *)(beacon->data + j));
+               req_pos++;
        }
 
        for (j = 0; j < 4; j++) {
-               r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, 0x0);
-               if (r < 0)
-                       return r;
+               ioreqs[req_pos].addr = CR_BCN_FIFO;
+               ioreqs[req_pos].value = 0x0;
+               req_pos++;
        }
 
-       r = zd_iowrite32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, 1);
-       if (r < 0)
-               return r;
+       BUG_ON(req_pos != num_cmds);
+
+       r = zd_iowrite32a_locked(&mac->chip, ioreqs, num_cmds);
+
+release_sema:
+       /*
+        * Try very hard to release the device beacon semaphore; otherwise the
+        * device/driver can be left in an unusable state.
+        */
+       end_jiffies = jiffies + HZ / 2; /*~500ms*/
+       ret = zd_iowrite32_locked(&mac->chip, 1, CR_BCN_FIFO_SEMAPHORE);
+       while (ret < 0) {
+               if (time_is_before_eq_jiffies(end_jiffies)) {
+                       ret = -ETIMEDOUT;
+                       break;
+               }
+
+               msleep(20);
+               ret = zd_iowrite32_locked(&mac->chip, 1, CR_BCN_FIFO_SEMAPHORE);
+       }
+
+       if (ret < 0)
+               dev_err(zd_mac_dev(mac), "Could not release "
+                                        "CR_BCN_FIFO_SEMAPHORE!\n");
+       if (r < 0 || ret < 0) {
+               if (r >= 0)
+                       r = ret;
+               goto out;
+       }
 
        /* 802.11b/g 2.4G CCK 1Mb
         * 802.11a, not yet implemented, uses different values (see GPL vendor
         * driver)
         */
-       return zd_iowrite32(&mac->chip, CR_BCN_PLCP_CFG, 0x00000400 |
-                       (full_len << 19));
+       r = zd_iowrite32_locked(&mac->chip, 0x00000400 | (full_len << 19),
+                               CR_BCN_PLCP_CFG);
+out:
+       mutex_unlock(&mac->chip.mutex);
+       kfree(ioreqs);
+       return r;
+
+reset_device:
+       mutex_unlock(&mac->chip.mutex);
+       kfree(ioreqs);
+
+       /* semaphore stuck, reset device to avoid fw freeze later */
+       dev_warn(zd_mac_dev(mac), "CR_BCN_FIFO_SEMAPHORE stuck, "
+                                 "resetting device...");
+       usb_queue_reset_device(mac->chip.usb.intf);
+
+       return r;
 }
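
Both the semaphore acquire loop and the release_sema path above now retry on a jiffies deadline instead of counting iterations. The idiom, condensed (try_release() is a placeholder for the semaphore write, not a driver function):

/* Retry roughly every 20 ms until success or ~500 ms have elapsed. */
unsigned long end_jiffies = jiffies + HZ / 2;
int ret = try_release();

while (ret < 0) {
        if (time_is_before_eq_jiffies(end_jiffies)) {
                ret = -ETIMEDOUT;       /* give up; the caller logs it */
                break;
        }
        msleep(20);
        ret = try_release();
}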
 
 static int fill_ctrlset(struct zd_mac *mac,
@@ -701,7 +850,7 @@ static int fill_ctrlset(struct zd_mac *mac,
  * control block of the skbuff will be initialized. If necessary the incoming
  * mac80211 queues will be stopped.
  */
-static int zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct zd_mac *mac = zd_hw_mac(hw);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -716,11 +865,10 @@ static int zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        r = zd_usb_tx(&mac->chip.usb, skb);
        if (r)
                goto fail;
-       return 0;
+       return;
 
 fail:
        dev_kfree_skb(skb);
-       return 0;
 }
 
 /**
@@ -779,6 +927,13 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
 
                mac->ack_pending = 1;
                mac->ack_signal = stats->signal;
+
+               /* Don't leave a tx packet pending in AP mode */
+               if (mac->type == NL80211_IFTYPE_AP) {
+                       skb = __skb_dequeue(q);
+                       zd_mac_tx_status(hw, skb, mac->ack_signal, NULL);
+                       mac->ack_pending = 0;
+               }
        }
 
        spin_unlock_irqrestore(&q->lock, flags);
@@ -882,13 +1037,16 @@ static int zd_op_add_interface(struct ieee80211_hw *hw,
        case NL80211_IFTYPE_MESH_POINT:
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_ADHOC:
+       case NL80211_IFTYPE_AP:
                mac->type = vif->type;
                break;
        default:
                return -EOPNOTSUPP;
        }
 
-       return zd_write_mac_addr(&mac->chip, vif->addr);
+       mac->vif = vif;
+
+       return set_mac_and_bssid(mac);
 }
 
 static void zd_op_remove_interface(struct ieee80211_hw *hw,
@@ -896,7 +1054,8 @@ static void zd_op_remove_interface(struct ieee80211_hw *hw,
 {
        struct zd_mac *mac = zd_hw_mac(hw);
        mac->type = NL80211_IFTYPE_UNSPECIFIED;
-       zd_set_beacon_interval(&mac->chip, 0);
+       mac->vif = NULL;
+       zd_set_beacon_interval(&mac->chip, 0, 0, NL80211_IFTYPE_UNSPECIFIED);
        zd_write_mac_addr(&mac->chip, NULL);
 }
 
@@ -905,49 +1064,67 @@ static int zd_op_config(struct ieee80211_hw *hw, u32 changed)
        struct zd_mac *mac = zd_hw_mac(hw);
        struct ieee80211_conf *conf = &hw->conf;
 
+       spin_lock_irq(&mac->lock);
+       mac->channel = conf->channel->hw_value;
+       spin_unlock_irq(&mac->lock);
+
        return zd_chip_set_channel(&mac->chip, conf->channel->hw_value);
 }
 
-static void zd_process_intr(struct work_struct *work)
+static void zd_beacon_done(struct zd_mac *mac)
 {
-       u16 int_status;
-       struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
+       struct sk_buff *skb, *beacon;
 
-       int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer+4));
-       if (int_status & INT_CFG_NEXT_BCN)
-               dev_dbg_f_limit(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n");
-       else
-               dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n");
-
-       zd_chip_enable_hwint(&mac->chip);
-}
+       if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
+               return;
+       if (!mac->vif || mac->vif->type != NL80211_IFTYPE_AP)
+               return;
 
+       /*
+        * Send out buffered broad- and multicast frames.
+        */
+       while (!ieee80211_queue_stopped(mac->hw, 0)) {
+               skb = ieee80211_get_buffered_bc(mac->hw, mac->vif);
+               if (!skb)
+                       break;
+               zd_op_tx(mac->hw, skb);
+       }
 
-static void set_multicast_hash_handler(struct work_struct *work)
-{
-       struct zd_mac *mac =
-               container_of(work, struct zd_mac, set_multicast_hash_work);
-       struct zd_mc_hash hash;
+       /*
+        * Fetch next beacon so that tim_count is updated.
+        */
+       beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+       if (beacon) {
+               zd_mac_config_beacon(mac->hw, beacon);
+               kfree_skb(beacon);
+       }
 
        spin_lock_irq(&mac->lock);
-       hash = mac->multicast_hash;
+       mac->beacon.last_update = jiffies;
        spin_unlock_irq(&mac->lock);
-
-       zd_chip_set_multicast_hash(&mac->chip, &hash);
 }
 
-static void set_rx_filter_handler(struct work_struct *work)
+static void zd_process_intr(struct work_struct *work)
 {
-       struct zd_mac *mac =
-               container_of(work, struct zd_mac, set_rx_filter_work);
-       int r;
+       u16 int_status;
+       unsigned long flags;
+       struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
 
-       dev_dbg_f(zd_mac_dev(mac), "\n");
-       r = set_rx_filter(mac);
-       if (r)
-               dev_err(zd_mac_dev(mac), "set_rx_filter_handler error %d\n", r);
+       spin_lock_irqsave(&mac->lock, flags);
+       int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer + 4));
+       spin_unlock_irqrestore(&mac->lock, flags);
+
+       if (int_status & INT_CFG_NEXT_BCN) {
+               /*dev_dbg_f_limit(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n");*/
+               zd_beacon_done(mac);
+       } else {
+               dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n");
+       }
+
+       zd_chip_enable_hwint(&mac->chip);
 }
 
 static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw,
                                   struct netdev_hw_addr_list *mc_list)
 {
@@ -979,6 +1156,7 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
        };
        struct zd_mac *mac = zd_hw_mac(hw);
        unsigned long flags;
+       int r;
 
        /* Only deal with supported flags */
        changed_flags &= SUPPORTED_FIF_FLAGS;
@@ -1000,11 +1178,13 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
        mac->multicast_hash = hash;
        spin_unlock_irqrestore(&mac->lock, flags);
 
-       /* XXX: these can be called here now, can sleep now! */
-       queue_work(zd_workqueue, &mac->set_multicast_hash_work);
+       zd_chip_set_multicast_hash(&mac->chip, &hash);
 
-       if (changed_flags & FIF_CONTROL)
-               queue_work(zd_workqueue, &mac->set_rx_filter_work);
+       if (changed_flags & FIF_CONTROL) {
+               r = set_rx_filter(mac);
+               if (r)
+                       dev_err(zd_mac_dev(mac), "set_rx_filter error %d\n", r);
+       }
 
        /* no handling required for FIF_OTHER_BSS as we don't currently
         * do BSSID filtering */
@@ -1016,20 +1196,9 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
         * time. */
 }
 
-static void set_rts_cts_work(struct work_struct *work)
+static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble)
 {
-       struct zd_mac *mac =
-               container_of(work, struct zd_mac, set_rts_cts_work);
-       unsigned long flags;
-       unsigned int short_preamble;
-
        mutex_lock(&mac->chip.mutex);
-
-       spin_lock_irqsave(&mac->lock, flags);
-       mac->updating_rts_rate = 0;
-       short_preamble = mac->short_preamble;
-       spin_unlock_irqrestore(&mac->lock, flags);
-
        zd_chip_set_rts_cts_rate_locked(&mac->chip, short_preamble);
        mutex_unlock(&mac->chip.mutex);
 }
@@ -1040,33 +1209,42 @@ static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
                                   u32 changes)
 {
        struct zd_mac *mac = zd_hw_mac(hw);
-       unsigned long flags;
        int associated;
 
        dev_dbg_f(zd_mac_dev(mac), "changes: %x\n", changes);
 
        if (mac->type == NL80211_IFTYPE_MESH_POINT ||
-           mac->type == NL80211_IFTYPE_ADHOC) {
+           mac->type == NL80211_IFTYPE_ADHOC ||
+           mac->type == NL80211_IFTYPE_AP) {
                associated = true;
                if (changes & BSS_CHANGED_BEACON) {
                        struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
 
                        if (beacon) {
+                               zd_chip_disable_hwint(&mac->chip);
                                zd_mac_config_beacon(hw, beacon);
+                               zd_chip_enable_hwint(&mac->chip);
                                kfree_skb(beacon);
                        }
                }
 
                if (changes & BSS_CHANGED_BEACON_ENABLED) {
-                       u32 interval;
+                       u16 interval = 0;
+                       u8 period = 0;
 
-                       if (bss_conf->enable_beacon)
-                               interval = BCN_MODE_IBSS |
-                                               bss_conf->beacon_int;
-                       else
-                               interval = 0;
+                       if (bss_conf->enable_beacon) {
+                               period = bss_conf->dtim_period;
+                               interval = bss_conf->beacon_int;
+                       }
 
-                       zd_set_beacon_interval(&mac->chip, interval);
+                       spin_lock_irq(&mac->lock);
+                       mac->beacon.period = period;
+                       mac->beacon.interval = interval;
+                       mac->beacon.last_update = jiffies;
+                       spin_unlock_irq(&mac->lock);
+
+                       zd_set_beacon_interval(&mac->chip, interval, period,
+                                              mac->type);
                }
        } else
                associated = is_valid_ether_addr(bss_conf->bssid);
@@ -1078,15 +1256,11 @@ static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
        /* TODO: do hardware bssid filtering */
 
        if (changes & BSS_CHANGED_ERP_PREAMBLE) {
-               spin_lock_irqsave(&mac->lock, flags);
+               spin_lock_irq(&mac->lock);
                mac->short_preamble = bss_conf->use_short_preamble;
-               if (!mac->updating_rts_rate) {
-                       mac->updating_rts_rate = 1;
-                       /* FIXME: should disable TX here, until work has
-                        * completed and RTS_CTS reg is updated */
-                       queue_work(zd_workqueue, &mac->set_rts_cts_work);
-               }
-               spin_unlock_irqrestore(&mac->lock, flags);
+               spin_unlock_irq(&mac->lock);
+
+               set_rts_cts(mac, bss_conf->use_short_preamble);
        }
 }
 
@@ -1138,12 +1312,14 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
        hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
 
        hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
-                   IEEE80211_HW_SIGNAL_UNSPEC;
+                   IEEE80211_HW_SIGNAL_UNSPEC |
+                   IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
 
        hw->wiphy->interface_modes =
                BIT(NL80211_IFTYPE_MESH_POINT) |
                BIT(NL80211_IFTYPE_STATION) |
-               BIT(NL80211_IFTYPE_ADHOC);
+               BIT(NL80211_IFTYPE_ADHOC) |
+               BIT(NL80211_IFTYPE_AP);
 
        hw->max_signal = 100;
        hw->queues = 1;
@@ -1160,15 +1336,82 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
 
        zd_chip_init(&mac->chip, hw, intf);
        housekeeping_init(mac);
-       INIT_WORK(&mac->set_multicast_hash_work, set_multicast_hash_handler);
-       INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work);
-       INIT_WORK(&mac->set_rx_filter_work, set_rx_filter_handler);
+       beacon_init(mac);
        INIT_WORK(&mac->process_intr, zd_process_intr);
 
        SET_IEEE80211_DEV(hw, &intf->dev);
        return hw;
 }
 
+#define BEACON_WATCHDOG_DELAY round_jiffies_relative(HZ)
+
+static void beacon_watchdog_handler(struct work_struct *work)
+{
+       struct zd_mac *mac =
+               container_of(work, struct zd_mac, beacon.watchdog_work.work);
+       struct sk_buff *beacon;
+       unsigned long timeout;
+       int interval, period;
+
+       if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
+               goto rearm;
+       if (mac->type != NL80211_IFTYPE_AP || !mac->vif)
+               goto rearm;
+
+       spin_lock_irq(&mac->lock);
+       interval = mac->beacon.interval;
+       period = mac->beacon.period;
+       timeout = mac->beacon.last_update + msecs_to_jiffies(interval) + HZ;
+       spin_unlock_irq(&mac->lock);
+
+       if (interval > 0 && time_is_before_jiffies(timeout)) {
+               dev_dbg_f(zd_mac_dev(mac), "beacon interrupt stalled, "
+                                          "restarting. "
+                                          "(interval: %d, dtim: %d)\n",
+                                          interval, period);
+
+               zd_chip_disable_hwint(&mac->chip);
+
+               beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+               if (beacon) {
+                       zd_mac_config_beacon(mac->hw, beacon);
+                       kfree_skb(beacon);
+               }
+
+               zd_set_beacon_interval(&mac->chip, interval, period, mac->type);
+
+               zd_chip_enable_hwint(&mac->chip);
+
+               spin_lock_irq(&mac->lock);
+               mac->beacon.last_update = jiffies;
+               spin_unlock_irq(&mac->lock);
+       }
+
+rearm:
+       queue_delayed_work(zd_workqueue, &mac->beacon.watchdog_work,
+                          BEACON_WATCHDOG_DELAY);
+}
+
+static void beacon_init(struct zd_mac *mac)
+{
+       INIT_DELAYED_WORK(&mac->beacon.watchdog_work, beacon_watchdog_handler);
+}
+
+static void beacon_enable(struct zd_mac *mac)
+{
+       dev_dbg_f(zd_mac_dev(mac), "\n");
+
+       mac->beacon.last_update = jiffies;
+       queue_delayed_work(zd_workqueue, &mac->beacon.watchdog_work,
+                          BEACON_WATCHDOG_DELAY);
+}
+
+static void beacon_disable(struct zd_mac *mac)
+{
+       dev_dbg_f(zd_mac_dev(mac), "\n");
+       cancel_delayed_work_sync(&mac->beacon.watchdog_work);
+}
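
beacon_init()/beacon_enable()/beacon_disable() wrap a self-rearming delayed work: the handler requeues itself every BEACON_WATCHDOG_DELAY and only reprograms the beacon when the last interrupt-driven update is older than the beacon interval plus one second. The general shape, with placeholder names (my_dev, device_ok, recover and wq are not driver symbols):

/* Self-rearming watchdog built on delayed work (sketch only). */
static void watchdog_handler(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev, watchdog.work);

        if (device_ok(dev) && time_is_before_jiffies(dev->deadline))
                recover(dev);           /* hypothetical recovery hook */

        queue_delayed_work(wq, &dev->watchdog, round_jiffies_relative(HZ));
}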
+
 #define LINK_LED_WORK_DELAY HZ
 
 static void link_led_handler(struct work_struct *work)
@@ -1179,6 +1422,9 @@ static void link_led_handler(struct work_struct *work)
        int is_associated;
        int r;
 
+       if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
+               goto requeue;
+
        spin_lock_irq(&mac->lock);
        is_associated = mac->associated;
        spin_unlock_irq(&mac->lock);
@@ -1188,6 +1434,7 @@ static void link_led_handler(struct work_struct *work)
        if (r)
                dev_dbg_f(zd_mac_dev(mac), "zd_chip_control_leds error %d\n", r);
 
+requeue:
        queue_delayed_work(zd_workqueue, &mac->housekeeping.link_led_work,
                           LINK_LED_WORK_DELAY);
 }
index a6d86b996c7984a3994ec2c8a69eb7fed26a88e9..f8c93c3fe75592247a33026123bb263026849c10 100644 (file)
@@ -163,6 +163,17 @@ struct housekeeping {
        struct delayed_work link_led_work;
 };
 
+struct beacon {
+       struct delayed_work watchdog_work;
+       unsigned long last_update;
+       u16 interval;
+       u8 period;
+};
+
+enum zd_device_flags {
+       ZD_DEVICE_RUNNING,
+};
+
 #define ZD_MAC_STATS_BUFFER_SIZE 16
 
 #define ZD_MAC_MAX_ACK_WAITERS 50
@@ -172,17 +183,19 @@ struct zd_mac {
        spinlock_t lock;
        spinlock_t intr_lock;
        struct ieee80211_hw *hw;
+       struct ieee80211_vif *vif;
        struct housekeeping housekeeping;
-       struct work_struct set_multicast_hash_work;
+       struct beacon beacon;
        struct work_struct set_rts_cts_work;
-       struct work_struct set_rx_filter_work;
        struct work_struct process_intr;
        struct zd_mc_hash multicast_hash;
        u8 intr_buffer[USB_MAX_EP_INT_BUFFER];
        u8 regdomain;
        u8 default_regdomain;
+       u8 channel;
        int type;
        int associated;
+       unsigned long flags;
        struct sk_buff_head ack_wait_queue;
        struct ieee80211_channel channels[14];
        struct ieee80211_rate rates[12];
@@ -191,9 +204,6 @@ struct zd_mac {
        /* Short preamble (used for RTS/CTS) */
        unsigned int short_preamble:1;
 
-       /* flags to indicate update in progress */
-       unsigned int updating_rts_rate:1;
-
        /* whether to pass frames with CRC errors to stack */
        unsigned int pass_failed_fcs:1;
 
@@ -304,6 +314,10 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length);
 void zd_mac_tx_failed(struct urb *urb);
 void zd_mac_tx_to_dev(struct sk_buff *skb, int error);
 
+int zd_op_start(struct ieee80211_hw *hw);
+void zd_op_stop(struct ieee80211_hw *hw);
+int zd_restore_settings(struct zd_mac *mac);
+
 #ifdef DEBUG
 void zd_dump_rx_status(const struct rx_status *status);
 #else
index 06041cb1c4220e548a08d4229a5a64ca645a0bbb..81e80489a0523a9e07b4485e1f74a285f8c063f3 100644 (file)
@@ -377,8 +377,10 @@ static inline void handle_regs_int(struct urb *urb)
        int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2));
        if (int_num == CR_INTERRUPT) {
                struct zd_mac *mac = zd_hw_mac(zd_usb_to_hw(urb->context));
+               spin_lock(&mac->lock);
                memcpy(&mac->intr_buffer, urb->transfer_buffer,
                                USB_MAX_EP_INT_BUFFER);
+               spin_unlock(&mac->lock);
                schedule_work(&mac->process_intr);
        } else if (intr->read_regs_enabled) {
                intr->read_regs.length = len = urb->actual_length;
@@ -409,8 +411,10 @@ static void int_urb_complete(struct urb *urb)
        case -ENOENT:
        case -ECONNRESET:
        case -EPIPE:
-               goto kfree;
+               dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
+               return;
        default:
+               dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
                goto resubmit;
        }
 
@@ -441,12 +445,11 @@ static void int_urb_complete(struct urb *urb)
 resubmit:
        r = usb_submit_urb(urb, GFP_ATOMIC);
        if (r) {
-               dev_dbg_f(urb_dev(urb), "resubmit urb %p\n", urb);
-               goto kfree;
+               dev_dbg_f(urb_dev(urb), "error: resubmit urb %p err code %d\n",
+                         urb, r);
+               /* TODO: add worker to reset intr->urb */
        }
        return;
-kfree:
-       kfree(urb->transfer_buffer);
 }
 
 static inline int int_urb_interval(struct usb_device *udev)
@@ -477,9 +480,8 @@ static inline int usb_int_enabled(struct zd_usb *usb)
 int zd_usb_enable_int(struct zd_usb *usb)
 {
        int r;
-       struct usb_device *udev;
+       struct usb_device *udev = zd_usb_to_usbdev(usb);
        struct zd_usb_interrupt *intr = &usb->intr;
-       void *transfer_buffer = NULL;
        struct urb *urb;
 
        dev_dbg_f(zd_usb_dev(usb), "\n");
@@ -500,20 +502,21 @@ int zd_usb_enable_int(struct zd_usb *usb)
        intr->urb = urb;
        spin_unlock_irq(&intr->lock);
 
-       /* TODO: make it a DMA buffer */
        r = -ENOMEM;
-       transfer_buffer = kmalloc(USB_MAX_EP_INT_BUFFER, GFP_KERNEL);
-       if (!transfer_buffer) {
+       intr->buffer = usb_alloc_coherent(udev, USB_MAX_EP_INT_BUFFER,
+                                         GFP_KERNEL, &intr->buffer_dma);
+       if (!intr->buffer) {
                dev_dbg_f(zd_usb_dev(usb),
                        "couldn't allocate transfer_buffer\n");
                goto error_set_urb_null;
        }
 
-       udev = zd_usb_to_usbdev(usb);
        usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, EP_INT_IN),
-                        transfer_buffer, USB_MAX_EP_INT_BUFFER,
+                        intr->buffer, USB_MAX_EP_INT_BUFFER,
                         int_urb_complete, usb,
                         intr->interval);
+       urb->transfer_dma = intr->buffer_dma;
+       urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 
        dev_dbg_f(zd_usb_dev(usb), "submit urb %p\n", intr->urb);
        r = usb_submit_urb(urb, GFP_KERNEL);
@@ -525,7 +528,8 @@ int zd_usb_enable_int(struct zd_usb *usb)
 
        return 0;
 error:
-       kfree(transfer_buffer);
+       usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER,
+                         intr->buffer, intr->buffer_dma);
 error_set_urb_null:
        spin_lock_irq(&intr->lock);
        intr->urb = NULL;
@@ -539,8 +543,11 @@ out:
 void zd_usb_disable_int(struct zd_usb *usb)
 {
        unsigned long flags;
+       struct usb_device *udev = zd_usb_to_usbdev(usb);
        struct zd_usb_interrupt *intr = &usb->intr;
        struct urb *urb;
+       void *buffer;
+       dma_addr_t buffer_dma;
 
        spin_lock_irqsave(&intr->lock, flags);
        urb = intr->urb;
@@ -549,11 +556,18 @@ void zd_usb_disable_int(struct zd_usb *usb)
                return;
        }
        intr->urb = NULL;
+       buffer = intr->buffer;
+       buffer_dma = intr->buffer_dma;
+       intr->buffer = NULL;
        spin_unlock_irqrestore(&intr->lock, flags);
 
        usb_kill_urb(urb);
        dev_dbg_f(zd_usb_dev(usb), "urb %p killed\n", urb);
        usb_free_urb(urb);
+
+       if (buffer)
+               usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER,
+                                 buffer, buffer_dma);
 }
 
 static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
@@ -601,6 +615,7 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
 
 static void rx_urb_complete(struct urb *urb)
 {
+       int r;
        struct zd_usb *usb;
        struct zd_usb_rx *rx;
        const u8 *buffer;
@@ -615,6 +630,7 @@ static void rx_urb_complete(struct urb *urb)
        case -ENOENT:
        case -ECONNRESET:
        case -EPIPE:
+               dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
                return;
        default:
                dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
@@ -626,6 +642,8 @@ static void rx_urb_complete(struct urb *urb)
        usb = urb->context;
        rx = &usb->rx;
 
+       zd_usb_reset_rx_idle_timer(usb);
+
        if (length%rx->usb_packet_size > rx->usb_packet_size-4) {
                /* If there is an old first fragment, we don't care. */
                dev_dbg_f(urb_dev(urb), "*** first fragment ***\n");
@@ -654,7 +672,9 @@ static void rx_urb_complete(struct urb *urb)
        }
 
 resubmit:
-       usb_submit_urb(urb, GFP_ATOMIC);
+       r = usb_submit_urb(urb, GFP_ATOMIC);
+       if (r)
+               dev_dbg_f(urb_dev(urb), "urb %p resubmit error %d\n", urb, r);
 }
 
 static struct urb *alloc_rx_urb(struct zd_usb *usb)
@@ -690,7 +710,7 @@ static void free_rx_urb(struct urb *urb)
        usb_free_urb(urb);
 }
 
-int zd_usb_enable_rx(struct zd_usb *usb)
+static int __zd_usb_enable_rx(struct zd_usb *usb)
 {
        int i, r;
        struct zd_usb_rx *rx = &usb->rx;
@@ -742,7 +762,21 @@ error:
        return r;
 }
 
-void zd_usb_disable_rx(struct zd_usb *usb)
+int zd_usb_enable_rx(struct zd_usb *usb)
+{
+       int r;
+       struct zd_usb_rx *rx = &usb->rx;
+
+       mutex_lock(&rx->setup_mutex);
+       r = __zd_usb_enable_rx(usb);
+       mutex_unlock(&rx->setup_mutex);
+
+       zd_usb_reset_rx_idle_timer(usb);
+
+       return r;
+}
+
+static void __zd_usb_disable_rx(struct zd_usb *usb)
 {
        int i;
        unsigned long flags;
@@ -769,6 +803,40 @@ void zd_usb_disable_rx(struct zd_usb *usb)
        spin_unlock_irqrestore(&rx->lock, flags);
 }
 
+void zd_usb_disable_rx(struct zd_usb *usb)
+{
+       struct zd_usb_rx *rx = &usb->rx;
+
+       mutex_lock(&rx->setup_mutex);
+       __zd_usb_disable_rx(usb);
+       mutex_unlock(&rx->setup_mutex);
+
+       cancel_delayed_work_sync(&rx->idle_work);
+}
+
+static void zd_usb_reset_rx(struct zd_usb *usb)
+{
+       bool do_reset;
+       struct zd_usb_rx *rx = &usb->rx;
+       unsigned long flags;
+
+       mutex_lock(&rx->setup_mutex);
+
+       spin_lock_irqsave(&rx->lock, flags);
+       do_reset = rx->urbs != NULL;
+       spin_unlock_irqrestore(&rx->lock, flags);
+
+       if (do_reset) {
+               __zd_usb_disable_rx(usb);
+               __zd_usb_enable_rx(usb);
+       }
+
+       mutex_unlock(&rx->setup_mutex);
+
+       if (do_reset)
+               zd_usb_reset_rx_idle_timer(usb);
+}
+
 /**
  * zd_usb_disable_tx - disable transmission
  * @usb: the zd1211rw-private USB structure
@@ -779,19 +847,21 @@ void zd_usb_disable_tx(struct zd_usb *usb)
 {
        struct zd_usb_tx *tx = &usb->tx;
        unsigned long flags;
-       struct list_head *pos, *n;
+
+       atomic_set(&tx->enabled, 0);
+
+       /* kill all submitted tx-urbs */
+       usb_kill_anchored_urbs(&tx->submitted);
 
        spin_lock_irqsave(&tx->lock, flags);
-       list_for_each_safe(pos, n, &tx->free_urb_list) {
-               list_del(pos);
-               usb_free_urb(list_entry(pos, struct urb, urb_list));
-       }
-       tx->enabled = 0;
+       WARN_ON(!skb_queue_empty(&tx->submitted_skbs));
+       WARN_ON(tx->submitted_urbs != 0);
        tx->submitted_urbs = 0;
+       spin_unlock_irqrestore(&tx->lock, flags);
+
        /* The stopped state is ignored, relying on ieee80211_wake_queues()
         * in a potentially following zd_usb_enable_tx().
         */
-       spin_unlock_irqrestore(&tx->lock, flags);
 }
 
 /**
@@ -807,63 +877,13 @@ void zd_usb_enable_tx(struct zd_usb *usb)
        struct zd_usb_tx *tx = &usb->tx;
 
        spin_lock_irqsave(&tx->lock, flags);
-       tx->enabled = 1;
+       atomic_set(&tx->enabled, 1);
        tx->submitted_urbs = 0;
        ieee80211_wake_queues(zd_usb_to_hw(usb));
        tx->stopped = 0;
        spin_unlock_irqrestore(&tx->lock, flags);
 }
 
-/**
- * alloc_tx_urb - provides an tx URB
- * @usb: a &struct zd_usb pointer
- *
- * Allocates a new URB. If possible takes the urb from the free list in
- * usb->tx.
- */
-static struct urb *alloc_tx_urb(struct zd_usb *usb)
-{
-       struct zd_usb_tx *tx = &usb->tx;
-       unsigned long flags;
-       struct list_head *entry;
-       struct urb *urb;
-
-       spin_lock_irqsave(&tx->lock, flags);
-       if (list_empty(&tx->free_urb_list)) {
-               urb = usb_alloc_urb(0, GFP_ATOMIC);
-               goto out;
-       }
-       entry = tx->free_urb_list.next;
-       list_del(entry);
-       urb = list_entry(entry, struct urb, urb_list);
-out:
-       spin_unlock_irqrestore(&tx->lock, flags);
-       return urb;
-}
-
-/**
- * free_tx_urb - frees a used tx URB
- * @usb: a &struct zd_usb pointer
- * @urb: URB to be freed
- *
- * Frees the transmission URB, which means to put it on the free URB
- * list.
- */
-static void free_tx_urb(struct zd_usb *usb, struct urb *urb)
-{
-       struct zd_usb_tx *tx = &usb->tx;
-       unsigned long flags;
-
-       spin_lock_irqsave(&tx->lock, flags);
-       if (!tx->enabled) {
-               usb_free_urb(urb);
-               goto out;
-       }
-       list_add(&urb->urb_list, &tx->free_urb_list);
-out:
-       spin_unlock_irqrestore(&tx->lock, flags);
-}
-
 static void tx_dec_submitted_urbs(struct zd_usb *usb)
 {
        struct zd_usb_tx *tx = &usb->tx;
@@ -905,6 +925,16 @@ static void tx_urb_complete(struct urb *urb)
        struct sk_buff *skb;
        struct ieee80211_tx_info *info;
        struct zd_usb *usb;
+       struct zd_usb_tx *tx;
+
+       skb = (struct sk_buff *)urb->context;
+       info = IEEE80211_SKB_CB(skb);
+       /*
+        * grab 'usb' pointer before handing off the skb (since
+        * it might be freed by zd_mac_tx_to_dev or mac80211)
+        */
+       usb = &zd_hw_mac(info->rate_driver_data[0])->chip.usb;
+       tx = &usb->tx;
 
        switch (urb->status) {
        case 0:
@@ -922,20 +952,16 @@ static void tx_urb_complete(struct urb *urb)
                goto resubmit;
        }
 free_urb:
-       skb = (struct sk_buff *)urb->context;
-       /*
-        * grab 'usb' pointer before handing off the skb (since
-        * it might be freed by zd_mac_tx_to_dev or mac80211)
-        */
-       info = IEEE80211_SKB_CB(skb);
-       usb = &zd_hw_mac(info->rate_driver_data[0])->chip.usb;
+       skb_unlink(skb, &usb->tx.submitted_skbs);
        zd_mac_tx_to_dev(skb, urb->status);
-       free_tx_urb(usb, urb);
+       usb_free_urb(urb);
        tx_dec_submitted_urbs(usb);
        return;
 resubmit:
+       usb_anchor_urb(urb, &tx->submitted);
        r = usb_submit_urb(urb, GFP_ATOMIC);
        if (r) {
+               usb_unanchor_urb(urb);
                dev_dbg_f(urb_dev(urb), "error resubmit urb %p %d\n", urb, r);
                goto free_urb;
        }
@@ -956,10 +982,17 @@ resubmit:
 int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb)
 {
        int r;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct usb_device *udev = zd_usb_to_usbdev(usb);
        struct urb *urb;
+       struct zd_usb_tx *tx = &usb->tx;
+
+       if (!atomic_read(&tx->enabled)) {
+               r = -ENOENT;
+               goto out;
+       }
 
-       urb = alloc_tx_urb(usb);
+       urb = usb_alloc_urb(0, GFP_ATOMIC);
        if (!urb) {
                r = -ENOMEM;
                goto out;
@@ -968,17 +1001,118 @@ int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb)
        usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT),
                          skb->data, skb->len, tx_urb_complete, skb);
 
+       info->rate_driver_data[1] = (void *)jiffies;
+       skb_queue_tail(&tx->submitted_skbs, skb);
+       usb_anchor_urb(urb, &tx->submitted);
+
        r = usb_submit_urb(urb, GFP_ATOMIC);
-       if (r)
+       if (r) {
+               dev_dbg_f(zd_usb_dev(usb), "error submit urb %p %d\n", urb, r);
+               usb_unanchor_urb(urb);
+               skb_unlink(skb, &tx->submitted_skbs);
                goto error;
+       }
        tx_inc_submitted_urbs(usb);
        return 0;
 error:
-       free_tx_urb(usb, urb);
+       usb_free_urb(urb);
 out:
        return r;
 }
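
zd_usb_tx() now tracks in-flight frames with a USB anchor plus an skb queue, so zd_usb_disable_tx() can cancel everything with usb_kill_anchored_urbs() and the watchdog below can age the pending skbs. The anchor handling follows the standard USB-core idiom; a minimal sketch under that assumption:

/* Anchored submit (sketch): anchor before submitting, unanchor on failure
 * so the anchor only tracks URBs that are really in flight.  On teardown,
 * usb_kill_anchored_urbs(anchor) cancels the whole set at once.
 */
static int submit_anchored(struct usb_anchor *anchor, struct urb *urb)
{
        int r;

        usb_anchor_urb(urb, anchor);
        r = usb_submit_urb(urb, GFP_ATOMIC);
        if (r)
                usb_unanchor_urb(urb);
        return r;
}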
 
+static bool zd_tx_timeout(struct zd_usb *usb)
+{
+       struct zd_usb_tx *tx = &usb->tx;
+       struct sk_buff_head *q = &tx->submitted_skbs;
+       struct sk_buff *skb, *skbnext;
+       struct ieee80211_tx_info *info;
+       unsigned long flags, trans_start;
+       bool have_timedout = false;
+
+       spin_lock_irqsave(&q->lock, flags);
+       skb_queue_walk_safe(q, skb, skbnext) {
+               info = IEEE80211_SKB_CB(skb);
+               trans_start = (unsigned long)info->rate_driver_data[1];
+
+               if (time_is_before_jiffies(trans_start + ZD_TX_TIMEOUT)) {
+                       have_timedout = true;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&q->lock, flags);
+
+       return have_timedout;
+}
+
+static void zd_tx_watchdog_handler(struct work_struct *work)
+{
+       struct zd_usb *usb =
+               container_of(work, struct zd_usb, tx.watchdog_work.work);
+       struct zd_usb_tx *tx = &usb->tx;
+
+       if (!atomic_read(&tx->enabled) || !tx->watchdog_enabled)
+               goto out;
+       if (!zd_tx_timeout(usb))
+               goto out;
+
+       /* TX halted, try reset */
+       dev_warn(zd_usb_dev(usb), "TX-stall detected, resetting device...");
+
+       usb_queue_reset_device(usb->intf);
+
+       /* reset will stop this worker, don't rearm */
+       return;
+out:
+       queue_delayed_work(zd_workqueue, &tx->watchdog_work,
+                          ZD_TX_WATCHDOG_INTERVAL);
+}
+
+void zd_tx_watchdog_enable(struct zd_usb *usb)
+{
+       struct zd_usb_tx *tx = &usb->tx;
+
+       if (!tx->watchdog_enabled) {
+               dev_dbg_f(zd_usb_dev(usb), "\n");
+               queue_delayed_work(zd_workqueue, &tx->watchdog_work,
+                                  ZD_TX_WATCHDOG_INTERVAL);
+               tx->watchdog_enabled = 1;
+       }
+}
+
+void zd_tx_watchdog_disable(struct zd_usb *usb)
+{
+       struct zd_usb_tx *tx = &usb->tx;
+
+       if (tx->watchdog_enabled) {
+               dev_dbg_f(zd_usb_dev(usb), "\n");
+               tx->watchdog_enabled = 0;
+               cancel_delayed_work_sync(&tx->watchdog_work);
+       }
+}
+
+static void zd_rx_idle_timer_handler(struct work_struct *work)
+{
+       struct zd_usb *usb =
+               container_of(work, struct zd_usb, rx.idle_work.work);
+       struct zd_mac *mac = zd_usb_to_mac(usb);
+
+       if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
+               return;
+
+       dev_dbg_f(zd_usb_dev(usb), "\n");
+
+       /* 30 seconds since last rx, reset rx */
+       zd_usb_reset_rx(usb);
+}
+
+void zd_usb_reset_rx_idle_timer(struct zd_usb *usb)
+{
+       struct zd_usb_rx *rx = &usb->rx;
+
+       cancel_delayed_work(&rx->idle_work);
+       queue_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
+}
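
zd_usb_reset_rx_idle_timer() gives the RX path an inactivity timer by cancel-and-requeue: every completed RX URB pushes the idle work out by ZD_RX_IDLE_INTERVAL (about 30 seconds per the handler's comment), so the handler only fires after a silent window and then rebuilds the RX URBs under rx->setup_mutex. The pattern in isolation (my_rx, wq and IDLE_INTERVAL are placeholders):

/* Idle timer as delayed work: each activity event pushes the deadline out,
 * so the handler runs only after IDLE_INTERVAL with no activity at all.
 */
static void note_activity(struct my_rx *rx)
{
        cancel_delayed_work(&rx->idle_work);
        queue_delayed_work(wq, &rx->idle_work, IDLE_INTERVAL);
}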
+
 static inline void init_usb_interrupt(struct zd_usb *usb)
 {
        struct zd_usb_interrupt *intr = &usb->intr;
@@ -993,22 +1127,27 @@ static inline void init_usb_rx(struct zd_usb *usb)
 {
        struct zd_usb_rx *rx = &usb->rx;
        spin_lock_init(&rx->lock);
+       mutex_init(&rx->setup_mutex);
        if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) {
                rx->usb_packet_size = 512;
        } else {
                rx->usb_packet_size = 64;
        }
        ZD_ASSERT(rx->fragment_length == 0);
+       INIT_DELAYED_WORK(&rx->idle_work, zd_rx_idle_timer_handler);
 }
 
 static inline void init_usb_tx(struct zd_usb *usb)
 {
        struct zd_usb_tx *tx = &usb->tx;
        spin_lock_init(&tx->lock);
-       tx->enabled = 0;
+       atomic_set(&tx->enabled, 0);
        tx->stopped = 0;
-       INIT_LIST_HEAD(&tx->free_urb_list);
+       skb_queue_head_init(&tx->submitted_skbs);
+       init_usb_anchor(&tx->submitted);
        tx->submitted_urbs = 0;
+       tx->watchdog_enabled = 0;
+       INIT_DELAYED_WORK(&tx->watchdog_work, zd_tx_watchdog_handler);
 }
 
 void zd_usb_init(struct zd_usb *usb, struct ieee80211_hw *hw,
@@ -1017,6 +1156,7 @@ void zd_usb_init(struct zd_usb *usb, struct ieee80211_hw *hw,
        memset(usb, 0, sizeof(*usb));
        usb->intf = usb_get_intf(intf);
        usb_set_intfdata(usb->intf, hw);
+       init_usb_anchor(&usb->submitted_cmds);
        init_usb_interrupt(usb);
        init_usb_tx(usb);
        init_usb_rx(usb);
@@ -1240,6 +1380,7 @@ static void disconnect(struct usb_interface *intf)
        ieee80211_unregister_hw(hw);
 
        /* Just in case something has gone wrong! */
+       zd_usb_disable_tx(usb);
        zd_usb_disable_rx(usb);
        zd_usb_disable_int(usb);
 
@@ -1255,11 +1396,92 @@ static void disconnect(struct usb_interface *intf)
        dev_dbg(&intf->dev, "disconnected\n");
 }
 
+static void zd_usb_resume(struct zd_usb *usb)
+{
+       struct zd_mac *mac = zd_usb_to_mac(usb);
+       int r;
+
+       dev_dbg_f(zd_usb_dev(usb), "\n");
+
+       r = zd_op_start(zd_usb_to_hw(usb));
+       if (r < 0) {
+               dev_warn(zd_usb_dev(usb), "Device resume failed "
+                        "with error code %d. Retrying...\n", r);
+               if (usb->was_running)
+                       set_bit(ZD_DEVICE_RUNNING, &mac->flags);
+               usb_queue_reset_device(usb->intf);
+               return;
+       }
+
+       if (mac->type != NL80211_IFTYPE_UNSPECIFIED) {
+               r = zd_restore_settings(mac);
+               if (r < 0) {
+                       dev_dbg(zd_usb_dev(usb),
+                               "failed to restore settings, %d\n", r);
+                       return;
+               }
+       }
+}
+
+static void zd_usb_stop(struct zd_usb *usb)
+{
+       dev_dbg_f(zd_usb_dev(usb), "\n");
+
+       zd_op_stop(zd_usb_to_hw(usb));
+
+       zd_usb_disable_tx(usb);
+       zd_usb_disable_rx(usb);
+       zd_usb_disable_int(usb);
+
+       usb->initialized = 0;
+}
+
+static int pre_reset(struct usb_interface *intf)
+{
+       struct ieee80211_hw *hw = usb_get_intfdata(intf);
+       struct zd_mac *mac;
+       struct zd_usb *usb;
+
+       if (!hw || intf->condition != USB_INTERFACE_BOUND)
+               return 0;
+
+       mac = zd_hw_mac(hw);
+       usb = &mac->chip.usb;
+
+       usb->was_running = test_bit(ZD_DEVICE_RUNNING, &mac->flags);
+
+       zd_usb_stop(usb);
+
+       mutex_lock(&mac->chip.mutex);
+       return 0;
+}
+
+static int post_reset(struct usb_interface *intf)
+{
+       struct ieee80211_hw *hw = usb_get_intfdata(intf);
+       struct zd_mac *mac;
+       struct zd_usb *usb;
+
+       if (!hw || intf->condition != USB_INTERFACE_BOUND)
+               return 0;
+
+       mac = zd_hw_mac(hw);
+       usb = &mac->chip.usb;
+
+       mutex_unlock(&mac->chip.mutex);
+
+       if (usb->was_running)
+               zd_usb_resume(usb);
+       return 0;
+}
+
 static struct usb_driver driver = {
        .name           = KBUILD_MODNAME,
        .id_table       = usb_ids,
        .probe          = probe,
        .disconnect     = disconnect,
+       .pre_reset      = pre_reset,
+       .post_reset     = post_reset,
 };
 
 struct workqueue_struct *zd_workqueue;
@@ -1393,30 +1615,35 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
                return -EWOULDBLOCK;
        }
        if (!usb_int_enabled(usb)) {
-                dev_dbg_f(zd_usb_dev(usb),
+               dev_dbg_f(zd_usb_dev(usb),
                          "error: usb interrupt not enabled\n");
                return -EWOULDBLOCK;
        }
 
+       ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+       BUILD_BUG_ON(sizeof(struct usb_req_read_regs) + USB_MAX_IOREAD16_COUNT *
+                    sizeof(__le16) > sizeof(usb->req_buf));
+       BUG_ON(sizeof(struct usb_req_read_regs) + count * sizeof(__le16) >
+              sizeof(usb->req_buf));
+
        req_len = sizeof(struct usb_req_read_regs) + count * sizeof(__le16);
-       req = kmalloc(req_len, GFP_KERNEL);
-       if (!req)
-               return -ENOMEM;
+       req = (void *)usb->req_buf;
+
        req->id = cpu_to_le16(USB_REQ_READ_REGS);
        for (i = 0; i < count; i++)
                req->addr[i] = cpu_to_le16((u16)addresses[i]);
 
        udev = zd_usb_to_usbdev(usb);
        prepare_read_regs_int(usb);
-       r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
-                        req, req_len, &actual_req_len, 1000 /* ms */);
+       r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
+                             req, req_len, &actual_req_len, 50 /* ms */);
        if (r) {
                dev_dbg_f(zd_usb_dev(usb),
-                       "error in usb_bulk_msg(). Error number %d\n", r);
+                       "error in usb_interrupt_msg(). Error number %d\n", r);
                goto error;
        }
        if (req_len != actual_req_len) {
-               dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()\n"
+               dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()\n"
                        " req_len %d != actual_req_len %d\n",
                        req_len, actual_req_len);
                r = -EIO;
@@ -1424,7 +1651,7 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
        }
 
        timeout = wait_for_completion_timeout(&usb->intr.read_regs.completion,
-                                             msecs_to_jiffies(1000));
+                                             msecs_to_jiffies(50));
        if (!timeout) {
                disable_read_regs_int(usb);
                dev_dbg_f(zd_usb_dev(usb), "read timed out\n");
@@ -1434,17 +1661,106 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
 
        r = get_results(usb, values, req, count);
 error:
-       kfree(req);
        return r;
 }
 
-int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
-                     unsigned int count)
+static void iowrite16v_urb_complete(struct urb *urb)
+{
+       struct zd_usb *usb = urb->context;
+
+       if (urb->status && !usb->cmd_error)
+               usb->cmd_error = urb->status;
+}
+
+static int zd_submit_waiting_urb(struct zd_usb *usb, bool last)
+{
+       int r = 0;
+       struct urb *urb = usb->urb_async_waiting;
+
+       if (!urb)
+               return 0;
+
+       usb->urb_async_waiting = NULL;
+
+       if (!last)
+               urb->transfer_flags |= URB_NO_INTERRUPT;
+
+       usb_anchor_urb(urb, &usb->submitted_cmds);
+       r = usb_submit_urb(urb, GFP_KERNEL);
+       if (r) {
+               usb_unanchor_urb(urb);
+               dev_dbg_f(zd_usb_dev(usb),
+                       "error in usb_submit_urb(). Error number %d\n", r);
+               goto error;
+       }
+
+       /* fall-through with r == 0 */
+error:
+       usb_free_urb(urb);
+       return r;
+}
+
+void zd_usb_iowrite16v_async_start(struct zd_usb *usb)
+{
+       ZD_ASSERT(usb_anchor_empty(&usb->submitted_cmds));
+       ZD_ASSERT(usb->urb_async_waiting == NULL);
+       ZD_ASSERT(!usb->in_async);
+
+       ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+
+       usb->in_async = 1;
+       usb->cmd_error = 0;
+       usb->urb_async_waiting = NULL;
+}
+
+int zd_usb_iowrite16v_async_end(struct zd_usb *usb, unsigned int timeout)
+{
+       int r;
+
+       ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+       ZD_ASSERT(usb->in_async);
+
+       /* Submit last iowrite16v URB */
+       r = zd_submit_waiting_urb(usb, true);
+       if (r) {
+               dev_dbg_f(zd_usb_dev(usb),
+                       "error in zd_submit_waiting_urb(). "
+                       "Error number %d\n", r);
+
+               usb_kill_anchored_urbs(&usb->submitted_cmds);
+               goto error;
+       }
+
+       if (timeout)
+               timeout = usb_wait_anchor_empty_timeout(&usb->submitted_cmds,
+                                                       timeout);
+       if (!timeout) {
+               usb_kill_anchored_urbs(&usb->submitted_cmds);
+               if (usb->cmd_error == -ENOENT) {
+                       dev_dbg_f(zd_usb_dev(usb), "timed out");
+                       r = -ETIMEDOUT;
+                       goto error;
+               }
+       }
+
+       r = usb->cmd_error;
+error:
+       usb->in_async = 0;
+       return r;
+}
+
+int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
+                           unsigned int count)
 {
        int r;
        struct usb_device *udev;
        struct usb_req_write_regs *req = NULL;
-       int i, req_len, actual_req_len;
+       int i, req_len;
+       struct urb *urb;
+       struct usb_host_endpoint *ep;
+
+       ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+       ZD_ASSERT(usb->in_async);
 
        if (count == 0)
                return 0;
@@ -1460,11 +1776,23 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
                return -EWOULDBLOCK;
        }
 
+       udev = zd_usb_to_usbdev(usb);
+
+       ep = usb_pipe_endpoint(udev, usb_sndintpipe(udev, EP_REGS_OUT));
+       if (!ep)
+               return -ENOENT;
+
+       urb = usb_alloc_urb(0, GFP_KERNEL);
+       if (!urb)
+               return -ENOMEM;
+
        req_len = sizeof(struct usb_req_write_regs) +
                  count * sizeof(struct reg_data);
        req = kmalloc(req_len, GFP_KERNEL);
-       if (!req)
-               return -ENOMEM;
+       if (!req) {
+               r = -ENOMEM;
+               goto error;
+       }
 
        req->id = cpu_to_le16(USB_REQ_WRITE_REGS);
        for (i = 0; i < count; i++) {
@@ -1473,29 +1801,44 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
                rw->value = cpu_to_le16(ioreqs[i].value);
        }
 
-       udev = zd_usb_to_usbdev(usb);
-       r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
-                        req, req_len, &actual_req_len, 1000 /* ms */);
+       usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
+                        req, req_len, iowrite16v_urb_complete, usb,
+                        ep->desc.bInterval);
+       urb->transfer_flags |= URB_FREE_BUFFER | URB_SHORT_NOT_OK;
+
+       /* Submit previous URB */
+       r = zd_submit_waiting_urb(usb, false);
        if (r) {
                dev_dbg_f(zd_usb_dev(usb),
-                       "error in usb_bulk_msg(). Error number %d\n", r);
-               goto error;
-       }
-       if (req_len != actual_req_len) {
-               dev_dbg_f(zd_usb_dev(usb),
-                       "error in usb_bulk_msg()"
-                       " req_len %d != actual_req_len %d\n",
-                       req_len, actual_req_len);
-               r = -EIO;
+                       "error in zd_submit_waiting_urb(). "
+                       "Error number %d\n", r);
                goto error;
        }
 
-       /* FALL-THROUGH with r == 0 */
+       /* Delay the submit so that the URB_NO_INTERRUPT flag can be set for
+        * all URBs of the current batch except the very last one.
+        */
+       usb->urb_async_waiting = urb;
+       return 0;
 error:
-       kfree(req);
+       usb_free_urb(urb);
        return r;
 }
 
+int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
+                       unsigned int count)
+{
+       int r;
+
+       zd_usb_iowrite16v_async_start(usb);
+       r = zd_usb_iowrite16v_async(usb, ioreqs, count);
+       if (r) {
+               zd_usb_iowrite16v_async_end(usb, 0);
+               return r;
+       }
+       return zd_usb_iowrite16v_async_end(usb, 50 /* ms */);
+}
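
The synchronous wrapper above also documents the intended call pattern for the new asynchronous interface: start a batch, queue any number of write requests, then end the batch to submit the last URB and wait for completion. A caller-side sketch, assuming the chip mutex is already held (the ZD_ASSERTs enforce this) and using invented CR_EXAMPLE_* addresses rather than real ZD1211 registers:

static int example_batched_iowrite(struct zd_usb *usb)
{
	static const struct zd_ioreq16 ioreqs[] = {
		{ .addr = CR_EXAMPLE_A, .value = 0x0001 },
		{ .addr = CR_EXAMPLE_B, .value = 0x00ff },
	};
	int r;

	zd_usb_iowrite16v_async_start(usb);
	r = zd_usb_iowrite16v_async(usb, ioreqs, ARRAY_SIZE(ioreqs));
	if (r) {
		/* timeout 0: tear the batch down without waiting */
		zd_usb_iowrite16v_async_end(usb, 0);
		return r;
	}
	/* submit the final URB and wait up to 50 ms for the whole batch */
	return zd_usb_iowrite16v_async_end(usb, 50 /* ms */);
}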
+
 int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
 {
        int r;
@@ -1537,14 +1880,19 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
        if (r) {
                dev_dbg_f(zd_usb_dev(usb),
                        "error %d: Couldn't read CR203\n", r);
-               goto out;
+               return r;
        }
        bit_value_template &= ~(RF_IF_LE|RF_CLK|RF_DATA);
 
+       ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+       BUILD_BUG_ON(sizeof(struct usb_req_rfwrite) +
+                    USB_MAX_RFWRITE_BIT_COUNT * sizeof(__le16) >
+                    sizeof(usb->req_buf));
+       BUG_ON(sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16) >
+              sizeof(usb->req_buf));
+
        req_len = sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16);
-       req = kmalloc(req_len, GFP_KERNEL);
-       if (!req)
-               return -ENOMEM;
+       req = (void *)usb->req_buf;
 
        req->id = cpu_to_le16(USB_REQ_WRITE_RF);
        /* 1: 3683a, but not used in ZYDAS driver */
@@ -1559,15 +1907,15 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
        }
 
        udev = zd_usb_to_usbdev(usb);
-       r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
-                        req, req_len, &actual_req_len, 1000 /* ms */);
+       r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
+                             req, req_len, &actual_req_len, 50 /* ms */);
        if (r) {
                dev_dbg_f(zd_usb_dev(usb),
-                       "error in usb_bulk_msg(). Error number %d\n", r);
+                       "error in usb_interrupt_msg(). Error number %d\n", r);
                goto out;
        }
        if (req_len != actual_req_len) {
-               dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()"
+               dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()"
                        " req_len %d != actual_req_len %d\n",
                        req_len, actual_req_len);
                r = -EIO;
@@ -1576,6 +1924,5 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
 
        /* FALL-THROUGH with r == 0 */
 out:
-       kfree(req);
        return r;
 }
index 1b1655cb7cb4baf6f04d4ae8525a0066e0f117bf..b3df2c8116cc6ef2d22234318ceacb4866a2481a 100644 (file)
 #define ZD_USB_TX_HIGH  5
 #define ZD_USB_TX_LOW   2
 
+#define ZD_TX_TIMEOUT          (HZ * 5)
+#define ZD_TX_WATCHDOG_INTERVAL        round_jiffies_relative(HZ)
+#define ZD_RX_IDLE_INTERVAL    round_jiffies_relative(30 * HZ)
+
 enum devicetype {
        DEVICE_ZD1211  = 0,
        DEVICE_ZD1211B = 1,
@@ -162,6 +166,8 @@ struct zd_usb_interrupt {
        struct read_regs_int read_regs;
        spinlock_t lock;
        struct urb *urb;
+       void *buffer;
+       dma_addr_t buffer_dma;
        int interval;
        u8 read_regs_enabled:1;
 };
@@ -175,7 +181,9 @@ static inline struct usb_int_regs *get_read_regs(struct zd_usb_interrupt *intr)
 
 struct zd_usb_rx {
        spinlock_t lock;
-       u8 fragment[2*USB_MAX_RX_SIZE];
+       struct mutex setup_mutex;
+       struct delayed_work idle_work;
+       u8 fragment[2 * USB_MAX_RX_SIZE];
        unsigned int fragment_length;
        unsigned int usb_packet_size;
        struct urb **urbs;
@@ -184,19 +192,21 @@ struct zd_usb_rx {
 
 /**
  * struct zd_usb_tx - structure used for transmitting frames
+ * @enabled: atomic enabled flag, indicates whether tx is enabled
  * @lock: lock for transmission
- * @free_urb_list: list of free URBs, contains all the URBs, which can be used
+ * @submitted: anchor for URBs sent to device
  * @submitted_urbs: atomic integer that counts the URBs that have been sent
  *     to the device but not yet completed
- * @enabled: enabled flag, indicates whether tx is enabled
  * @stopped: indicates whether higher level tx queues are stopped
  */
 struct zd_usb_tx {
+       atomic_t enabled;
        spinlock_t lock;
-       struct list_head free_urb_list;
+       struct delayed_work watchdog_work;
+       struct sk_buff_head submitted_skbs;
+       struct usb_anchor submitted;
        int submitted_urbs;
-       int enabled;
-       int stopped;
+       u8 stopped:1, watchdog_enabled:1;
 };
 
 /* Contains the usb parts. The structure doesn't require a lock because intf
@@ -207,7 +217,11 @@ struct zd_usb {
        struct zd_usb_rx rx;
        struct zd_usb_tx tx;
        struct usb_interface *intf;
-       u8 is_zd1211b:1, initialized:1;
+       struct usb_anchor submitted_cmds;
+       struct urb *urb_async_waiting;
+       int cmd_error;
+       u8 req_buf[64]; /* zd_usb_iowrite16v needs 62 bytes */
+       u8 is_zd1211b:1, initialized:1, was_running:1, in_async:1;
 };
 
 #define zd_usb_dev(usb) (&usb->intf->dev)
@@ -234,12 +248,17 @@ void zd_usb_clear(struct zd_usb *usb);
 
 int zd_usb_scnprint_id(struct zd_usb *usb, char *buffer, size_t size);
 
+void zd_tx_watchdog_enable(struct zd_usb *usb);
+void zd_tx_watchdog_disable(struct zd_usb *usb);
+
 int zd_usb_enable_int(struct zd_usb *usb);
 void zd_usb_disable_int(struct zd_usb *usb);
 
 int zd_usb_enable_rx(struct zd_usb *usb);
 void zd_usb_disable_rx(struct zd_usb *usb);
 
+void zd_usb_reset_rx_idle_timer(struct zd_usb *usb);
+
 void zd_usb_enable_tx(struct zd_usb *usb);
 void zd_usb_disable_tx(struct zd_usb *usb);
 
@@ -254,6 +273,10 @@ static inline int zd_usb_ioread16(struct zd_usb *usb, u16 *value,
        return zd_usb_ioread16v(usb, value, (const zd_addr_t *)&addr, 1);
 }
 
+void zd_usb_iowrite16v_async_start(struct zd_usb *usb);
+int zd_usb_iowrite16v_async_end(struct zd_usb *usb, unsigned int timeout);
+int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
+                           unsigned int count);
 int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
                      unsigned int count);
 
index f47a714538db187e209d82a2acda81a91d7fd0de..c5d763ed406e6c2210ac8a6820f2f0b41f8905c7 100644 (file)
@@ -741,7 +741,6 @@ struct qeth_card {
        /* QDIO buffer handling */
        struct qeth_qdio_info qdio;
        struct qeth_perf_stats perf_stats;
-       int use_hard_stop;
        int read_or_write_problem;
        struct qeth_osn_info osn_info;
        struct qeth_discipline discipline;
index 019ae58ab91352c55d979362854dab4fd2a157a2..f3d98ac16e9f6faa0211978ff2ba8664765395e4 100644 (file)
@@ -302,12 +302,15 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
        int com = cmd->hdr.command;
        ipa_name = qeth_get_ipa_cmd_name(com);
        if (rc)
-               QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s returned x%X \"%s\"\n",
-                               ipa_name, com, QETH_CARD_IFNAME(card),
-                                       rc, qeth_get_ipa_msg(rc));
+               QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned "
+                               "x%X \"%s\"\n",
+                               ipa_name, com, dev_name(&card->gdev->dev),
+                               QETH_CARD_IFNAME(card), rc,
+                               qeth_get_ipa_msg(rc));
        else
-               QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s succeeded\n",
-                               ipa_name, com, QETH_CARD_IFNAME(card));
+               QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n",
+                               ipa_name, com, dev_name(&card->gdev->dev),
+                               QETH_CARD_IFNAME(card));
 }
 
 static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
@@ -1083,7 +1086,6 @@ static int qeth_setup_card(struct qeth_card *card)
        card->data.state  = CH_STATE_DOWN;
        card->state = CARD_STATE_DOWN;
        card->lan_online = 0;
-       card->use_hard_stop = 0;
        card->read_or_write_problem = 0;
        card->dev = NULL;
        spin_lock_init(&card->vlanlock);
@@ -1732,20 +1734,22 @@ int qeth_send_control_data(struct qeth_card *card, int len,
                };
        }
 
+       if (reply->rc == -EIO)
+               goto error;
        rc = reply->rc;
        qeth_put_reply(reply);
        return rc;
 
 time_err:
+       reply->rc = -ETIME;
        spin_lock_irqsave(&reply->card->lock, flags);
        list_del_init(&reply->list);
        spin_unlock_irqrestore(&reply->card->lock, flags);
-       reply->rc = -ETIME;
        atomic_inc(&reply->received);
+error:
        atomic_set(&card->write.irq_pending, 0);
        qeth_release_buffer(iob->channel, iob);
        card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
-       wake_up(&reply->wait_q);
        rc = reply->rc;
        qeth_put_reply(reply);
        return rc;
@@ -2490,45 +2494,19 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
 }
 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
 
-static int qeth_send_startstoplan(struct qeth_card *card,
-               enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
-{
-       int rc;
-       struct qeth_cmd_buffer *iob;
-
-       iob = qeth_get_ipacmd_buffer(card, ipacmd, prot);
-       rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
-
-       return rc;
-}
-
 int qeth_send_startlan(struct qeth_card *card)
 {
        int rc;
+       struct qeth_cmd_buffer *iob;
 
        QETH_DBF_TEXT(SETUP, 2, "strtlan");
 
-       rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, 0);
+       iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
+       rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
        return rc;
 }
 EXPORT_SYMBOL_GPL(qeth_send_startlan);
 
-int qeth_send_stoplan(struct qeth_card *card)
-{
-       int rc = 0;
-
-       /*
-        * TODO: according to the IPA format document page 14,
-        * TCP/IP (we!) never issue a STOPLAN
-        * is this right ?!?
-        */
-       QETH_DBF_TEXT(SETUP, 2, "stoplan");
-
-       rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, 0);
-       return rc;
-}
-EXPORT_SYMBOL_GPL(qeth_send_stoplan);
-
 int qeth_default_setadapterparms_cb(struct qeth_card *card,
                struct qeth_reply *reply, unsigned long data)
 {
index ada0fe782373b945986444e5dae1d7181d8d3843..6fbaacb2194398c806b3180cf87e8625de5ee6be 100644 (file)
@@ -202,17 +202,19 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
                kfree(mc);
 }
 
-static void qeth_l2_del_all_mc(struct qeth_card *card)
+static void qeth_l2_del_all_mc(struct qeth_card *card, int del)
 {
        struct qeth_mc_mac *mc, *tmp;
 
        spin_lock_bh(&card->mclock);
        list_for_each_entry_safe(mc, tmp, &card->mc_list, list) {
-               if (mc->is_vmac)
-                       qeth_l2_send_setdelmac(card, mc->mc_addr,
+               if (del) {
+                       if (mc->is_vmac)
+                               qeth_l2_send_setdelmac(card, mc->mc_addr,
                                        IPA_CMD_DELVMAC, NULL);
-               else
-                       qeth_l2_send_delgroupmac(card, mc->mc_addr);
+                       else
+                               qeth_l2_send_delgroupmac(card, mc->mc_addr);
+               }
                list_del(&mc->list);
                kfree(mc);
        }
@@ -288,18 +290,13 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
                                 qeth_l2_send_setdelvlan_cb, NULL);
 }
 
-static void qeth_l2_process_vlans(struct qeth_card *card, int clear)
+static void qeth_l2_process_vlans(struct qeth_card *card)
 {
        struct qeth_vlan_vid *id;
        QETH_CARD_TEXT(card, 3, "L2prcvln");
        spin_lock_bh(&card->vlanlock);
        list_for_each_entry(id, &card->vid_list, list) {
-               if (clear)
-                       qeth_l2_send_setdelvlan(card, id->vid,
-                               IPA_CMD_DELVLAN);
-               else
-                       qeth_l2_send_setdelvlan(card, id->vid,
-                               IPA_CMD_SETVLAN);
+               qeth_l2_send_setdelvlan(card, id->vid, IPA_CMD_SETVLAN);
        }
        spin_unlock_bh(&card->vlanlock);
 }
@@ -379,19 +376,11 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
                        dev_close(card->dev);
                        rtnl_unlock();
                }
-               if (!card->use_hard_stop ||
-                       recovery_mode) {
-                       __u8 *mac = &card->dev->dev_addr[0];
-                       rc = qeth_l2_send_delmac(card, mac);
-                       QETH_DBF_TEXT_(SETUP, 2, "Lerr%d", rc);
-               }
+               card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
                card->state = CARD_STATE_SOFTSETUP;
        }
        if (card->state == CARD_STATE_SOFTSETUP) {
-               qeth_l2_process_vlans(card, 1);
-               if (!card->use_hard_stop ||
-                       recovery_mode)
-                       qeth_l2_del_all_mc(card);
+               qeth_l2_del_all_mc(card, 0);
                qeth_clear_ipacmd_list(card);
                card->state = CARD_STATE_HARDSETUP;
        }
@@ -405,7 +394,6 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
                qeth_clear_cmd_buffers(&card->read);
                qeth_clear_cmd_buffers(&card->write);
        }
-       card->use_hard_stop = 0;
        return rc;
 }
 
@@ -705,7 +693,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
        if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
            (card->state != CARD_STATE_UP))
                return;
-       qeth_l2_del_all_mc(card);
+       qeth_l2_del_all_mc(card, 1);
        spin_lock_bh(&card->mclock);
        netdev_for_each_mc_addr(ha, dev)
                qeth_l2_add_mc(card, ha->addr, 0);
@@ -907,10 +895,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
        qeth_set_allowed_threads(card, 0, 1);
        wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
 
-       if (cgdev->state == CCWGROUP_ONLINE) {
-               card->use_hard_stop = 1;
+       if (cgdev->state == CCWGROUP_ONLINE)
                qeth_l2_set_offline(cgdev);
-       }
 
        if (card->dev) {
                unregister_netdev(card->dev);
@@ -1040,7 +1026,7 @@ contin:
 
        if (card->info.type != QETH_CARD_TYPE_OSN &&
            card->info.type != QETH_CARD_TYPE_OSM)
-               qeth_l2_process_vlans(card, 0);
+               qeth_l2_process_vlans(card);
 
        netif_tx_disable(card->dev);
 
@@ -1076,7 +1062,6 @@ contin:
        return 0;
 
 out_remove:
-       card->use_hard_stop = 1;
        qeth_l2_stop_card(card, 0);
        ccw_device_set_offline(CARD_DDEV(card));
        ccw_device_set_offline(CARD_WDEV(card));
@@ -1144,7 +1129,6 @@ static int qeth_l2_recover(void *ptr)
        QETH_CARD_TEXT(card, 2, "recover2");
        dev_warn(&card->gdev->dev,
                "A recovery process has been started for the device\n");
-       card->use_hard_stop = 1;
        __qeth_l2_set_offline(card->gdev, 1);
        rc = __qeth_l2_set_online(card->gdev, 1);
        if (!rc)
@@ -1191,7 +1175,6 @@ static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
        if (gdev->state == CCWGROUP_OFFLINE)
                return 0;
        if (card->state == CARD_STATE_UP) {
-               card->use_hard_stop = 1;
                __qeth_l2_set_offline(card->gdev, 1);
        } else
                __qeth_l2_set_offline(card->gdev, 0);
index d09b0c44fc3d35c89224fd699b2cf0a2624d283b..6a9cc58321a03752049142c0120095bcd65e4377 100644 (file)
@@ -510,8 +510,7 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
        kfree(tbd_list);
 }
 
-static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
-                                       int recover)
+static void qeth_l3_clear_ip_list(struct qeth_card *card, int recover)
 {
        struct qeth_ipaddr *addr, *tmp;
        unsigned long flags;
@@ -530,11 +529,6 @@ static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
                addr = list_entry(card->ip_list.next,
                                  struct qeth_ipaddr, entry);
                list_del_init(&addr->entry);
-               if (clean) {
-                       spin_unlock_irqrestore(&card->ip_lock, flags);
-                       qeth_l3_deregister_addr_entry(card, addr);
-                       spin_lock_irqsave(&card->ip_lock, flags);
-               }
                if (!recover || addr->is_multicast) {
                        kfree(addr);
                        continue;
@@ -1611,29 +1605,6 @@ static int qeth_l3_start_ipassists(struct qeth_card *card)
        return 0;
 }
 
-static int qeth_l3_put_unique_id(struct qeth_card *card)
-{
-
-       int rc = 0;
-       struct qeth_cmd_buffer *iob;
-       struct qeth_ipa_cmd *cmd;
-
-       QETH_CARD_TEXT(card, 2, "puniqeid");
-
-       if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
-               UNIQUE_ID_NOT_BY_CARD)
-               return -1;
-       iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
-                                    QETH_PROT_IPV6);
-       cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-       *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
-                               card->info.unique_id;
-       memcpy(&cmd->data.create_destroy_addr.unique_id[0],
-              card->dev->dev_addr, OSA_ADDR_LEN);
-       rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
-       return rc;
-}
-
 static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
                struct qeth_reply *reply, unsigned long data)
 {
@@ -2324,25 +2295,14 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
                        dev_close(card->dev);
                        rtnl_unlock();
                }
-               if (!card->use_hard_stop) {
-                       rc = qeth_send_stoplan(card);
-                       if (rc)
-                               QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-               }
                card->state = CARD_STATE_SOFTSETUP;
        }
        if (card->state == CARD_STATE_SOFTSETUP) {
-               qeth_l3_clear_ip_list(card, !card->use_hard_stop, 1);
+               qeth_l3_clear_ip_list(card, 1);
                qeth_clear_ipacmd_list(card);
                card->state = CARD_STATE_HARDSETUP;
        }
        if (card->state == CARD_STATE_HARDSETUP) {
-               if (!card->use_hard_stop &&
-                   (card->info.type != QETH_CARD_TYPE_IQD)) {
-                       rc = qeth_l3_put_unique_id(card);
-                       if (rc)
-                               QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
-               }
                qeth_qdio_clear_card(card, 0);
                qeth_clear_qdio_buffers(card);
                qeth_clear_working_pool_list(card);
@@ -2352,7 +2312,6 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
                qeth_clear_cmd_buffers(&card->read);
                qeth_clear_cmd_buffers(&card->write);
        }
-       card->use_hard_stop = 0;
        return rc;
 }
 
@@ -3483,17 +3442,15 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
        qeth_set_allowed_threads(card, 0, 1);
        wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
 
-       if (cgdev->state == CCWGROUP_ONLINE) {
-               card->use_hard_stop = 1;
+       if (cgdev->state == CCWGROUP_ONLINE)
                qeth_l3_set_offline(cgdev);
-       }
 
        if (card->dev) {
                unregister_netdev(card->dev);
                card->dev = NULL;
        }
 
-       qeth_l3_clear_ip_list(card, 0, 0);
+       qeth_l3_clear_ip_list(card, 0);
        qeth_l3_clear_ipato_list(card);
        return;
 }
@@ -3594,7 +3551,6 @@ contin:
        mutex_unlock(&card->discipline_mutex);
        return 0;
 out_remove:
-       card->use_hard_stop = 1;
        qeth_l3_stop_card(card, 0);
        ccw_device_set_offline(CARD_DDEV(card));
        ccw_device_set_offline(CARD_WDEV(card));
@@ -3663,7 +3619,6 @@ static int qeth_l3_recover(void *ptr)
        QETH_CARD_TEXT(card, 2, "recover2");
        dev_warn(&card->gdev->dev,
                "A recovery process has been started for the device\n");
-       card->use_hard_stop = 1;
        __qeth_l3_set_offline(card->gdev, 1);
        rc = __qeth_l3_set_online(card->gdev, 1);
        if (!rc)
@@ -3684,7 +3639,6 @@ static int qeth_l3_recover(void *ptr)
 static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
 {
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-       qeth_l3_clear_ip_list(card, 0, 0);
        qeth_qdio_clear_card(card, 0);
        qeth_clear_qdio_buffers(card);
 }
@@ -3700,7 +3654,6 @@ static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
        if (gdev->state == CCWGROUP_OFFLINE)
                return 0;
        if (card->state == CARD_STATE_UP) {
-               card->use_hard_stop = 1;
                __qeth_l3_set_offline(card->gdev, 1);
        } else
                __qeth_l3_set_offline(card->gdev, 0);
index 5cf4e9831f1bf2cffa4c57f302bc52aaf00a2f17..11dff23f7838479e33f90fd412154a66ceb77dbd 100644 (file)
@@ -1,6 +1,8 @@
 config SCSI_CXGB3_ISCSI
        tristate "Chelsio T3 iSCSI support"
-       depends on CHELSIO_T3_DEPENDS
+       depends on PCI && INET
+       select NETDEVICES
+       select NETDEV_10000
        select CHELSIO_T3
        select SCSI_ISCSI_ATTRS
        ---help---
index bb94b39b17b3ee10df2c403c22c7db3cab91a211..d5302c27f37786d62302d29037a977a422b74827 100644 (file)
@@ -1,6 +1,8 @@
 config SCSI_CXGB4_ISCSI
        tristate "Chelsio T4 iSCSI support"
-       depends on CHELSIO_T4_DEPENDS
+       depends on PCI && INET
+       select NETDEVICES
+       select NETDEV_10000
        select CHELSIO_T4
        select SCSI_ISCSI_ATTRS
        ---help---
index d2ad3d6767246d34d0bc2ffcfd187d5d60e65fe9..889199aa1f5b232d83439d67dfc75fee74aeb52f 100644 (file)
@@ -470,7 +470,8 @@ static struct rtable *find_route_ipv4(__be32 saddr, __be32 daddr,
                        }
        };
 
-       if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
+       rt = ip_route_output_flow(&init_net, &fl, NULL);
+       if (IS_ERR(rt))
                return NULL;
 
        return rt;
index 3918d2cc5856b333600c6fc313a611eb66ff97fd..e05ba6eefc7e7d148fe94db458e2055bf342b9dc 100644 (file)
@@ -1192,10 +1192,10 @@ void ssb_device_enable(struct ssb_device *dev, u32 core_specific_flags)
 }
 EXPORT_SYMBOL(ssb_device_enable);
 
-/* Wait for a bit in a register to get set or unset.
+/* Wait for bitmask in a register to get set or cleared.
  * timeout is in units of ten-microseconds */
-static int ssb_wait_bit(struct ssb_device *dev, u16 reg, u32 bitmask,
-                       int timeout, int set)
+static int ssb_wait_bits(struct ssb_device *dev, u16 reg, u32 bitmask,
+                        int timeout, int set)
 {
        int i;
        u32 val;
@@ -1203,7 +1203,7 @@ static int ssb_wait_bit(struct ssb_device *dev, u16 reg, u32 bitmask,
        for (i = 0; i < timeout; i++) {
                val = ssb_read32(dev, reg);
                if (set) {
-                       if (val & bitmask)
+                       if ((val & bitmask) == bitmask)
                                return 0;
                } else {
                        if (!(val & bitmask))
@@ -1220,20 +1220,38 @@ static int ssb_wait_bit(struct ssb_device *dev, u16 reg, u32 bitmask,
 
 void ssb_device_disable(struct ssb_device *dev, u32 core_specific_flags)
 {
-       u32 reject;
+       u32 reject, val;
 
        if (ssb_read32(dev, SSB_TMSLOW) & SSB_TMSLOW_RESET)
                return;
 
        reject = ssb_tmslow_reject_bitmask(dev);
-       ssb_write32(dev, SSB_TMSLOW, reject | SSB_TMSLOW_CLOCK);
-       ssb_wait_bit(dev, SSB_TMSLOW, reject, 1000, 1);
-       ssb_wait_bit(dev, SSB_TMSHIGH, SSB_TMSHIGH_BUSY, 1000, 0);
-       ssb_write32(dev, SSB_TMSLOW,
-                   SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
-                   reject | SSB_TMSLOW_RESET |
-                   core_specific_flags);
-       ssb_flush_tmslow(dev);
+
+       if (ssb_read32(dev, SSB_TMSLOW) & SSB_TMSLOW_CLOCK) {
+               ssb_write32(dev, SSB_TMSLOW, reject | SSB_TMSLOW_CLOCK);
+               ssb_wait_bits(dev, SSB_TMSLOW, reject, 1000, 1);
+               ssb_wait_bits(dev, SSB_TMSHIGH, SSB_TMSHIGH_BUSY, 1000, 0);
+
+               if (ssb_read32(dev, SSB_IDLOW) & SSB_IDLOW_INITIATOR) {
+                       val = ssb_read32(dev, SSB_IMSTATE);
+                       val |= SSB_IMSTATE_REJECT;
+                       ssb_write32(dev, SSB_IMSTATE, val);
+                       ssb_wait_bits(dev, SSB_IMSTATE, SSB_IMSTATE_BUSY, 1000,
+                                     0);
+               }
+
+               ssb_write32(dev, SSB_TMSLOW,
+                       SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+                       reject | SSB_TMSLOW_RESET |
+                       core_specific_flags);
+               ssb_flush_tmslow(dev);
+
+               if (ssb_read32(dev, SSB_IDLOW) & SSB_IDLOW_INITIATOR) {
+                       val = ssb_read32(dev, SSB_IMSTATE);
+                       val &= ~SSB_IMSTATE_REJECT;
+                       ssb_write32(dev, SSB_IMSTATE, val);
+               }
+       }
 
        ssb_write32(dev, SSB_TMSLOW,
                    reject | SSB_TMSLOW_RESET |
index 158449e55044f91b93970a995c612ef9f1c28e76..a467b20baac8d652ea322daff5b996b07f98072e 100644 (file)
@@ -468,10 +468,14 @@ static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in)
                SPEX(country_code, SSB_SPROM4_CCODE, 0xFFFF, 0);
                SPEX(boardflags_lo, SSB_SPROM4_BFLLO, 0xFFFF, 0);
                SPEX(boardflags_hi, SSB_SPROM4_BFLHI, 0xFFFF, 0);
+               SPEX(boardflags2_lo, SSB_SPROM4_BFL2LO, 0xFFFF, 0);
+               SPEX(boardflags2_hi, SSB_SPROM4_BFL2HI, 0xFFFF, 0);
        } else {
                SPEX(country_code, SSB_SPROM5_CCODE, 0xFFFF, 0);
                SPEX(boardflags_lo, SSB_SPROM5_BFLLO, 0xFFFF, 0);
                SPEX(boardflags_hi, SSB_SPROM5_BFLHI, 0xFFFF, 0);
+               SPEX(boardflags2_lo, SSB_SPROM5_BFL2LO, 0xFFFF, 0);
+               SPEX(boardflags2_hi, SSB_SPROM5_BFL2HI, 0xFFFF, 0);
        }
        SPEX(ant_available_a, SSB_SPROM4_ANTAVAIL, SSB_SPROM4_ANTAVAIL_A,
             SSB_SPROM4_ANTAVAIL_A_SHIFT);
@@ -641,7 +645,7 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
                break;
        default:
                ssb_printk(KERN_WARNING PFX "Unsupported SPROM"
-                          "  revision %d detected. Will extract"
+                          " revision %d detected. Will extract"
                           " v1\n", out->revision);
                out->revision = 1;
                sprom_extract_r123(out, in);
index 991463f4a7f4c1559ffbf8d1d4998ea0942bccaa..9b7b71c294b892a94993716bd58d97d21dc1c1dc 100644 (file)
@@ -2313,7 +2313,9 @@ static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi)
        notif_bss_info->frame_len =
            offsetof(struct ieee80211_mgmt,
                     u.beacon.variable) + wl_get_ielen(wl);
-       freq = ieee80211_channel_to_frequency(notif_bss_info->channel);
+       freq = ieee80211_channel_to_frequency(notif_bss_info->channel,
+                                             band->band);
+
        channel = ieee80211_get_channel(wiphy, freq);
 
        WL_DBG("SSID : \"%s\", rssi %d, channel %d, capability : 0x04%x, bssid %pM\n",
index cd8392badff07e6059cf2b2e99fb697a2855b220..6363077468f1297e8135a0bf7b4c5d21785f2aa1 100644 (file)
@@ -104,9 +104,6 @@ static int wl_request_fw(struct wl_info *wl, struct pci_dev *pdev);
 static void wl_release_fw(struct wl_info *wl);
 
 /* local prototypes */
-static int wl_start(struct sk_buff *skb, struct wl_info *wl);
-static int wl_start_int(struct wl_info *wl, struct ieee80211_hw *hw,
-                       struct sk_buff *skb);
 static void wl_dpc(unsigned long data);
 
 MODULE_AUTHOR("Broadcom Corporation");
@@ -135,7 +132,6 @@ module_param(phymsglevel, int, 0);
 
 #define HW_TO_WL(hw)    (hw->priv)
 #define WL_TO_HW(wl)     (wl->pub->ieee_hw)
-static int wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
 static int wl_ops_start(struct ieee80211_hw *hw);
 static void wl_ops_stop(struct ieee80211_hw *hw);
 static int wl_ops_add_interface(struct ieee80211_hw *hw,
@@ -173,20 +169,18 @@ static int wl_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                           enum ieee80211_ampdu_mlme_action action,
                           struct ieee80211_sta *sta, u16 tid, u16 *ssn);
 
-static int wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
-       int status;
        struct wl_info *wl = hw->priv;
        WL_LOCK(wl);
        if (!wl->pub->up) {
                WL_ERROR("ops->tx called while down\n");
-               status = -ENETDOWN;
+               kfree_skb(skb);
                goto done;
        }
-       status = wl_start(skb, wl);
+       wlc_sendpkt_mac80211(wl->wlc, skb, hw);
  done:
        WL_UNLOCK(wl);
-       return status;
 }
 
 static int wl_ops_start(struct ieee80211_hw *hw)
@@ -1325,22 +1319,6 @@ void wl_free(struct wl_info *wl)
        osl_detach(osh);
 }
 
-/* transmit a packet */
-static int BCMFASTPATH wl_start(struct sk_buff *skb, struct wl_info *wl)
-{
-       if (!wl)
-               return -ENETDOWN;
-
-       return wl_start_int(wl, WL_TO_HW(wl), skb);
-}
-
-static int BCMFASTPATH
-wl_start_int(struct wl_info *wl, struct ieee80211_hw *hw, struct sk_buff *skb)
-{
-       wlc_sendpkt_mac80211(wl->wlc, skb, hw);
-       return NETDEV_TX_OK;
-}
-
 void wl_txflowcontrol(struct wl_info *wl, struct wl_if *wlif, bool state,
                      int prio)
 {
index e37e8058e2b846ffa99576e3968adcc0445073d9..aa12d1a65184d97c81639e0dcec4b439101c0390 100644 (file)
@@ -6818,11 +6818,14 @@ prep_mac80211_status(struct wlc_info *wlc, d11rxhdr_t *rxh, struct sk_buff *p,
        ratespec_t rspec;
        unsigned char *plcp;
 
+#if 0
+       /* Clearly, this is bogus -- reading the TSF now is wrong */
        wlc_read_tsf(wlc, &tsf_l, &tsf_h);      /* mactime */
        rx_status->mactime = tsf_h;
        rx_status->mactime <<= 32;
        rx_status->mactime |= tsf_l;
-       rx_status->flag |= RX_FLAG_TSFT;
+       rx_status->flag |= RX_FLAG_MACTIME_MPDU; /* clearly wrong */
+#endif
 
        channel = WLC_CHAN_CHANNEL(rxh->RxChan);
 
index 89279ba1b7378aee57230d1964d072fb4b38e703..39413b7d387d1c341cfab722dd629996acd07414 100644 (file)
@@ -525,7 +525,7 @@ static void pohmelfs_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *n
 {
        int err;
 
-       if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
+       if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
                return;
 
        switch (msg->flags) {
index 2163d60c2eafef9e7196026db08bf37f7a0a3de9..3724e1e67ec23adfd56d52b0e1cf6a5e0fe9e523 100644 (file)
@@ -118,13 +118,14 @@ static void wbsoft_configure_filter(struct ieee80211_hw *dev,
        *total_flags = new_flags;
 }
 
-static int wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
 {
        struct wbsoft_priv *priv = dev->priv;
 
        if (priv->sMlmeFrame.IsInUsed != PACKET_FREE_TO_USE) {
                priv->sMlmeFrame.wNumTxMMPDUDiscarded++;
-               return NETDEV_TX_BUSY;
+               kfree_skb(skb);
+               return;
        }
 
        priv->sMlmeFrame.IsInUsed = PACKET_COME_FROM_MLME;
@@ -140,8 +141,6 @@ static int wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
         */
 
        Mds_Tx(priv);
-
-       return NETDEV_TX_OK;
 }
 
 static int wbsoft_start(struct ieee80211_hw *dev)
index 52ec0959d462aedeff308ff3106acfd53f0a0ff1..5180a215d781337912c4e71d6a8e0cdbdd35ba55 100644 (file)
@@ -73,7 +73,7 @@ static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *ns
        struct uvesafb_task *utask;
        struct uvesafb_ktask *task;
 
-       if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
+       if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
                return;
 
        if (msg->seq >= UVESAFB_TASKS_MAX)
index 359df048769059bfe24883d00149f8f3a4727872..9d339eb278810a36e8549bad93954f8d31636f44 100644 (file)
 #define AUDIT_BPRM_FCAPS       1321    /* Information about fcaps increasing perms */
 #define AUDIT_CAPSET           1322    /* Record showing argument to sys_capset */
 #define AUDIT_MMAP             1323    /* Record showing descriptor and flags in mmap */
+#define AUDIT_NETFILTER_PKT    1324    /* Packets traversing netfilter chains */
+#define AUDIT_NETFILTER_CFG    1325    /* Netfilter chain modifications */
 
 #define AUDIT_AVC              1400    /* SE Linux avc denial or grant */
 #define AUDIT_SELINUX_ERR      1401    /* Internal SE Linux Errors */
diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h
new file mode 100644 (file)
index 0000000..473771a
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ * cpu_rmap.c: CPU affinity reverse-map support
+ * Copyright 2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+
+/**
+ * struct cpu_rmap - CPU affinity reverse-map
+ * @size: Number of objects to be reverse-mapped
+ * @used: Number of objects added
+ * @obj: Pointer to array of object pointers
+ * @near: For each CPU, the index and distance to the nearest object,
+ *      based on affinity masks
+ */
+struct cpu_rmap {
+       u16             size, used;
+       void            **obj;
+       struct {
+               u16     index;
+               u16     dist;
+       }               near[0];
+};
+#define CPU_RMAP_DIST_INF 0xffff
+
+extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags);
+
+/**
+ * free_cpu_rmap - free CPU affinity reverse-map
+ * @rmap: Reverse-map allocated with alloc_cpu_rmap(), or %NULL
+ */
+static inline void free_cpu_rmap(struct cpu_rmap *rmap)
+{
+       kfree(rmap);
+}
+
+extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj);
+extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
+                          const struct cpumask *affinity);
+
+static inline u16 cpu_rmap_lookup_index(struct cpu_rmap *rmap, unsigned int cpu)
+{
+       return rmap->near[cpu].index;
+}
+
+static inline void *cpu_rmap_lookup_obj(struct cpu_rmap *rmap, unsigned int cpu)
+{
+       return rmap->obj[rmap->near[cpu].index];
+}
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+
+/**
+ * alloc_irq_cpu_rmap - allocate CPU affinity reverse-map for IRQs
+ * @size: Number of objects to be mapped
+ *
+ * Must be called in process context.
+ */
+static inline struct cpu_rmap *alloc_irq_cpu_rmap(unsigned int size)
+{
+       return alloc_cpu_rmap(size, GFP_KERNEL);
+}
+extern void free_irq_cpu_rmap(struct cpu_rmap *rmap);
+
+extern int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq);
+
+#endif
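
The interface in this header lets a multiqueue driver map the CPU it is currently running on back to the nearest of its IRQ-driven objects, typically an RX queue. A sketch of the expected wiring, assuming CONFIG_GENERIC_HARDIRQS and with every example_* name invented for illustration:

#include <linux/cpu_rmap.h>
#include <linux/smp.h>

static struct cpu_rmap *example_rmap;

static int example_setup_rmap(const int *irqs, unsigned int nr_queues)
{
	unsigned int i;
	int rc;

	example_rmap = alloc_irq_cpu_rmap(nr_queues);
	if (!example_rmap)
		return -ENOMEM;

	for (i = 0; i < nr_queues; i++) {
		rc = irq_cpu_rmap_add(example_rmap, irqs[i]);
		if (rc) {
			free_irq_cpu_rmap(example_rmap);
			example_rmap = NULL;
			return rc;
		}
	}
	return 0;
}

/* Hot path: which queue's IRQ affinity is nearest to this CPU?
 * Assumes preemption is already disabled (e.g. NAPI/softirq context). */
static u16 example_nearest_queue(void)
{
	return cpu_rmap_lookup_index(example_rmap, smp_processor_id());
}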
index 66900e3c6eb1ba0c33d741725e7a532f2b555012..a3680a16718f4f200d32da9045d5b32b52cd6b17 100644 (file)
 /* IEEE 802.1Qaz std supported values */
 #define IEEE_8021QAZ_MAX_TCS   8
 
+#define IEEE_8021QAZ_TSA_STRICT                0
+#define IEEE_8021QAZ_TSA_CB_SHAPER     1
+#define IEEE_8021QAZ_TSA_ETS           2
+#define IEEE_8021QAZ_TSA_VENDOR                255
+
 /* This structure contains the IEEE 802.1Qaz ETS managed object
  *
  * @willing: willing bit in ETS configuration TLV
@@ -82,6 +87,45 @@ struct ieee_pfc {
        __u64   indications[IEEE_8021QAZ_MAX_TCS];
 };
 
+/* CEE DCBX std supported values */
+#define CEE_DCBX_MAX_PGS       8
+#define CEE_DCBX_MAX_PRIO      8
+
+/**
+ * struct cee_pg - CEE Priority-Group managed object
+ *
+ * @willing: willing bit in the PG tlv
+ * @error: error bit in the PG tlv
+ * @pg_en: enable bit of the PG feature
+ * @tcs_supported: number of traffic classes supported
+ * @pg_bw: bandwidth percentage for each priority group
+ * @prio_pg: priority to PG mapping indexed by priority
+ */
+struct cee_pg {
+       __u8    willing;
+       __u8    error;
+       __u8    pg_en;
+       __u8    tcs_supported;
+       __u8    pg_bw[CEE_DCBX_MAX_PGS];
+       __u8    prio_pg[CEE_DCBX_MAX_PGS];
+};
+
+/**
+ * struct cee_pfc - CEE PFC managed object
+ *
+ * @willing: willing bit in the PFC tlv
+ * @error: error bit in the PFC tlv
+ * @pfc_en: bitmap indicating pfc enabled traffic classes
+ * @tcs_supported: number of traffic classes supported
+ */
+struct cee_pfc {
+       __u8    willing;
+       __u8    error;
+       __u8    pfc_en;
+       __u8    tcs_supported;
+};
+
+
 /* This structure contains the IEEE 802.1Qaz APP managed object. This
  * object is also used for the CEE std as well. There is no difference
  * between the objects.
@@ -105,6 +149,20 @@ struct dcb_app {
        __u16   protocol;
 };
 
+/**
+ * struct dcb_peer_app_info - APP feature information sent by the peer
+ *
+ * @willing: willing bit in the peer APP tlv
+ * @error: error bit in the peer APP tlv
+ *
+ * In addition to this information the full peer APP tlv also contains
+ * a table of 'app_count' APP objects defined above.
+ */
+struct dcb_peer_app_info {
+       __u8    willing;
+       __u8    error;
+};
+
 struct dcbmsg {
        __u8               dcb_family;
        __u8               cmd;
@@ -139,6 +197,7 @@ struct dcbmsg {
  * @DCB_CMD_SDCBX: set DCBX engine configuration
  * @DCB_CMD_GFEATCFG: get DCBX features flags
  * @DCB_CMD_SFEATCFG: set DCBX features negotiation flags
+ * @DCB_CMD_CEE_GET: get CEE aggregated configuration
  */
 enum dcbnl_commands {
        DCB_CMD_UNDEFINED,
@@ -181,6 +240,8 @@ enum dcbnl_commands {
        DCB_CMD_GFEATCFG,
        DCB_CMD_SFEATCFG,
 
+       DCB_CMD_CEE_GET,
+
        __DCB_CMD_ENUM_MAX,
        DCB_CMD_MAX = __DCB_CMD_ENUM_MAX - 1,
 };
@@ -203,6 +264,7 @@ enum dcbnl_commands {
  * @DCB_ATTR_IEEE: IEEE 802.1Qaz supported attributes (NLA_NESTED)
  * @DCB_ATTR_DCBX: DCBX engine configuration in the device (NLA_U8)
  * @DCB_ATTR_FEATCFG: DCBX features flags (NLA_NESTED)
+ * @DCB_ATTR_CEE: CEE std supported attributes (NLA_NESTED)
  */
 enum dcbnl_attrs {
        DCB_ATTR_UNDEFINED,
@@ -226,15 +288,32 @@ enum dcbnl_attrs {
        DCB_ATTR_DCBX,
        DCB_ATTR_FEATCFG,
 
+       /* CEE nested attributes */
+       DCB_ATTR_CEE,
+
        __DCB_ATTR_ENUM_MAX,
        DCB_ATTR_MAX = __DCB_ATTR_ENUM_MAX - 1,
 };
 
+/**
+ * enum ieee_attrs - IEEE 802.1Qaz get/set attributes
+ *
+ * @DCB_ATTR_IEEE_UNSPEC: unspecified
+ * @DCB_ATTR_IEEE_ETS: negotiated ETS configuration
+ * @DCB_ATTR_IEEE_PFC: negotiated PFC configuration
+ * @DCB_ATTR_IEEE_APP_TABLE: negotiated APP configuration
+ * @DCB_ATTR_IEEE_PEER_ETS: peer ETS configuration - get only
+ * @DCB_ATTR_IEEE_PEER_PFC: peer PFC configuration - get only
+ * @DCB_ATTR_IEEE_PEER_APP: peer APP tlv - get only
+ */
 enum ieee_attrs {
        DCB_ATTR_IEEE_UNSPEC,
        DCB_ATTR_IEEE_ETS,
        DCB_ATTR_IEEE_PFC,
        DCB_ATTR_IEEE_APP_TABLE,
+       DCB_ATTR_IEEE_PEER_ETS,
+       DCB_ATTR_IEEE_PEER_PFC,
+       DCB_ATTR_IEEE_PEER_APP,
        __DCB_ATTR_IEEE_MAX
 };
 #define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1)
@@ -246,6 +325,31 @@ enum ieee_attrs_app {
 };
 #define DCB_ATTR_IEEE_APP_MAX (__DCB_ATTR_IEEE_APP_MAX - 1)
 
+/**
+ * enum cee_attrs - CEE DCBX get attributes
+ *
+ * @DCB_ATTR_CEE_UNSPEC: unspecified
+ * @DCB_ATTR_CEE_PEER_PG: peer PG configuration - get only
+ * @DCB_ATTR_CEE_PEER_PFC: peer PFC configuration - get only
+ * @DCB_ATTR_CEE_PEER_APP_TABLE: peer APP tlv - get only
+ */
+enum cee_attrs {
+       DCB_ATTR_CEE_UNSPEC,
+       DCB_ATTR_CEE_PEER_PG,
+       DCB_ATTR_CEE_PEER_PFC,
+       DCB_ATTR_CEE_PEER_APP_TABLE,
+       __DCB_ATTR_CEE_MAX
+};
+#define DCB_ATTR_CEE_MAX (__DCB_ATTR_CEE_MAX - 1)
+
+enum peer_app_attr {
+       DCB_ATTR_CEE_PEER_APP_UNSPEC,
+       DCB_ATTR_CEE_PEER_APP_INFO,
+       DCB_ATTR_CEE_PEER_APP,
+       __DCB_ATTR_CEE_PEER_APP_MAX
+};
+#define DCB_ATTR_CEE_PEER_APP_MAX (__DCB_ATTR_CEE_PEER_APP_MAX - 1)
+
 /**
  * enum dcbnl_pfc_attrs - DCB Priority Flow Control user priority nested attrs
  *
index 010e2d87ed7568ea1019f57447f671666e56fbb3..d638e85dc501f98a8182b4063a2bfe7fde77524a 100644 (file)
@@ -279,8 +279,6 @@ enum dccp_state {
        DCCP_MAX_STATES
 };
 
-#define DCCP_STATE_MASK 0x1f
-
 enum {
        DCCPF_OPEN            = TCPF_ESTABLISHED,
        DCCPF_REQUESTING      = TCPF_SYN_SENT,
index 1908929204a9eb284a11eaabcd7959ee3a1614a8..aac3e2eeb4fd626565462379c0acd7fb4d1e1c66 100644 (file)
@@ -251,6 +251,7 @@ enum ethtool_stringset {
        ETH_SS_STATS,
        ETH_SS_PRIV_FLAGS,
        ETH_SS_NTUPLE_FILTERS,
+       ETH_SS_FEATURES,
 };
 
 /* for passing string sets for data tagging */
@@ -523,6 +524,92 @@ struct ethtool_flash {
        char    data[ETHTOOL_FLASH_MAX_FILENAME];
 };
 
+/* for returning and changing feature sets */
+
+/**
+ * struct ethtool_get_features_block - block with state of 32 features
+ * @available: mask of changeable features
+ * @requested: mask of features requested to be enabled if possible
+ * @active: mask of currently enabled features
+ * @never_changed: mask of features not changeable for any device
+ */
+struct ethtool_get_features_block {
+       __u32   available;
+       __u32   requested;
+       __u32   active;
+       __u32   never_changed;
+};
+
+/**
+ * struct ethtool_gfeatures - command to get state of device's features
+ * @cmd: command number = %ETHTOOL_GFEATURES
+ * @size: in: number of elements in the features[] array;
+ *       out: number of elements in features[] needed to hold all features
+ * @features: state of features
+ */
+struct ethtool_gfeatures {
+       __u32   cmd;
+       __u32   size;
+       struct ethtool_get_features_block features[0];
+};
+
+/**
+ * struct ethtool_set_features_block - block with request for 32 features
+ * @valid: mask of features to be changed
+ * @requested: values of features to be changed
+ */
+struct ethtool_set_features_block {
+       __u32   valid;
+       __u32   requested;
+};
+
+/**
+ * struct ethtool_sfeatures - command to request change in device's features
+ * @cmd: command number = %ETHTOOL_SFEATURES
+ * @size: array size of the features[] array
+ * @features: feature change masks
+ */
+struct ethtool_sfeatures {
+       __u32   cmd;
+       __u32   size;
+       struct ethtool_set_features_block features[0];
+};
+
+/*
+ * %ETHTOOL_SFEATURES changes features present in features[].valid to the
+ * values of corresponding bits in features[].requested. Bits in .requested
+ * not set in .valid or not changeable are ignored.
+ *
+ * Returns %EINVAL when .valid contains undefined or never-changeable bits
+ * or size is not equal to the required number of features words (32-bit blocks).
+ * Returns >= 0 if the request was completed; bits set in the value mean:
+ *   %ETHTOOL_F_UNSUPPORTED - there were bits set in .valid that are not
+ *     changeable (not present in %ETHTOOL_GFEATURES' features[].available);
+ *     those bits were ignored.
+ *   %ETHTOOL_F_WISH - some or all changes requested were recorded but the
+ *      resulting state of bits masked by .valid is not equal to .requested.
+ *      Probably there are other device-specific constraints on some features
+ *      in the set. When %ETHTOOL_F_UNSUPPORTED is set, .valid is considered
+ *      here as though ignored bits were cleared.
+ *   %ETHTOOL_F_COMPAT - some or all changes requested were made by calling
+ *      compatibility functions. The requested offload state cannot be properly
+ *      managed by the kernel.
+ *
+ * The meaning of bits in the masks is obtained via the %ETHTOOL_GSSET_INFO
+ * (number of bits in the arrays - always a multiple of 32) and
+ * %ETHTOOL_GSTRINGS commands for the ETH_SS_FEATURES string set. The first
+ * entry in the table corresponds to the least significant bit of features[0].
+ * Empty strings mark undefined features.
+ */
+enum ethtool_sfeatures_retval_bits {
+       ETHTOOL_F_UNSUPPORTED__BIT,
+       ETHTOOL_F_WISH__BIT,
+       ETHTOOL_F_COMPAT__BIT,
+};
+
+#define ETHTOOL_F_UNSUPPORTED   (1 << ETHTOOL_F_UNSUPPORTED__BIT)
+#define ETHTOOL_F_WISH          (1 << ETHTOOL_F_WISH__BIT)
+#define ETHTOOL_F_COMPAT        (1 << ETHTOOL_F_COMPAT__BIT)
+
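
A userspace sketch of the new ioctl path: query the feature words with ETHTOOL_GFEATURES over SIOCETHTOOL. The "eth0" name and the two 32-bit blocks are arbitrary choices; a real tool would size the array via ETHTOOL_GSSET_INFO for the ETH_SS_FEATURES string set as described above, and ETHTOOL_SFEATURES follows the same ioctl path with struct ethtool_sfeatures, its return value carrying the ETHTOOL_F_* bits.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_gfeatures *gf;
	struct ifreq ifr;
	int fd;

	/* room for two 32-bit feature blocks after the fixed header */
	gf = calloc(1, sizeof(*gf) + 2 * sizeof(gf->features[0]));
	if (!gf)
		return 1;
	gf->cmd = ETHTOOL_GFEATURES;
	gf->size = 2;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)gf;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GFEATURES");
		return 1;
	}
	printf("block 0: active %#x, available %#x\n",
	       gf->features[0].active, gf->features[0].available);
	close(fd);
	free(gf);
	return 0;
}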
 #ifdef __KERNEL__
 
 #include <linux/rculist.h>
@@ -543,7 +630,6 @@ struct net_device;
 
 /* Some generic methods drivers may use in their ethtool_ops */
 u32 ethtool_op_get_link(struct net_device *dev);
-u32 ethtool_op_get_rx_csum(struct net_device *dev);
 u32 ethtool_op_get_tx_csum(struct net_device *dev);
 int ethtool_op_set_tx_csum(struct net_device *dev, u32 data);
 int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data);
@@ -744,6 +830,9 @@ struct ethtool_ops {
 #define ETHTOOL_GRXFHINDIR     0x00000038 /* Get RX flow hash indir'n table */
 #define ETHTOOL_SRXFHINDIR     0x00000039 /* Set RX flow hash indir'n table */
 
+#define ETHTOOL_GFEATURES      0x0000003a /* Get device offload settings */
+#define ETHTOOL_SFEATURES      0x0000003b /* Change device offload settings */
+
 /* compatibility with older code */
 #define SPARC_ETH_GSET         ETHTOOL_GSET
 #define SPARC_ETH_SSET         ETHTOOL_SSET
index 123959927745b7934cacea3e1aacf5c6303bcb1c..3bc63e6a02f7390909cd128fbd1fceb4c41c7df7 100644 (file)
                                         * release skb->dst
                                         */
 #define IFF_DONT_BRIDGE 0x800          /* disallow bridging this ether dev */
-#define IFF_IN_NETPOLL 0x1000          /* whether we are processing netpoll */
-#define IFF_DISABLE_NETPOLL    0x2000  /* disable netpoll at run-time */
-#define IFF_MACVLAN_PORT       0x4000  /* device used as macvlan port */
-#define IFF_BRIDGE_PORT        0x8000          /* device used as bridge port */
-#define IFF_OVS_DATAPATH       0x10000 /* device used as Open vSwitch
+#define IFF_DISABLE_NETPOLL    0x1000  /* disable netpoll at run-time */
+#define IFF_MACVLAN_PORT       0x2000  /* device used as macvlan port */
+#define IFF_BRIDGE_PORT        0x4000          /* device used as bridge port */
+#define IFF_OVS_DATAPATH       0x8000  /* device used as Open vSwitch
                                         * datapath port */
 
 #define IF_GET_IFACE   0x0001          /* for querying only */
index 6485d2a89bec2149c5f019ff01ce4c27f3d27001..f4a2e6b1b864c5d8d83caffd2343c2cf12fe3b84 100644 (file)
@@ -135,6 +135,7 @@ enum {
        IFLA_VF_PORTS,
        IFLA_PORT_SELF,
        IFLA_AF_SPEC,
+       IFLA_GROUP,             /* Group the device belongs to */
        __IFLA_MAX
 };
 
index ae8fdc54e0c06941356de75606fc186f7cc63507..5f8146695b7ff199b3135c89a079b4649d5cf269 100644 (file)
@@ -144,6 +144,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
 #define IN_DEV_ARP_NOTIFY(in_dev)      IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
 
 struct in_ifaddr {
+       struct hlist_node       hash;
        struct in_ifaddr        *ifa_next;
        struct in_device        *ifa_dev;
        struct rcu_head         rcu_head;
index 55e0d4253e4927eb67254f38137b2a9e787afa9d..63c5ad78e37ca89c3e9faeed0492f6d060e2d64b 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/smp.h>
 #include <linux/percpu.h>
 #include <linux/hrtimer.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
 
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
@@ -240,6 +242,35 @@ extern int irq_can_set_affinity(unsigned int irq);
 extern int irq_select_affinity(unsigned int irq);
 
 extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
+
+/**
+ * struct irq_affinity_notify - context for notification of IRQ affinity changes
+ * @irq:               Interrupt to which notification applies
+ * @kref:              Reference count, for internal use
+ * @work:              Work item, for internal use
+ * @notify:            Function to be called on change.  This will be
+ *                     called in process context.
+ * @release:           Function to be called on release.  This will be
+ *                     called in process context.  Once registered, the
+ *                     structure must only be freed when this function is
+ *                     called or later.
+ */
+struct irq_affinity_notify {
+       unsigned int irq;
+       struct kref kref;
+       struct work_struct work;
+       void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
+       void (*release)(struct kref *ref);
+};
+
+extern int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
+
+static inline void irq_run_affinity_notifiers(void)
+{
+       flush_scheduled_work();
+}
+
 #else /* CONFIG_SMP */
 
 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
@@ -255,7 +286,7 @@ static inline int irq_can_set_affinity(unsigned int irq)
 static inline int irq_select_affinity(unsigned int irq)  { return 0; }
 
 static inline int irq_set_affinity_hint(unsigned int irq,
-                                        const struct cpumask *m)
+                                       const struct cpumask *m)
 {
        return -EINVAL;
 }
index 5f43a3b2e3ad7149c13f2552987c8b5ce1749fcb..4deb3834d62c5621c2e3bc94869fe17198499d19 100644 (file)
 #define IP_VS_CONN_F_TEMPLATE  0x1000          /* template, not connection */
 #define IP_VS_CONN_F_ONE_PACKET        0x2000          /* forward only one packet */
 
+#define IP_VS_CONN_F_BACKUP_MASK (IP_VS_CONN_F_FWD_MASK | \
+                                 IP_VS_CONN_F_NOOUTPUT | \
+                                 IP_VS_CONN_F_INACTIVE | \
+                                 IP_VS_CONN_F_SEQ_MASK | \
+                                 IP_VS_CONN_F_NO_CPORT | \
+                                 IP_VS_CONN_F_TEMPLATE \
+                                )
+
 /* Flags that are not sent to backup server start from bit 16 */
 #define IP_VS_CONN_F_NFCT      (1 << 16)       /* use netfilter conntrack */
 
index c1a95b7b58de736f228cfeef9bfe1f118e02bf29..bfef56dadddbe596c525ddc4b4059acb2b6f8805 100644 (file)
@@ -8,6 +8,7 @@
  * For now it's included from <linux/irq.h>
  */
 
+struct irq_affinity_notify;
 struct proc_dir_entry;
 struct timer_rand_state;
 /**
@@ -24,6 +25,7 @@ struct timer_rand_state;
  * @last_unhandled:    aging timer for unhandled count
  * @irqs_unhandled:    stats field for spurious unhandled interrupts
  * @lock:              locking for SMP
+ * @affinity_notify:   context for notification of affinity changes
  * @pending_mask:      pending rebalanced interrupts
  * @threads_active:    number of irqaction threads currently running
  * @wait_for_threads:  wait queue for sync_irq to wait for threaded handlers
@@ -70,6 +72,7 @@ struct irq_desc {
        raw_spinlock_t          lock;
 #ifdef CONFIG_SMP
        const struct cpumask    *affinity_hint;
+       struct irq_affinity_notify *affinity_notify;
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_var_t           pending_mask;
 #endif
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
new file mode 100644 (file)
index 0000000..dd8da34
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef _MICREL_PHY_H
+#define _MICREL_PHY_H
+
+#define MICREL_PHY_ID_MASK     0x00fffff0
+
+#define PHY_ID_KSZ9021         0x00221611
+#define PHY_ID_KS8737          0x00221720
+#define PHY_ID_KS8041          0x00221510
+#define PHY_ID_KS8051          0x00221550
+/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */
+#define PHY_ID_KS8001          0x0022161A
+
+/* struct phy_device dev_flags definitions */
+#define MICREL_PHY_50MHZ_CLK   0x00000001
+
+#endif /* _MICREL_PHY_H */
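For illustration, a PHY driver entry would typically match against these IDs using the mask; the initializer below is a hedged, incomplete sketch (callbacks omitted, name invented), not the actual Micrel driver.

#include <linux/phy.h>
#include <linux/micrel_phy.h>

static struct phy_driver ksz9021_sketch = {
        .phy_id         = PHY_ID_KSZ9021,
        .phy_id_mask    = MICREL_PHY_ID_MASK,
        .name           = "Micrel KSZ9021 (sketch)",
        /* a board needing the 50MHz RMII reference clock could set
         * phydev->dev_flags |= MICREL_PHY_50MHZ_CLK before configuration */
};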
index 16faa130088c298fe8540b7fa29d365d0ea579b7..94de83c0f877629a4d25d8fedfd8fff290550e9e 100644 (file)
@@ -118,6 +118,7 @@ enum sock_shutdown_cmd {
 };
 
 struct socket_wq {
+       /* Note: wait MUST be first field of socket_wq */
        wait_queue_head_t       wait;
        struct fasync_struct    *fasync_list;
        struct rcu_head         rcu;
@@ -142,7 +143,7 @@ struct socket {
 
        unsigned long           flags;
 
-       struct socket_wq        *wq;
+       struct socket_wq __rcu  *wq;
 
        struct file             *file;
        struct sock             *sk;
index 71caf7a5e6c6cc59fd975f06a0ce6bb304288d52..7a071535c4c00ee92273735e6f618d3fffc3ef7c 100644 (file)
@@ -138,6 +138,9 @@ static inline bool dev_xmit_complete(int rc)
 
 #define MAX_ADDR_LEN   32              /* Largest hardware address length */
 
+/* Initial net device group. All devices belong to group 0 by default. */
+#define INIT_NETDEV_GROUP      0
+
 #ifdef  __KERNEL__
 /*
  *     Compute the worst case header length according to the protocols
@@ -551,14 +554,16 @@ struct rps_map {
 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
 
 /*
- * The rps_dev_flow structure contains the mapping of a flow to a CPU and the
- * tail pointer for that CPU's input queue at the time of last enqueue.
+ * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
+ * tail pointer for that CPU's input queue at the time of last enqueue, and
+ * a hardware filter index.
  */
 struct rps_dev_flow {
        u16 cpu;
-       u16 fill;
+       u16 filter;
        unsigned int last_qtail;
 };
+#define RPS_NO_FILTER 0xffff
 
 /*
  * The rps_dev_flow_table structure contains a table of flow mappings.
@@ -608,6 +613,11 @@ static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
 
 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
 
+#ifdef CONFIG_RFS_ACCEL
+extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
+                               u32 flow_id, u16 filter_id);
+#endif
+
 /* This structure contains an instance of an RX queue. */
 struct netdev_rx_queue {
        struct rps_map __rcu            *rps_map;
@@ -643,6 +653,14 @@ struct xps_dev_maps {
     (nr_cpu_ids * sizeof(struct xps_map *)))
 #endif /* CONFIG_XPS */
 
+#define TC_MAX_QUEUE   16
+#define TC_BITMASK     15
+/* HW offloaded queuing disciplines txq count and offset maps */
+struct netdev_tc_txq {
+       u16 count;
+       u16 offset;
+};
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
@@ -753,6 +771,38 @@ struct xps_dev_maps {
  * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
  *                       struct nlattr *port[]);
  * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
+ * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
+ *     Called to setup 'tc' number of traffic classes in the net device. This
+ *     is always called from the stack with the rtnl lock held and netif tx
+ *     queues stopped. This allows the netdevice to perform queue management
+ *     safely.
+ *
+ *     RFS acceleration.
+ * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
+ *                         u16 rxq_index, u32 flow_id);
+ *     Set hardware filter for RFS.  rxq_index is the target queue index;
+ *     flow_id is a flow ID to be passed to rps_may_expire_flow() later.
+ *     Return the filter ID on success, or a negative error code.
+ *
+ *     Slave management functions (for bridge, bonding, etc). User should
+ *     call netdev_set_master() to set dev->master properly.
+ * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
+ *     Called to make another netdev an underling.
+ *
+ * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
+ *     Called to release previously enslaved netdev.
+ *
+ *     Feature/offload setting functions.
+ * u32 (*ndo_fix_features)(struct net_device *dev, u32 features);
+ *     Adjusts the requested feature flags according to device-specific
+ *     constraints, and returns the resulting flags. Must not modify
+ *     the device state.
+ *
+ * int (*ndo_set_features)(struct net_device *dev, u32 features);
+ *     Called to update device configuration to new features. Passed
+ *     feature set might be less than what was returned by ndo_fix_features().
+ *     Must return >0 or -errno if it changed dev->features itself.
+ *
  */
 #define HAVE_NET_DEVICE_OPS
 struct net_device_ops {
@@ -811,6 +861,7 @@ struct net_device_ops {
                                                   struct nlattr *port[]);
        int                     (*ndo_get_vf_port)(struct net_device *dev,
                                                   int vf, struct sk_buff *skb);
+       int                     (*ndo_setup_tc)(struct net_device *dev, u8 tc);
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
        int                     (*ndo_fcoe_enable)(struct net_device *dev);
        int                     (*ndo_fcoe_disable)(struct net_device *dev);
@@ -820,11 +871,29 @@ struct net_device_ops {
                                                      unsigned int sgc);
        int                     (*ndo_fcoe_ddp_done)(struct net_device *dev,
                                                     u16 xid);
+       int                     (*ndo_fcoe_ddp_target)(struct net_device *dev,
+                                                      u16 xid,
+                                                      struct scatterlist *sgl,
+                                                      unsigned int sgc);
 #define NETDEV_FCOE_WWNN 0
 #define NETDEV_FCOE_WWPN 1
        int                     (*ndo_fcoe_get_wwn)(struct net_device *dev,
                                                    u64 *wwn, int type);
 #endif
+#ifdef CONFIG_RFS_ACCEL
+       int                     (*ndo_rx_flow_steer)(struct net_device *dev,
+                                                    const struct sk_buff *skb,
+                                                    u16 rxq_index,
+                                                    u32 flow_id);
+#endif
+       int                     (*ndo_add_slave)(struct net_device *dev,
+                                                struct net_device *slave_dev);
+       int                     (*ndo_del_slave)(struct net_device *dev,
+                                                struct net_device *slave_dev);
+       u32                     (*ndo_fix_features)(struct net_device *dev,
+                                                   u32 features);
+       int                     (*ndo_set_features)(struct net_device *dev,
+                                                   u32 features);
 };
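As a hedged sketch of the feature hooks documented above (the mydrv_* names and the register-write helper are invented): ndo_fix_features() only filters the requested bits, while ndo_set_features() applies the resulting delta.

#include <linux/netdevice.h>

/* mydrv_hw_set_rxcsum() stands in for a device-specific register write */
static void mydrv_hw_set_rxcsum(struct net_device *dev, bool on);

static u32 mydrv_fix_features(struct net_device *dev, u32 features)
{
        /* example constraint: no TSO without scatter/gather */
        if (!(features & NETIF_F_SG))
                features &= ~NETIF_F_ALL_TSO;
        return features;        /* must not modify dev->features here */
}

static int mydrv_set_features(struct net_device *dev, u32 features)
{
        u32 changed = dev->features ^ features;

        if (changed & NETIF_F_RXCSUM)
                mydrv_hw_set_rxcsum(dev, features & NETIF_F_RXCSUM);
        return 0;               /* 0: the core updates dev->features */
}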
 
 /*
@@ -876,8 +945,18 @@ struct net_device {
        struct list_head        napi_list;
        struct list_head        unreg_list;
 
-       /* Net device features */
-       unsigned long           features;
+       /* currently active device features */
+       u32                     features;
+       /* user-changeable features */
+       u32                     hw_features;
+       /* user-requested features */
+       u32                     wanted_features;
+       /* VLAN feature mask */
+       u32                     vlan_features;
+
+       /* Net device feature bits; if you change something,
+        * also update netdev_features_strings[] in ethtool.c */
+
 #define NETIF_F_SG             1       /* Scatter/gather IO. */
 #define NETIF_F_IP_CSUM                2       /* Can checksum TCP/UDP over IPv4. */
 #define NETIF_F_NO_CSUM                4       /* Does not require checksum, e.g. loopback. */
@@ -902,6 +981,7 @@ struct net_device {
 #define NETIF_F_FCOE_MTU       (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
 #define NETIF_F_NTUPLE         (1 << 27) /* N-tuple filters supported */
 #define NETIF_F_RXHASH         (1 << 28) /* Receive hashing offload */
+#define NETIF_F_RXCSUM         (1 << 29) /* Receive checksumming offload */
 
        /* Segmentation offload features */
 #define NETIF_F_GSO_SHIFT      16
@@ -913,6 +993,12 @@ struct net_device {
 #define NETIF_F_TSO6           (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
 #define NETIF_F_FSO            (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
 
+       /* Features valid for ethtool to change */
+       /* = all defined minus driver/device-class-related */
+#define NETIF_F_NEVER_CHANGE   (NETIF_F_HIGHDMA | NETIF_F_VLAN_CHALLENGED | \
+                                 NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
+#define NETIF_F_ETHTOOL_BITS   (0x3f3fffff & ~NETIF_F_NEVER_CHANGE)
+
        /* List of features with software fallbacks. */
 #define NETIF_F_GSO_SOFTWARE   (NETIF_F_TSO | NETIF_F_TSO_ECN | \
                                 NETIF_F_TSO6 | NETIF_F_UFO)
@@ -923,6 +1009,12 @@ struct net_device {
 #define NETIF_F_V6_CSUM                (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
 #define NETIF_F_ALL_CSUM       (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
 
+#define NETIF_F_ALL_TSO        (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
+#define NETIF_F_ALL_TX_OFFLOADS        (NETIF_F_ALL_CSUM | NETIF_F_SG | \
+                                NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+                                NETIF_F_SCTP_CSUM | NETIF_F_FCOE_CRC)
+
        /*
         * If one device supports one of these features, then enable them
         * for all in netdev_increment_features.
@@ -931,6 +1023,9 @@ struct net_device {
                                 NETIF_F_SG | NETIF_F_HIGHDMA |         \
                                 NETIF_F_FRAGLIST)
 
+       /* changeable features with no special hardware requirements */
+#define NETIF_F_SOFT_FEATURES  (NETIF_F_GSO | NETIF_F_GRO)
+
        /* Interface index. Unique device identifier    */
        int                     ifindex;
        int                     iflink;
@@ -1039,6 +1134,13 @@ struct net_device {
 
        /* Number of RX queues currently active in device */
        unsigned int            real_num_rx_queues;
+
+#ifdef CONFIG_RFS_ACCEL
+       /* CPU reverse-mapping for RX completion interrupts, indexed
+        * by RX queue number.  Assigned by driver.  This must only be
+        * set if the ndo_rx_flow_steer operation is defined. */
+       struct cpu_rmap         *rx_cpu_rmap;
+#endif
 #endif
 
        rx_handler_func_t __rcu *rx_handler;
@@ -1132,9 +1234,6 @@ struct net_device {
        /* rtnetlink link ops */
        const struct rtnl_link_ops *rtnl_link_ops;
 
-       /* VLAN feature mask */
-       unsigned long vlan_features;
-
        /* for setting kernel sock attribute on TCP connection setup */
 #define GSO_MAX_SIZE           65536
        unsigned int            gso_max_size;
@@ -1143,6 +1242,9 @@ struct net_device {
        /* Data Center Bridging netlink ops */
        const struct dcbnl_rtnl_ops *dcbnl_ops;
 #endif
+       u8 num_tc;
+       struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+       u8 prio_tc_map[TC_BITMASK + 1];
 
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
        /* max exchange id for FCoE LRO by ddp */
@@ -1153,11 +1255,65 @@ struct net_device {
 
        /* phy device may attach itself for hardware timestamping */
        struct phy_device *phydev;
+
+       /* group the device belongs to */
+       int group;
 };
 #define to_net_dev(d) container_of(d, struct net_device, dev)
 
 #define        NETDEV_ALIGN            32
 
+static inline
+int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
+{
+       return dev->prio_tc_map[prio & TC_BITMASK];
+}
+
+static inline
+int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
+{
+       if (tc >= dev->num_tc)
+               return -EINVAL;
+
+       dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
+       return 0;
+}
+
+static inline
+void netdev_reset_tc(struct net_device *dev)
+{
+       dev->num_tc = 0;
+       memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
+       memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
+}
+
+static inline
+int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
+{
+       if (tc >= dev->num_tc)
+               return -EINVAL;
+
+       dev->tc_to_txq[tc].count = count;
+       dev->tc_to_txq[tc].offset = offset;
+       return 0;
+}
+
+static inline
+int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
+{
+       if (num_tc > TC_MAX_QUEUE)
+               return -EINVAL;
+
+       dev->num_tc = num_tc;
+       return 0;
+}
+
+static inline
+int netdev_get_num_tc(struct net_device *dev)
+{
+       return dev->num_tc;
+}
+
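A minimal sketch of how a driver's ndo_setup_tc() might use the helpers above, assuming four hardware queues per traffic class (all values illustrative):

#include <linux/netdevice.h>

static int mydrv_setup_tc(struct net_device *dev, u8 num_tc)
{
        int i;

        if (!num_tc) {
                netdev_reset_tc(dev);
                return 0;
        }
        if (netdev_set_num_tc(dev, num_tc))
                return -EINVAL;
        /* give each class a contiguous block of four hardware queues */
        for (i = 0; i < num_tc; i++)
                netdev_set_tc_queue(dev, i, 4, i * 4);
        /* spread the 16 priorities round-robin over the classes */
        for (i = 0; i <= TC_BITMASK; i++)
                netdev_set_prio_tc_map(dev, i, i % num_tc);
        return 0;
}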
 static inline
 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
                                         unsigned int index)
@@ -1300,7 +1456,7 @@ struct packet_type {
                                         struct packet_type *,
                                         struct net_device *);
        struct sk_buff          *(*gso_segment)(struct sk_buff *skb,
-                                               int features);
+                                               u32 features);
        int                     (*gso_send_check)(struct sk_buff *skb);
        struct sk_buff          **(*gro_receive)(struct sk_buff **head,
                                               struct sk_buff *skb);
@@ -1345,7 +1501,7 @@ static inline struct net_device *next_net_device_rcu(struct net_device *dev)
        struct net *net;
 
        net = dev_net(dev);
-       lh = rcu_dereference(dev->dev_list.next);
+       lh = rcu_dereference(list_next_rcu(&dev->dev_list));
        return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 }
 
@@ -1355,6 +1511,13 @@ static inline struct net_device *first_net_device(struct net *net)
                net_device_entry(net->dev_base_head.next);
 }
 
+static inline struct net_device *first_net_device_rcu(struct net *net)
+{
+       struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
+
+       return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
+}
+
 extern int                     netdev_boot_setup_check(struct net_device *dev);
 extern unsigned long           netdev_boot_base(const char *prefix, int unit);
 extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
@@ -1606,8 +1769,7 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
 static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
 {
        if (WARN_ON(!dev_queue)) {
-               printk(KERN_INFO "netif_stop_queue() cannot be called before "
-                      "register_netdev()");
+               pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
                return;
        }
        set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
@@ -1844,6 +2006,7 @@ extern int                dev_set_alias(struct net_device *, const char *, size_t);
 extern int             dev_change_net_namespace(struct net_device *,
                                                 struct net *, const char *);
 extern int             dev_set_mtu(struct net_device *, int);
+extern void            dev_set_group(struct net_device *, int);
 extern int             dev_set_mac_address(struct net_device *,
                                            struct sockaddr *);
 extern int             dev_hard_start_xmit(struct sk_buff *skb,
@@ -2267,8 +2430,10 @@ extern int               netdev_max_backlog;
 extern int             netdev_tstamp_prequeue;
 extern int             weight_p;
 extern int             netdev_set_master(struct net_device *dev, struct net_device *master);
+extern int netdev_set_bond_master(struct net_device *dev,
+                                 struct net_device *master);
 extern int skb_checksum_help(struct sk_buff *skb);
-extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features);
 #ifdef CONFIG_BUG
 extern void netdev_rx_csum_fault(struct net_device *dev);
 #else
@@ -2295,22 +2460,26 @@ extern char *netdev_drivername(const struct net_device *dev, char *buffer, int l
 
 extern void linkwatch_run_queue(void);
 
-unsigned long netdev_increment_features(unsigned long all, unsigned long one,
-                                       unsigned long mask);
-unsigned long netdev_fix_features(unsigned long features, const char *name);
+static inline u32 netdev_get_wanted_features(struct net_device *dev)
+{
+       return (dev->features & ~dev->hw_features) | dev->wanted_features;
+}
+u32 netdev_increment_features(u32 all, u32 one, u32 mask);
+u32 netdev_fix_features(struct net_device *dev, u32 features);
+void netdev_update_features(struct net_device *dev);
 
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
                                        struct net_device *dev);
 
-int netif_skb_features(struct sk_buff *skb);
+u32 netif_skb_features(struct sk_buff *skb);
 
-static inline int net_gso_ok(int features, int gso_type)
+static inline int net_gso_ok(u32 features, int gso_type)
 {
        int feature = gso_type << NETIF_F_GSO_SHIFT;
        return (features & feature) == feature;
 }
 
-static inline int skb_gso_ok(struct sk_buff *skb, int features)
+static inline int skb_gso_ok(struct sk_buff *skb, u32 features)
 {
        return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
               (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
@@ -2328,15 +2497,9 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
        dev->gso_max_size = size;
 }
 
-extern int __skb_bond_should_drop(struct sk_buff *skb,
-                                 struct net_device *master);
-
-static inline int skb_bond_should_drop(struct sk_buff *skb,
-                                      struct net_device *master)
+static inline int netif_is_bond_slave(struct net_device *dev)
 {
-       if (master)
-               return __skb_bond_should_drop(skb, master);
-       return 0;
+       return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
 }
 
 extern struct pernet_operations __net_initdata loopback_net_ops;
@@ -2351,6 +2514,8 @@ static inline int dev_ethtool_get_settings(struct net_device *dev,
 
 static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
 {
+       if (dev->hw_features & NETIF_F_RXCSUM)
+               return !!(dev->features & NETIF_F_RXCSUM);
        if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
                return 0;
        return dev->ethtool_ops->get_rx_csum(dev);
index 1893837b39660821351c4e744a166f0983244f7d..eeec00abb6648911f2aaf2edca07b1edca99e3ba 100644 (file)
 #define NF_MAX_VERDICT NF_STOP
 
 /* we overload the higher bits for encoding auxiliary data such as the queue
- * number. Not nice, but better than additional function arguments. */
-#define NF_VERDICT_MASK 0x0000ffff
-#define NF_VERDICT_BITS 16
+ * number or errno values. Not nice, but better than additional function
+ * arguments. */
+#define NF_VERDICT_MASK 0x000000ff
+
+/* extra verdict flags have mask 0x0000ff00 */
+#define NF_VERDICT_FLAG_QUEUE_BYPASS   0x00008000
 
+/* queue number (NF_QUEUE) or errno (NF_DROP) */
 #define NF_VERDICT_QMASK 0xffff0000
 #define NF_VERDICT_QBITS 16
 
-#define NF_QUEUE_NR(x) ((((x) << NF_VERDICT_BITS) & NF_VERDICT_QMASK) | NF_QUEUE)
+#define NF_QUEUE_NR(x) ((((x) << 16) & NF_VERDICT_QMASK) | NF_QUEUE)
 
-#define NF_DROP_ERR(x) (((-x) << NF_VERDICT_BITS) | NF_DROP)
+#define NF_DROP_ERR(x) (((-x) << 16) | NF_DROP)
 
 /* only for userspace compatibility */
 #ifndef __KERNEL__
@@ -41,6 +45,9 @@
    <= 0x2000 is used for protocol-flags. */
 #define NFC_UNKNOWN 0x4000
 #define NFC_ALTERED 0x8000
+
+/* NF_VERDICT_BITS should be 8 now, but userspace might break if this changes */
+#define NF_VERDICT_BITS 16
 #endif
 
 enum nf_inet_hooks {
@@ -72,6 +79,10 @@ union nf_inet_addr {
 
 #ifdef __KERNEL__
 #ifdef CONFIG_NETFILTER
+static inline int NF_DROP_GETERR(int verdict)
+{
+       return -(verdict >> NF_VERDICT_QBITS);
+}
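A quick worked example of the packing above (using EPERM == 1):

/*   NF_DROP_ERR(-EPERM)        == (EPERM << 16) | NF_DROP  == 0x00010000
 *   NF_QUEUE_NR(5)             == (5 << 16) | NF_QUEUE     == 0x00050003
 *   NF_DROP_GETERR(0x00010000) == -(0x00010000 >> 16)      == -EPERM
 * i.e. the low byte carries the verdict while the upper 16 bits carry
 * the errno or queue number.
 */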
 
 static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
                                   const union nf_inet_addr *a2)
@@ -267,7 +278,7 @@ struct nf_afinfo {
        int             route_key_size;
 };
 
-extern const struct nf_afinfo *nf_afinfo[NFPROTO_NUMPROTO];
+extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO];
 static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family)
 {
        return rcu_dereference(nf_afinfo[family]);
@@ -357,9 +368,9 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
 #endif /*CONFIG_NETFILTER*/
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
+extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu;
 extern void nf_ct_attach(struct sk_buff *, struct sk_buff *);
-extern void (*nf_ct_destroy)(struct nf_conntrack *);
+extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
 #else
 static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
 #endif
index 9d40effe7ca70f2c111badc775884e9e0cc79226..15e83bf3dd58dc73dda0091c288cc8f0c3d696dc 100644 (file)
@@ -1,3 +1,5 @@
+header-y += ipset/
+
 header-y += nf_conntrack_common.h
 header-y += nf_conntrack_ftp.h
 header-y += nf_conntrack_sctp.h
@@ -9,6 +11,7 @@ header-y += nfnetlink_conntrack.h
 header-y += nfnetlink_log.h
 header-y += nfnetlink_queue.h
 header-y += x_tables.h
+header-y += xt_AUDIT.h
 header-y += xt_CHECKSUM.h
 header-y += xt_CLASSIFY.h
 header-y += xt_CONNMARK.h
@@ -34,6 +37,7 @@ header-y += xt_connmark.h
 header-y += xt_conntrack.h
 header-y += xt_cpu.h
 header-y += xt_dccp.h
+header-y += xt_devgroup.h
 header-y += xt_dscp.h
 header-y += xt_esp.h
 header-y += xt_hashlimit.h
@@ -54,7 +58,9 @@ header-y += xt_quota.h
 header-y += xt_rateest.h
 header-y += xt_realm.h
 header-y += xt_recent.h
+header-y += xt_set.h
 header-y += xt_sctp.h
+header-y += xt_socket.h
 header-y += xt_state.h
 header-y += xt_statistic.h
 header-y += xt_string.h
diff --git a/include/linux/netfilter/ipset/Kbuild b/include/linux/netfilter/ipset/Kbuild
new file mode 100644 (file)
index 0000000..601fe71
--- /dev/null
@@ -0,0 +1,4 @@
+header-y += ip_set.h
+header-y += ip_set_bitmap.h
+header-y += ip_set_hash.h
+header-y += ip_set_list.h
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
new file mode 100644 (file)
index 0000000..ec333d8
--- /dev/null
@@ -0,0 +1,452 @@
+#ifndef _IP_SET_H
+#define _IP_SET_H
+
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ *                         Patrick Schaaf <bof@bof.de>
+ *                         Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* The protocol version */
+#define IPSET_PROTOCOL         6
+
+/* The max length of strings including NUL: set and type identifiers */
+#define IPSET_MAXNAMELEN       32
+
+/* Message types and commands */
+enum ipset_cmd {
+       IPSET_CMD_NONE,
+       IPSET_CMD_PROTOCOL,     /* 1: Return protocol version */
+       IPSET_CMD_CREATE,       /* 2: Create a new (empty) set */
+       IPSET_CMD_DESTROY,      /* 3: Destroy an (empty) set */
+       IPSET_CMD_FLUSH,        /* 4: Remove all elements from a set */
+       IPSET_CMD_RENAME,       /* 5: Rename a set */
+       IPSET_CMD_SWAP,         /* 6: Swap two sets */
+       IPSET_CMD_LIST,         /* 7: List sets */
+       IPSET_CMD_SAVE,         /* 8: Save sets */
+       IPSET_CMD_ADD,          /* 9: Add an element to a set */
+       IPSET_CMD_DEL,          /* 10: Delete an element from a set */
+       IPSET_CMD_TEST,         /* 11: Test an element in a set */
+       IPSET_CMD_HEADER,       /* 12: Get set header data only */
+       IPSET_CMD_TYPE,         /* 13: Get set type */
+       IPSET_MSG_MAX,          /* Netlink message commands */
+
+       /* Commands in userspace: */
+       IPSET_CMD_RESTORE = IPSET_MSG_MAX, /* 14: Enter restore mode */
+       IPSET_CMD_HELP,         /* 15: Get help */
+       IPSET_CMD_VERSION,      /* 16: Get program version */
+       IPSET_CMD_QUIT,         /* 17: Quit from interactive mode */
+
+       IPSET_CMD_MAX,
+
+       IPSET_CMD_COMMIT = IPSET_CMD_MAX, /* 18: Commit buffered commands */
+};
+
+/* Attributes at command level */
+enum {
+       IPSET_ATTR_UNSPEC,
+       IPSET_ATTR_PROTOCOL,    /* 1: Protocol version */
+       IPSET_ATTR_SETNAME,     /* 2: Name of the set */
+       IPSET_ATTR_TYPENAME,    /* 3: Typename */
+       IPSET_ATTR_SETNAME2 = IPSET_ATTR_TYPENAME, /* Setname at rename/swap */
+       IPSET_ATTR_REVISION,    /* 4: Settype revision */
+       IPSET_ATTR_FAMILY,      /* 5: Settype family */
+       IPSET_ATTR_FLAGS,       /* 6: Flags at command level */
+       IPSET_ATTR_DATA,        /* 7: Nested attributes */
+       IPSET_ATTR_ADT,         /* 8: Multiple data containers */
+       IPSET_ATTR_LINENO,      /* 9: Restore lineno */
+       IPSET_ATTR_PROTOCOL_MIN, /* 10: Minimal supported version number */
+       IPSET_ATTR_REVISION_MIN = IPSET_ATTR_PROTOCOL_MIN, /* type rev min */
+       __IPSET_ATTR_CMD_MAX,
+};
+#define IPSET_ATTR_CMD_MAX     (__IPSET_ATTR_CMD_MAX - 1)
+
+/* CADT specific attributes */
+enum {
+       IPSET_ATTR_IP = IPSET_ATTR_UNSPEC + 1,
+       IPSET_ATTR_IP_FROM = IPSET_ATTR_IP,
+       IPSET_ATTR_IP_TO,       /* 2 */
+       IPSET_ATTR_CIDR,        /* 3 */
+       IPSET_ATTR_PORT,        /* 4 */
+       IPSET_ATTR_PORT_FROM = IPSET_ATTR_PORT,
+       IPSET_ATTR_PORT_TO,     /* 5 */
+       IPSET_ATTR_TIMEOUT,     /* 6 */
+       IPSET_ATTR_PROTO,       /* 7 */
+       IPSET_ATTR_CADT_FLAGS,  /* 8 */
+       IPSET_ATTR_CADT_LINENO = IPSET_ATTR_LINENO,     /* 9 */
+       /* Reserve empty slots */
+       IPSET_ATTR_CADT_MAX = 16,
+       /* Create-only specific attributes */
+       IPSET_ATTR_GC,
+       IPSET_ATTR_HASHSIZE,
+       IPSET_ATTR_MAXELEM,
+       IPSET_ATTR_NETMASK,
+       IPSET_ATTR_PROBES,
+       IPSET_ATTR_RESIZE,
+       IPSET_ATTR_SIZE,
+       /* Kernel-only */
+       IPSET_ATTR_ELEMENTS,
+       IPSET_ATTR_REFERENCES,
+       IPSET_ATTR_MEMSIZE,
+
+       __IPSET_ATTR_CREATE_MAX,
+};
+#define IPSET_ATTR_CREATE_MAX  (__IPSET_ATTR_CREATE_MAX - 1)
+
+/* ADT specific attributes */
+enum {
+       IPSET_ATTR_ETHER = IPSET_ATTR_CADT_MAX + 1,
+       IPSET_ATTR_NAME,
+       IPSET_ATTR_NAMEREF,
+       IPSET_ATTR_IP2,
+       IPSET_ATTR_CIDR2,
+       __IPSET_ATTR_ADT_MAX,
+};
+#define IPSET_ATTR_ADT_MAX     (__IPSET_ATTR_ADT_MAX - 1)
+
+/* IP specific attributes */
+enum {
+       IPSET_ATTR_IPADDR_IPV4 = IPSET_ATTR_UNSPEC + 1,
+       IPSET_ATTR_IPADDR_IPV6,
+       __IPSET_ATTR_IPADDR_MAX,
+};
+#define IPSET_ATTR_IPADDR_MAX  (__IPSET_ATTR_IPADDR_MAX - 1)
+
+/* Error codes */
+enum ipset_errno {
+       IPSET_ERR_PRIVATE = 4096,
+       IPSET_ERR_PROTOCOL,
+       IPSET_ERR_FIND_TYPE,
+       IPSET_ERR_MAX_SETS,
+       IPSET_ERR_BUSY,
+       IPSET_ERR_EXIST_SETNAME2,
+       IPSET_ERR_TYPE_MISMATCH,
+       IPSET_ERR_EXIST,
+       IPSET_ERR_INVALID_CIDR,
+       IPSET_ERR_INVALID_NETMASK,
+       IPSET_ERR_INVALID_FAMILY,
+       IPSET_ERR_TIMEOUT,
+       IPSET_ERR_REFERENCED,
+       IPSET_ERR_IPADDR_IPV4,
+       IPSET_ERR_IPADDR_IPV6,
+
+       /* Type specific error codes */
+       IPSET_ERR_TYPE_SPECIFIC = 4352,
+};
+
+/* Flags at command level */
+enum ipset_cmd_flags {
+       IPSET_FLAG_BIT_EXIST    = 0,
+       IPSET_FLAG_EXIST        = (1 << IPSET_FLAG_BIT_EXIST),
+};
+
+/* Flags at CADT attribute level */
+enum ipset_cadt_flags {
+       IPSET_FLAG_BIT_BEFORE   = 0,
+       IPSET_FLAG_BEFORE       = (1 << IPSET_FLAG_BIT_BEFORE),
+};
+
+/* Commands with settype-specific attributes */
+enum ipset_adt {
+       IPSET_ADD,
+       IPSET_DEL,
+       IPSET_TEST,
+       IPSET_ADT_MAX,
+       IPSET_CREATE = IPSET_ADT_MAX,
+       IPSET_CADT_MAX,
+};
+
+#ifdef __KERNEL__
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/vmalloc.h>
+#include <net/netlink.h>
+
+/* Sets are identified by an index in kernel space. Tweak with ip_set_id_t
+ * and IPSET_INVALID_ID if you want to increase the max number of sets.
+ */
+typedef u16 ip_set_id_t;
+
+#define IPSET_INVALID_ID               65535
+
+enum ip_set_dim {
+       IPSET_DIM_ZERO = 0,
+       IPSET_DIM_ONE,
+       IPSET_DIM_TWO,
+       IPSET_DIM_THREE,
+       /* Max dimension in elements.
+        * If changed, new revision of iptables match/target is required.
+        */
+       IPSET_DIM_MAX = 6,
+};
+
+/* Option flags for kernel operations */
+enum ip_set_kopt {
+       IPSET_INV_MATCH = (1 << IPSET_DIM_ZERO),
+       IPSET_DIM_ONE_SRC = (1 << IPSET_DIM_ONE),
+       IPSET_DIM_TWO_SRC = (1 << IPSET_DIM_TWO),
+       IPSET_DIM_THREE_SRC = (1 << IPSET_DIM_THREE),
+};
+
+/* Set features */
+enum ip_set_feature {
+       IPSET_TYPE_IP_FLAG = 0,
+       IPSET_TYPE_IP = (1 << IPSET_TYPE_IP_FLAG),
+       IPSET_TYPE_PORT_FLAG = 1,
+       IPSET_TYPE_PORT = (1 << IPSET_TYPE_PORT_FLAG),
+       IPSET_TYPE_MAC_FLAG = 2,
+       IPSET_TYPE_MAC = (1 << IPSET_TYPE_MAC_FLAG),
+       IPSET_TYPE_IP2_FLAG = 3,
+       IPSET_TYPE_IP2 = (1 << IPSET_TYPE_IP2_FLAG),
+       IPSET_TYPE_NAME_FLAG = 4,
+       IPSET_TYPE_NAME = (1 << IPSET_TYPE_NAME_FLAG),
+       /* Strictly speaking not a feature, but a flag for dumping:
+        * this settype must be dumped last */
+       IPSET_DUMP_LAST_FLAG = 7,
+       IPSET_DUMP_LAST = (1 << IPSET_DUMP_LAST_FLAG),
+};
+
+struct ip_set;
+
+typedef int (*ipset_adtfn)(struct ip_set *set, void *value, u32 timeout);
+
+/* Set type, variant-specific part */
+struct ip_set_type_variant {
+       /* Kernelspace: test/add/del entries
+        *              returns negative error code,
+        *                      zero for no match/success to add/delete
+        *                      positive for matching element */
+       int (*kadt)(struct ip_set *set, const struct sk_buff *skb,
+                   enum ipset_adt adt, u8 pf, u8 dim, u8 flags);
+
+       /* Userspace: test/add/del entries
+        *              returns negative error code,
+        *                      zero for no match/success to add/delete
+        *                      positive for matching element */
+       int (*uadt)(struct ip_set *set, struct nlattr *tb[],
+                   enum ipset_adt adt, u32 *lineno, u32 flags);
+
+       /* Low level add/del/test functions */
+       ipset_adtfn adt[IPSET_ADT_MAX];
+
+       /* When adding entries and set is full, try to resize the set */
+       int (*resize)(struct ip_set *set, bool retried);
+       /* Destroy the set */
+       void (*destroy)(struct ip_set *set);
+       /* Flush the elements */
+       void (*flush)(struct ip_set *set);
+       /* Expire entries before listing */
+       void (*expire)(struct ip_set *set);
+       /* List set header data */
+       int (*head)(struct ip_set *set, struct sk_buff *skb);
+       /* List elements */
+       int (*list)(const struct ip_set *set, struct sk_buff *skb,
+                   struct netlink_callback *cb);
+
+       /* Return true if "b" set is the same as "a"
+        * according to the create set parameters */
+       bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
+};
+
+/* The core set type structure */
+struct ip_set_type {
+       struct list_head list;
+
+       /* Typename */
+       char name[IPSET_MAXNAMELEN];
+       /* Protocol version */
+       u8 protocol;
+       /* Set features to control swapping */
+       u8 features;
+       /* Set type dimension */
+       u8 dimension;
+       /* Supported family: may be AF_UNSPEC for both AF_INET/AF_INET6 */
+       u8 family;
+       /* Type revision */
+       u8 revision;
+
+       /* Create set */
+       int (*create)(struct ip_set *set, struct nlattr *tb[], u32 flags);
+
+       /* Attribute policies */
+       const struct nla_policy create_policy[IPSET_ATTR_CREATE_MAX + 1];
+       const struct nla_policy adt_policy[IPSET_ATTR_ADT_MAX + 1];
+
+       /* Set this to THIS_MODULE if you are a module, otherwise NULL */
+       struct module *me;
+};
+
+/* register and unregister set type */
+extern int ip_set_type_register(struct ip_set_type *set_type);
+extern void ip_set_type_unregister(struct ip_set_type *set_type);
+
+/* A generic IP set */
+struct ip_set {
+       /* The name of the set */
+       char name[IPSET_MAXNAMELEN];
+       /* Lock protecting the set data */
+       rwlock_t lock;
+       /* References to the set */
+       atomic_t ref;
+       /* The core set type */
+       struct ip_set_type *type;
+       /* The type variant doing the real job */
+       const struct ip_set_type_variant *variant;
+       /* The actual INET family of the set */
+       u8 family;
+       /* The type specific data */
+       void *data;
+};
+
+/* register and unregister set references */
+extern ip_set_id_t ip_set_get_byname(const char *name, struct ip_set **set);
+extern void ip_set_put_byindex(ip_set_id_t index);
+extern const char *ip_set_name_byindex(ip_set_id_t index);
+extern ip_set_id_t ip_set_nfnl_get(const char *name);
+extern ip_set_id_t ip_set_nfnl_get_byindex(ip_set_id_t index);
+extern void ip_set_nfnl_put(ip_set_id_t index);
+
+/* API for iptables set match, and SET target */
+extern int ip_set_add(ip_set_id_t id, const struct sk_buff *skb,
+                     u8 family, u8 dim, u8 flags);
+extern int ip_set_del(ip_set_id_t id, const struct sk_buff *skb,
+                     u8 family, u8 dim, u8 flags);
+extern int ip_set_test(ip_set_id_t id, const struct sk_buff *skb,
+                      u8 family, u8 dim, u8 flags);
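A hedged sketch of how a netfilter match could consume this API; the info structure and helper below are illustrative, loosely modelled on the xt set match rather than copied from it.

#include <linux/skbuff.h>
#include <linux/netfilter/ipset/ip_set.h>

struct my_set_info {                    /* illustrative match data */
        ip_set_id_t index;              /* obtained via ip_set_nfnl_get() */
        u8 dim;                         /* e.g. IPSET_DIM_ONE */
        u8 flags;                       /* e.g. IPSET_DIM_ONE_SRC */
};

static bool my_set_mt(const struct sk_buff *skb,
                      const struct my_set_info *info, u8 family)
{
        /* a positive return value means the packet matched the set */
        return ip_set_test(info->index, skb, family,
                           info->dim, info->flags) > 0;
}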
+
+/* Utility functions */
+extern void *ip_set_alloc(size_t size);
+extern void ip_set_free(void *members);
+extern int ip_set_get_ipaddr4(struct nlattr *nla,  __be32 *ipaddr);
+extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
+
+static inline int
+ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr)
+{
+       __be32 ip;
+       int ret = ip_set_get_ipaddr4(nla, &ip);
+
+       if (ret)
+               return ret;
+       *ipaddr = ntohl(ip);
+       return 0;
+}
+
+/* Ignore IPSET_ERR_EXIST errors if asked to do so? */
+static inline bool
+ip_set_eexist(int ret, u32 flags)
+{
+       return ret == -IPSET_ERR_EXIST && (flags & IPSET_FLAG_EXIST);
+}
+
+/* Check the NLA_F_NET_BYTEORDER flag */
+static inline bool
+ip_set_attr_netorder(struct nlattr *tb[], int type)
+{
+       return tb[type] && (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
+}
+
+static inline bool
+ip_set_optattr_netorder(struct nlattr *tb[], int type)
+{
+       return !tb[type] || (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
+}
+
+/* Useful converters */
+static inline u32
+ip_set_get_h32(const struct nlattr *attr)
+{
+       return ntohl(nla_get_be32(attr));
+}
+
+static inline u16
+ip_set_get_h16(const struct nlattr *attr)
+{
+       return ntohs(nla_get_be16(attr));
+}
+
+#define ipset_nest_start(skb, attr) nla_nest_start(skb, attr | NLA_F_NESTED)
+#define ipset_nest_end(skb, start)  nla_nest_end(skb, start)
+
+#define NLA_PUT_IPADDR4(skb, type, ipaddr)                     \
+do {                                                           \
+       struct nlattr *__nested = ipset_nest_start(skb, type);  \
+                                                               \
+       if (!__nested)                                          \
+               goto nla_put_failure;                           \
+       NLA_PUT_NET32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);     \
+       ipset_nest_end(skb, __nested);                          \
+} while (0)
+
+#define NLA_PUT_IPADDR6(skb, type, ipaddrptr)                  \
+do {                                                           \
+       struct nlattr *__nested = ipset_nest_start(skb, type);  \
+                                                               \
+       if (!__nested)                                          \
+               goto nla_put_failure;                           \
+       NLA_PUT(skb, IPSET_ATTR_IPADDR_IPV6,                    \
+               sizeof(struct in6_addr), ipaddrptr);            \
+       ipset_nest_end(skb, __nested);                          \
+} while (0)
+
+/* Get address from skbuff */
+static inline __be32
+ip4addr(const struct sk_buff *skb, bool src)
+{
+       return src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
+}
+
+static inline void
+ip4addrptr(const struct sk_buff *skb, bool src, __be32 *addr)
+{
+       *addr = src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
+}
+
+static inline void
+ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
+{
+       memcpy(addr, src ? &ipv6_hdr(skb)->saddr : &ipv6_hdr(skb)->daddr,
+              sizeof(*addr));
+}
+
+/* Calculate the bytes required to store the inclusive range of a-b */
+static inline int
+bitmap_bytes(u32 a, u32 b)
+{
+       return 4 * ((((b - a + 8) / 8) + 3) / 4);
+}
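For example (pure arithmetic on the helper above):

/*   bitmap_bytes(0, 31): 32 bits -> 4 bytes -> already a multiple of 4 -> 4
 *   bitmap_bytes(0, 32): 33 bits -> 5 bytes -> rounded up to 8
 */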
+
+/* Interface to iptables/ip6tables */
+
+#define SO_IP_SET              83
+
+union ip_set_name_index {
+       char name[IPSET_MAXNAMELEN];
+       ip_set_id_t index;
+};
+
+#define IP_SET_OP_GET_BYNAME   0x00000006      /* Get set index by name */
+struct ip_set_req_get_set {
+       unsigned op;
+       unsigned version;
+       union ip_set_name_index set;
+};
+
+#define IP_SET_OP_GET_BYINDEX  0x00000007      /* Get set name by index */
+/* Uses ip_set_req_get_set */
+
+#define IP_SET_OP_VERSION      0x00000100      /* Ask kernel version */
+struct ip_set_req_version {
+       unsigned op;
+       unsigned version;
+};
+
+#endif /* __KERNEL__ */
+
+#endif /*_IP_SET_H */
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h
new file mode 100644 (file)
index 0000000..ec9d9be
--- /dev/null
@@ -0,0 +1,1074 @@
+#ifndef _IP_SET_AHASH_H
+#define _IP_SET_AHASH_H
+
+#include <linux/rcupdate.h>
+#include <linux/jhash.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+
+/* Hashing which uses arrays to resolve clashes. The hash table is resized
+ * (doubled) when searching becomes too long.
+ * Internally jhash is used with the assumption that the size of the
+ * stored data is a multiple of sizeof(u32). If storage supports timeout,
+ * the timeout field must be the last one in the data structure - that field
+ * is ignored when computing the hash key.
+ *
+ * Readers and resizing
+ *
+ * Resizing can be triggered by userspace command only, and those
+ * are serialized by the nfnl mutex. During resizing the set is
+ * read-locked, so the only possible concurrent operations are
+ * the kernel side readers. Those must be protected by proper RCU locking.
+ */
+
+/* Number of elements to store in an initial array block */
+#define AHASH_INIT_SIZE                        4
+/* Max number of elements to store in an array block */
+#define AHASH_MAX_SIZE                 (3*4)
+
+/* A hash bucket */
+struct hbucket {
+       void *value;            /* the array of the values */
+       u8 size;                /* size of the array */
+       u8 pos;                 /* position of the first free entry */
+};
+
+/* The hash table: the table size stored here in order to make resizing easy */
+struct htable {
+       u8 htable_bits;         /* size of hash table == 2^htable_bits */
+       struct hbucket bucket[0]; /* hashtable buckets */
+};
+
+#define hbucket(h, i)          &((h)->bucket[i])
+
+/* Book-keeping of the prefixes added to the set */
+struct ip_set_hash_nets {
+       u8 cidr;                /* the different cidr values in the set */
+       u32 nets;               /* number of elements per cidr */
+};
+
+/* The generic ip_set hash structure */
+struct ip_set_hash {
+       struct htable *table;   /* the hash table */
+       u32 maxelem;            /* max elements in the hash */
+       u32 elements;           /* current element (vs timeout) */
+       u32 initval;            /* random jhash init value */
+       u32 timeout;            /* timeout value, if enabled */
+       struct timer_list gc;   /* garbage collection when timeout enabled */
+#ifdef IP_SET_HASH_WITH_NETMASK
+       u8 netmask;             /* netmask value for subnets to store */
+#endif
+#ifdef IP_SET_HASH_WITH_NETS
+       struct ip_set_hash_nets nets[0]; /* book-keeping of prefixes */
+#endif
+};
+
+/* Compute htable_bits from the user input parameter hashsize */
+static u8
+htable_bits(u32 hashsize)
+{
+       /* Assume that hashsize == 2^htable_bits */
+       u8 bits = fls(hashsize - 1);
+       if (jhash_size(bits) != hashsize)
+               /* Round up to the first 2^n value */
+               bits = fls(hashsize);
+
+       return bits;
+}
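Two quick examples of the rounding above:

/*   htable_bits(1024): fls(1023) == 10 and jhash_size(10) == 1024, so 10.
 *   htable_bits(1000): jhash_size(10) != 1000, so round up: fls(1000) == 10.
 * Both cases allocate 2^10 == 1024 buckets.
 */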
+
+#ifdef IP_SET_HASH_WITH_NETS
+
+#define SET_HOST_MASK(family)  (family == AF_INET ? 32 : 128)
+
+/* Network cidr size bookkeeping when the hash stores different-sized
+ * networks */
+static void
+add_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask)
+{
+       u8 i;
+
+       ++h->nets[cidr-1].nets;
+
+       pr_debug("add_cidr added %u: %u\n", cidr, h->nets[cidr-1].nets);
+
+       if (h->nets[cidr-1].nets > 1)
+               return;
+
+       /* New cidr size */
+       for (i = 0; i < host_mask && h->nets[i].cidr; i++) {
+               /* Add in increasing prefix order, so larger cidr first */
+               if (h->nets[i].cidr < cidr)
+                       swap(h->nets[i].cidr, cidr);
+       }
+       if (i < host_mask)
+               h->nets[i].cidr = cidr;
+}
+
+static void
+del_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask)
+{
+       u8 i;
+
+       --h->nets[cidr-1].nets;
+
+       pr_debug("del_cidr deleted %u: %u\n", cidr, h->nets[cidr-1].nets);
+
+       if (h->nets[cidr-1].nets != 0)
+               return;
+
+       /* All entries with this cidr size deleted, so cleanup h->cidr[] */
+       for (i = 0; i < host_mask - 1 && h->nets[i].cidr; i++) {
+               if (h->nets[i].cidr == cidr)
+                       h->nets[i].cidr = cidr = h->nets[i+1].cidr;
+       }
+       h->nets[i - 1].cidr = 0;
+}
+#endif
+
+/* Destroy the hashtable part of the set */
+static void
+ahash_destroy(struct htable *t)
+{
+       struct hbucket *n;
+       u32 i;
+
+       for (i = 0; i < jhash_size(t->htable_bits); i++) {
+               n = hbucket(t, i);
+               if (n->size)
+                       /* FIXME: use slab cache */
+                       kfree(n->value);
+       }
+
+       ip_set_free(t);
+}
+
+/* Calculate the actual memory size of the set data */
+static size_t
+ahash_memsize(const struct ip_set_hash *h, size_t dsize, u8 host_mask)
+{
+       u32 i;
+       struct htable *t = h->table;
+       size_t memsize = sizeof(*h)
+                        + sizeof(*t)
+#ifdef IP_SET_HASH_WITH_NETS
+                        + sizeof(struct ip_set_hash_nets) * host_mask
+#endif
+                        + jhash_size(t->htable_bits) * sizeof(struct hbucket);
+
+       for (i = 0; i < jhash_size(t->htable_bits); i++)
+               memsize += t->bucket[i].size * dsize;
+
+       return memsize;
+}
+
+/* Flush a hash type of set: destroy all elements */
+static void
+ip_set_hash_flush(struct ip_set *set)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t = h->table;
+       struct hbucket *n;
+       u32 i;
+
+       for (i = 0; i < jhash_size(t->htable_bits); i++) {
+               n = hbucket(t, i);
+               if (n->size) {
+                       n->size = n->pos = 0;
+                       /* FIXME: use slab cache */
+                       kfree(n->value);
+               }
+       }
+#ifdef IP_SET_HASH_WITH_NETS
+       memset(h->nets, 0, sizeof(struct ip_set_hash_nets)
+                          * SET_HOST_MASK(set->family));
+#endif
+       h->elements = 0;
+}
+
+/* Destroy a hash type of set */
+static void
+ip_set_hash_destroy(struct ip_set *set)
+{
+       struct ip_set_hash *h = set->data;
+
+       if (with_timeout(h->timeout))
+               del_timer_sync(&h->gc);
+
+       ahash_destroy(h->table);
+       kfree(h);
+
+       set->data = NULL;
+}
+
+#define HKEY(data, initval, htable_bits)                                \
+(jhash2((u32 *)(data), sizeof(struct type_pf_elem)/sizeof(u32), initval) \
+       & jhash_mask(htable_bits))
+
+#endif /* _IP_SET_AHASH_H */
+
+#define CONCAT(a, b, c)                a##b##c
+#define TOKEN(a, b, c)         CONCAT(a, b, c)
+
+/* Type/family dependent function prototypes */
+
+#define type_pf_data_equal     TOKEN(TYPE, PF, _data_equal)
+#define type_pf_data_isnull    TOKEN(TYPE, PF, _data_isnull)
+#define type_pf_data_copy      TOKEN(TYPE, PF, _data_copy)
+#define type_pf_data_zero_out  TOKEN(TYPE, PF, _data_zero_out)
+#define type_pf_data_netmask   TOKEN(TYPE, PF, _data_netmask)
+#define type_pf_data_list      TOKEN(TYPE, PF, _data_list)
+#define type_pf_data_tlist     TOKEN(TYPE, PF, _data_tlist)
+
+#define type_pf_elem           TOKEN(TYPE, PF, _elem)
+#define type_pf_telem          TOKEN(TYPE, PF, _telem)
+#define type_pf_data_timeout   TOKEN(TYPE, PF, _data_timeout)
+#define type_pf_data_expired   TOKEN(TYPE, PF, _data_expired)
+#define type_pf_data_timeout_set TOKEN(TYPE, PF, _data_timeout_set)
+
+#define type_pf_elem_add       TOKEN(TYPE, PF, _elem_add)
+#define type_pf_add            TOKEN(TYPE, PF, _add)
+#define type_pf_del            TOKEN(TYPE, PF, _del)
+#define type_pf_test_cidrs     TOKEN(TYPE, PF, _test_cidrs)
+#define type_pf_test           TOKEN(TYPE, PF, _test)
+
+#define type_pf_elem_tadd      TOKEN(TYPE, PF, _elem_tadd)
+#define type_pf_del_telem      TOKEN(TYPE, PF, _ahash_del_telem)
+#define type_pf_expire         TOKEN(TYPE, PF, _expire)
+#define type_pf_tadd           TOKEN(TYPE, PF, _tadd)
+#define type_pf_tdel           TOKEN(TYPE, PF, _tdel)
+#define type_pf_ttest_cidrs    TOKEN(TYPE, PF, _ahash_ttest_cidrs)
+#define type_pf_ttest          TOKEN(TYPE, PF, _ahash_ttest)
+
+#define type_pf_resize         TOKEN(TYPE, PF, _resize)
+#define type_pf_tresize                TOKEN(TYPE, PF, _tresize)
+#define type_pf_flush          ip_set_hash_flush
+#define type_pf_destroy                ip_set_hash_destroy
+#define type_pf_head           TOKEN(TYPE, PF, _head)
+#define type_pf_list           TOKEN(TYPE, PF, _list)
+#define type_pf_tlist          TOKEN(TYPE, PF, _tlist)
+#define type_pf_same_set       TOKEN(TYPE, PF, _same_set)
+#define type_pf_kadt           TOKEN(TYPE, PF, _kadt)
+#define type_pf_uadt           TOKEN(TYPE, PF, _uadt)
+#define type_pf_gc             TOKEN(TYPE, PF, _gc)
+#define type_pf_gc_init                TOKEN(TYPE, PF, _gc_init)
+#define type_pf_variant                TOKEN(TYPE, PF, _variant)
+#define type_pf_tvariant       TOKEN(TYPE, PF, _tvariant)
+
+/* Flavour without timeout */
+
+/* Get the ith element from the array block n */
+#define ahash_data(n, i)       \
+       ((struct type_pf_elem *)((n)->value) + (i))
+
+/* Add an element to the hash table when resizing the set:
+ * the internal counters are left untouched. */
+static int
+type_pf_elem_add(struct hbucket *n, const struct type_pf_elem *value)
+{
+       if (n->pos >= n->size) {
+               void *tmp;
+
+               if (n->size >= AHASH_MAX_SIZE)
+                       /* Trigger rehashing */
+                       return -EAGAIN;
+
+               tmp = kzalloc((n->size + AHASH_INIT_SIZE)
+                             * sizeof(struct type_pf_elem),
+                             GFP_ATOMIC);
+               if (!tmp)
+                       return -ENOMEM;
+               if (n->size) {
+                       memcpy(tmp, n->value,
+                              sizeof(struct type_pf_elem) * n->size);
+                       kfree(n->value);
+               }
+               n->value = tmp;
+               n->size += AHASH_INIT_SIZE;
+       }
+       type_pf_data_copy(ahash_data(n, n->pos++), value);
+       return 0;
+}
+
+/* Resize a hash: create a new hash table with double the hash size
+ * and insert the elements into it. Repeat until we succeed or
+ * fail due to memory pressure. */
+static int
+type_pf_resize(struct ip_set *set, bool retried)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t, *orig = h->table;
+       u8 htable_bits = orig->htable_bits;
+       const struct type_pf_elem *data;
+       struct hbucket *n, *m;
+       u32 i, j;
+       int ret;
+
+retry:
+       ret = 0;
+       htable_bits++;
+       pr_debug("attempt to resize set %s from %u to %u, t %p\n",
+                set->name, orig->htable_bits, htable_bits, orig);
+       if (!htable_bits)
+               /* In case we have plenty of memory :-) */
+               return -IPSET_ERR_HASH_FULL;
+       t = ip_set_alloc(sizeof(*t)
+                        + jhash_size(htable_bits) * sizeof(struct hbucket));
+       if (!t)
+               return -ENOMEM;
+       t->htable_bits = htable_bits;
+
+       read_lock_bh(&set->lock);
+       for (i = 0; i < jhash_size(orig->htable_bits); i++) {
+               n = hbucket(orig, i);
+               for (j = 0; j < n->pos; j++) {
+                       data = ahash_data(n, j);
+                       m = hbucket(t, HKEY(data, h->initval, htable_bits));
+                       ret = type_pf_elem_add(m, data);
+                       if (ret < 0) {
+                               read_unlock_bh(&set->lock);
+                               ahash_destroy(t);
+                               if (ret == -EAGAIN)
+                                       goto retry;
+                               return ret;
+                       }
+               }
+       }
+
+       rcu_assign_pointer(h->table, t);
+       read_unlock_bh(&set->lock);
+
+       /* Give time to other readers of the set */
+       synchronize_rcu_bh();
+
+       pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
+                orig->htable_bits, orig, t->htable_bits, t);
+       ahash_destroy(orig);
+
+       return 0;
+}
+
+/* Add an element to a hash and update the internal counters on success,
+ * otherwise report the proper error code. */
+static int
+type_pf_add(struct ip_set *set, void *value, u32 timeout)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t;
+       const struct type_pf_elem *d = value;
+       struct hbucket *n;
+       int i, ret = 0;
+       u32 key;
+
+       if (h->elements >= h->maxelem)
+               return -IPSET_ERR_HASH_FULL;
+
+       rcu_read_lock_bh();
+       t = rcu_dereference_bh(h->table);
+       key = HKEY(value, h->initval, t->htable_bits);
+       n = hbucket(t, key);
+       for (i = 0; i < n->pos; i++)
+               if (type_pf_data_equal(ahash_data(n, i), d)) {
+                       ret = -IPSET_ERR_EXIST;
+                       goto out;
+               }
+
+       ret = type_pf_elem_add(n, value);
+       if (ret != 0)
+               goto out;
+
+#ifdef IP_SET_HASH_WITH_NETS
+       add_cidr(h, d->cidr, HOST_MASK);
+#endif
+       h->elements++;
+out:
+       rcu_read_unlock_bh();
+       return ret;
+}
+
+/* Delete an element from the hash: swap it with the last element
+ * and free up space if possible.
+ */
+static int
+type_pf_del(struct ip_set *set, void *value, u32 timeout)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t = h->table;
+       const struct type_pf_elem *d = value;
+       struct hbucket *n;
+       int i;
+       struct type_pf_elem *data;
+       u32 key;
+
+       key = HKEY(value, h->initval, t->htable_bits);
+       n = hbucket(t, key);
+       for (i = 0; i < n->pos; i++) {
+               data = ahash_data(n, i);
+               if (!type_pf_data_equal(data, d))
+                       continue;
+               if (i != n->pos - 1)
+                       /* Not last one */
+                       type_pf_data_copy(data, ahash_data(n, n->pos - 1));
+
+               n->pos--;
+               h->elements--;
+#ifdef IP_SET_HASH_WITH_NETS
+               del_cidr(h, d->cidr, HOST_MASK);
+#endif
+               if (n->pos + AHASH_INIT_SIZE < n->size) {
+                       void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
+                                           * sizeof(struct type_pf_elem),
+                                           GFP_ATOMIC);
+                       if (!tmp)
+                               return 0;
+                       n->size -= AHASH_INIT_SIZE;
+                       memcpy(tmp, n->value,
+                              n->size * sizeof(struct type_pf_elem));
+                       kfree(n->value);
+                       n->value = tmp;
+               }
+               return 0;
+       }
+
+       return -IPSET_ERR_EXIST;
+}
+
+#ifdef IP_SET_HASH_WITH_NETS
+
+/* Special test function which takes into account the different network
+ * sizes added to the set */
+static int
+type_pf_test_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t = h->table;
+       struct hbucket *n;
+       const struct type_pf_elem *data;
+       int i, j = 0;
+       u32 key;
+       u8 host_mask = SET_HOST_MASK(set->family);
+
+       pr_debug("test by nets\n");
+       for (; j < host_mask && h->nets[j].cidr; j++) {
+               type_pf_data_netmask(d, h->nets[j].cidr);
+               key = HKEY(d, h->initval, t->htable_bits);
+               n = hbucket(t, key);
+               for (i = 0; i < n->pos; i++) {
+                       data = ahash_data(n, i);
+                       if (type_pf_data_equal(data, d))
+                               return 1;
+               }
+       }
+       return 0;
+}
+#endif
+
+/* Test whether the element is added to the set */
+static int
+type_pf_test(struct ip_set *set, void *value, u32 timeout)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t = h->table;
+       struct type_pf_elem *d = value;
+       struct hbucket *n;
+       const struct type_pf_elem *data;
+       int i;
+       u32 key;
+
+#ifdef IP_SET_HASH_WITH_NETS
+       /* If we test an IP address and not a network address,
+        * try all possible network sizes */
+       if (d->cidr == SET_HOST_MASK(set->family))
+               return type_pf_test_cidrs(set, d, timeout);
+#endif
+
+       key = HKEY(d, h->initval, t->htable_bits);
+       n = hbucket(t, key);
+       for (i = 0; i < n->pos; i++) {
+               data = ahash_data(n, i);
+               if (type_pf_data_equal(data, d))
+                       return 1;
+       }
+       return 0;
+}
+
+/* Reply a HEADER request: fill out the header part of the set */
+static int
+type_pf_head(struct ip_set *set, struct sk_buff *skb)
+{
+       const struct ip_set_hash *h = set->data;
+       struct nlattr *nested;
+       size_t memsize;
+
+       read_lock_bh(&set->lock);
+       memsize = ahash_memsize(h, with_timeout(h->timeout)
+                                       ? sizeof(struct type_pf_telem)
+                                       : sizeof(struct type_pf_elem),
+                               set->family == AF_INET ? 32 : 128);
+       read_unlock_bh(&set->lock);
+
+       nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+       if (!nested)
+               goto nla_put_failure;
+       NLA_PUT_NET32(skb, IPSET_ATTR_HASHSIZE,
+                     htonl(jhash_size(h->table->htable_bits)));
+       NLA_PUT_NET32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem));
+#ifdef IP_SET_HASH_WITH_NETMASK
+       if (h->netmask != HOST_MASK)
+               NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask);
+#endif
+       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+                     htonl(atomic_read(&set->ref) - 1));
+       NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize));
+       if (with_timeout(h->timeout))
+               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout));
+       ipset_nest_end(skb, nested);
+
+       return 0;
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+/* Reply a LIST/SAVE request: dump the elements of the specified set */
+static int
+type_pf_list(const struct ip_set *set,
+            struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct ip_set_hash *h = set->data;
+       const struct htable *t = h->table;
+       struct nlattr *atd, *nested;
+       const struct hbucket *n;
+       const struct type_pf_elem *data;
+       u32 first = cb->args[2];
+       /* We assume that one hash bucket fits into one page */
+       void *incomplete;
+       int i;
+
+       atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+       if (!atd)
+               return -EMSGSIZE;
+       pr_debug("list hash set %s\n", set->name);
+       for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) {
+               incomplete = skb_tail_pointer(skb);
+               n = hbucket(t, cb->args[2]);
+               pr_debug("cb->args[2]: %lu, t %p n %p\n", cb->args[2], t, n);
+               for (i = 0; i < n->pos; i++) {
+                       data = ahash_data(n, i);
+                       pr_debug("list hash %lu hbucket %p i %u, data %p\n",
+                                cb->args[2], n, i, data);
+                       nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+                       if (!nested) {
+                               if (cb->args[2] == first) {
+                                       nla_nest_cancel(skb, atd);
+                                       return -EMSGSIZE;
+                               } else
+                                       goto nla_put_failure;
+                       }
+                       if (type_pf_data_list(skb, data))
+                               goto nla_put_failure;
+                       ipset_nest_end(skb, nested);
+               }
+       }
+       ipset_nest_end(skb, atd);
+       /* Set listing finished */
+       cb->args[2] = 0;
+
+       return 0;
+
+nla_put_failure:
+       nlmsg_trim(skb, incomplete);
+       ipset_nest_end(skb, atd);
+       if (unlikely(first == cb->args[2])) {
+               pr_warning("Can't list set %s: one bucket does not fit into "
+                          "a message. Please report it!\n", set->name);
+               cb->args[2] = 0;
+               return -EMSGSIZE;
+       }
+       return 0;
+}
+
+static int
+type_pf_kadt(struct ip_set *set, const struct sk_buff * skb,
+            enum ipset_adt adt, u8 pf, u8 dim, u8 flags);
+static int
+type_pf_uadt(struct ip_set *set, struct nlattr *tb[],
+            enum ipset_adt adt, u32 *lineno, u32 flags);
+
+static const struct ip_set_type_variant type_pf_variant = {
+       .kadt   = type_pf_kadt,
+       .uadt   = type_pf_uadt,
+       .adt    = {
+               [IPSET_ADD] = type_pf_add,
+               [IPSET_DEL] = type_pf_del,
+               [IPSET_TEST] = type_pf_test,
+       },
+       .destroy = type_pf_destroy,
+       .flush  = type_pf_flush,
+       .head   = type_pf_head,
+       .list   = type_pf_list,
+       .resize = type_pf_resize,
+       .same_set = type_pf_same_set,
+};
+
+/* Flavour with timeout support */
+
+#define ahash_tdata(n, i) \
+       (struct type_pf_elem *)((struct type_pf_telem *)((n)->value) + (i))
+
+static inline u32
+type_pf_data_timeout(const struct type_pf_elem *data)
+{
+       const struct type_pf_telem *tdata =
+               (const struct type_pf_telem *) data;
+
+       return tdata->timeout;
+}
+
+static inline bool
+type_pf_data_expired(const struct type_pf_elem *data)
+{
+       const struct type_pf_telem *tdata =
+               (const struct type_pf_telem *) data;
+
+       return ip_set_timeout_expired(tdata->timeout);
+}
+
+static inline void
+type_pf_data_timeout_set(struct type_pf_elem *data, u32 timeout)
+{
+       struct type_pf_telem *tdata = (struct type_pf_telem *) data;
+
+       tdata->timeout = ip_set_timeout_set(timeout);
+}
+
+static int
+type_pf_elem_tadd(struct hbucket *n, const struct type_pf_elem *value,
+                 u32 timeout)
+{
+       struct type_pf_elem *data;
+
+       if (n->pos >= n->size) {
+               void *tmp;
+
+               if (n->size >= AHASH_MAX_SIZE)
+                       /* Trigger rehashing */
+                       return -EAGAIN;
+
+               tmp = kzalloc((n->size + AHASH_INIT_SIZE)
+                             * sizeof(struct type_pf_telem),
+                             GFP_ATOMIC);
+               if (!tmp)
+                       return -ENOMEM;
+               if (n->size) {
+                       memcpy(tmp, n->value,
+                              sizeof(struct type_pf_telem) * n->size);
+                       kfree(n->value);
+               }
+               n->value = tmp;
+               n->size += AHASH_INIT_SIZE;
+       }
+       data = ahash_tdata(n, n->pos++);
+       type_pf_data_copy(data, value);
+       type_pf_data_timeout_set(data, timeout);
+       return 0;
+}
+
+/* Delete expired elements from the hashtable */
+static void
+type_pf_expire(struct ip_set_hash *h)
+{
+       struct htable *t = h->table;
+       struct hbucket *n;
+       struct type_pf_elem *data;
+       u32 i;
+       int j;
+
+       for (i = 0; i < jhash_size(t->htable_bits); i++) {
+               n = hbucket(t, i);
+               for (j = 0; j < n->pos; j++) {
+                       data = ahash_tdata(n, j);
+                       if (type_pf_data_expired(data)) {
+                               pr_debug("expired %u/%u\n", i, j);
+#ifdef IP_SET_HASH_WITH_NETS
+                               del_cidr(h, data->cidr, HOST_MASK);
+#endif
+                               if (j != n->pos - 1)
+                                       /* Not last one */
+                                       type_pf_data_copy(data,
+                                               ahash_tdata(n, n->pos - 1));
+                               n->pos--;
+                               h->elements--;
+                       }
+               }
+               if (n->pos + AHASH_INIT_SIZE < n->size) {
+                       void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
+                                           * sizeof(struct type_pf_telem),
+                                           GFP_ATOMIC);
+                       if (!tmp)
+                               /* Still try to delete expired elements */
+                               continue;
+                       n->size -= AHASH_INIT_SIZE;
+                       memcpy(tmp, n->value,
+                              n->size * sizeof(struct type_pf_telem));
+                       kfree(n->value);
+                       n->value = tmp;
+               }
+       }
+}
+
+static int
+type_pf_tresize(struct ip_set *set, bool retried)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t, *orig = h->table;
+       u8 htable_bits = orig->htable_bits;
+       const struct type_pf_elem *data;
+       struct hbucket *n, *m;
+       u32 i, j;
+       int ret;
+
+       /* Try to cleanup once */
+       if (!retried) {
+               i = h->elements;
+               write_lock_bh(&set->lock);
+               type_pf_expire(set->data);
+               write_unlock_bh(&set->lock);
+               if (h->elements <  i)
+                       return 0;
+       }
+
+retry:
+       ret = 0;
+       htable_bits++;
+       if (!htable_bits)
+               /* In case we have plenty of memory :-) */
+               return -IPSET_ERR_HASH_FULL;
+       t = ip_set_alloc(sizeof(*t)
+                        + jhash_size(htable_bits) * sizeof(struct hbucket));
+       if (!t)
+               return -ENOMEM;
+       t->htable_bits = htable_bits;
+
+       read_lock_bh(&set->lock);
+       for (i = 0; i < jhash_size(orig->htable_bits); i++) {
+               n = hbucket(orig, i);
+               for (j = 0; j < n->pos; j++) {
+                       data = ahash_tdata(n, j);
+                       m = hbucket(t, HKEY(data, h->initval, htable_bits));
+                       ret = type_pf_elem_tadd(m, data,
+                                               type_pf_data_timeout(data));
+                       if (ret < 0) {
+                               read_unlock_bh(&set->lock);
+                               ahash_destroy(t);
+                               if (ret == -EAGAIN)
+                                       goto retry;
+                               return ret;
+                       }
+               }
+       }
+
+       rcu_assign_pointer(h->table, t);
+       read_unlock_bh(&set->lock);
+
+       /* Give time to other readers of the set */
+       synchronize_rcu_bh();
+
+       ahash_destroy(orig);
+
+       return 0;
+}
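
The resize above grows the table one bit at a time: jhash_size(htable_bits) buckets means 1 << htable_bits buckets, so each pass doubles the table, re-inserts every element under the key computed with the new bit count, and retries with yet another bit if a destination bucket would overflow (-EAGAIN). Below is a deliberately simplified userspace sketch of that re-hash loop, not from the patch: one element per bucket, no locking, RCU or retry, purely to show the arithmetic.

#include <stdio.h>
#include <stdlib.h>

/* Toy mixing function in place of jhash; any hash works for the sketch. */
static unsigned int mix(unsigned int x)
{
        return x * 2654435761u;
}

#define HKEY(v, bits)   (mix(v) & ((1u << (bits)) - 1))

struct table {
        unsigned char bits;             /* 1 << bits buckets */
        int *bucket;                    /* at most one element per bucket (toy) */
};

static struct table *table_alloc(unsigned char bits)
{
        struct table *t = malloc(sizeof(*t));

        t->bits = bits;
        t->bucket = calloc(1u << bits, sizeof(int));
        return t;
}

/* Grow by one bit and re-hash every stored element into the new table. */
static struct table *table_resize(struct table *orig)
{
        struct table *t = table_alloc(orig->bits + 1);
        unsigned int i;

        for (i = 0; i < (1u << orig->bits); i++)
                if (orig->bucket[i])
                        t->bucket[HKEY(orig->bucket[i], t->bits)] =
                                orig->bucket[i];
        free(orig->bucket);
        free(orig);
        return t;
}

int main(void)
{
        struct table *t = table_alloc(2);

        t->bucket[HKEY(42, t->bits)] = 42;
        t = table_resize(t);
        printf("42 now hashes to bucket %u of %u\n",
               HKEY(42, t->bits), 1u << t->bits);
        free(t->bucket);
        free(t);
        return 0;
}
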
+
+static int
+type_pf_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t = h->table;
+       const struct type_pf_elem *d = value;
+       struct hbucket *n;
+       struct type_pf_elem *data;
+       int ret = 0, i, j = AHASH_MAX_SIZE + 1;
+       u32 key;
+
+       if (h->elements >= h->maxelem)
+               /* FIXME: when set is full, we slow down here */
+               type_pf_expire(h);
+       if (h->elements >= h->maxelem)
+               return -IPSET_ERR_HASH_FULL;
+
+       rcu_read_lock_bh();
+       t = rcu_dereference_bh(h->table);
+       key = HKEY(d, h->initval, t->htable_bits);
+       n = hbucket(t, key);
+       for (i = 0; i < n->pos; i++) {
+               data = ahash_tdata(n, i);
+               if (type_pf_data_equal(data, d)) {
+                       if (type_pf_data_expired(data))
+                               j = i;
+                       else {
+                               ret = -IPSET_ERR_EXIST;
+                               goto out;
+                       }
+               } else if (j == AHASH_MAX_SIZE + 1 &&
+                          type_pf_data_expired(data))
+                       j = i;
+       }
+       if (j != AHASH_MAX_SIZE + 1) {
+               data = ahash_tdata(n, j);
+#ifdef IP_SET_HASH_WITH_NETS
+               del_cidr(h, data->cidr, HOST_MASK);
+               add_cidr(h, d->cidr, HOST_MASK);
+#endif
+               type_pf_data_copy(data, d);
+               type_pf_data_timeout_set(data, timeout);
+               goto out;
+       }
+       ret = type_pf_elem_tadd(n, d, timeout);
+       if (ret != 0)
+               goto out;
+
+#ifdef IP_SET_HASH_WITH_NETS
+       add_cidr(h, d->cidr, HOST_MASK);
+#endif
+       h->elements++;
+out:
+       rcu_read_unlock_bh();
+       return ret;
+}
+
+static int
+type_pf_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t = h->table;
+       const struct type_pf_elem *d = value;
+       struct hbucket *n;
+       int i, ret = 0;
+       struct type_pf_elem *data;
+       u32 key;
+
+       key = HKEY(value, h->initval, t->htable_bits);
+       n = hbucket(t, key);
+       for (i = 0; i < n->pos; i++) {
+               data = ahash_tdata(n, i);
+               if (!type_pf_data_equal(data, d))
+                       continue;
+               if (type_pf_data_expired(data))
+                       ret = -IPSET_ERR_EXIST;
+               if (i != n->pos - 1)
+                       /* Not last one */
+                       type_pf_data_copy(data, ahash_tdata(n, n->pos - 1));
+
+               n->pos--;
+               h->elements--;
+#ifdef IP_SET_HASH_WITH_NETS
+               del_cidr(h, d->cidr, HOST_MASK);
+#endif
+               if (n->pos + AHASH_INIT_SIZE < n->size) {
+                       void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
+                                           * sizeof(struct type_pf_telem),
+                                           GFP_ATOMIC);
+                       if (!tmp)
+                               return 0;
+                       n->size -= AHASH_INIT_SIZE;
+                       memcpy(tmp, n->value,
+                              n->size * sizeof(struct type_pf_telem));
+                       kfree(n->value);
+                       n->value = tmp;
+               }
+               return 0;
+       }
+
+       return -IPSET_ERR_EXIST;
+}
+
+#ifdef IP_SET_HASH_WITH_NETS
+static int
+type_pf_ttest_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t = h->table;
+       struct type_pf_elem *data;
+       struct hbucket *n;
+       int i, j = 0;
+       u32 key;
+       u8 host_mask = SET_HOST_MASK(set->family);
+
+       for (; j < host_mask && h->nets[j].cidr; j++) {
+               type_pf_data_netmask(d, h->nets[j].cidr);
+               key = HKEY(d, h->initval, t->htable_bits);
+               n = hbucket(t, key);
+               for (i = 0; i < n->pos; i++) {
+                       data = ahash_tdata(n, i);
+                       if (type_pf_data_equal(data, d))
+                               return !type_pf_data_expired(data);
+               }
+       }
+       return 0;
+}
+#endif
+
+static int
+type_pf_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+       struct ip_set_hash *h = set->data;
+       struct htable *t = h->table;
+       struct type_pf_elem *data, *d = value;
+       struct hbucket *n;
+       int i;
+       u32 key;
+
+#ifdef IP_SET_HASH_WITH_NETS
+       if (d->cidr == SET_HOST_MASK(set->family))
+               return type_pf_ttest_cidrs(set, d, timeout);
+#endif
+       key = HKEY(d, h->initval, t->htable_bits);
+       n = hbucket(t, key);
+       for (i = 0; i < n->pos; i++) {
+               data = ahash_tdata(n, i);
+               if (type_pf_data_equal(data, d))
+                       return !type_pf_data_expired(data);
+       }
+       return 0;
+}
+
+static int
+type_pf_tlist(const struct ip_set *set,
+             struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct ip_set_hash *h = set->data;
+       const struct htable *t = h->table;
+       struct nlattr *atd, *nested;
+       const struct hbucket *n;
+       const struct type_pf_elem *data;
+       u32 first = cb->args[2];
+       /* We assume that one hash bucket fits into one page */
+       void *incomplete;
+       int i;
+
+       atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+       if (!atd)
+               return -EMSGSIZE;
+       for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) {
+               incomplete = skb_tail_pointer(skb);
+               n = hbucket(t, cb->args[2]);
+               for (i = 0; i < n->pos; i++) {
+                       data = ahash_tdata(n, i);
+                       pr_debug("list %p %u\n", n, i);
+                       if (type_pf_data_expired(data))
+                               continue;
+                       pr_debug("do list %p %u\n", n, i);
+                       nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+                       if (!nested) {
+                               if (cb->args[2] == first) {
+                                       nla_nest_cancel(skb, atd);
+                                       return -EMSGSIZE;
+                               } else
+                                       goto nla_put_failure;
+                       }
+                       if (type_pf_data_tlist(skb, data))
+                               goto nla_put_failure;
+                       ipset_nest_end(skb, nested);
+               }
+       }
+       ipset_nest_end(skb, atd);
+       /* Set listing finished */
+       cb->args[2] = 0;
+
+       return 0;
+
+nla_put_failure:
+       nlmsg_trim(skb, incomplete);
+       ipset_nest_end(skb, atd);
+       if (unlikely(first == cb->args[2])) {
+               pr_warning("Can't list set %s: one bucket does not fit into "
+                          "a message. Please report it!\n", set->name);
+               cb->args[2] = 0;
+               return -EMSGSIZE;
+       }
+       return 0;
+}
+
+static const struct ip_set_type_variant type_pf_tvariant = {
+       .kadt   = type_pf_kadt,
+       .uadt   = type_pf_uadt,
+       .adt    = {
+               [IPSET_ADD] = type_pf_tadd,
+               [IPSET_DEL] = type_pf_tdel,
+               [IPSET_TEST] = type_pf_ttest,
+       },
+       .destroy = type_pf_destroy,
+       .flush  = type_pf_flush,
+       .head   = type_pf_head,
+       .list   = type_pf_tlist,
+       .resize = type_pf_tresize,
+       .same_set = type_pf_same_set,
+};
+
+static void
+type_pf_gc(unsigned long ul_set)
+{
+       struct ip_set *set = (struct ip_set *) ul_set;
+       struct ip_set_hash *h = set->data;
+
+       pr_debug("called\n");
+       write_lock_bh(&set->lock);
+       type_pf_expire(h);
+       write_unlock_bh(&set->lock);
+
+       h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
+       add_timer(&h->gc);
+}
+
+static void
+type_pf_gc_init(struct ip_set *set)
+{
+       struct ip_set_hash *h = set->data;
+
+       init_timer(&h->gc);
+       h->gc.data = (unsigned long) set;
+       h->gc.function = type_pf_gc;
+       h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
+       add_timer(&h->gc);
+       pr_debug("gc initialized, run in every %u\n",
+                IPSET_GC_PERIOD(h->timeout));
+}
+
+#undef type_pf_data_equal
+#undef type_pf_data_isnull
+#undef type_pf_data_copy
+#undef type_pf_data_zero_out
+#undef type_pf_data_list
+#undef type_pf_data_tlist
+
+#undef type_pf_elem
+#undef type_pf_telem
+#undef type_pf_data_timeout
+#undef type_pf_data_expired
+#undef type_pf_data_netmask
+#undef type_pf_data_timeout_set
+
+#undef type_pf_elem_add
+#undef type_pf_add
+#undef type_pf_del
+#undef type_pf_test_cidrs
+#undef type_pf_test
+
+#undef type_pf_elem_tadd
+#undef type_pf_expire
+#undef type_pf_tadd
+#undef type_pf_tdel
+#undef type_pf_ttest_cidrs
+#undef type_pf_ttest
+
+#undef type_pf_resize
+#undef type_pf_tresize
+#undef type_pf_flush
+#undef type_pf_destroy
+#undef type_pf_head
+#undef type_pf_list
+#undef type_pf_tlist
+#undef type_pf_same_set
+#undef type_pf_kadt
+#undef type_pf_uadt
+#undef type_pf_gc
+#undef type_pf_gc_init
+#undef type_pf_variant
+#undef type_pf_tvariant
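
Everything above is written against type_pf_* names that are #undef'd at the end, which is what makes the file a reusable template: a concrete set type defines the type-specific hooks (type_pf_data_equal, type_pf_data_copy, the netmask helpers and so on) as macros pointing at its own functions, includes the template to stamp out the add/del/test/resize code, and the trailing #undef block clears the namespace for the next instantiation. The sketch below shows that define-the-hooks-then-include pattern in a single self-contained file via self-inclusion; the hook and function names here are invented for the illustration and are not ipset API.

/* template_demo.c -- build with: cc -o template_demo template_demo.c */
#ifndef TEMPLATE_PASS
#include <stdio.h>

/* Hook macros the "template" below expects, analogous to type_pf_data_* */
#define tmpl_elem               int
#define tmpl_elem_print(x)      printf("%d\n", (x))
#define tmpl_dump               dump_int        /* concrete name to generate */

#define TEMPLATE_PASS
#include __FILE__                               /* stamp out one instance */

int main(void)
{
        int v[] = { 1, 2, 3 };

        dump_int(v, 3);                         /* the generated instance */
        return 0;
}

#else   /* the "template": uses the hooks, then #undefs them */

static void tmpl_dump(tmpl_elem *v, int n)
{
        int i;

        for (i = 0; i < n; i++)
                tmpl_elem_print(v[i]);
}

#undef tmpl_elem
#undef tmpl_elem_print
#undef tmpl_dump

#endif
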
diff --git a/include/linux/netfilter/ipset/ip_set_bitmap.h b/include/linux/netfilter/ipset/ip_set_bitmap.h
new file mode 100644 (file)
index 0000000..61a9e87
--- /dev/null
@@ -0,0 +1,31 @@
+#ifndef __IP_SET_BITMAP_H
+#define __IP_SET_BITMAP_H
+
+/* Bitmap type specific error codes */
+enum {
+       /* The element is out of the range of the set */
+       IPSET_ERR_BITMAP_RANGE = IPSET_ERR_TYPE_SPECIFIC,
+       /* The range exceeds the size limit of the set type */
+       IPSET_ERR_BITMAP_RANGE_SIZE,
+};
+
+#ifdef __KERNEL__
+#define IPSET_BITMAP_MAX_RANGE 0x0000FFFF
+
+/* Common functions */
+
+static inline u32
+range_to_mask(u32 from, u32 to, u8 *bits)
+{
+       u32 mask = 0xFFFFFFFE;
+
+       *bits = 32;
+       while (--(*bits) > 0 && mask && (to & mask) != from)
+               mask <<= 1;
+
+       return mask;
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* __IP_SET_BITMAP_H */
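
range_to_mask() widens a /31 mask one bit at a time until `to & mask == from`, returning the mask and prefix length of the block that starts at `from` and contains `to`. A standalone userspace check of the same loop on 192.168.0.0 - 192.168.0.255, which should come out as a /24 (this check is an illustration, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Same loop as range_to_mask() above, in plain userspace C. */
static uint32_t range_to_mask(uint32_t from, uint32_t to, uint8_t *bits)
{
        uint32_t mask = 0xFFFFFFFE;

        *bits = 32;
        while (--(*bits) > 0 && mask && (to & mask) != from)
                mask <<= 1;

        return mask;
}

int main(void)
{
        uint8_t bits;
        uint32_t mask = range_to_mask(0xC0A80000, 0xC0A800FF, &bits);

        /* 192.168.0.0 - 192.168.0.255: expect 0xFFFFFF00 and /24 */
        printf("mask=0x%08X prefix=/%u\n", (unsigned)mask, (unsigned)bits);
        return 0;
}
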
diff --git a/include/linux/netfilter/ipset/ip_set_getport.h b/include/linux/netfilter/ipset/ip_set_getport.h
new file mode 100644 (file)
index 0000000..3882a81
--- /dev/null
@@ -0,0 +1,21 @@
+#ifndef _IP_SET_GETPORT_H
+#define _IP_SET_GETPORT_H
+
+extern bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
+                               __be16 *port, u8 *proto);
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+extern bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
+                               __be16 *port, u8 *proto);
+#else
+static inline bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
+                                      __be16 *port, u8 *proto)
+{
+       return false;
+}
+#endif
+
+extern bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src,
+                               __be16 *port);
+
+#endif /*_IP_SET_GETPORT_H*/
diff --git a/include/linux/netfilter/ipset/ip_set_hash.h b/include/linux/netfilter/ipset/ip_set_hash.h
new file mode 100644 (file)
index 0000000..b86f15c
--- /dev/null
@@ -0,0 +1,26 @@
+#ifndef __IP_SET_HASH_H
+#define __IP_SET_HASH_H
+
+/* Hash type specific error codes */
+enum {
+       /* Hash is full */
+       IPSET_ERR_HASH_FULL = IPSET_ERR_TYPE_SPECIFIC,
+       /* Null-valued element */
+       IPSET_ERR_HASH_ELEM,
+       /* Invalid protocol */
+       IPSET_ERR_INVALID_PROTO,
+       /* Protocol missing but must be specified */
+       IPSET_ERR_MISSING_PROTO,
+};
+
+#ifdef __KERNEL__
+
+#define IPSET_DEFAULT_HASHSIZE         1024
+#define IPSET_MIMINAL_HASHSIZE         64
+#define IPSET_DEFAULT_MAXELEM          65536
+#define IPSET_DEFAULT_PROBES           4
+#define IPSET_DEFAULT_RESIZE           100
+
+#endif /* __KERNEL__ */
+
+#endif /* __IP_SET_HASH_H */
diff --git a/include/linux/netfilter/ipset/ip_set_list.h b/include/linux/netfilter/ipset/ip_set_list.h
new file mode 100644 (file)
index 0000000..40a63f3
--- /dev/null
@@ -0,0 +1,27 @@
+#ifndef __IP_SET_LIST_H
+#define __IP_SET_LIST_H
+
+/* List type specific error codes */
+enum {
+       /* Set name to be added/deleted/tested does not exist. */
+       IPSET_ERR_NAME = IPSET_ERR_TYPE_SPECIFIC,
+       /* list:set type is not permitted to add */
+       IPSET_ERR_LOOP,
+       /* Missing reference set */
+       IPSET_ERR_BEFORE,
+       /* Reference set does not exist */
+       IPSET_ERR_NAMEREF,
+       /* Set is full */
+       IPSET_ERR_LIST_FULL,
+       /* Reference set is not added to the set */
+       IPSET_ERR_REF_EXIST,
+};
+
+#ifdef __KERNEL__
+
+#define IP_SET_LIST_DEFAULT_SIZE       8
+#define IP_SET_LIST_MIN_SIZE           4
+
+#endif /* __KERNEL__ */
+
+#endif /* __IP_SET_LIST_H */
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
new file mode 100644 (file)
index 0000000..9f30c5f
--- /dev/null
@@ -0,0 +1,127 @@
+#ifndef _IP_SET_TIMEOUT_H
+#define _IP_SET_TIMEOUT_H
+
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef __KERNEL__
+
+/* How often should the gc be run by default */
+#define IPSET_GC_TIME                  (3 * 60)
+
+/* Timeout period depending on the timeout value of the given set */
+#define IPSET_GC_PERIOD(timeout) \
+       ((timeout/3) ? min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1)
+
+/* Set is defined without timeout support: timeout value may be 0 */
+#define IPSET_NO_TIMEOUT       UINT_MAX
+
+#define with_timeout(timeout)  ((timeout) != IPSET_NO_TIMEOUT)
+
+static inline unsigned int
+ip_set_timeout_uget(struct nlattr *tb)
+{
+       unsigned int timeout = ip_set_get_h32(tb);
+
+       /* Userspace supplied TIMEOUT parameter: adjust crazy size */
+       return timeout == IPSET_NO_TIMEOUT ? IPSET_NO_TIMEOUT - 1 : timeout;
+}
+
+#ifdef IP_SET_BITMAP_TIMEOUT
+
+/* Bitmap specific timeout constants and macros for the entries */
+
+/* Bitmap entry is unset */
+#define IPSET_ELEM_UNSET       0
+/* Bitmap entry is set with no timeout value */
+#define IPSET_ELEM_PERMANENT   (UINT_MAX/2)
+
+static inline bool
+ip_set_timeout_test(unsigned long timeout)
+{
+       return timeout != IPSET_ELEM_UNSET &&
+              (timeout == IPSET_ELEM_PERMANENT ||
+               time_after(timeout, jiffies));
+}
+
+static inline bool
+ip_set_timeout_expired(unsigned long timeout)
+{
+       return timeout != IPSET_ELEM_UNSET &&
+              timeout != IPSET_ELEM_PERMANENT &&
+              time_before(timeout, jiffies);
+}
+
+static inline unsigned long
+ip_set_timeout_set(u32 timeout)
+{
+       unsigned long t;
+
+       if (!timeout)
+               return IPSET_ELEM_PERMANENT;
+
+       t = timeout * HZ + jiffies;
+       if (t == IPSET_ELEM_UNSET || t == IPSET_ELEM_PERMANENT)
+               /* Bingo! */
+               t++;
+
+       return t;
+}
+
+static inline u32
+ip_set_timeout_get(unsigned long timeout)
+{
+       return timeout == IPSET_ELEM_PERMANENT ? 0 : (timeout - jiffies)/HZ;
+}
+
+#else
+
+/* Hash specific timeout constants and macros for the entries */
+
+/* Hash entry is set with no timeout value */
+#define IPSET_ELEM_PERMANENT   0
+
+static inline bool
+ip_set_timeout_test(unsigned long timeout)
+{
+       return timeout == IPSET_ELEM_PERMANENT ||
+              time_after(timeout, jiffies);
+}
+
+static inline bool
+ip_set_timeout_expired(unsigned long timeout)
+{
+       return timeout != IPSET_ELEM_PERMANENT &&
+              time_before(timeout, jiffies);
+}
+
+static inline unsigned long
+ip_set_timeout_set(u32 timeout)
+{
+       unsigned long t;
+
+       if (!timeout)
+               return IPSET_ELEM_PERMANENT;
+
+       t = timeout * HZ + jiffies;
+       if (t == IPSET_ELEM_PERMANENT)
+               /* Bingo! :-) */
+               t++;
+
+       return t;
+}
+
+static inline u32
+ip_set_timeout_get(unsigned long timeout)
+{
+       return timeout == IPSET_ELEM_PERMANENT ? 0 : (timeout - jiffies)/HZ;
+}
+#endif /* ! IP_SET_BITMAP_TIMEOUT */
+
+#endif /* __KERNEL__ */
+
+#endif /* _IP_SET_TIMEOUT_H */
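
The garbage-collection interval above is plain arithmetic: IPSET_GC_PERIOD() runs the collector every timeout/3 seconds, clamped to at least 1 and at most IPSET_GC_TIME (three minutes). A small userspace sketch of that clamp for a few timeout values (illustration only, not from the patch):

#include <stdio.h>

#define IPSET_GC_TIME   (3 * 60)        /* seconds, as in the header above */

/* Same arithmetic as IPSET_GC_PERIOD(timeout). */
static unsigned int gc_period(unsigned int timeout)
{
        unsigned int third = timeout / 3;

        if (!third)
                return 1;
        return third < IPSET_GC_TIME ? third : IPSET_GC_TIME;
}

int main(void)
{
        unsigned int t[] = { 2, 30, 600, 3600 };
        unsigned int i;

        for (i = 0; i < sizeof(t) / sizeof(t[0]); i++)
                printf("timeout %4us -> gc every %3us\n", t[i], gc_period(t[i]));
        return 0;
}

The related subtlety in ip_set_timeout_set() is that a computed jiffies deadline which happens to collide with a sentinel value (IPSET_ELEM_UNSET or IPSET_ELEM_PERMANENT) is nudged forward by one so it cannot be mistaken for "unset" or "no timeout".
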
diff --git a/include/linux/netfilter/ipset/pfxlen.h b/include/linux/netfilter/ipset/pfxlen.h
new file mode 100644 (file)
index 0000000..0e1fb50
--- /dev/null
@@ -0,0 +1,35 @@
+#ifndef _PFXLEN_H
+#define _PFXLEN_H
+
+#include <asm/byteorder.h>
+#include <linux/netfilter.h> 
+
+/* Prefixlen maps, by Jan Engelhardt  */
+extern const union nf_inet_addr ip_set_netmask_map[];
+extern const union nf_inet_addr ip_set_hostmask_map[];
+
+static inline __be32
+ip_set_netmask(u8 pfxlen)
+{
+       return ip_set_netmask_map[pfxlen].ip;
+}
+
+static inline const __be32 *
+ip_set_netmask6(u8 pfxlen)
+{
+       return &ip_set_netmask_map[pfxlen].ip6[0];
+}
+
+static inline u32
+ip_set_hostmask(u8 pfxlen)
+{
+       return (__force u32) ip_set_hostmask_map[pfxlen].ip;
+}
+
+static inline const __be32 *
+ip_set_hostmask6(u8 pfxlen)
+{
+       return &ip_set_hostmask_map[pfxlen].ip6[0];
+}
+
+#endif /*_PFXLEN_H */
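
The two lookup tables are indexed by prefix length; for IPv4 an entry is expected to be the usual network mask in network byte order (the table contents themselves are defined elsewhere in the patch, not in this hunk). A hedged sketch of what such an IPv4 entry should hold, computed by shifting instead of table lookup:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* What ip_set_netmask(pfxlen) is expected to return for IPv4: a /pfxlen
 * network mask in network byte order (the kernel uses a precomputed table). */
static uint32_t netmask4(uint8_t pfxlen)
{
        return pfxlen ? htonl(~0u << (32 - pfxlen)) : 0;
}

int main(void)
{
        struct in_addr a = { .s_addr = netmask4(20) };

        printf("/20 -> %s\n", inet_ntoa(a));    /* expect 255.255.240.0 */
        return 0;
}
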
diff --git a/include/linux/netfilter/nf_conntrack_snmp.h b/include/linux/netfilter/nf_conntrack_snmp.h
new file mode 100644 (file)
index 0000000..064bc63
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef _NF_CONNTRACK_SNMP_H
+#define _NF_CONNTRACK_SNMP_H
+
+extern int (*nf_nat_snmp_hook)(struct sk_buff *skb,
+                               unsigned int protoff,
+                               struct nf_conn *ct,
+                               enum ip_conntrack_info ctinfo);
+
+#endif /* _NF_CONNTRACK_SNMP_H */
index 361d6b5630ee8105e407477308bed1ab6f03ccf0..2b11fc1a86bedb64cf609537daf476de87d41dcc 100644 (file)
@@ -47,7 +47,8 @@ struct nfgenmsg {
 #define NFNL_SUBSYS_QUEUE              3
 #define NFNL_SUBSYS_ULOG               4
 #define NFNL_SUBSYS_OSF                        5
-#define NFNL_SUBSYS_COUNT              6
+#define NFNL_SUBSYS_IPSET              6
+#define NFNL_SUBSYS_COUNT              7
 
 #ifdef __KERNEL__
 
index 19711e3ffd428a83d3368ccee182a0b3f466bbb1..debf1aefd753cef4770c4249222f0bebe2cd0a5c 100644 (file)
@@ -42,6 +42,7 @@ enum ctattr_type {
        CTA_SECMARK,            /* obsolete */
        CTA_ZONE,
        CTA_SECCTX,
+       CTA_TIMESTAMP,
        __CTA_MAX
 };
 #define CTA_MAX (__CTA_MAX - 1)
@@ -127,6 +128,14 @@ enum ctattr_counters {
 };
 #define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1)
 
+enum ctattr_tstamp {
+       CTA_TIMESTAMP_UNSPEC,
+       CTA_TIMESTAMP_START,
+       CTA_TIMESTAMP_STOP,
+       __CTA_TIMESTAMP_MAX
+};
+#define CTA_TIMESTAMP_MAX (__CTA_TIMESTAMP_MAX - 1)
+
 enum ctattr_nat {
        CTA_NAT_UNSPEC,
        CTA_NAT_MINIP,
index 6712e713b2995e9354ef8b6aec8b896937823553..37219525ff6fbe7a5298f5586bd00c88f7024351 100644 (file)
@@ -611,8 +611,9 @@ struct _compat_xt_align {
 extern void xt_compat_lock(u_int8_t af);
 extern void xt_compat_unlock(u_int8_t af);
 
-extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta);
+extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
 extern void xt_compat_flush_offsets(u_int8_t af);
+extern void xt_compat_init_offsets(u_int8_t af, unsigned int number);
 extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
 
 extern int xt_compat_match_offset(const struct xt_match *match);
diff --git a/include/linux/netfilter/xt_AUDIT.h b/include/linux/netfilter/xt_AUDIT.h
new file mode 100644 (file)
index 0000000..38751d2
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Header file for iptables xt_AUDIT target
+ *
+ * (C) 2010-2011 Thomas Graf <tgraf@redhat.com>
+ * (C) 2010-2011 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _XT_AUDIT_TARGET_H
+#define _XT_AUDIT_TARGET_H
+
+#include <linux/types.h>
+
+enum {
+       XT_AUDIT_TYPE_ACCEPT = 0,
+       XT_AUDIT_TYPE_DROP,
+       XT_AUDIT_TYPE_REJECT,
+       __XT_AUDIT_TYPE_MAX,
+};
+
+#define XT_AUDIT_TYPE_MAX (__XT_AUDIT_TYPE_MAX - 1)
+
+struct xt_audit_info {
+       __u8 type; /* XT_AUDIT_TYPE_* */
+};
+
+#endif /* _XT_AUDIT_TARGET_H */
index 1b564106891dbc9e4aa181f7bb95bc471202dadc..b56e76811c04380e9779dbe82c2cfa4a5b0c6abd 100644 (file)
@@ -1,14 +1,16 @@
 #ifndef _XT_CT_H
 #define _XT_CT_H
 
+#include <linux/types.h>
+
 #define XT_CT_NOTRACK  0x1
 
 struct xt_ct_target_info {
-       u_int16_t       flags;
-       u_int16_t       zone;
-       u_int32_t       ct_events;
-       u_int32_t       exp_events;
-       char            helper[16];
+       __u16 flags;
+       __u16 zone;
+       __u32 ct_events;
+       __u32 exp_events;
+       char helper[16];
 
        /* Used internally by the kernel */
        struct nf_conn  *ct __attribute__((aligned(8)));
index 2584f4a777def8fcaf11960d05a2be7386ba63ad..9eafdbbb401cd3609095c2395e541d5ddef0b869 100644 (file)
@@ -20,4 +20,10 @@ struct xt_NFQ_info_v1 {
        __u16 queues_total;
 };
 
+struct xt_NFQ_info_v2 {
+       __u16 queuenum;
+       __u16 queues_total;
+       __u16 bypass;
+};
+
 #endif /* _XT_NFQ_TARGET_H */
index 2db543214ff59b57b644c5abc1163086ef8ca732..7157318499c27b3c96b0e07ae19413fcbe94a827 100644 (file)
@@ -1,13 +1,15 @@
 #ifndef _XT_TCPOPTSTRIP_H
 #define _XT_TCPOPTSTRIP_H
 
+#include <linux/types.h>
+
 #define tcpoptstrip_set_bit(bmap, idx) \
        (bmap[(idx) >> 5] |= 1U << (idx & 31))
 #define tcpoptstrip_test_bit(bmap, idx) \
        (((1U << (idx & 31)) & bmap[(idx) >> 5]) != 0)
 
 struct xt_tcpoptstrip_target_info {
-       u_int32_t strip_bmap[8];
+       __u32 strip_bmap[8];
 };
 
 #endif /* _XT_TCPOPTSTRIP_H */
index 3f3d69361289ca4cecda2182f71d62e0b7137053..902043c2073ff7e9d5e195e16246438410f79b1e 100644 (file)
@@ -1,19 +1,21 @@
 #ifndef _XT_TPROXY_H
 #define _XT_TPROXY_H
 
+#include <linux/types.h>
+
 /* TPROXY target is capable of marking the packet to perform
  * redirection. We can get rid of that whenever we get support for
  * multiple targets in the same rule. */
 struct xt_tproxy_target_info {
-       u_int32_t mark_mask;
-       u_int32_t mark_value;
+       __u32 mark_mask;
+       __u32 mark_value;
        __be32 laddr;
        __be16 lport;
 };
 
 struct xt_tproxy_target_info_v1 {
-       u_int32_t mark_mask;
-       u_int32_t mark_value;
+       __u32 mark_mask;
+       __u32 mark_value;
        union nf_inet_addr laddr;
        __be16 lport;
 };
index 886682656f098db6df2dd5cd0327cd5a76c5b6c1..9b883c8fbf548bb34ab0e97ff48944d85a93d012 100644 (file)
@@ -1,15 +1,17 @@
 #ifndef _XT_CLUSTER_MATCH_H
 #define _XT_CLUSTER_MATCH_H
 
+#include <linux/types.h>
+
 enum xt_cluster_flags {
        XT_CLUSTER_F_INV        = (1 << 0)
 };
 
 struct xt_cluster_match_info {
-       u_int32_t               total_nodes;
-       u_int32_t               node_mask;
-       u_int32_t               hash_seed;
-       u_int32_t               flags;
+       __u32 total_nodes;
+       __u32 node_mask;
+       __u32 hash_seed;
+       __u32 flags;
 };
 
 #define XT_CLUSTER_NODES_MAX   32
index eacfedc6b5d07e3899ca8ad88f66ccc0f817cff3..0ea5e79f5bd73fade99d020a96b68c55a8c32224 100644 (file)
@@ -4,7 +4,7 @@
 #define XT_MAX_COMMENT_LEN 256
 
 struct xt_comment_info {
-       unsigned char comment[XT_MAX_COMMENT_LEN];
+       char comment[XT_MAX_COMMENT_LEN];
 };
 
 #endif /* XT_COMMENT_H */
index 7e3284bcbd2bd118470cbd5b9fc9ec955ad9ccea..0ca66e97acbc5eda481befdda3ef13eea6119975 100644 (file)
@@ -1,8 +1,15 @@
 #ifndef _XT_CONNLIMIT_H
 #define _XT_CONNLIMIT_H
 
+#include <linux/types.h>
+
 struct xt_connlimit_data;
 
+enum {
+       XT_CONNLIMIT_INVERT = 1 << 0,
+       XT_CONNLIMIT_DADDR  = 1 << 1,
+};
+
 struct xt_connlimit_info {
        union {
                union nf_inet_addr mask;
@@ -13,7 +20,14 @@ struct xt_connlimit_info {
                };
 #endif
        };
-       unsigned int limit, inverse;
+       unsigned int limit;
+       union {
+               /* revision 0 */
+               unsigned int inverse;
+
+               /* revision 1 */
+               __u32 flags;
+       };
 
        /* Used internally by the kernel */
        struct xt_connlimit_data *data __attribute__((aligned(8)));
index 54f47a2f6152bbbbc542dc52c37dee19b5676dda..74b904d8f99c28121229ef7da0ef3ec8b01c601a 100644 (file)
@@ -58,4 +58,19 @@ struct xt_conntrack_mtinfo2 {
        __u16 state_mask, status_mask;
 };
 
+struct xt_conntrack_mtinfo3 {
+       union nf_inet_addr origsrc_addr, origsrc_mask;
+       union nf_inet_addr origdst_addr, origdst_mask;
+       union nf_inet_addr replsrc_addr, replsrc_mask;
+       union nf_inet_addr repldst_addr, repldst_mask;
+       __u32 expires_min, expires_max;
+       __u16 l4proto;
+       __u16 origsrc_port, origdst_port;
+       __u16 replsrc_port, repldst_port;
+       __u16 match_flags, invert_flags;
+       __u16 state_mask, status_mask;
+       __u16 origsrc_port_high, origdst_port_high;
+       __u16 replsrc_port_high, repldst_port_high;
+};
+
 #endif /*_XT_CONNTRACK_H*/
diff --git a/include/linux/netfilter/xt_devgroup.h b/include/linux/netfilter/xt_devgroup.h
new file mode 100644 (file)
index 0000000..1babde0
--- /dev/null
@@ -0,0 +1,21 @@
+#ifndef _XT_DEVGROUP_H
+#define _XT_DEVGROUP_H
+
+#include <linux/types.h>
+
+enum xt_devgroup_flags {
+       XT_DEVGROUP_MATCH_SRC   = 0x1,
+       XT_DEVGROUP_INVERT_SRC  = 0x2,
+       XT_DEVGROUP_MATCH_DST   = 0x4,
+       XT_DEVGROUP_INVERT_DST  = 0x8,
+};
+
+struct xt_devgroup_info {
+       __u32   flags;
+       __u32   src_group;
+       __u32   src_mask;
+       __u32   dst_group;
+       __u32   dst_mask;
+};
+
+#endif /* _XT_DEVGROUP_H */
index b0d28c659ab75c7a87aa9e4cb956c516bf5aeaf1..ca6e03e47a17ca08bae40f4cf5a66292d4fc38ca 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _XT_QUOTA_H
 #define _XT_QUOTA_H
 
+#include <linux/types.h>
+
 enum xt_quota_flags {
        XT_QUOTA_INVERT         = 0x1,
 };
@@ -9,9 +11,9 @@ enum xt_quota_flags {
 struct xt_quota_priv;
 
 struct xt_quota_info {
-       u_int32_t               flags;
-       u_int32_t               pad;
-       aligned_u64             quota;
+       __u32 flags;
+       __u32 pad;
+       aligned_u64 quota;
 
        /* Used internally by the kernel */
        struct xt_quota_priv    *master;
diff --git a/include/linux/netfilter/xt_set.h b/include/linux/netfilter/xt_set.h
new file mode 100644 (file)
index 0000000..081f1de
--- /dev/null
@@ -0,0 +1,56 @@
+#ifndef _XT_SET_H
+#define _XT_SET_H
+
+#include <linux/types.h>
+#include <linux/netfilter/ipset/ip_set.h>
+
+/* Revision 0 interface: backward compatible with netfilter/iptables */
+
+/*
+ * Option flags for kernel operations (xt_set_info_v0)
+ */
+#define IPSET_SRC              0x01    /* Source match/add */
+#define IPSET_DST              0x02    /* Destination match/add */
+#define IPSET_MATCH_INV                0x04    /* Inverse matching */
+
+struct xt_set_info_v0 {
+       ip_set_id_t index;
+       union {
+               __u32 flags[IPSET_DIM_MAX + 1];
+               struct {
+                       __u32 __flags[IPSET_DIM_MAX];
+                       __u8 dim;
+                       __u8 flags;
+               } compat;
+       } u;
+};
+
+/* match and target infos */
+struct xt_set_info_match_v0 {
+       struct xt_set_info_v0 match_set;
+};
+
+struct xt_set_info_target_v0 {
+       struct xt_set_info_v0 add_set;
+       struct xt_set_info_v0 del_set;
+};
+
+/* Revision 1: current interface to netfilter/iptables */
+
+struct xt_set_info {
+       ip_set_id_t index;
+       __u8 dim;
+       __u8 flags;
+};
+
+/* match and target infos */
+struct xt_set_info_match {
+       struct xt_set_info match_set;
+};
+
+struct xt_set_info_target {
+       struct xt_set_info add_set;
+       struct xt_set_info del_set;
+};
+
+#endif /*_XT_SET_H*/
index 6f475b8ff34be81caa612bf1a947d3ad627290ab..26d7217bd4f1cf1337a7c1d5d5643801d23b4e8a 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _XT_SOCKET_H
 #define _XT_SOCKET_H
 
+#include <linux/types.h>
+
 enum {
        XT_SOCKET_TRANSPARENT = 1 << 0,
 };
index 14b6df412c9fa1674267be11f0c3ca698b5411aa..7c37fac576c440d34c650dbc352bcceded891808 100644 (file)
@@ -1,14 +1,16 @@
 #ifndef _XT_TIME_H
 #define _XT_TIME_H 1
 
+#include <linux/types.h>
+
 struct xt_time_info {
-       u_int32_t date_start;
-       u_int32_t date_stop;
-       u_int32_t daytime_start;
-       u_int32_t daytime_stop;
-       u_int32_t monthdays_match;
-       u_int8_t weekdays_match;
-       u_int8_t flags;
+       __u32 date_start;
+       __u32 date_stop;
+       __u32 daytime_start;
+       __u32 daytime_stop;
+       __u32 monthdays_match;
+       __u8 weekdays_match;
+       __u8 flags;
 };
 
 enum {
index 9947f56cdbddc6021b15c5e0a7641a39e1ef43f3..04d1bfea03c2cbd9ba4a9c2575e44ef1e9766cd1 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _XT_U32_H
 #define _XT_U32_H 1
 
+#include <linux/types.h>
+
 enum xt_u32_ops {
        XT_U32_AND,
        XT_U32_LEFTSH,
@@ -9,13 +11,13 @@ enum xt_u32_ops {
 };
 
 struct xt_u32_location_element {
-       u_int32_t number;
-       u_int8_t nextop;
+       __u32 number;
+       __u8 nextop;
 };
 
 struct xt_u32_value_element {
-       u_int32_t min;
-       u_int32_t max;
+       __u32 min;
+       __u32 max;
 };
 
 /*
@@ -27,14 +29,14 @@ struct xt_u32_value_element {
 struct xt_u32_test {
        struct xt_u32_location_element location[XT_U32_MAXSIZE+1];
        struct xt_u32_value_element value[XT_U32_MAXSIZE+1];
-       u_int8_t nnums;
-       u_int8_t nvalues;
+       __u8 nnums;
+       __u8 nvalues;
 };
 
 struct xt_u32 {
        struct xt_u32_test tests[XT_U32_MAXSIZE+1];
-       u_int8_t ntests;
-       u_int8_t invert;
+       __u8 ntests;
+       __u8 invert;
 };
 
 #endif /* _XT_U32_H */
index c73ef0b18bdc4b8296746369437b43f712370a25..be5be1577a56e28797db0817579ac22adc22e1e9 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_802_3_H
 #define __LINUX_BRIDGE_EBT_802_3_H
 
+#include <linux/types.h>
+
 #define EBT_802_3_SAP 0x01
 #define EBT_802_3_TYPE 0x02
 
 
 /* ui has one byte ctrl, ni has two */
 struct hdr_ui {
-       uint8_t dsap;
-       uint8_t ssap;
-       uint8_t ctrl;
-       uint8_t orig[3];
+       __u8 dsap;
+       __u8 ssap;
+       __u8 ctrl;
+       __u8 orig[3];
        __be16 type;
 };
 
 struct hdr_ni {
-       uint8_t dsap;
-       uint8_t ssap;
+       __u8 dsap;
+       __u8 ssap;
        __be16 ctrl;
-       uint8_t  orig[3];
+       __u8  orig[3];
        __be16 type;
 };
 
 struct ebt_802_3_hdr {
-       uint8_t  daddr[6];
-       uint8_t  saddr[6];
+       __u8  daddr[6];
+       __u8  saddr[6];
        __be16 len;
        union {
                struct hdr_ui ui;
@@ -59,10 +61,10 @@ static inline struct ebt_802_3_hdr *ebt_802_3_hdr(const struct sk_buff *skb)
 #endif
 
 struct ebt_802_3_info {
-       uint8_t  sap;
+       __u8  sap;
        __be16 type;
-       uint8_t  bitmask;
-       uint8_t  invflags;
+       __u8  bitmask;
+       __u8  invflags;
 };
 
 #endif
index 0009558609a75219a7678fe1dddcab17526f5462..bd4e3ad0b7067da89fac177a4de92fb5fcf14533 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_AMONG_H
 #define __LINUX_BRIDGE_EBT_AMONG_H
 
+#include <linux/types.h>
+
 #define EBT_AMONG_DST 0x01
 #define EBT_AMONG_SRC 0x02
 
@@ -30,7 +32,7 @@
  */
 
 struct ebt_mac_wormhash_tuple {
-       uint32_t cmp[2];
+       __u32 cmp[2];
        __be32 ip;
 };
 
index cbf4843b6b0f771ca8392287a0c6ce665e308ec0..522f3e427f49e7656c7c239d44b7fb71fca8daeb 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_ARP_H
 #define __LINUX_BRIDGE_EBT_ARP_H
 
+#include <linux/types.h>
+
 #define EBT_ARP_OPCODE 0x01
 #define EBT_ARP_HTYPE 0x02
 #define EBT_ARP_PTYPE 0x04
@@ -27,8 +29,8 @@ struct ebt_arp_info
        unsigned char smmsk[ETH_ALEN];
        unsigned char dmaddr[ETH_ALEN];
        unsigned char dmmsk[ETH_ALEN];
-       uint8_t  bitmask;
-       uint8_t  invflags;
+       __u8  bitmask;
+       __u8  invflags;
 };
 
 #endif
index 6a708fb92241edf64f94b2b61eee5ba26d6f599e..c4bbc41b0ea47c576d4649262fc337f445fb0601 100644 (file)
@@ -15,6 +15,8 @@
 #ifndef __LINUX_BRIDGE_EBT_IP_H
 #define __LINUX_BRIDGE_EBT_IP_H
 
+#include <linux/types.h>
+
 #define EBT_IP_SOURCE 0x01
 #define EBT_IP_DEST 0x02
 #define EBT_IP_TOS 0x04
@@ -31,12 +33,12 @@ struct ebt_ip_info {
        __be32 daddr;
        __be32 smsk;
        __be32 dmsk;
-       uint8_t  tos;
-       uint8_t  protocol;
-       uint8_t  bitmask;
-       uint8_t  invflags;
-       uint16_t sport[2];
-       uint16_t dport[2];
+       __u8  tos;
+       __u8  protocol;
+       __u8  bitmask;
+       __u8  invflags;
+       __u16 sport[2];
+       __u16 dport[2];
 };
 
 #endif
index e5de987015197bc3846669fed884473405a5b4cc..42b889682721783b84d03797188bc3d9fc409fad 100644 (file)
 #ifndef __LINUX_BRIDGE_EBT_IP6_H
 #define __LINUX_BRIDGE_EBT_IP6_H
 
+#include <linux/types.h>
+
 #define EBT_IP6_SOURCE 0x01
 #define EBT_IP6_DEST 0x02
 #define EBT_IP6_TCLASS 0x04
 #define EBT_IP6_PROTO 0x08
 #define EBT_IP6_SPORT 0x10
 #define EBT_IP6_DPORT 0x20
+#define EBT_IP6_ICMP6 0x40
+
 #define EBT_IP6_MASK (EBT_IP6_SOURCE | EBT_IP6_DEST | EBT_IP6_TCLASS |\
-                     EBT_IP6_PROTO | EBT_IP6_SPORT | EBT_IP6_DPORT)
+                     EBT_IP6_PROTO | EBT_IP6_SPORT | EBT_IP6_DPORT | \
+                     EBT_IP6_ICMP6)
 #define EBT_IP6_MATCH "ip6"
 
 /* the same values are used for the invflags */
@@ -28,12 +33,18 @@ struct ebt_ip6_info {
        struct in6_addr daddr;
        struct in6_addr smsk;
        struct in6_addr dmsk;
-       uint8_t  tclass;
-       uint8_t  protocol;
-       uint8_t  bitmask;
-       uint8_t  invflags;
-       uint16_t sport[2];
-       uint16_t dport[2];
+       __u8  tclass;
+       __u8  protocol;
+       __u8  bitmask;
+       __u8  invflags;
+       union {
+               __u16 sport[2];
+               __u8 icmpv6_type[2];
+       };
+       union {
+               __u16 dport[2];
+               __u8 icmpv6_code[2];
+       };
 };
 
 #endif
index 4bf76b751676e3eab0f0ab5748509fa4bd97d9e1..66d80b30ba0e2779b4798ec8d0affc3fa0722815 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_LIMIT_H
 #define __LINUX_BRIDGE_EBT_LIMIT_H
 
+#include <linux/types.h>
+
 #define EBT_LIMIT_MATCH "limit"
 
 /* timings are in milliseconds. */
    seconds, or one every 59 hours. */
 
 struct ebt_limit_info {
-       u_int32_t avg;    /* Average secs between packets * scale */
-       u_int32_t burst;  /* Period multiplier for upper limit. */
+       __u32 avg;    /* Average secs between packets * scale */
+       __u32 burst;  /* Period multiplier for upper limit. */
 
        /* Used internally by the kernel */
        unsigned long prev;
-       u_int32_t credit;
-       u_int32_t credit_cap, cost;
+       __u32 credit;
+       __u32 credit_cap, cost;
 };
 
 #endif
index cc2cdfb764bc14a116426870d49826dbfb6fc79d..7e7f1d1fe4946808655c1582b76930dfdf39adb1 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_LOG_H
 #define __LINUX_BRIDGE_EBT_LOG_H
 
+#include <linux/types.h>
+
 #define EBT_LOG_IP 0x01 /* if the frame is made by ip, log the ip information */
 #define EBT_LOG_ARP 0x02
 #define EBT_LOG_NFLOG 0x04
@@ -10,9 +12,9 @@
 #define EBT_LOG_WATCHER "log"
 
 struct ebt_log_info {
-       uint8_t loglevel;
-       uint8_t prefix[EBT_LOG_PREFIX_SIZE];
-       uint32_t bitmask;
+       __u8 loglevel;
+       __u8 prefix[EBT_LOG_PREFIX_SIZE];
+       __u32 bitmask;
 };
 
 #endif
index 9ceb10ec0ed6badda0f695a5512a2de9cef64c14..410f9e5a71d44c44ac5592aee4420e0bdafc1976 100644 (file)
@@ -1,13 +1,15 @@
 #ifndef __LINUX_BRIDGE_EBT_MARK_M_H
 #define __LINUX_BRIDGE_EBT_MARK_M_H
 
+#include <linux/types.h>
+
 #define EBT_MARK_AND 0x01
 #define EBT_MARK_OR 0x02
 #define EBT_MARK_MASK (EBT_MARK_AND | EBT_MARK_OR)
 struct ebt_mark_m_info {
        unsigned long mark, mask;
-       uint8_t invert;
-       uint8_t bitmask;
+       __u8 invert;
+       __u8 bitmask;
 };
 #define EBT_MARK_MATCH "mark_m"
 
index 052817849b83621d8054e5843b7904fd978567cb..df829fce9125d6f3120d7d3e1b5ad367f99c1dbb 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_NFLOG_H
 #define __LINUX_BRIDGE_EBT_NFLOG_H
 
+#include <linux/types.h>
+
 #define EBT_NFLOG_MASK 0x0
 
 #define EBT_NFLOG_PREFIX_SIZE 64
 #define EBT_NFLOG_DEFAULT_THRESHOLD    1
 
 struct ebt_nflog_info {
-       u_int32_t len;
-       u_int16_t group;
-       u_int16_t threshold;
-       u_int16_t flags;
-       u_int16_t pad;
+       __u32 len;
+       __u16 group;
+       __u16 threshold;
+       __u16 flags;
+       __u16 pad;
        char prefix[EBT_NFLOG_PREFIX_SIZE];
 };
 
index 51a7998409312e9cdd58fc50b89e9ea4038c8226..c241badcd036a3b403e7e15abc7c010d3a546cbd 100644 (file)
@@ -1,9 +1,11 @@
 #ifndef __LINUX_BRIDGE_EBT_PKTTYPE_H
 #define __LINUX_BRIDGE_EBT_PKTTYPE_H
 
+#include <linux/types.h>
+
 struct ebt_pkttype_info {
-       uint8_t pkt_type;
-       uint8_t invert;
+       __u8 pkt_type;
+       __u8 invert;
 };
 #define EBT_PKTTYPE_MATCH "pkttype"
 
index e503a0aa2728bec9108bb161f65ab038fa90ad60..1025b9f5fb7ddd2657de1b28866f898a0a19bab7 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_STP_H
 #define __LINUX_BRIDGE_EBT_STP_H
 
+#include <linux/types.h>
+
 #define EBT_STP_TYPE           0x0001
 
 #define EBT_STP_FLAGS          0x0002
 #define EBT_STP_MATCH "stp"
 
 struct ebt_stp_config_info {
-       uint8_t flags;
-       uint16_t root_priol, root_priou;
+       __u8 flags;
+       __u16 root_priol, root_priou;
        char root_addr[6], root_addrmsk[6];
-       uint32_t root_costl, root_costu;
-       uint16_t sender_priol, sender_priou;
+       __u32 root_costl, root_costu;
+       __u16 sender_priol, sender_priou;
        char sender_addr[6], sender_addrmsk[6];
-       uint16_t portl, portu;
-       uint16_t msg_agel, msg_ageu;
-       uint16_t max_agel, max_ageu;
-       uint16_t hello_timel, hello_timeu;
-       uint16_t forward_delayl, forward_delayu;
+       __u16 portl, portu;
+       __u16 msg_agel, msg_ageu;
+       __u16 max_agel, max_ageu;
+       __u16 hello_timel, hello_timeu;
+       __u16 forward_delayl, forward_delayu;
 };
 
 struct ebt_stp_info {
-       uint8_t type;
+       __u8 type;
        struct ebt_stp_config_info config;
-       uint16_t bitmask;
-       uint16_t invflags;
+       __u16 bitmask;
+       __u16 invflags;
 };
 
 #endif
index b677e2671541ee4cf0228a29bed7913a44b0eade..89a6becb526932d2c2f067d7155b15edefd43102 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _EBT_ULOG_H
 #define _EBT_ULOG_H
 
+#include <linux/types.h>
+
 #define EBT_ULOG_DEFAULT_NLGROUP 0
 #define EBT_ULOG_DEFAULT_QTHRESHOLD 1
 #define EBT_ULOG_MAXNLGROUPS 32 /* hardcoded netlink max */
@@ -10,7 +12,7 @@
 #define EBT_ULOG_VERSION 1
 
 struct ebt_ulog_info {
-       uint32_t nlgroup;
+       __u32 nlgroup;
        unsigned int cprange;
        unsigned int qthreshold;
        char prefix[EBT_ULOG_PREFIX_LEN];
index 1d98be4031e720d6fdc4c74ab9f997047872c6b9..967d1d5cf98d81e88413f0d9dc2f6a1ecdeeff16 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_VLAN_H
 #define __LINUX_BRIDGE_EBT_VLAN_H
 
+#include <linux/types.h>
+
 #define EBT_VLAN_ID    0x01
 #define EBT_VLAN_PRIO  0x02
 #define EBT_VLAN_ENCAP 0x04
 #define EBT_VLAN_MATCH "vlan"
 
 struct ebt_vlan_info {
-       uint16_t id;            /* VLAN ID {1-4095} */
-       uint8_t prio;           /* VLAN User Priority {0-7} */
+       __u16 id;               /* VLAN ID {1-4095} */
+       __u8 prio;              /* VLAN User Priority {0-7} */
        __be16 encap;           /* VLAN Encapsulated frame code {0-65535} */
-       uint8_t bitmask;                /* Args bitmask bit 1=1 - ID arg,
+       __u8 bitmask;           /* Args bitmask bit 1=1 - ID arg,
                                   bit 2=1 User-Priority arg, bit 3=1 encap*/
-       uint8_t invflags;               /* Inverse bitmask  bit 1=1 - inversed ID arg, 
+       __u8 invflags;          /* Inverse bitmask  bit 1=1 - inversed ID arg, 
                                   bit 2=1 - inversed Priority arg */
 };
 
index e5a3687c8a7238d90c6be77941429d0ca6071db0..c6a204c97047a671bed64f03230064ef9e174175 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _IPT_CLUSTERIP_H_target
 #define _IPT_CLUSTERIP_H_target
 
+#include <linux/types.h>
+
 enum clusterip_hashmode {
     CLUSTERIP_HASHMODE_SIP = 0,
     CLUSTERIP_HASHMODE_SIP_SPT,
@@ -17,15 +19,15 @@ struct clusterip_config;
 
 struct ipt_clusterip_tgt_info {
 
-       u_int32_t flags;
+       __u32 flags;
 
        /* only relevant for new ones */
-       u_int8_t clustermac[6];
-       u_int16_t num_total_nodes;
-       u_int16_t num_local_nodes;
-       u_int16_t local_nodes[CLUSTERIP_MAX_NODES];
-       u_int32_t hash_mode;
-       u_int32_t hash_initval;
+       __u8 clustermac[6];
+       __u16 num_total_nodes;
+       __u16 num_local_nodes;
+       __u16 local_nodes[CLUSTERIP_MAX_NODES];
+       __u32 hash_mode;
+       __u32 hash_initval;
 
        /* Used internally by the kernel */
        struct clusterip_config *config;
index 7ca45918ab8e5bf86dc349c6bbd2365e679486ad..bb88d5315a4dd7bdd793b2b165a153143fcb19b3 100644 (file)
@@ -8,6 +8,8 @@
 */
 #ifndef _IPT_ECN_TARGET_H
 #define _IPT_ECN_TARGET_H
+
+#include <linux/types.h>
 #include <linux/netfilter/xt_DSCP.h>
 
 #define IPT_ECN_IP_MASK        (~XT_DSCP_MASK)
 #define IPT_ECN_OP_MASK                0xce
 
 struct ipt_ECN_info {
-       u_int8_t operation;     /* bitset of operations */
-       u_int8_t ip_ect;        /* ECT codepoint of IPv4 header, pre-shifted */
+       __u8 operation; /* bitset of operations */
+       __u8 ip_ect;    /* ECT codepoint of IPv4 header, pre-shifted */
        union {
                struct {
-                       u_int8_t ece:1, cwr:1; /* TCP ECT bits */
+                       __u8 ece:1, cwr:1; /* TCP ECT bits */
                } tcp;
        } proto;
 };
index 2529660c5b38e338aa454bcdb3d4a9a2fbc6a513..5bca78267afddf0275a59368ec7db35d31b6c6b6 100644 (file)
@@ -1,15 +1,17 @@
 #ifndef _IPT_SAME_H
 #define _IPT_SAME_H
 
+#include <linux/types.h>
+
 #define IPT_SAME_MAX_RANGE     10
 
 #define IPT_SAME_NODST         0x01
 
 struct ipt_same_info {
        unsigned char info;
-       u_int32_t rangesize;
-       u_int32_t ipnum;
-       u_int32_t *iparray;
+       __u32 rangesize;
+       __u32 ipnum;
+       __u32 *iparray;
 
        /* hangs off end. */
        struct nf_nat_range range[IPT_SAME_MAX_RANGE];
index ee6611edc112e19dcc02bc46d80be0eae97fbc45..f6ac169d92f9bbc6751a930ef0ed64d752dd4542 100644 (file)
@@ -4,6 +4,8 @@
 #ifndef _IPT_TTL_H
 #define _IPT_TTL_H
 
+#include <linux/types.h>
+
 enum {
        IPT_TTL_SET = 0,
        IPT_TTL_INC,
@@ -13,8 +15,8 @@ enum {
 #define IPT_TTL_MAXMODE        IPT_TTL_DEC
 
 struct ipt_TTL_info {
-       u_int8_t        mode;
-       u_int8_t        ttl;
+       __u8    mode;
+       __u8    ttl;
 };
 
 
index 446de6aef9838f07775ae5d28008b1d6ca379ca7..0da42237c8da971d2c94b0c0fc39b4a415e6f8be 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _IPT_ADDRTYPE_H
 #define _IPT_ADDRTYPE_H
 
+#include <linux/types.h>
+
 enum {
        IPT_ADDRTYPE_INVERT_SOURCE      = 0x0001,
        IPT_ADDRTYPE_INVERT_DEST        = 0x0002,
@@ -9,17 +11,17 @@ enum {
 };
 
 struct ipt_addrtype_info_v1 {
-       u_int16_t       source;         /* source-type mask */
-       u_int16_t       dest;           /* dest-type mask */
-       u_int32_t       flags;
+       __u16   source;         /* source-type mask */
+       __u16   dest;           /* dest-type mask */
+       __u32   flags;
 };
 
 /* revision 0 */
 struct ipt_addrtype_info {
-       u_int16_t       source;         /* source-type mask */
-       u_int16_t       dest;           /* dest-type mask */
-       u_int32_t       invert_source;
-       u_int32_t       invert_dest;
+       __u16   source;         /* source-type mask */
+       __u16   dest;           /* dest-type mask */
+       __u32   invert_source;
+       __u32   invert_dest;
 };
 
 #endif
index 2e555b4d05e324f99d793ec627d3cd3aa3ad0be7..4e02bb0119e3c131575118b6f0cf0d868f7de722 100644 (file)
@@ -1,9 +1,11 @@
 #ifndef _IPT_AH_H
 #define _IPT_AH_H
 
+#include <linux/types.h>
+
 struct ipt_ah {
-       u_int32_t spis[2];                      /* Security Parameter Index */
-       u_int8_t  invflags;                     /* Inverse flags */
+       __u32 spis[2];                  /* Security Parameter Index */
+       __u8  invflags;                 /* Inverse flags */
 };
 
 
index 9945baa4ccd7c75eb51190c4e404d9e529d51483..eabf95fb7d3e030c17a2f078ce8621e67d54b38a 100644 (file)
@@ -8,6 +8,8 @@
 */
 #ifndef _IPT_ECN_H
 #define _IPT_ECN_H
+
+#include <linux/types.h>
 #include <linux/netfilter/xt_dscp.h>
 
 #define IPT_ECN_IP_MASK        (~XT_DSCP_MASK)
 
 /* match info */
 struct ipt_ecn_info {
-       u_int8_t operation;
-       u_int8_t invert;
-       u_int8_t ip_ect;
+       __u8 operation;
+       __u8 invert;
+       __u8 ip_ect;
        union {
                struct {
-                       u_int8_t ect;
+                       __u8 ect;
                } tcp;
        } proto;
 };
index ee24fd86a3aaee2c4310921bc860970dd75b3e91..37bee44424860f19e801f1d431b93c9facf3e1cf 100644 (file)
@@ -4,6 +4,8 @@
 #ifndef _IPT_TTL_H
 #define _IPT_TTL_H
 
+#include <linux/types.h>
+
 enum {
        IPT_TTL_EQ = 0,         /* equals */
        IPT_TTL_NE,             /* not equals */
@@ -13,8 +15,8 @@ enum {
 
 
 struct ipt_ttl_info {
-       u_int8_t        mode;
-       u_int8_t        ttl;
+       __u8    mode;
+       __u8    ttl;
 };
 
 
index afb7813d45ab29b37903ded57d221d39ac42b366..ebd8ead1bb63e52a81fc73eb847fbb9cf5861273 100644 (file)
@@ -5,6 +5,8 @@
 #ifndef _IP6T_HL_H
 #define _IP6T_HL_H
 
+#include <linux/types.h>
+
 enum {
        IP6T_HL_SET = 0,
        IP6T_HL_INC,
@@ -14,8 +16,8 @@ enum {
 #define IP6T_HL_MAXMODE        IP6T_HL_DEC
 
 struct ip6t_HL_info {
-       u_int8_t        mode;
-       u_int8_t        hop_limit;
+       __u8    mode;
+       __u8    hop_limit;
 };
 
 
index 6be6504162bbb05dfcbf641e6073731eb2fae781..205ed62e4605dbf3d1ce080ec721b919829af50f 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _IP6T_REJECT_H
 #define _IP6T_REJECT_H
 
+#include <linux/types.h>
+
 enum ip6t_reject_with {
        IP6T_ICMP6_NO_ROUTE,
        IP6T_ICMP6_ADM_PROHIBITED,
@@ -12,7 +14,7 @@ enum ip6t_reject_with {
 };
 
 struct ip6t_reject_info {
-       u_int32_t       with;   /* reject type */
+       __u32   with;   /* reject type */
 };
 
 #endif /*_IP6T_REJECT_H*/
index 17a745cfb2c778cfff11b2601c2ce0bacf184f24..5da2b65cb3ade90c6ab2c515b86277812ec20a66 100644 (file)
@@ -1,11 +1,13 @@
 #ifndef _IP6T_AH_H
 #define _IP6T_AH_H
 
+#include <linux/types.h>
+
 struct ip6t_ah {
-       u_int32_t spis[2];                      /* Security Parameter Index */
-       u_int32_t hdrlen;                       /* Header Length */
-       u_int8_t  hdrres;                       /* Test of the Reserved Filed */
-       u_int8_t  invflags;                     /* Inverse flags */
+       __u32 spis[2];                  /* Security Parameter Index */
+       __u32 hdrlen;                   /* Header Length */
+       __u8  hdrres;                   /* Test of the Reserved Field */
+       __u8  invflags;                 /* Inverse flags */
 };
 
 #define IP6T_AH_SPI 0x01
index 3724d08509200bab13421b3ed61c61ccbef48825..b47f61b9e082f1281023076577fa2e182aa2b616 100644 (file)
@@ -1,11 +1,13 @@
 #ifndef _IP6T_FRAG_H
 #define _IP6T_FRAG_H
 
+#include <linux/types.h>
+
 struct ip6t_frag {
-       u_int32_t ids[2];                       /* Security Parameter Index */
-       u_int32_t hdrlen;                       /* Header Length */
-       u_int8_t  flags;                        /*  */
-       u_int8_t  invflags;                     /* Inverse flags */
+       __u32 ids[2];                   /* Security Parameter Index */
+       __u32 hdrlen;                   /* Header Length */
+       __u8  flags;                    /*  */
+       __u8  invflags;                 /* Inverse flags */
 };
 
 #define IP6T_FRAG_IDS          0x01
index 5ef91b8319a8afd7bb320d15d8d90f3e6e7353a9..6e76dbc6c19aa0c7f0d15205535501f0035acdcb 100644 (file)
@@ -5,6 +5,8 @@
 #ifndef _IP6T_HL_H
 #define _IP6T_HL_H
 
+#include <linux/types.h>
+
 enum {
        IP6T_HL_EQ = 0,         /* equals */
        IP6T_HL_NE,             /* not equals */
@@ -14,8 +16,8 @@ enum {
 
 
 struct ip6t_hl_info {
-       u_int8_t        mode;
-       u_int8_t        hop_limit;
+       __u8    mode;
+       __u8    hop_limit;
 };
 
 
index 01dfd445596a3bf01810b0f61568653acda76ac2..efae3a20c2141f9212dcfb37539efdb0f090e599 100644 (file)
@@ -8,10 +8,12 @@ on whether they contain certain headers */
 #ifndef __IPV6HEADER_H
 #define __IPV6HEADER_H
 
+#include <linux/types.h>
+
 struct ip6t_ipv6header_info {
-       u_int8_t matchflags;
-       u_int8_t invflags;
-       u_int8_t modeflag;
+       __u8 matchflags;
+       __u8 invflags;
+       __u8 modeflag;
 };
 
 #define MASK_HOPOPTS    128
index 18549bca2d1f5609785ee33a42972ad288af66e9..a7729a5025cd08a7fee0da40169e5d1d451b1f34 100644 (file)
@@ -1,10 +1,12 @@
 #ifndef _IP6T_MH_H
 #define _IP6T_MH_H
 
+#include <linux/types.h>
+
 /* MH matching stuff */
 struct ip6t_mh {
-       u_int8_t types[2];      /* MH type range */
-       u_int8_t invflags;      /* Inverse flags */
+       __u8 types[2];  /* MH type range */
+       __u8 invflags;  /* Inverse flags */
 };
 
 /* Values for "invflags" field in struct ip6t_mh. */
index 62d89bcd9f9cff3be87ad029cf3e5ba2495a79a1..17d419a811fdfef99678bf12d9bc060fccb7192b 100644 (file)
@@ -1,14 +1,16 @@
 #ifndef _IP6T_OPTS_H
 #define _IP6T_OPTS_H
 
+#include <linux/types.h>
+
 #define IP6T_OPTS_OPTSNR 16
 
 struct ip6t_opts {
-       u_int32_t hdrlen;                       /* Header Length */
-       u_int8_t flags;                         /*  */
-       u_int8_t invflags;                      /* Inverse flags */
-       u_int16_t opts[IP6T_OPTS_OPTSNR];       /* opts */
-       u_int8_t optsnr;                        /* Nr of OPts */
+       __u32 hdrlen;                   /* Header Length */
+       __u8 flags;                             /*  */
+       __u8 invflags;                  /* Inverse flags */
+       __u16 opts[IP6T_OPTS_OPTSNR];   /* opts */
+       __u8 optsnr;                    /* Nr of OPts */
 };
 
 #define IP6T_OPTS_LEN          0x01
index ab91bfd2cd00f7dcc931f53f395feaaa2a793660..7605a5ff81cd6560c10b60daf91ab889e4843b9d 100644 (file)
@@ -1,18 +1,19 @@
 #ifndef _IP6T_RT_H
 #define _IP6T_RT_H
 
+#include <linux/types.h>
 /*#include <linux/in6.h>*/
 
 #define IP6T_RT_HOPS 16
 
 struct ip6t_rt {
-       u_int32_t rt_type;                      /* Routing Type */
-       u_int32_t segsleft[2];                  /* Segments Left */
-       u_int32_t hdrlen;                       /* Header Length */
-       u_int8_t  flags;                        /*  */
-       u_int8_t  invflags;                     /* Inverse flags */
+       __u32 rt_type;                  /* Routing Type */
+       __u32 segsleft[2];                      /* Segments Left */
+       __u32 hdrlen;                   /* Header Length */
+       __u8  flags;                    /*  */
+       __u8  invflags;                 /* Inverse flags */
        struct in6_addr addrs[IP6T_RT_HOPS];    /* Hops */
-       u_int8_t addrnr;                        /* Nr of Addresses */
+       __u8 addrnr;                    /* Nr of Addresses */
 };
 
 #define IP6T_RT_TYP            0x01
index e2b9e63afa68b53f45fdc60c1a3f76cf0f338705..4c4ac3f3ce5a9968b0b5f2c7534ebc04faa74248 100644 (file)
@@ -160,10 +160,6 @@ struct netlink_skb_parms {
        struct ucred            creds;          /* Skb credentials      */
        __u32                   pid;
        __u32                   dst_group;
-       kernel_cap_t            eff_cap;
-       __u32                   loginuid;       /* Login (audit) uid */
-       __u32                   sessionid;      /* Session id (audit) */
-       __u32                   sid;            /* SELinux security id */
 };
 
 #define NETLINK_CB(skb)                (*(struct netlink_skb_parms*)&((skb)->cb))
index 821ffb954f14738abf42439aedf5538cdffcd4ad..30022189104dad709ebfdca4b661b6b97dc4fd27 100644 (file)
@@ -1243,6 +1243,8 @@ enum nl80211_rate_info {
  * @NL80211_STA_INFO_LLID: the station's mesh LLID
  * @NL80211_STA_INFO_PLID: the station's mesh PLID
  * @NL80211_STA_INFO_PLINK_STATE: peer link state for the station
+ * @NL80211_STA_INFO_RX_BITRATE: last unicast data frame rx rate, nested
+ *     attribute, like NL80211_STA_INFO_TX_BITRATE.
  * @__NL80211_STA_INFO_AFTER_LAST: internal
  * @NL80211_STA_INFO_MAX: highest possible station info attribute
  */
@@ -1261,6 +1263,7 @@ enum nl80211_sta_info {
        NL80211_STA_INFO_TX_RETRIES,
        NL80211_STA_INFO_TX_FAILED,
        NL80211_STA_INFO_SIGNAL_AVG,
+       NL80211_STA_INFO_RX_BITRATE,
 
        /* keep last */
        __NL80211_STA_INFO_AFTER_LAST,
index 559d028970752672cc92260eb7b64c61276f5e89..ff5bccb8713661bbf6247a5c06be78e80570df1d 100644 (file)
@@ -1479,6 +1479,7 @@ void pci_request_acs(void);
 #define PCI_VPD_RO_KEYWORD_PARTNO      "PN"
 #define PCI_VPD_RO_KEYWORD_MFR_ID      "MN"
 #define PCI_VPD_RO_KEYWORD_VENDOR0     "V0"
+#define PCI_VPD_RO_KEYWORD_CHKSUM      "RV"
 
 /**
  * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length
index 26c8df7869180d4b94e4daf8b6c3b8988f7070ba..6fb13841db45e7bddcce8618f08ea644ef6c6edb 100644 (file)
@@ -36,9 +36,7 @@
 /* Socket options for SOL_PNPIPE level */
 #define PNPIPE_ENCAP           1
 #define PNPIPE_IFINDEX         2
-#define PNPIPE_PIPE_HANDLE     3
-#define PNPIPE_ENABLE           4
-/* unused slot */
+#define PNPIPE_HANDLE          3
 
 #define PNADDR_ANY             0
 #define PNADDR_BROADCAST       0xFC
index 2cfa4bc8dea6669f46999c86814a693e5185db7c..b1032a3fafdc2e9e49b2ce3be52262ba9ca16c3e 100644 (file)
@@ -247,6 +247,35 @@ struct tc_gred_sopt {
        __u16           pad1;
 };
 
+/* CHOKe section */
+
+enum {
+       TCA_CHOKE_UNSPEC,
+       TCA_CHOKE_PARMS,
+       TCA_CHOKE_STAB,
+       __TCA_CHOKE_MAX,
+};
+
+#define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)
+
+struct tc_choke_qopt {
+       __u32           limit;          /* Hard queue length (packets)  */
+       __u32           qth_min;        /* Min average threshold (packets) */
+       __u32           qth_max;        /* Max average threshold (packets) */
+       unsigned char   Wlog;           /* log(W)               */
+       unsigned char   Plog;           /* log(P_max/(qth_max-qth_min)) */
+       unsigned char   Scell_log;      /* cell size for idle damping */
+       unsigned char   flags;          /* see RED flags */
+};
+
+struct tc_choke_xstats {
+       __u32           early;          /* Early drops */
+       __u32           pdrop;          /* Drops due to queue limits */
+       __u32           other;          /* Drops due to drop() calls */
+       __u32           marked;         /* Marked packets */
+       __u32           matched;        /* Drops due to flow match */
+};
+
 /* HTB section */
 #define TC_HTB_NUMPRIO         8
 #define TC_HTB_MAXDEPTH                8
@@ -435,6 +464,7 @@ enum {
        TCA_NETEM_DELAY_DIST,
        TCA_NETEM_REORDER,
        TCA_NETEM_CORRUPT,
+       TCA_NETEM_LOSS,
        __TCA_NETEM_MAX,
 };
 
@@ -465,7 +495,33 @@ struct tc_netem_corrupt {
        __u32   correlation;
 };
 
+enum {
+       NETEM_LOSS_UNSPEC,
+       NETEM_LOSS_GI,          /* General Intuitive - 4 state model */
+       NETEM_LOSS_GE,          /* Gilbert Elliot models */
+       __NETEM_LOSS_MAX
+};
+#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
+
+/* State transition probabilities for 4 state model */
+struct tc_netem_gimodel {
+       __u32   p13;
+       __u32   p31;
+       __u32   p32;
+       __u32   p14;
+       __u32   p23;
+};
+
+/* Gilbert-Elliot models */
+struct tc_netem_gemodel {
+       __u32 p;
+       __u32 r;
+       __u32 h;
+       __u32 k1;
+};
+
 #define NETEM_DIST_SCALE       8192
+#define NETEM_DIST_MAX         16384
 
 /* DRR */
 
@@ -481,4 +537,55 @@ struct tc_drr_stats {
        __u32   deficit;
 };
 
+/* MQPRIO */
+#define TC_QOPT_BITMASK 15
+#define TC_QOPT_MAX_QUEUE 16
+
+struct tc_mqprio_qopt {
+       __u8    num_tc;
+       __u8    prio_tc_map[TC_QOPT_BITMASK + 1];
+       __u8    hw;
+       __u16   count[TC_QOPT_MAX_QUEUE];
+       __u16   offset[TC_QOPT_MAX_QUEUE];
+};
+
+/* SFB */
+
+enum {
+       TCA_SFB_UNSPEC,
+       TCA_SFB_PARMS,
+       __TCA_SFB_MAX,
+};
+
+#define TCA_SFB_MAX (__TCA_SFB_MAX - 1)
+
+/*
+ * Note: increment, decrement are Q0.16 fixed-point values.
+ */
+struct tc_sfb_qopt {
+       __u32 rehash_interval;  /* delay between hash move, in ms */
+       __u32 warmup_time;      /* double buffering warmup time in ms (warmup_time < rehash_interval) */
+       __u32 max;              /* max len of qlen_min */
+       __u32 bin_size;         /* maximum queue length per bin */
+       __u32 increment;        /* probability increment, (d1 in Blue) */
+       __u32 decrement;        /* probability decrement, (d2 in Blue) */
+       __u32 limit;            /* max SFB queue length */
+       __u32 penalty_rate;     /* inelastic flows are rate limited to 'rate' pps */
+       __u32 penalty_burst;
+};
+
+struct tc_sfb_xstats {
+       __u32 earlydrop;
+       __u32 penaltydrop;
+       __u32 bucketdrop;
+       __u32 queuedrop;
+       __u32 childdrop; /* drops in child qdisc */
+       __u32 marked;
+       __u32 maxqlen;
+       __u32 maxprob;
+       __u32 avgprob;
+};
+
+#define SFB_MAX_PROB 0xFFFF
+
 #endif
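
A note above says the SFB increment/decrement fields are Q0.16 fixed point. A minimal sketch of the conversion follows; prob_to_q0_16() is a hypothetical helper, not part of this header:

    #include <stdint.h>

    #define SFB_MAX_PROB 0xFFFF     /* mirrors the definition above */

    /* Q0.16 encoding: value = probability * 2^16, clamped to SFB_MAX_PROB. */
    static uint32_t prob_to_q0_16(double p)
    {
            uint32_t v = (uint32_t)(p * 65536.0 + 0.5);

            return v > SFB_MAX_PROB ? SFB_MAX_PROB : v;
    }

    /* e.g. prob_to_q0_16(0.0005) == 33, prob_to_q0_16(1.0) == SFB_MAX_PROB */
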
index b2b7f9749f5eb2da633264afd7453fe0a6263561..9b5f184a7f6578b876e0a274ada09b5c1d956fbd 100644 (file)
@@ -1623,7 +1623,7 @@ struct security_operations {
        int (*xfrm_policy_lookup) (struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
        int (*xfrm_state_pol_flow_match) (struct xfrm_state *x,
                                          struct xfrm_policy *xp,
-                                         struct flowi *fl);
+                                         const struct flowi *fl);
        int (*xfrm_decode_session) (struct sk_buff *skb, u32 *secid, int ckall);
 #endif /* CONFIG_SECURITY_NETWORK_XFRM */
 
@@ -2761,7 +2761,8 @@ int security_xfrm_state_delete(struct xfrm_state *x);
 void security_xfrm_state_free(struct xfrm_state *x);
 int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
 int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
-                                      struct xfrm_policy *xp, struct flowi *fl);
+                                      struct xfrm_policy *xp,
+                                      const struct flowi *fl);
 int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid);
 void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl);
 
@@ -2813,7 +2814,7 @@ static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_s
 }
 
 static inline int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
-                       struct xfrm_policy *xp, struct flowi *fl)
+                       struct xfrm_policy *xp, const struct flowi *fl)
 {
        return 1;
 }
index bf221d65d9ad5d0c2878795021b2733012c4ef2f..31f02d0b46a7e5d508ab7fbd7670e7026e10224d 100644 (file)
@@ -1801,6 +1801,15 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
                     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));   \
                     skb = skb->prev)
 
+#define skb_queue_reverse_walk_safe(queue, skb, tmp)                           \
+               for (skb = (queue)->prev, tmp = skb->prev;                      \
+                    skb != (struct sk_buff *)(queue);                          \
+                    skb = tmp, tmp = skb->prev)
+
+#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)                      \
+               for (tmp = skb->prev;                                           \
+                    skb != (struct sk_buff *)(queue);                          \
+                    skb = tmp, tmp = skb->prev)
 
 static inline bool skb_has_frag_list(const struct sk_buff *skb)
 {
@@ -1868,7 +1877,7 @@ extern void              skb_split(struct sk_buff *skb,
 extern int            skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
                                 int shiftlen);
 
-extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features);
 
 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
                                       int len, void *buffer)
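
A brief usage sketch for the reverse-safe queue walkers added above; the purge loop is illustrative only and assumes the caller already holds the queue lock:

    #include <linux/skbuff.h>

    /* Drop every queued skb, newest first; tmp keeps the walk valid while
     * the current entry is unlinked and freed. */
    static void purge_reverse(struct sk_buff_head *queue)
    {
            struct sk_buff *skb, *tmp;

            skb_queue_reverse_walk_safe(queue, skb, tmp) {
                    __skb_unlink(skb, queue);
                    kfree_skb(skb);
            }
    }
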
index 241f179347d94c7fbbbfb558096f09752bf9a62c..7997a506ad4105fb145a9ddc120286a8431a1502 100644 (file)
@@ -22,7 +22,7 @@
 
 /* Linux-specific socket ioctls */
 #define SIOCINQ                FIONREAD
-#define SIOCOUTQ       TIOCOUTQ
+#define SIOCOUTQ       TIOCOUTQ        /* output queue size (not sent + not acked) */
 
 /* Routing table calls. */
 #define SIOCADDRT      0x890B          /* add routing table entry      */
@@ -83,6 +83,8 @@
 
 #define SIOCWANDEV     0x894A          /* get/set netdev parameters    */
 
+#define SIOCOUTQNSD    0x894B          /* output queue size (not sent only) */
+
 /* ARP cache control calls. */
                    /*  0x8950 - 0x8952  * obsolete calls, don't re-use */
 #define SIOCDARP       0x8953          /* delete ARP table entry       */
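
SIOCOUTQNSD reports only the bytes not yet sent, whereas SIOCOUTQ also counts sent-but-unacknowledged data. A hedged userspace sketch, assuming fd is a connected TCP socket:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/sockios.h>

    static void print_outq(int fd)
    {
            int unsent = 0, outstanding = 0;

            if (ioctl(fd, SIOCOUTQNSD, &unsent) == 0 &&     /* not sent only */
                ioctl(fd, SIOCOUTQ, &outstanding) == 0)     /* not sent + not acked */
                    printf("unsent=%d outstanding=%d\n", unsent, outstanding);
    }
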
index 489f7b6d61c5eea7f92d53780609898f334eb22e..402955ae48ceac9c118833bed28ca093407e4638 100644 (file)
@@ -85,6 +85,8 @@
 #define  SSB_IMSTATE_AP_RSV    0x00000030 /* Reserved */
 #define  SSB_IMSTATE_IBE       0x00020000 /* In Band Error */
 #define  SSB_IMSTATE_TO                0x00040000 /* Timeout */
+#define  SSB_IMSTATE_BUSY      0x01800000 /* Busy (Backplane rev >= 2.3 only) */
+#define  SSB_IMSTATE_REJECT    0x02000000 /* Reject (Backplane rev >= 2.3 only) */
 #define SSB_INTVEC             0x0F94     /* SB Interrupt Mask */
 #define  SSB_INTVEC_PCI                0x00000001 /* Enable interrupts for PCI */
 #define  SSB_INTVEC_ENET0      0x00000002 /* Enable interrupts for enet 0 */
@@ -97,7 +99,6 @@
 #define  SSB_TMSLOW_RESET      0x00000001 /* Reset */
 #define  SSB_TMSLOW_REJECT_22  0x00000002 /* Reject (Backplane rev 2.2) */
 #define  SSB_TMSLOW_REJECT_23  0x00000004 /* Reject (Backplane rev 2.3) */
-#define  SSB_TMSLOW_PHYCLK     0x00000010 /* MAC PHY Clock Control Enable */
 #define  SSB_TMSLOW_CLOCK      0x00010000 /* Clock Enable */
 #define  SSB_TMSLOW_FGC                0x00020000 /* Force Gated Clocks On */
 #define  SSB_TMSLOW_PE         0x40000000 /* Power Management Enable */
 /* SPROM Revision 4 */
 #define SSB_SPROM4_BFLLO               0x0044  /* Boardflags (low 16 bits) */
 #define SSB_SPROM4_BFLHI               0x0046  /* Board Flags Hi */
+#define SSB_SPROM4_BFL2LO              0x0048  /* Board flags 2 (low 16 bits) */
+#define SSB_SPROM4_BFL2HI              0x004A  /* Board flags 2 Hi */
 #define SSB_SPROM4_IL0MAC              0x004C  /* 6 byte MAC address for a/b/g/n */
 #define SSB_SPROM4_CCODE               0x0052  /* Country Code (2 bytes) */
 #define SSB_SPROM4_GPIOA               0x0056  /* Gen. Purpose IO # 0 and 1 */
 #define SSB_SPROM5_CCODE               0x0044  /* Country Code (2 bytes) */
 #define SSB_SPROM5_BFLLO               0x004A  /* Boardflags (low 16 bits) */
 #define SSB_SPROM5_BFLHI               0x004C  /* Board Flags Hi */
+#define SSB_SPROM5_BFL2LO              0x004E  /* Board flags 2 (low 16 bits) */
+#define SSB_SPROM5_BFL2HI              0x0050  /* Board flags 2 Hi */
 #define SSB_SPROM5_IL0MAC              0x0052  /* 6 byte MAC address for a/b/g/n */
 #define SSB_SPROM5_GPIOA               0x0076  /* Gen. Purpose IO # 0 and 1 */
 #define  SSB_SPROM5_GPIOA_P0           0x00FF  /* Pin 0 */
index 1eefa3f6d1f446a06b4362492e2cea081950cf40..a5b994a204d26f9aff34f3e8058b068eeec3a78f 100644 (file)
@@ -2,7 +2,7 @@
  * include/linux/tipc.h: Include file for TIPC socket interface
  *
  * Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -130,12 +130,6 @@ static inline unsigned int tipc_node(__u32 addr)
 #define TIPC_SUB_PORTS         0x01    /* filter for port availability */
 #define TIPC_SUB_SERVICE       0x02    /* filter for service availability */
 #define TIPC_SUB_CANCEL                0x04    /* cancel a subscription */
-#if 0
-/* The following filter options are not currently implemented */
-#define TIPC_SUB_NO_BIND_EVTS  0x04    /* filter out "publish" events */
-#define TIPC_SUB_NO_UNBIND_EVTS        0x08    /* filter out "withdraw" events */
-#define TIPC_SUB_SINGLE_EVT    0x10    /* expire after first event */
-#endif
 
 #define TIPC_WAIT_FOREVER      (~0)    /* timeout for permanent subscription */
 
index 7d42460a5e3c58ff89bedcbdcfdaed1be8e963fd..011556fcef04af326d4f81b3a85859e4b97a5736 100644 (file)
@@ -2,7 +2,7 @@
  * include/linux/tipc_config.h: Include file for TIPC configuration interface
  *
  * Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005-2007, Wind River Systems
+ * Copyright (c) 2005-2007, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #define  TIPC_CMD_SHOW_LINK_STATS   0x000B    /* tx link_name, rx ultra_string */
 #define  TIPC_CMD_SHOW_STATS        0x000F    /* tx unsigned, rx ultra_string */
 
-#if 0
-#define  TIPC_CMD_SHOW_PORT_STATS   0x0008    /* tx port_ref, rx ultra_string */
-#define  TIPC_CMD_RESET_PORT_STATS  0x0009    /* tx port_ref, rx none */
-#define  TIPC_CMD_GET_ROUTES        0x000A    /* tx ?, rx ? */
-#define  TIPC_CMD_GET_LINK_PEER     0x000D    /* tx link_name, rx ? */
-#endif
-
 /*
  * Protected commands:
  * May only be issued by "network administration capable" process.
 #define  TIPC_CMD_DUMP_LOG          0x410B    /* tx none, rx ultra_string */
 #define  TIPC_CMD_RESET_LINK_STATS  0x410C    /* tx link_name, rx none */
 
-#if 0
-#define  TIPC_CMD_CREATE_LINK       0x4103    /* tx link_create, rx none */
-#define  TIPC_CMD_REMOVE_LINK       0x4104    /* tx link_name, rx none */
-#define  TIPC_CMD_BLOCK_LINK        0x4105    /* tx link_name, rx none */
-#define  TIPC_CMD_UNBLOCK_LINK      0x4106    /* tx link_name, rx none */
-#endif
-
 /*
  * Private commands:
  * May only be issued by "network administration capable" process.
  */
 
 #define  TIPC_CMD_SET_NODE_ADDR     0x8001    /* tx net_addr, rx none */
-#if 0
-#define  TIPC_CMD_SET_ZONE_MASTER   0x8002    /* tx none, rx none */
-#endif
 #define  TIPC_CMD_SET_REMOTE_MNG    0x8003    /* tx unsigned, rx none */
 #define  TIPC_CMD_SET_MAX_PORTS     0x8004    /* tx unsigned, rx none */
 #define  TIPC_CMD_SET_MAX_PUBL      0x8005    /* tx unsigned, rx none */
 #define TIPC_DEF_LINK_TOL 1500
 #define TIPC_MAX_LINK_TOL 30000
 
+#if (TIPC_MIN_LINK_TOL < 16)
+#error "TIPC_MIN_LINK_TOL is too small (abort limit may be NaN)"
+#endif
+
 /*
  * Link window limits (min, default, max), in packets
  */
@@ -247,15 +234,6 @@ struct tipc_name_table_query {
 #define TIPC_CFG_NOT_SUPPORTED  "\x84" /* request is not supported by TIPC */
 #define TIPC_CFG_INVALID_VALUE  "\x85"  /* request has invalid argument value */
 
-#if 0
-/* prototypes TLV structures for proposed commands */
-struct tipc_link_create {
-       __u32   domain;
-       struct tipc_media_addr peer_addr;
-       char bearer_name[TIPC_MAX_BEARER_NAME];
-};
-#endif
-
 /*
  * A TLV consists of a descriptor, followed by the TLV value.
  * TLV descriptor fields are stored in network byte order;
index 930fdd2de79c1182d19c95093d311f835b95285d..b93d6f5980851e5c7fe4ea1cfe11235ebada4fd3 100644 (file)
@@ -350,6 +350,7 @@ struct xfrm_usersa_info {
 #define XFRM_STATE_WILDRECV    8
 #define XFRM_STATE_ICMP                16
 #define XFRM_STATE_AF_UNSPEC   32
+#define XFRM_STATE_ALIGN4      64
 };
 
 struct xfrm_usersa_id {
index 0c5e72503b775282bda667771c54ca8c3a448e6c..43750439c52181fffdaecb795c1cdbe1b9bd05be 100644 (file)
@@ -64,6 +64,11 @@ struct bt_security {
 
 #define BT_DEFER_SETUP 7
 
+#define BT_FLUSHABLE   8
+
+#define BT_FLUSHABLE_OFF       0
+#define BT_FLUSHABLE_ON                1
+
 #define BT_INFO(fmt, arg...) printk(KERN_INFO "Bluetooth: " fmt "\n" , ## arg)
 #define BT_ERR(fmt, arg...)  printk(KERN_ERR "%s: " fmt "\n" , __func__ , ## arg)
 #define BT_DBG(fmt, arg...)  pr_debug("%s: " fmt "\n" , __func__ , ## arg)
@@ -200,4 +205,32 @@ extern void bt_sysfs_cleanup(void);
 
 extern struct dentry *bt_debugfs;
 
+#ifdef CONFIG_BT_L2CAP
+int l2cap_init(void);
+void l2cap_exit(void);
+#else
+static inline int l2cap_init(void)
+{
+       return 0;
+}
+
+static inline void l2cap_exit(void)
+{
+}
+#endif
+
+#ifdef CONFIG_BT_SCO
+int sco_init(void);
+void sco_exit(void);
+#else
+static inline int sco_init(void)
+{
+       return 0;
+}
+
+static inline void sco_exit(void)
+{
+}
+#endif
+
 #endif /* __BLUETOOTH_H */
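
BT_FLUSHABLE is a new SOL_BLUETOOTH socket option taking the two values defined above. A minimal userspace sketch; the constants are restated locally as assumptions rather than taken from a system header:

    #include <sys/socket.h>

    #ifndef SOL_BLUETOOTH
    #define SOL_BLUETOOTH   274     /* assumed to match <bluetooth/bluetooth.h> */
    #endif
    #define BT_FLUSHABLE    8       /* values from the change above */
    #define BT_FLUSHABLE_ON 1

    /* Mark outgoing ACL data on an L2CAP socket as flushable. */
    static int set_flushable(int sk)
    {
            int opt = BT_FLUSHABLE_ON;

            return setsockopt(sk, SOL_BLUETOOTH, BT_FLUSHABLE, &opt, sizeof(opt));
    }
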
index 29a7a8ca0438996f51d153033bde8f60ea2ca3d6..a5f8c4684a3279b70d75449a92b5110393e5a2b3 100644 (file)
@@ -76,6 +76,14 @@ enum {
        HCI_INQUIRY,
 
        HCI_RAW,
+
+       HCI_SETUP,
+       HCI_AUTO_OFF,
+       HCI_MGMT,
+       HCI_PAIRABLE,
+       HCI_SERVICE_CACHE,
+       HCI_LINK_KEYS,
+       HCI_DEBUG_KEYS,
 };
 
 /* HCI ioctl defines */
@@ -111,6 +119,7 @@ enum {
 #define HCI_PAIRING_TIMEOUT    (60000) /* 60 seconds */
 #define HCI_IDLE_TIMEOUT       (6000)  /* 6 seconds */
 #define HCI_INIT_TIMEOUT       (10000) /* 10 seconds */
+#define HCI_CMD_TIMEOUT                (1000)  /* 1 second */
 
 /* HCI data types */
 #define HCI_COMMAND_PKT                0x01
@@ -150,6 +159,7 @@ enum {
 #define EDR_ESCO_MASK  (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5)
 
 /* ACL flags */
+#define ACL_START_NO_FLUSH     0x00
 #define ACL_CONT               0x01
 #define ACL_START              0x02
 #define ACL_ACTIVE_BCAST       0x04
@@ -159,6 +169,8 @@ enum {
 #define SCO_LINK       0x00
 #define ACL_LINK       0x01
 #define ESCO_LINK      0x02
+/* Low Energy links do not have a defined link type. Use an invented one */
+#define LE_LINK                0x80
 
 /* LMP features */
 #define LMP_3SLOT      0x01
@@ -183,17 +195,25 @@ enum {
 #define LMP_PSCHEME    0x02
 #define LMP_PCONTROL   0x04
 
+#define LMP_RSSI_INQ   0x40
 #define LMP_ESCO       0x80
 
 #define LMP_EV4                0x01
 #define LMP_EV5                0x02
+#define LMP_LE         0x40
 
 #define LMP_SNIFF_SUBR 0x02
+#define LMP_PAUSE_ENC  0x04
 #define LMP_EDR_ESCO_2M        0x20
 #define LMP_EDR_ESCO_3M        0x40
 #define LMP_EDR_3S_ESCO        0x80
 
+#define LMP_EXT_INQ    0x01
 #define LMP_SIMPLE_PAIR        0x08
+#define LMP_NO_FLUSH   0x40
+
+#define LMP_LSTO       0x01
+#define LMP_INQ_TX_PWR 0x02
 
 /* Connection modes */
 #define HCI_CM_ACTIVE  0x0000
@@ -225,6 +245,8 @@ enum {
 #define HCI_AT_GENERAL_BONDING_MITM    0x05
 
 /* -----  HCI Commands ---- */
+#define HCI_OP_NOP                     0x0000
+
 #define HCI_OP_INQUIRY                 0x0401
 struct hci_cp_inquiry {
        __u8     lap[3];
@@ -292,11 +314,19 @@ struct hci_cp_pin_code_reply {
        __u8     pin_len;
        __u8     pin_code[16];
 } __packed;
+struct hci_rp_pin_code_reply {
+       __u8     status;
+       bdaddr_t bdaddr;
+} __packed;
 
 #define HCI_OP_PIN_CODE_NEG_REPLY      0x040e
 struct hci_cp_pin_code_neg_reply {
        bdaddr_t bdaddr;
 } __packed;
+struct hci_rp_pin_code_neg_reply {
+       __u8     status;
+       bdaddr_t bdaddr;
+} __packed;
 
 #define HCI_OP_CHANGE_CONN_PTYPE       0x040f
 struct hci_cp_change_conn_ptype {
@@ -377,6 +407,20 @@ struct hci_cp_reject_sync_conn_req {
        __u8     reason;
 } __packed;
 
+#define HCI_OP_IO_CAPABILITY_REPLY     0x042b
+struct hci_cp_io_capability_reply {
+       bdaddr_t bdaddr;
+       __u8     capability;
+       __u8     oob_data;
+       __u8     authentication;
+} __packed;
+
+#define HCI_OP_IO_CAPABILITY_NEG_REPLY 0x0434
+struct hci_cp_io_capability_neg_reply {
+       bdaddr_t bdaddr;
+       __u8     reason;
+} __packed;
+
 #define HCI_OP_SNIFF_MODE              0x0803
 struct hci_cp_sniff_mode {
        __le16   handle;
@@ -474,6 +518,12 @@ struct hci_cp_set_event_flt {
 #define HCI_CONN_SETUP_AUTO_OFF        0x01
 #define HCI_CONN_SETUP_AUTO_ON 0x02
 
+#define HCI_OP_DELETE_STORED_LINK_KEY  0x0c12
+struct hci_cp_delete_stored_link_key {
+       bdaddr_t bdaddr;
+       __u8     delete_all;
+} __packed;
+
 #define HCI_OP_WRITE_LOCAL_NAME                0x0c13
 struct hci_cp_write_local_name {
        __u8     name[248];
@@ -537,6 +587,8 @@ struct hci_cp_host_buffer_size {
        __le16   sco_max_pkt;
 } __packed;
 
+#define HCI_OP_WRITE_INQUIRY_MODE      0x0c45
+
 #define HCI_OP_READ_SSP_MODE           0x0c55
 struct hci_rp_read_ssp_mode {
        __u8     status;
@@ -548,6 +600,8 @@ struct hci_cp_write_ssp_mode {
        __u8     mode;
 } __packed;
 
+#define HCI_OP_READ_INQ_RSP_TX_POWER   0x0c58
+
 #define HCI_OP_READ_LOCAL_VERSION      0x1001
 struct hci_rp_read_local_version {
        __u8     status;
@@ -593,6 +647,47 @@ struct hci_rp_read_bd_addr {
        bdaddr_t bdaddr;
 } __packed;
 
+#define HCI_OP_LE_SET_EVENT_MASK       0x2001
+struct hci_cp_le_set_event_mask {
+       __u8     mask[8];
+} __packed;
+
+#define HCI_OP_LE_READ_BUFFER_SIZE     0x2002
+struct hci_rp_le_read_buffer_size {
+       __u8     status;
+       __le16   le_mtu;
+       __u8     le_max_pkt;
+} __packed;
+
+#define HCI_OP_LE_CREATE_CONN          0x200d
+struct hci_cp_le_create_conn {
+       __le16   scan_interval;
+       __le16   scan_window;
+       __u8     filter_policy;
+       __u8     peer_addr_type;
+       bdaddr_t peer_addr;
+       __u8     own_address_type;
+       __le16   conn_interval_min;
+       __le16   conn_interval_max;
+       __le16   conn_latency;
+       __le16   supervision_timeout;
+       __le16   min_ce_len;
+       __le16   max_ce_len;
+} __packed;
+
+#define HCI_OP_LE_CREATE_CONN_CANCEL   0x200e
+
+#define HCI_OP_LE_CONN_UPDATE          0x2013
+struct hci_cp_le_conn_update {
+       __le16   handle;
+       __le16   conn_interval_min;
+       __le16   conn_interval_max;
+       __le16   conn_latency;
+       __le16   supervision_timeout;
+       __le16   min_ce_len;
+       __le16   max_ce_len;
+} __packed;
+
 /* ---- HCI Events ---- */
 #define HCI_EV_INQUIRY_COMPLETE                0x01
 
@@ -833,6 +928,14 @@ struct hci_ev_io_capa_request {
        bdaddr_t bdaddr;
 } __packed;
 
+#define HCI_EV_IO_CAPA_REPLY           0x32
+struct hci_ev_io_capa_reply {
+       bdaddr_t bdaddr;
+       __u8     capability;
+       __u8     oob_data;
+       __u8     authentication;
+} __packed;
+
 #define HCI_EV_SIMPLE_PAIR_COMPLETE    0x36
 struct hci_ev_simple_pair_complete {
        __u8     status;
@@ -845,6 +948,25 @@ struct hci_ev_remote_host_features {
        __u8     features[8];
 } __packed;
 
+#define HCI_EV_LE_META                 0x3e
+struct hci_ev_le_meta {
+       __u8     subevent;
+} __packed;
+
+/* Low energy meta events */
+#define HCI_EV_LE_CONN_COMPLETE                0x01
+struct hci_ev_le_conn_complete {
+       __u8     status;
+       __le16   handle;
+       __u8     role;
+       __u8     bdaddr_type;
+       bdaddr_t bdaddr;
+       __le16   interval;
+       __le16   latency;
+       __le16   supervision_timeout;
+       __u8     clk_accurancy;
+} __packed;
+
 /* Internal events generated by Bluetooth stack */
 #define HCI_EV_STACK_INTERNAL  0xfd
 struct hci_ev_stack_internal {
index d2cf88407690e432f80938985d7cbcf4fb16d0ed..d5d8454236bfbb21c88a55beffea59bfe292bd58 100644 (file)
@@ -60,12 +60,28 @@ struct hci_conn_hash {
        spinlock_t       lock;
        unsigned int     acl_num;
        unsigned int     sco_num;
+       unsigned int     le_num;
 };
 
 struct bdaddr_list {
        struct list_head list;
        bdaddr_t bdaddr;
 };
+
+struct bt_uuid {
+       struct list_head list;
+       u8 uuid[16];
+       u8 svc_hint;
+};
+
+struct link_key {
+       struct list_head list;
+       bdaddr_t bdaddr;
+       u8 type;
+       u8 val[16];
+       u8 pin_len;
+};
+
 #define NUM_REASSEMBLY 4
 struct hci_dev {
        struct list_head list;
@@ -80,13 +96,18 @@ struct hci_dev {
        bdaddr_t        bdaddr;
        __u8            dev_name[248];
        __u8            dev_class[3];
+       __u8            major_class;
+       __u8            minor_class;
        __u8            features[8];
        __u8            commands[64];
        __u8            ssp_mode;
        __u8            hci_ver;
        __u16           hci_rev;
+       __u8            lmp_ver;
        __u16           manufacturer;
+       __le16          lmp_subver;
        __u16           voice_setting;
+       __u8            io_capability;
 
        __u16           pkt_type;
        __u16           esco_type;
@@ -102,18 +123,26 @@ struct hci_dev {
        atomic_t        cmd_cnt;
        unsigned int    acl_cnt;
        unsigned int    sco_cnt;
+       unsigned int    le_cnt;
 
        unsigned int    acl_mtu;
        unsigned int    sco_mtu;
+       unsigned int    le_mtu;
        unsigned int    acl_pkts;
        unsigned int    sco_pkts;
+       unsigned int    le_pkts;
 
-       unsigned long   cmd_last_tx;
        unsigned long   acl_last_tx;
        unsigned long   sco_last_tx;
+       unsigned long   le_last_tx;
 
        struct workqueue_struct *workqueue;
 
+       struct work_struct      power_on;
+       struct work_struct      power_off;
+       struct timer_list       off_timer;
+
+       struct timer_list       cmd_timer;
        struct tasklet_struct   cmd_task;
        struct tasklet_struct   rx_task;
        struct tasklet_struct   tx_task;
@@ -129,12 +158,17 @@ struct hci_dev {
        wait_queue_head_t       req_wait_q;
        __u32                   req_status;
        __u32                   req_result;
-       __u16                   req_last_cmd;
+
+       __u16                   init_last_cmd;
 
        struct inquiry_cache    inq_cache;
        struct hci_conn_hash    conn_hash;
        struct list_head        blacklist;
 
+       struct list_head        uuids;
+
+       struct list_head        link_keys;
+
        struct hci_dev_stats    stat;
 
        struct sk_buff_head     driver_init;
@@ -165,31 +199,37 @@ struct hci_dev {
 struct hci_conn {
        struct list_head list;
 
-       atomic_t         refcnt;
-       spinlock_t       lock;
-
-       bdaddr_t         dst;
-       __u16            handle;
-       __u16            state;
-       __u8             mode;
-       __u8             type;
-       __u8             out;
-       __u8             attempt;
-       __u8             dev_class[3];
-       __u8             features[8];
-       __u8             ssp_mode;
-       __u16            interval;
-       __u16            pkt_type;
-       __u16            link_policy;
-       __u32            link_mode;
-       __u8             auth_type;
-       __u8             sec_level;
-       __u8             pending_sec_level;
-       __u8             power_save;
-       __u16            disc_timeout;
-       unsigned long    pend;
-
-       unsigned int     sent;
+       atomic_t        refcnt;
+       spinlock_t      lock;
+
+       bdaddr_t        dst;
+       __u16           handle;
+       __u16           state;
+       __u8            mode;
+       __u8            type;
+       __u8            out;
+       __u8            attempt;
+       __u8            dev_class[3];
+       __u8            features[8];
+       __u8            ssp_mode;
+       __u16           interval;
+       __u16           pkt_type;
+       __u16           link_policy;
+       __u32           link_mode;
+       __u8            auth_type;
+       __u8            sec_level;
+       __u8            pending_sec_level;
+       __u8            pin_length;
+       __u8            io_capability;
+       __u8            power_save;
+       __u16           disc_timeout;
+       unsigned long   pend;
+
+       __u8            remote_cap;
+       __u8            remote_oob;
+       __u8            remote_auth;
+
+       unsigned int    sent;
 
        struct sk_buff_head data_q;
 
@@ -274,24 +314,40 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
 {
        struct hci_conn_hash *h = &hdev->conn_hash;
        list_add(&c->list, &h->list);
-       if (c->type == ACL_LINK)
+       switch (c->type) {
+       case ACL_LINK:
                h->acl_num++;
-       else
+               break;
+       case LE_LINK:
+               h->le_num++;
+               break;
+       case SCO_LINK:
+       case ESCO_LINK:
                h->sco_num++;
+               break;
+       }
 }
 
 static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
 {
        struct hci_conn_hash *h = &hdev->conn_hash;
        list_del(&c->list);
-       if (c->type == ACL_LINK)
+       switch (c->type) {
+       case ACL_LINK:
                h->acl_num--;
-       else
+               break;
+       case LE_LINK:
+               h->le_num--;
+               break;
+       case SCO_LINK:
+       case ESCO_LINK:
                h->sco_num--;
+               break;
+       }
 }
 
 static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
-                                       __u16 handle)
+                                                               __u16 handle)
 {
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
@@ -306,7 +362,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
 }
 
 static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
-                                       __u8 type, bdaddr_t *ba)
+                                                       __u8 type, bdaddr_t *ba)
 {
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
@@ -321,7 +377,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
 }
 
 static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
-                                       __u8 type, __u16 state)
+                                                       __u8 type, __u16 state)
 {
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
@@ -437,6 +493,16 @@ int hci_inquiry(void __user *arg);
 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
 int hci_blacklist_clear(struct hci_dev *hdev);
 
+int hci_uuids_clear(struct hci_dev *hdev);
+
+int hci_link_keys_clear(struct hci_dev *hdev);
+struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
+int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
+                                               u8 *key, u8 type, u8 pin_len);
+int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
+
+void hci_del_off_timer(struct hci_dev *hdev);
+
 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
 
 int hci_recv_frame(struct sk_buff *skb);
@@ -458,6 +524,8 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
 #define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
 #define lmp_esco_capable(dev)      ((dev)->features[3] & LMP_ESCO)
 #define lmp_ssp_capable(dev)       ((dev)->features[6] & LMP_SIMPLE_PAIR)
+#define lmp_no_flush_capable(dev)  ((dev)->features[6] & LMP_NO_FLUSH)
+#define lmp_le_capable(dev)        ((dev)->features[4] & LMP_LE)
 
 /* ----- HCI protocols ----- */
 struct hci_proto {
@@ -660,12 +728,24 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
 
 /* ----- HCI Sockets ----- */
-void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
+void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
+                                                       struct sock *skip_sk);
 
 /* Management interface */
 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
 int mgmt_index_added(u16 index);
 int mgmt_index_removed(u16 index);
+int mgmt_powered(u16 index, u8 powered);
+int mgmt_discoverable(u16 index, u8 discoverable);
+int mgmt_connectable(u16 index, u8 connectable);
+int mgmt_new_key(u16 index, struct link_key *key, u8 old_key_type);
+int mgmt_connected(u16 index, bdaddr_t *bdaddr);
+int mgmt_disconnected(u16 index, bdaddr_t *bdaddr);
+int mgmt_disconnect_failed(u16 index);
+int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status);
+int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr);
+int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
+int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
 
 /* HCI info for socket */
 #define hci_pi(sk) ((struct hci_pinfo *) sk)
@@ -697,4 +777,6 @@ struct hci_sec_filter {
 
 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result);
 
+void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
+                                       u16 latency, u16 to_multiplier);
 #endif /* __HCI_CORE_H */
index 7ad25ca60ec042aeac8b2336c003fbc9e6fe07e5..4f4bff1eaed68ccc0b9e804d7c95d150ec0c5344 100644 (file)
@@ -38,6 +38,7 @@
 #define L2CAP_DEFAULT_MAX_PDU_SIZE     1009    /* Sized for 3-DH5 packet */
 #define L2CAP_DEFAULT_ACK_TO           200
 #define L2CAP_LOCAL_BUSY_TRIES         12
+#define L2CAP_LE_DEFAULT_MTU           23
 
 #define L2CAP_CONN_TIMEOUT     (40000) /* 40 seconds */
 #define L2CAP_INFO_TIMEOUT     (4000)  /*  4 seconds */
@@ -88,6 +89,8 @@ struct l2cap_conninfo {
 #define L2CAP_ECHO_RSP         0x09
 #define L2CAP_INFO_REQ         0x0a
 #define L2CAP_INFO_RSP         0x0b
+#define L2CAP_CONN_PARAM_UPDATE_REQ    0x12
+#define L2CAP_CONN_PARAM_UPDATE_RSP    0x13
 
 /* L2CAP feature mask */
 #define L2CAP_FEAT_FLOWCTL     0x00000001
@@ -160,6 +163,9 @@ struct l2cap_conn_rsp {
 /* channel indentifier */
 #define L2CAP_CID_SIGNALING    0x0001
 #define L2CAP_CID_CONN_LESS    0x0002
+#define L2CAP_CID_LE_DATA      0x0004
+#define L2CAP_CID_LE_SIGNALING 0x0005
+#define L2CAP_CID_SMP          0x0006
 #define L2CAP_CID_DYN_START    0x0040
 #define L2CAP_CID_DYN_END      0xffff
 
@@ -255,6 +261,21 @@ struct l2cap_info_rsp {
 #define L2CAP_IR_SUCCESS    0x0000
 #define L2CAP_IR_NOTSUPP    0x0001
 
+struct l2cap_conn_param_update_req {
+       __le16      min;
+       __le16      max;
+       __le16      latency;
+       __le16      to_multiplier;
+} __packed;
+
+struct l2cap_conn_param_update_rsp {
+       __le16      result;
+} __packed;
+
+/* Connection Parameters result */
+#define L2CAP_CONN_PARAM_ACCEPTED      0x0000
+#define L2CAP_CONN_PARAM_REJECTED      0x0001
+
 /* ----- L2CAP connections ----- */
 struct l2cap_chan_list {
        struct sock     *head;
@@ -327,6 +348,7 @@ struct l2cap_pinfo {
        __u8            sec_level;
        __u8            role_switch;
        __u8            force_reliable;
+       __u8            flushable;
 
        __u8            conf_req[64];
        __u8            conf_len;
@@ -423,6 +445,35 @@ static inline int l2cap_tx_window_full(struct sock *sk)
 #define __is_sframe(ctrl)      ((ctrl) & L2CAP_CTRL_FRAME_TYPE)
 #define __is_sar_start(ctrl)   (((ctrl) & L2CAP_CTRL_SAR) == L2CAP_SDU_START)
 
-void l2cap_load(void);
+extern int disable_ertm;
+extern const struct proto_ops l2cap_sock_ops;
+extern struct bt_sock_list l2cap_sk_list;
+
+int l2cap_init_sockets(void);
+void l2cap_cleanup_sockets(void);
+
+u8 l2cap_get_ident(struct l2cap_conn *conn);
+void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data);
+int l2cap_build_conf_req(struct sock *sk, void *data);
+int __l2cap_wait_ack(struct sock *sk);
+
+struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len);
+struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len);
+struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen);
+int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len);
+void l2cap_do_send(struct sock *sk, struct sk_buff *skb);
+void l2cap_streaming_send(struct sock *sk);
+int l2cap_ertm_send(struct sock *sk);
+
+void l2cap_sock_set_timer(struct sock *sk, long timeout);
+void l2cap_sock_clear_timer(struct sock *sk);
+void __l2cap_sock_close(struct sock *sk, int reason);
+void l2cap_sock_kill(struct sock *sk);
+void l2cap_sock_init(struct sock *sk, struct sock *parent);
+struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
+                                                       int proto, gfp_t prio);
+void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err);
+void l2cap_chan_del(struct sock *sk, int err);
+int l2cap_do_connect(struct sock *sk);
 
 #endif /* __L2CAP_H */
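
The connection parameter update structures above carry little-endian values. A hedged sketch of filling the request, assuming the Bluetooth spec units (interval in 1.25 ms steps, supervision timeout in 10 ms steps) and arbitrary example values:

    #include <net/bluetooth/l2cap.h>

    static void fill_conn_param_update(struct l2cap_conn_param_update_req *req)
    {
            req->min           = cpu_to_le16(0x0018);   /* 24 * 1.25 ms = 30 ms  */
            req->max           = cpu_to_le16(0x0028);   /* 40 * 1.25 ms = 50 ms  */
            req->latency       = cpu_to_le16(0x0000);   /* slave latency, events */
            req->to_multiplier = cpu_to_le16(0x01f4);   /* 500 * 10 ms = 5 s     */
    }
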
index ca29c1367ffd38b203960b1a3ea39412a234957b..44ac55c850794ec971b7a820e1186f6052f9bc25 100644 (file)
@@ -47,6 +47,7 @@ struct mgmt_rp_read_info {
        __le16 index;
        __u8 type;
        __u8 powered;
+       __u8 connectable;
        __u8 discoverable;
        __u8 pairable;
        __u8 sec_mode;
@@ -58,6 +59,107 @@ struct mgmt_rp_read_info {
        __u16 hci_rev;
 } __packed;
 
+struct mgmt_mode {
+       __le16 index;
+       __u8 val;
+} __packed;
+
+#define MGMT_OP_SET_POWERED            0x0005
+
+#define MGMT_OP_SET_DISCOVERABLE       0x0006
+
+#define MGMT_OP_SET_CONNECTABLE                0x0007
+
+#define MGMT_OP_SET_PAIRABLE           0x0008
+
+#define MGMT_OP_ADD_UUID               0x0009
+struct mgmt_cp_add_uuid {
+       __le16 index;
+       __u8 uuid[16];
+       __u8 svc_hint;
+} __packed;
+
+#define MGMT_OP_REMOVE_UUID            0x000A
+struct mgmt_cp_remove_uuid {
+       __le16 index;
+       __u8 uuid[16];
+} __packed;
+
+#define MGMT_OP_SET_DEV_CLASS          0x000B
+struct mgmt_cp_set_dev_class {
+       __le16 index;
+       __u8 major;
+       __u8 minor;
+} __packed;
+
+#define MGMT_OP_SET_SERVICE_CACHE      0x000C
+struct mgmt_cp_set_service_cache {
+       __le16 index;
+       __u8 enable;
+} __packed;
+
+struct mgmt_key_info {
+       bdaddr_t bdaddr;
+       u8 type;
+       u8 val[16];
+       u8 pin_len;
+} __packed;
+
+#define MGMT_OP_LOAD_KEYS              0x000D
+struct mgmt_cp_load_keys {
+       __le16 index;
+       __u8 debug_keys;
+       __le16 key_count;
+       struct mgmt_key_info keys[0];
+} __packed;
+
+#define MGMT_OP_REMOVE_KEY             0x000E
+struct mgmt_cp_remove_key {
+       __le16 index;
+       bdaddr_t bdaddr;
+       __u8 disconnect;
+} __packed;
+
+#define MGMT_OP_DISCONNECT             0x000F
+struct mgmt_cp_disconnect {
+       __le16 index;
+       bdaddr_t bdaddr;
+} __packed;
+struct mgmt_rp_disconnect {
+       __le16 index;
+       bdaddr_t bdaddr;
+} __packed;
+
+#define MGMT_OP_GET_CONNECTIONS                0x0010
+struct mgmt_cp_get_connections {
+       __le16 index;
+} __packed;
+struct mgmt_rp_get_connections {
+       __le16 index;
+       __le16 conn_count;
+       bdaddr_t conn[0];
+} __packed;
+
+#define MGMT_OP_PIN_CODE_REPLY         0x0011
+struct mgmt_cp_pin_code_reply {
+       __le16 index;
+       bdaddr_t bdaddr;
+       __u8 pin_len;
+       __u8 pin_code[16];
+} __packed;
+
+#define MGMT_OP_PIN_CODE_NEG_REPLY     0x0012
+struct mgmt_cp_pin_code_neg_reply {
+       __le16 index;
+       bdaddr_t bdaddr;
+} __packed;
+
+#define MGMT_OP_SET_IO_CAPABILITY      0x0013
+struct mgmt_cp_set_io_capability {
+       __le16 index;
+       __u8 io_capability;
+} __packed;
+
 #define MGMT_EV_CMD_COMPLETE           0x0001
 struct mgmt_ev_cmd_complete {
        __le16 opcode;
@@ -85,3 +187,43 @@ struct mgmt_ev_index_added {
 struct mgmt_ev_index_removed {
        __le16 index;
 } __packed;
+
+#define MGMT_EV_POWERED                        0x0006
+
+#define MGMT_EV_DISCOVERABLE           0x0007
+
+#define MGMT_EV_CONNECTABLE            0x0008
+
+#define MGMT_EV_PAIRABLE               0x0009
+
+#define MGMT_EV_NEW_KEY                        0x000A
+struct mgmt_ev_new_key {
+       __le16 index;
+       struct mgmt_key_info key;
+       __u8 old_key_type;
+} __packed;
+
+#define MGMT_EV_CONNECTED              0x000B
+struct mgmt_ev_connected {
+       __le16 index;
+       bdaddr_t bdaddr;
+} __packed;
+
+#define MGMT_EV_DISCONNECTED           0x000C
+struct mgmt_ev_disconnected {
+       __le16 index;
+       bdaddr_t bdaddr;
+} __packed;
+
+#define MGMT_EV_CONNECT_FAILED         0x000D
+struct mgmt_ev_connect_failed {
+       __le16 index;
+       bdaddr_t bdaddr;
+       __u8 status;
+} __packed;
+
+#define MGMT_EV_PIN_CODE_REQUEST       0x000E
+struct mgmt_ev_pin_code_request {
+       __le16 index;
+       bdaddr_t bdaddr;
+} __packed;
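
All of the management commands above carry a little-endian controller index. A kernel-style sketch of filling one of the new command structures (controller 0, arbitrary PIN, illustrative only):

    #include <linux/string.h>
    #include <net/bluetooth/bluetooth.h>
    #include <net/bluetooth/mgmt.h>

    static void fill_pin_reply(struct mgmt_cp_pin_code_reply *cp,
                               bdaddr_t *ba, const u8 *pin, u8 len)
    {
            if (len > sizeof(cp->pin_code))
                    len = sizeof(cp->pin_code);

            memset(cp, 0, sizeof(*cp));
            cp->index = cpu_to_le16(0);     /* first controller */
            bacpy(&cp->bdaddr, ba);
            cp->pin_len = len;
            memcpy(cp->pin_code, pin, len);
    }
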
diff --git a/include/net/bluetooth/smp.h b/include/net/bluetooth/smp.h
new file mode 100644 (file)
index 0000000..8f2edbf
--- /dev/null
@@ -0,0 +1,76 @@
+#ifndef __SMP_H
+#define __SMP_H
+
+struct smp_command_hdr {
+       __u8    code;
+} __packed;
+
+#define SMP_CMD_PAIRING_REQ    0x01
+#define SMP_CMD_PAIRING_RSP    0x02
+struct smp_cmd_pairing {
+       __u8    io_capability;
+       __u8    oob_flag;
+       __u8    auth_req;
+       __u8    max_key_size;
+       __u8    init_key_dist;
+       __u8    resp_key_dist;
+} __packed;
+
+#define SMP_CMD_PAIRING_CONFIRM        0x03
+struct smp_cmd_pairing_confirm {
+       __u8    confirm_val[16];
+} __packed;
+
+#define SMP_CMD_PAIRING_RANDOM 0x04
+struct smp_cmd_pairing_random {
+       __u8    rand_val[16];
+} __packed;
+
+#define SMP_CMD_PAIRING_FAIL   0x05
+struct smp_cmd_pairing_fail {
+       __u8    reason;
+} __packed;
+
+#define SMP_CMD_ENCRYPT_INFO   0x06
+struct smp_cmd_encrypt_info {
+       __u8    ltk[16];
+} __packed;
+
+#define SMP_CMD_MASTER_IDENT   0x07
+struct smp_cmd_master_ident {
+       __u16   ediv;
+       __u8    rand[8];
+} __packed;
+
+#define SMP_CMD_IDENT_INFO     0x08
+struct smp_cmd_ident_info {
+       __u8    irk[16];
+} __packed;
+
+#define SMP_CMD_IDENT_ADDR_INFO        0x09
+struct smp_cmd_ident_addr_info {
+       __u8    addr_type;
+       bdaddr_t bdaddr;
+} __packed;
+
+#define SMP_CMD_SIGN_INFO      0x0a
+struct smp_cmd_sign_info {
+       __u8    csrk[16];
+} __packed;
+
+#define SMP_CMD_SECURITY_REQ   0x0b
+struct smp_cmd_security_req {
+       __u8    auth_req;
+} __packed;
+
+#define SMP_PASSKEY_ENTRY_FAILED       0x01
+#define SMP_OOB_NOT_AVAIL              0x02
+#define SMP_AUTH_REQUIREMENTS          0x03
+#define SMP_CONFIRM_FAILED             0x04
+#define SMP_PAIRING_NOTSUPP            0x05
+#define SMP_ENC_KEY_SIZE               0x06
+#define SMP_CMD_NOTSUPP                0x07
+#define SMP_UNSPECIFIED                0x08
+#define SMP_REPEATED_ATTEMPTS          0x09
+
+#endif /* __SMP_H */
index 1322695beb52980e741b17473ed9906ba159895e..1ac5786da14b1ecb3ab234ff7e1b86ba92f1a8d0 100644 (file)
@@ -413,7 +413,7 @@ struct station_parameters {
  * @STATION_INFO_PLID: @plid filled
  * @STATION_INFO_PLINK_STATE: @plink_state filled
  * @STATION_INFO_SIGNAL: @signal filled
- * @STATION_INFO_TX_BITRATE: @tx_bitrate fields are filled
+ * @STATION_INFO_TX_BITRATE: @txrate fields are filled
  *  (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs)
  * @STATION_INFO_RX_PACKETS: @rx_packets filled
  * @STATION_INFO_TX_PACKETS: @tx_packets filled
@@ -421,6 +421,7 @@ struct station_parameters {
  * @STATION_INFO_TX_FAILED: @tx_failed filled
  * @STATION_INFO_RX_DROP_MISC: @rx_dropped_misc filled
  * @STATION_INFO_SIGNAL_AVG: @signal_avg filled
+ * @STATION_INFO_RX_BITRATE: @rxrate fields are filled
  */
 enum station_info_flags {
        STATION_INFO_INACTIVE_TIME      = 1<<0,
@@ -437,6 +438,7 @@ enum station_info_flags {
        STATION_INFO_TX_FAILED          = 1<<11,
        STATION_INFO_RX_DROP_MISC       = 1<<12,
        STATION_INFO_SIGNAL_AVG         = 1<<13,
+       STATION_INFO_RX_BITRATE         = 1<<14,
 };
 
 /**
@@ -506,6 +508,7 @@ struct station_info {
        s8 signal;
        s8 signal_avg;
        struct rate_info txrate;
+       struct rate_info rxrate;
        u32 rx_packets;
        u32 tx_packets;
        u32 tx_retries;
@@ -1790,8 +1793,9 @@ static inline void *wdev_priv(struct wireless_dev *wdev)
 /**
  * ieee80211_channel_to_frequency - convert channel number to frequency
  * @chan: channel number
+ * @band: band, necessary due to channel number overlap
  */
-extern int ieee80211_channel_to_frequency(int chan);
+extern int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band);
 
 /**
  * ieee80211_frequency_to_channel - convert frequency to channel number
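
Because channel numbers overlap between bands, ieee80211_channel_to_frequency() now requires the band to be spelled out. A sketch of an updated caller, assuming the usual enum ieee80211_band values:

    #include <net/cfg80211.h>

    static void chan_to_freq_example(void)
    {
            /* old: ieee80211_channel_to_frequency(6); */
            int f24 = ieee80211_channel_to_frequency(6, IEEE80211_BAND_2GHZ);   /* 2437 MHz */
            int f5  = ieee80211_channel_to_frequency(36, IEEE80211_BAND_5GHZ);  /* 5180 MHz */

            (void)f24;
            (void)f5;
    }
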
index a8e7852b10abf705a7c7db5385b34a6d1a6a058e..e5983c9053dcd6fd2cfdfd5cd3415aa23e97734a 100644 (file)
@@ -43,6 +43,8 @@ struct dcbnl_rtnl_ops {
        int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *);
        int (*ieee_getapp) (struct net_device *, struct dcb_app *);
        int (*ieee_setapp) (struct net_device *, struct dcb_app *);
+       int (*ieee_peer_getets) (struct net_device *, struct ieee_ets *);
+       int (*ieee_peer_getpfc) (struct net_device *, struct ieee_pfc *);
 
        /* CEE std */
        u8   (*getstate)(struct net_device *);
@@ -77,7 +79,14 @@ struct dcbnl_rtnl_ops {
        u8   (*getdcbx)(struct net_device *);
        u8   (*setdcbx)(struct net_device *, u8);
 
+       /* peer apps */
+       int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *,
+                              u16 *);
+       int (*peer_getapptable)(struct net_device *, struct dcb_app *);
 
+       /* CEE peer */
+       int (*cee_peer_getpg) (struct net_device *, struct cee_pg *);
+       int (*cee_peer_getpfc) (struct net_device *, struct cee_pfc *);
 };
 
 #endif /* __NET_DCBNL_H__ */
index 93b0310317bed44f54350aeecc9abf80141ca592..2a46cbaef92d30dd7c890deb34a9554634eb94b5 100644 (file)
@@ -40,24 +40,10 @@ struct dst_entry {
        struct rcu_head         rcu_head;
        struct dst_entry        *child;
        struct net_device       *dev;
-       short                   error;
-       short                   obsolete;
-       int                     flags;
-#define DST_HOST               0x0001
-#define DST_NOXFRM             0x0002
-#define DST_NOPOLICY           0x0004
-#define DST_NOHASH             0x0008
-#define DST_NOCACHE            0x0010
+       struct  dst_ops         *ops;
+       unsigned long           _metrics;
        unsigned long           expires;
-
-       unsigned short          header_len;     /* more space at head required */
-       unsigned short          trailer_len;    /* space to reserve at tail */
-
-       unsigned int            rate_tokens;
-       unsigned long           rate_last;      /* rate limiting for ICMP */
-
        struct dst_entry        *path;
-
        struct neighbour        *neighbour;
        struct hh_cache         *hh;
 #ifdef CONFIG_XFRM
@@ -68,17 +54,16 @@ struct dst_entry {
        int                     (*input)(struct sk_buff*);
        int                     (*output)(struct sk_buff*);
 
-       struct  dst_ops         *ops;
-
-       u32                     _metrics[RTAX_MAX];
-
-#ifdef CONFIG_NET_CLS_ROUTE
+       short                   error;
+       short                   obsolete;
+       unsigned short          header_len;     /* more space at head required */
+       unsigned short          trailer_len;    /* space to reserve at tail */
+#ifdef CONFIG_IP_ROUTE_CLASSID
        __u32                   tclassid;
 #else
        __u32                   __pad2;
 #endif
 
-
        /*
         * Align __refcnt to a 64 bytes alignment
         * (L1_CACHE_SIZE would be too much)
@@ -93,6 +78,12 @@ struct dst_entry {
        atomic_t                __refcnt;       /* client references    */
        int                     __use;
        unsigned long           lastuse;
+       int                     flags;
+#define DST_HOST               0x0001
+#define DST_NOXFRM             0x0002
+#define DST_NOPOLICY           0x0004
+#define DST_NOHASH             0x0008
+#define DST_NOCACHE            0x0010
        union {
                struct dst_entry        *next;
                struct rtable __rcu     *rt_next;
@@ -103,10 +94,70 @@ struct dst_entry {
 
 #ifdef __KERNEL__
 
+extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
+extern const u32 dst_default_metrics[RTAX_MAX];
+
+#define DST_METRICS_READ_ONLY  0x1UL
+#define __DST_METRICS_PTR(Y)   \
+       ((u32 *)((Y) & ~DST_METRICS_READ_ONLY))
+#define DST_METRICS_PTR(X)     __DST_METRICS_PTR((X)->_metrics)
+
+static inline bool dst_metrics_read_only(const struct dst_entry *dst)
+{
+       return dst->_metrics & DST_METRICS_READ_ONLY;
+}
+
+extern void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
+
+static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
+{
+       unsigned long val = dst->_metrics;
+       if (!(val & DST_METRICS_READ_ONLY))
+               __dst_destroy_metrics_generic(dst, val);
+}
+
+static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
+{
+       unsigned long p = dst->_metrics;
+
+       if (p & DST_METRICS_READ_ONLY)
+               return dst->ops->cow_metrics(dst, p);
+       return __DST_METRICS_PTR(p);
+}
+
+/* This may only be invoked before the entry has reached global
+ * visibility.
+ */
+static inline void dst_init_metrics(struct dst_entry *dst,
+                                   const u32 *src_metrics,
+                                   bool read_only)
+{
+       dst->_metrics = ((unsigned long) src_metrics) |
+               (read_only ? DST_METRICS_READ_ONLY : 0);
+}
+
+static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
+{
+       u32 *dst_metrics = dst_metrics_write_ptr(dest);
+
+       if (dst_metrics) {
+               u32 *src_metrics = DST_METRICS_PTR(src);
+
+               memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
+       }
+}
+
+static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
+{
+       return DST_METRICS_PTR(dst);
+}
+
 static inline u32
 dst_metric_raw(const struct dst_entry *dst, const int metric)
 {
-       return dst->_metrics[metric-1];
+       u32 *p = DST_METRICS_PTR(dst);
+
+       return p[metric-1];
 }
 
 static inline u32
@@ -131,22 +182,10 @@ dst_metric_advmss(const struct dst_entry *dst)
 
 static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
 {
-       dst->_metrics[metric-1] = val;
-}
+       u32 *p = dst_metrics_write_ptr(dst);
 
-static inline void dst_import_metrics(struct dst_entry *dst, const u32 *src_metrics)
-{
-       memcpy(dst->_metrics, src_metrics, RTAX_MAX * sizeof(u32));
-}
-
-static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
-{
-       dst_import_metrics(dest, src->_metrics);
-}
-
-static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
-{
-       return dst->_metrics;
+       if (p)
+               p[metric-1] = val;
 }
 
 static inline u32
@@ -181,8 +220,6 @@ static inline u32
 dst_allfrag(const struct dst_entry *dst)
 {
        int ret = dst_feature(dst,  RTAX_FEATURE_ALLFRAG);
-       /* Yes, _exactly_. This is paranoia. */
-       barrier();
        return ret;
 }
 
@@ -315,7 +352,7 @@ static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
 }
 
 extern int dst_discard(struct sk_buff *skb);
-extern void * dst_alloc(struct dst_ops * ops);
+extern void *dst_alloc(struct dst_ops * ops, int initial_ref);
 extern void __dst_free(struct dst_entry * dst);
 extern struct dst_entry *dst_destroy(struct dst_entry * dst);
 
@@ -384,27 +421,22 @@ extern void               dst_init(void);
 
 /* Flags for xfrm_lookup flags argument. */
 enum {
-       XFRM_LOOKUP_WAIT = 1 << 0,
-       XFRM_LOOKUP_ICMP = 1 << 1,
+       XFRM_LOOKUP_ICMP = 1 << 0,
 };
 
 struct flowi;
 #ifndef CONFIG_XFRM
-static inline int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
-                             struct flowi *fl, struct sock *sk, int flags)
+static inline struct dst_entry *xfrm_lookup(struct net *net,
+                                           struct dst_entry *dst_orig,
+                                           const struct flowi *fl, struct sock *sk,
+                                           int flags)
 {
-       return 0;
+       return dst_orig;
 } 
-static inline int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
-                               struct flowi *fl, struct sock *sk, int flags)
-{
-       return 0;
-}
 #else
-extern int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
-                      struct flowi *fl, struct sock *sk, int flags);
-extern int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
-                        struct flowi *fl, struct sock *sk, int flags);
+extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
+                                    const struct flowi *fl, struct sock *sk,
+                                    int flags);
 #endif
 #endif
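
The dst.h hunks above turn _metrics into a tagged pointer: bit 0 (DST_METRICS_READ_ONLY) marks a shared table that must be copied before the first write (the cow_metrics step). A minimal stand-alone sketch of the same low-bit tagging trick, with invented names and plain user-space C rather than the kernel types:

#include <stdint.h>
#include <stdio.h>

#define METRICS_READ_ONLY 0x1UL
#define METRICS_PTR(v)    ((uint32_t *)((v) & ~METRICS_READ_ONLY))

static uint32_t shared_defaults[4];   /* stands in for dst_default_metrics */
static uint32_t private_copy[4];      /* stands in for the COW'd table     */

int main(void)
{
	/* new entries start out pointing at the shared read-only table */
	unsigned long m = (unsigned long)shared_defaults | METRICS_READ_ONLY;

	printf("read-only? %lu, metric[0] = %u\n",
	       m & METRICS_READ_ONLY, METRICS_PTR(m)[0]);

	/* a writer first swaps in a private copy; the tag bit is then clear */
	m = (unsigned long)private_copy;
	METRICS_PTR(m)[0] = 1460;
	printf("read-only? %lu, metric[0] = %u\n",
	       m & METRICS_READ_ONLY, METRICS_PTR(m)[0]);
	return 0;
}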
 
index 21a320b8708eb1b987c5d981b4b14ff684123715..dc0746328947f3b259c093093f4068d98a934d5a 100644 (file)
@@ -18,6 +18,7 @@ struct dst_ops {
        struct dst_entry *      (*check)(struct dst_entry *, __u32 cookie);
        unsigned int            (*default_advmss)(const struct dst_entry *);
        unsigned int            (*default_mtu)(const struct dst_entry *);
+       u32 *                   (*cow_metrics)(struct dst_entry *, unsigned long);
        void                    (*destroy)(struct dst_entry *);
        void                    (*ifdown)(struct dst_entry *,
                                          struct net_device *dev, int how);
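
With the new cow_metrics hook added above, a protocol whose routes share a metrics table can delegate the copy-on-write step to the generic helper declared in dst.h. A sketch of such an initializer (kernel context assumed, only the relevant fields shown, name invented):

/* sketch only: a dst_ops instance opting into generic COW metrics */
static struct dst_ops example_dst_ops = {
	.family		= AF_INET,
	/* called by dst_metrics_write_ptr() when the table is read-only */
	.cow_metrics	= dst_cow_metrics_generic,
	/* .check, .destroy, .ifdown, ... elided in this sketch */
};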
index 240b7f356c7105c5ded9baa91c6c41cbdbc85261..fd0413873b8ed58e985c3377ba42657f5a15036c 100644 (file)
@@ -48,7 +48,9 @@ struct flowi {
 
        __u8    proto;
        __u8    flags;
-#define FLOWI_FLAG_ANYSRC 0x01
+#define FLOWI_FLAG_ANYSRC              0x01
+#define FLOWI_FLAG_PRECOW_METRICS      0x02
+#define FLOWI_FLAG_CAN_SLEEP           0x04
        union {
                struct {
                        __be16  sport;
@@ -101,17 +103,18 @@ struct flow_cache_ops {
 };
 
 typedef struct flow_cache_object *(*flow_resolve_t)(
-               struct net *net, struct flowi *key, u16 family,
+               struct net *net, const struct flowi *key, u16 family,
                u8 dir, struct flow_cache_object *oldobj, void *ctx);
 
 extern struct flow_cache_object *flow_cache_lookup(
-               struct net *net, struct flowi *key, u16 family,
+               struct net *net, const struct flowi *key, u16 family,
                u8 dir, flow_resolve_t resolver, void *ctx);
 
 extern void flow_cache_flush(void);
 extern atomic_t flow_cache_genid;
 
-static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
+static inline int flow_cache_uli_match(const struct flowi *fl1,
+                                      const struct flowi *fl2)
 {
        return (fl1->proto == fl2->proto &&
                !memcmp(&fl1->uli_u, &fl2->uli_u, sizeof(fl1->uli_u)));
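
The two new flow flags above let callers ask for a pre-copied metrics table (FLOWI_FLAG_PRECOW_METRICS) and for lookups that may sleep (FLOWI_FLAG_CAN_SLEEP). A hedged fragment showing how a process-context TCP caller might set them on a flow key (kernel context assumed, other flowi fields left zeroed):

struct flowi fl = {
	.proto	= IPPROTO_TCP,
	/* process context: a blocking resolution is fine, and TCP wants
	 * writable metrics up front */
	.flags	= FLOWI_FLAG_PRECOW_METRICS | FLOWI_FLAG_CAN_SLEEP,
};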
index 6e991e0d0d6fef8763a1d58693c373a7105c9ddc..f0698b955b73c7ab41051fdf6508f4ab827932c0 100644 (file)
@@ -45,7 +45,4 @@ extern int    icmp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 extern int     icmp_init(void);
 extern void    icmp_out_count(struct net *net, unsigned char type);
 
-/* Move into dst.h ? */
-extern int     xrlim_allow(struct dst_entry *dst, int timeout);
-
 #endif /* _ICMP_H */
index af49f8ab7f8166de6c7b135e6296968ed094e0d1..b0be5fb9de19675ef765ec9e69ecde771b62a15b 100644 (file)
@@ -178,6 +178,11 @@ struct ieee80211_radiotap_header {
  *
  *     Number of unicast retries a transmitted frame used.
  *
+ * IEEE80211_RADIOTAP_MCS      u8, u8, u8              unitless
+ *
+ *     Contains a bitmap of known fields/flags, the flags, and
+ *     the MCS index.
+ *
  */
 enum ieee80211_radiotap_type {
        IEEE80211_RADIOTAP_TSFT = 0,
@@ -199,6 +204,8 @@ enum ieee80211_radiotap_type {
        IEEE80211_RADIOTAP_RTS_RETRIES = 16,
        IEEE80211_RADIOTAP_DATA_RETRIES = 17,
 
+       IEEE80211_RADIOTAP_MCS = 19,
+
        /* valid in every it_present bitmap, even vendor namespaces */
        IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
        IEEE80211_RADIOTAP_VENDOR_NAMESPACE = 30,
@@ -245,6 +252,24 @@ enum ieee80211_radiotap_type {
 #define IEEE80211_RADIOTAP_F_TX_CTS    0x0002  /* used cts 'protection' */
 #define IEEE80211_RADIOTAP_F_TX_RTS    0x0004  /* used rts/cts handshake */
 
+
+/* For IEEE80211_RADIOTAP_MCS */
+#define IEEE80211_RADIOTAP_MCS_HAVE_BW         0x01
+#define IEEE80211_RADIOTAP_MCS_HAVE_MCS                0x02
+#define IEEE80211_RADIOTAP_MCS_HAVE_GI         0x04
+#define IEEE80211_RADIOTAP_MCS_HAVE_FMT                0x08
+#define IEEE80211_RADIOTAP_MCS_HAVE_FEC                0x10
+
+#define IEEE80211_RADIOTAP_MCS_BW_MASK         0x03
+#define                IEEE80211_RADIOTAP_MCS_BW_20    0
+#define                IEEE80211_RADIOTAP_MCS_BW_40    1
+#define                IEEE80211_RADIOTAP_MCS_BW_20L   2
+#define                IEEE80211_RADIOTAP_MCS_BW_20U   3
+#define IEEE80211_RADIOTAP_MCS_SGI             0x04
+#define IEEE80211_RADIOTAP_MCS_FMT_GF          0x08
+#define IEEE80211_RADIOTAP_MCS_FEC_LDPC                0x10
+
+
 /* Ugly macro to convert literal channel numbers into their MHz equivalents.
  * There are certainly some conditions that will break this (like feeding it '30'),
  * but they shouldn't arise since nothing talks on channel 30. */
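
The IEEE80211_RADIOTAP_MCS field added above is three bytes: a 'known' bitmap, a flags byte (bandwidth, guard interval, format, FEC), and the MCS index. A stand-alone sketch of filling it for a 40 MHz short-GI frame; the constants mirror the defines above but are redeclared so the snippet compiles on its own:

#include <stdint.h>

#define RT_MCS_HAVE_BW   0x01
#define RT_MCS_HAVE_MCS  0x02
#define RT_MCS_HAVE_GI   0x04
#define RT_MCS_BW_40     1       /* 2-bit bandwidth code, mask 0x03 */
#define RT_MCS_SGI       0x04

static void fill_radiotap_mcs(uint8_t out[3], uint8_t mcs_index)
{
	out[0] = RT_MCS_HAVE_BW | RT_MCS_HAVE_MCS | RT_MCS_HAVE_GI; /* known */
	out[1] = RT_MCS_BW_40 | RT_MCS_SGI;                         /* flags */
	out[2] = mcs_index;                                         /* MCS   */
}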
index 8181498fa96ca5334cbff5f10a36a41abd8882dd..7a37369f8ea3fed962a7eed9a07ae39bf75a1114 100644 (file)
@@ -86,6 +86,19 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
        return (struct inet_request_sock *)sk;
 }
 
+struct inet_cork {
+       unsigned int            flags;
+       unsigned int            fragsize;
+       struct ip_options       *opt;
+       struct dst_entry        *dst;
+       int                     length; /* Total length of all frames */
+       __be32                  addr;
+       struct flowi            fl;
+       struct page             *page;
+       u32                     off;
+       u8                      tx_flags;
+};
+
 struct ip_mc_socklist;
 struct ipv6_pinfo;
 struct rtable;
@@ -143,15 +156,7 @@ struct inet_sock {
        int                     mc_index;
        __be32                  mc_addr;
        struct ip_mc_socklist __rcu     *mc_list;
-       struct {
-               unsigned int            flags;
-               unsigned int            fragsize;
-               struct ip_options       *opt;
-               struct dst_entry        *dst;
-               int                     length; /* Total length of all frames */
-               __be32                  addr;
-               struct flowi            fl;
-       } cork;
+       struct inet_cork        cork;
 };
 
 #define IPCORK_OPT     1       /* ip-options has been held in ipcork.opt */
@@ -219,7 +224,13 @@ static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops
 
 static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
 {
-       return inet_sk(sk)->transparent ? FLOWI_FLAG_ANYSRC : 0;
+       __u8 flags = 0;
+
+       if (inet_sk(sk)->transparent)
+               flags |= FLOWI_FLAG_ANYSRC;
+       if (sk->sk_protocol == IPPROTO_TCP)
+               flags |= FLOWI_FLAG_PRECOW_METRICS;
+       return flags;
 }
 
 #endif /* _INET_SOCK_H */
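
As a worked example of the rewritten inet_sk_flowi_flags() above: a transparent TCP socket now yields FLOWI_FLAG_ANYSRC | FLOWI_FLAG_PRECOW_METRICS = 0x01 | 0x02 = 0x03, while a plain UDP socket still gets 0. A stand-alone mirror of that logic:

#include <stdio.h>

#define FLAG_ANYSRC         0x01
#define FLAG_PRECOW_METRICS 0x02

/* mirrors inet_sk_flowi_flags(); the inputs replace the socket lookups */
static unsigned char flowi_flags(int transparent, int is_tcp)
{
	unsigned char flags = 0;

	if (transparent)
		flags |= FLAG_ANYSRC;
	if (is_tcp)
		flags |= FLAG_PRECOW_METRICS;
	return flags;
}

int main(void)
{
	printf("transparent TCP: 0x%02x\n", flowi_flags(1, 1)); /* 0x03 */
	printf("plain UDP:       0x%02x\n", flowi_flags(0, 0)); /* 0x00 */
	return 0;
}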
index 599d96e7411455e75d77ce0e8f85da97754bde42..e6dd8da6b2ad6207eb9f884e390bf62442820b75 100644 (file)
 #include <linux/init.h>
 #include <linux/jiffies.h>
 #include <linux/spinlock.h>
+#include <linux/rtnetlink.h>
 #include <net/ipv6.h>
 #include <asm/atomic.h>
 
-struct inetpeer_addr {
+struct inetpeer_addr_base {
        union {
-               __be32          a4;
-               __be32          a6[4];
+               __be32                  a4;
+               __be32                  a6[4];
        };
-       __u16   family;
+};
+
+struct inetpeer_addr {
+       struct inetpeer_addr_base       addr;
+       __u16                           family;
 };
 
 struct inet_peer {
@@ -33,15 +38,22 @@ struct inet_peer {
        atomic_t                refcnt;
        /*
         * Once inet_peer is queued for deletion (refcnt == -1), following fields
-        * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
-        * We can share memory with rcu_head to keep inet_peer small
+        * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp, metrics
+        * We can share memory with rcu_head to help keep inet_peer small.
         */
        union {
                struct {
-                       atomic_t        rid;            /* Frag reception counter */
-                       atomic_t        ip_id_count;    /* IP ID for the next packet */
-                       __u32           tcp_ts;
-                       __u32           tcp_ts_stamp;
+                       atomic_t                        rid;            /* Frag reception counter */
+                       atomic_t                        ip_id_count;    /* IP ID for the next packet */
+                       __u32                           tcp_ts;
+                       __u32                           tcp_ts_stamp;
+                       u32                             metrics[RTAX_MAX];
+                       u32                             rate_tokens;    /* rate limiting for ICMP */
+                       unsigned long                   rate_last;
+                       unsigned long                   pmtu_expires;
+                       u32                             pmtu_orig;
+                       u32                             pmtu_learned;
+                       struct inetpeer_addr_base       redirect_learned;
                };
                struct rcu_head         rcu;
        };
@@ -49,6 +61,13 @@ struct inet_peer {
 
 void                   inet_initpeers(void) __init;
 
+#define INETPEER_METRICS_NEW   (~(u32) 0)
+
+static inline bool inet_metrics_new(const struct inet_peer *p)
+{
+       return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;
+}
+
 /* can be called with or without local BH being disabled */
 struct inet_peer       *inet_getpeer(struct inetpeer_addr *daddr, int create);
 
@@ -56,7 +75,7 @@ static inline struct inet_peer *inet_getpeer_v4(__be32 v4daddr, int create)
 {
        struct inetpeer_addr daddr;
 
-       daddr.a4 = v4daddr;
+       daddr.addr.a4 = v4daddr;
        daddr.family = AF_INET;
        return inet_getpeer(&daddr, create);
 }
@@ -65,13 +84,14 @@ static inline struct inet_peer *inet_getpeer_v6(struct in6_addr *v6daddr, int cr
 {
        struct inetpeer_addr daddr;
 
-       ipv6_addr_copy((struct in6_addr *)daddr.a6, v6daddr);
+       ipv6_addr_copy((struct in6_addr *)daddr.addr.a6, v6daddr);
        daddr.family = AF_INET6;
        return inet_getpeer(&daddr, create);
 }
 
 /* can be called from BH context or outside */
 extern void inet_putpeer(struct inet_peer *p);
+extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
 
 /*
  * temporary check to make sure we don't access rid, ip_id_count, tcp_ts,
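
With metrics now hanging off struct inet_peer, a fresh peer advertises an unseeded table through INETPEER_METRICS_NEW (stored in the RTAX_LOCK slot). A hedged kernel-context sketch of seeding it exactly once; the function name is invented and locking is elided:

static void example_seed_peer_metrics(__be32 daddr)
{
	struct inet_peer *peer = inet_getpeer_v4(daddr, 1 /* create */);

	if (!peer)
		return;
	if (inet_metrics_new(peer))
		/* first user: clearing the slab also clears RTAX_LOCK-1,
		 * so inet_metrics_new() becomes false from here on */
		memset(peer->metrics, 0, sizeof(peer->metrics));
	inet_putpeer(peer);
}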
index 67fac78a186b147c816a5aa0ae50fd1c3d1f2f72..a4f631108c54eec5e80b4cb6c44b69fc0f984e6f 100644 (file)
@@ -116,8 +116,24 @@ extern int         ip_append_data(struct sock *sk,
 extern int             ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb);
 extern ssize_t         ip_append_page(struct sock *sk, struct page *page,
                                int offset, size_t size, int flags);
+extern struct sk_buff  *__ip_make_skb(struct sock *sk,
+                                     struct sk_buff_head *queue,
+                                     struct inet_cork *cork);
+extern int             ip_send_skb(struct sk_buff *skb);
 extern int             ip_push_pending_frames(struct sock *sk);
 extern void            ip_flush_pending_frames(struct sock *sk);
+extern struct sk_buff  *ip_make_skb(struct sock *sk,
+                                   int getfrag(void *from, char *to, int offset, int len,
+                                               int odd, struct sk_buff *skb),
+                                   void *from, int length, int transhdrlen,
+                                   struct ipcm_cookie *ipc,
+                                   struct rtable **rtp,
+                                   unsigned int flags);
+
+static inline struct sk_buff *ip_finish_skb(struct sock *sk)
+{
+       return __ip_make_skb(sk, &sk->sk_write_queue, &inet_sk(sk)->cork);
+}
 
 /* datagram.c */
 extern int             ip4_datagram_connect(struct sock *sk, 
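
The new __ip_make_skb()/ip_send_skb()/ip_make_skb() trio above lets a sender build and push a complete datagram without going through the per-socket cork. A hedged fragment of the intended call order (kernel context assumed; sk, getfrag, data, len, ipc and rt are the caller's usual ip_append_data() arguments):

struct sk_buff *skb;
int err = 0;

skb = ip_make_skb(sk, getfrag, data, len, 0 /* transhdrlen */,
		  &ipc, &rt, MSG_DONTWAIT);
if (skb && !IS_ERR(skb))
	err = ip_send_skb(skb);	/* hands the finished datagram to the stack */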
index 708ff7cb880696c2e2fe2d811ae2352e0d12d27e..46a6e8ae232c4e74b5f94262a351ec3291a0ab7d 100644 (file)
@@ -108,6 +108,7 @@ struct rt6_info {
        u32                             rt6i_flags;
        struct rt6key                   rt6i_src;
        u32                             rt6i_metric;
+       u32                             rt6i_peer_genid;
 
        struct inet6_dev                *rt6i_idev;
        struct inet_peer                *rt6i_peer;
index 07bdb5e9e8ac0b80628400d4b79e7e5041ce8199..3f6c943faedc3535e526cb5c562044dd2d32fa21 100644 (file)
@@ -51,15 +51,17 @@ struct fib_nh {
        struct fib_info         *nh_parent;
        unsigned                nh_flags;
        unsigned char           nh_scope;
+       unsigned char           nh_cfg_scope;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        int                     nh_weight;
        int                     nh_power;
 #endif
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        __u32                   nh_tclassid;
 #endif
        int                     nh_oif;
        __be32                  nh_gw;
+       __be32                  nh_saddr;
 };
 
 /*
@@ -77,7 +79,7 @@ struct fib_info {
        int                     fib_protocol;
        __be32                  fib_prefsrc;
        u32                     fib_priority;
-       u32                     fib_metrics[RTAX_MAX];
+       u32                     *fib_metrics;
 #define fib_mtu fib_metrics[RTAX_MTU-1]
 #define fib_window fib_metrics[RTAX_WINDOW-1]
 #define fib_rtt fib_metrics[RTAX_RTT-1]
@@ -96,12 +98,15 @@ struct fib_info {
 struct fib_rule;
 #endif
 
+struct fib_table;
 struct fib_result {
        unsigned char   prefixlen;
        unsigned char   nh_sel;
        unsigned char   type;
        unsigned char   scope;
        struct fib_info *fi;
+       struct fib_table *table;
+       struct list_head *fa_head;
 #ifdef CONFIG_IP_MULTIPLE_TABLES
        struct fib_rule *r;
 #endif
@@ -136,11 +141,13 @@ struct fib_result_nl {
 
 #endif /* CONFIG_IP_ROUTE_MULTIPATH */
 
-#define FIB_RES_PREFSRC(res)           ((res).fi->fib_prefsrc ? : __fib_res_prefsrc(&res))
+#define FIB_RES_SADDR(res)             (FIB_RES_NH(res).nh_saddr)
 #define FIB_RES_GW(res)                        (FIB_RES_NH(res).nh_gw)
 #define FIB_RES_DEV(res)               (FIB_RES_NH(res).nh_dev)
 #define FIB_RES_OIF(res)               (FIB_RES_NH(res).nh_oif)
 
+#define FIB_RES_PREFSRC(res)           ((res).fi->fib_prefsrc ? : FIB_RES_SADDR(res))
+
 struct fib_table {
        struct hlist_node tb_hlist;
        u32             tb_id;
@@ -155,9 +162,6 @@ extern int fib_table_delete(struct fib_table *, struct fib_config *);
 extern int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
                          struct netlink_callback *cb);
 extern int fib_table_flush(struct fib_table *table);
-extern void fib_table_select_default(struct fib_table *table,
-                                    const struct flowi *flp,
-                                    struct fib_result *res);
 extern void fib_free_table(struct fib_table *tb);
 
 
@@ -201,8 +205,8 @@ static inline int fib_lookup(struct net *net, const struct flowi *flp,
 extern int __net_init fib4_rules_init(struct net *net);
 extern void __net_exit fib4_rules_exit(struct net *net);
 
-#ifdef CONFIG_NET_CLS_ROUTE
-extern u32 fib_rules_tclass(struct fib_result *res);
+#ifdef CONFIG_IP_ROUTE_CLASSID
+extern u32 fib_rules_tclass(const struct fib_result *res);
 #endif
 
 extern int fib_lookup(struct net *n, struct flowi *flp, struct fib_result *res);
@@ -218,24 +222,23 @@ extern void               ip_fib_init(void);
 extern int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
                               struct net_device *dev, __be32 *spec_dst,
                               u32 *itag, u32 mark);
-extern void fib_select_default(struct net *net, const struct flowi *flp,
-                              struct fib_result *res);
+extern void fib_select_default(struct fib_result *res);
 
 /* Exported by fib_semantics.c */
 extern int ip_fib_check_default(__be32 gw, struct net_device *dev);
 extern int fib_sync_down_dev(struct net_device *dev, int force);
 extern int fib_sync_down_addr(struct net *net, __be32 local);
+extern void fib_update_nh_saddrs(struct net_device *dev);
 extern int fib_sync_up(struct net_device *dev);
-extern __be32  __fib_res_prefsrc(struct fib_result *res);
 extern void fib_select_multipath(const struct flowi *flp, struct fib_result *res);
 
-/* Exported by fib_{hash|trie}.c */
-extern void fib_hash_init(void);
-extern struct fib_table *fib_hash_table(u32 id);
+/* Exported by fib_trie.c */
+extern void fib_trie_init(void);
+extern struct fib_table *fib_trie_table(u32 id);
 
-static inline void fib_combine_itag(u32 *itag, struct fib_result *res)
+static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
 {
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 #ifdef CONFIG_IP_MULTIPLE_TABLES
        u32 rtag;
 #endif
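
FIB_RES_PREFSRC above now falls back to the per-nexthop nh_saddr instead of calling __fib_res_prefsrc(). Spelled out without the GNU ?: shorthand (sketch; res is a filled-in struct fib_result):

__be32 src = res.fi->fib_prefsrc;

if (!src)
	src = FIB_RES_NH(res).nh_saddr;	/* i.e. FIB_RES_SADDR(res) */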
index b7bbd6c28cfa17dde6fa3a972d33635c5a498312..e74da41ebd1be69d48845e4e6400dad96ad76004 100644 (file)
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 #include <net/netfilter/nf_conntrack.h>
 #endif
+#include <net/net_namespace.h>         /* Net namespace */
+
+/*
+ * Generic access to the ipvs struct
+ */
+static inline struct netns_ipvs *net_ipvs(struct net* net)
+{
+       return net->ipvs;
+}
+/*
+ * Get the net ptr from an skb on the traffic path;
+ * use skb_sknet() when the call comes from userland (ioctl or netlink).
+ */
+static inline struct net *skb_net(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+#ifdef CONFIG_IP_VS_DEBUG
+       /*
+        * This is used for debugging only.
+        * Start with the most likely hit,
+        * end with BUG().
+        */
+       if (likely(skb->dev && skb->dev->nd_net))
+               return dev_net(skb->dev);
+       if (skb_dst(skb)->dev)
+               return dev_net(skb_dst(skb)->dev);
+       WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
+                     __func__, __LINE__);
+       if (likely(skb->sk && skb->sk->sk_net))
+               return sock_net(skb->sk);
+       pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
+               __func__, __LINE__);
+       BUG();
+#else
+       return dev_net(skb->dev ? : skb_dst(skb)->dev);
+#endif
+#else
+       return &init_net;
+#endif
+}
+
+static inline struct net *skb_sknet(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+#ifdef CONFIG_IP_VS_DEBUG
+       /* Start with the most likely hit */
+       if (likely(skb->sk && skb->sk->sk_net))
+               return sock_net(skb->sk);
+       WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n",
+                      __func__, __LINE__);
+       if (likely(skb->dev && skb->dev->nd_net))
+               return dev_net(skb->dev);
+       pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
+               __func__, __LINE__);
+       BUG();
+#else
+       return sock_net(skb->sk);
+#endif
+#else
+       return &init_net;
+#endif
+}
+/*
+ * This one is needed for single_open_net since net is stored directly in
+ * private, not as a struct, i.e. seq_file_net can't be used.
+ */
+static inline struct net *seq_file_single_net(struct seq_file *seq)
+{
+#ifdef CONFIG_NET_NS
+       return (struct net *)seq->private;
+#else
+       return &init_net;
+#endif
+}
 
 /* Connections' size value needed by ip_vs_ctl.c */
 extern int ip_vs_conn_tab_size;
@@ -258,6 +332,23 @@ struct ip_vs_seq {
                                                   before last resized pkt */
 };
 
+/*
+ * counters per cpu
+ */
+struct ip_vs_counters {
+       __u32           conns;          /* connections scheduled */
+       __u32           inpkts;         /* incoming packets */
+       __u32           outpkts;        /* outgoing packets */
+       __u64           inbytes;        /* incoming bytes */
+       __u64           outbytes;       /* outgoing bytes */
+};
+/*
+ * Stats per cpu
+ */
+struct ip_vs_cpu_stats {
+       struct ip_vs_counters   ustats;
+       struct u64_stats_sync   syncp;
+};
 
 /*
  *     IPVS statistics objects
@@ -279,17 +370,34 @@ struct ip_vs_estimator {
 };
 
 struct ip_vs_stats {
-       struct ip_vs_stats_user ustats;         /* statistics */
+       struct ip_vs_stats_user ustats;         /* statistics */
        struct ip_vs_estimator  est;            /* estimator */
-
-       spinlock_t              lock;           /* spin lock */
+       struct ip_vs_cpu_stats  *cpustats;      /* per cpu counters */
+       spinlock_t              lock;           /* spin lock */
 };
 
+/*
+ * Helper macros for the per-CPU counters,
+ * e.g. ipvs->tot_stats->ustats.count
+ */
+#define IPVS_STAT_INC(ipvs, count)     \
+       __this_cpu_inc((ipvs)->ustats->count)
+
+#define IPVS_STAT_ADD(ipvs, count, value) \
+       do {\
+               write_seqcount_begin(per_cpu_ptr((ipvs)->ustats_seq, \
+                                    raw_smp_processor_id())); \
+               __this_cpu_add((ipvs)->ustats->count, value); \
+               write_seqcount_end(per_cpu_ptr((ipvs)->ustats_seq, \
+                                  raw_smp_processor_id())); \
+       } while (0)
+
 struct dst_entry;
 struct iphdr;
 struct ip_vs_conn;
 struct ip_vs_app;
 struct sk_buff;
+struct ip_vs_proto_data;
 
 struct ip_vs_protocol {
        struct ip_vs_protocol   *next;
@@ -297,21 +405,22 @@ struct ip_vs_protocol {
        u16                     protocol;
        u16                     num_states;
        int                     dont_defrag;
-       atomic_t                appcnt;         /* counter of proto app incs */
-       int                     *timeout_table; /* protocol timeout table */
 
        void (*init)(struct ip_vs_protocol *pp);
 
        void (*exit)(struct ip_vs_protocol *pp);
 
+       void (*init_netns)(struct net *net, struct ip_vs_proto_data *pd);
+
+       void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd);
+
        int (*conn_schedule)(int af, struct sk_buff *skb,
-                            struct ip_vs_protocol *pp,
+                            struct ip_vs_proto_data *pd,
                             int *verdict, struct ip_vs_conn **cpp);
 
        struct ip_vs_conn *
        (*conn_in_get)(int af,
                       const struct sk_buff *skb,
-                      struct ip_vs_protocol *pp,
                       const struct ip_vs_iphdr *iph,
                       unsigned int proto_off,
                       int inverse);
@@ -319,7 +428,6 @@ struct ip_vs_protocol {
        struct ip_vs_conn *
        (*conn_out_get)(int af,
                        const struct sk_buff *skb,
-                       struct ip_vs_protocol *pp,
                        const struct ip_vs_iphdr *iph,
                        unsigned int proto_off,
                        int inverse);
@@ -337,11 +445,11 @@ struct ip_vs_protocol {
 
        int (*state_transition)(struct ip_vs_conn *cp, int direction,
                                const struct sk_buff *skb,
-                               struct ip_vs_protocol *pp);
+                               struct ip_vs_proto_data *pd);
 
-       int (*register_app)(struct ip_vs_app *inc);
+       int (*register_app)(struct net *net, struct ip_vs_app *inc);
 
-       void (*unregister_app)(struct ip_vs_app *inc);
+       void (*unregister_app)(struct net *net, struct ip_vs_app *inc);
 
        int (*app_conn_bind)(struct ip_vs_conn *cp);
 
@@ -350,14 +458,26 @@ struct ip_vs_protocol {
                             int offset,
                             const char *msg);
 
-       void (*timeout_change)(struct ip_vs_protocol *pp, int flags);
+       void (*timeout_change)(struct ip_vs_proto_data *pd, int flags);
+};
 
-       int (*set_state_timeout)(struct ip_vs_protocol *pp, char *sname, int to);
+/*
+ * protocol data per netns
+ */
+struct ip_vs_proto_data {
+       struct ip_vs_proto_data *next;
+       struct ip_vs_protocol   *pp;
+       int                     *timeout_table; /* protocol timeout table */
+       atomic_t                appcnt;         /* counter of proto app incs. */
+       struct tcp_states_t     *tcp_state_table;
 };
 
-extern struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto);
+extern struct ip_vs_protocol   *ip_vs_proto_get(unsigned short proto);
+extern struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net,
+                                                    unsigned short proto);
 
 struct ip_vs_conn_param {
+       struct net                      *net;
        const union nf_inet_addr        *caddr;
        const union nf_inet_addr        *vaddr;
        __be16                          cport;
@@ -374,17 +494,20 @@ struct ip_vs_conn_param {
  *     IP_VS structure allocated for each dynamically scheduled connection
  */
 struct ip_vs_conn {
-       struct list_head        c_list;         /* hashed list heads */
-
+       struct hlist_node       c_list;         /* hashed list heads */
+#ifdef CONFIG_NET_NS
+       struct net              *net;           /* Name space */
+#endif
        /* Protocol, addresses and port numbers */
-       u16                      af;            /* address family */
-       union nf_inet_addr       caddr;          /* client address */
-       union nf_inet_addr       vaddr;          /* virtual address */
-       union nf_inet_addr       daddr;          /* destination address */
-       volatile __u32           flags;          /* status flags */
-       __be16                   cport;
-       __be16                   vport;
-       __be16                   dport;
+       u16                     af;             /* address family */
+       __be16                  cport;
+       __be16                  vport;
+       __be16                  dport;
+       __u32                   fwmark;         /* Firewall mark from skb */
+       union nf_inet_addr      caddr;          /* client address */
+       union nf_inet_addr      vaddr;          /* virtual address */
+       union nf_inet_addr      daddr;          /* destination address */
+       volatile __u32          flags;          /* status flags */
        __u16                   protocol;       /* Which protocol (TCP/UDP) */
 
        /* counter and timer */
@@ -422,10 +545,38 @@ struct ip_vs_conn {
        struct ip_vs_seq        in_seq;         /* incoming seq. struct */
        struct ip_vs_seq        out_seq;        /* outgoing seq. struct */
 
+       const struct ip_vs_pe   *pe;
        char                    *pe_data;
        __u8                    pe_data_len;
 };
 
+/*
+ *  To save some memory in the conn table when namespaces are disabled.
+ */
+static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp)
+{
+#ifdef CONFIG_NET_NS
+       return cp->net;
+#else
+       return &init_net;
+#endif
+}
+static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net)
+{
+#ifdef CONFIG_NET_NS
+       cp->net = net;
+#endif
+}
+
+static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp,
+                                   struct net *net)
+{
+#ifdef CONFIG_NET_NS
+       return cp->net == net;
+#else
+       return 1;
+#endif
+}
 
 /*
  *     Extended internal versions of struct ip_vs_service_user and
@@ -485,6 +636,7 @@ struct ip_vs_service {
        unsigned                flags;    /* service status flags */
        unsigned                timeout;  /* persistent timeout in ticks */
        __be32                  netmask;  /* grouping granularity */
+       struct net              *net;
 
        struct list_head        destinations;  /* real server d-linked list */
        __u32                   num_dests;     /* number of servers */
@@ -510,8 +662,8 @@ struct ip_vs_dest {
        struct list_head        d_list;   /* for table with all the dests */
 
        u16                     af;             /* address family */
-       union nf_inet_addr      addr;           /* IP address of the server */
        __be16                  port;           /* port number of the server */
+       union nf_inet_addr      addr;           /* IP address of the server */
        volatile unsigned       flags;          /* dest status flags */
        atomic_t                conn_flags;     /* flags to copy to conn */
        atomic_t                weight;         /* server weight */
@@ -538,8 +690,8 @@ struct ip_vs_dest {
        /* for virtual service */
        struct ip_vs_service    *svc;           /* service it belongs to */
        __u16                   protocol;       /* which protocol (TCP/UDP) */
-       union nf_inet_addr      vaddr;          /* virtual IP address */
        __be16                  vport;          /* virtual port number */
+       union nf_inet_addr      vaddr;          /* virtual IP address */
        __u32                   vfwmark;        /* firewall mark of service */
 };
 
@@ -674,13 +826,14 @@ enum {
        IP_VS_DIR_LAST,
 };
 
-static inline void ip_vs_conn_fill_param(int af, int protocol,
+static inline void ip_vs_conn_fill_param(struct net *net, int af, int protocol,
                                         const union nf_inet_addr *caddr,
                                         __be16 cport,
                                         const union nf_inet_addr *vaddr,
                                         __be16 vport,
                                         struct ip_vs_conn_param *p)
 {
+       p->net = net;
        p->af = af;
        p->protocol = protocol;
        p->caddr = caddr;
@@ -695,7 +848,6 @@ struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p);
 struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p);
 
 struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
-                                           struct ip_vs_protocol *pp,
                                            const struct ip_vs_iphdr *iph,
                                            unsigned int proto_off,
                                            int inverse);
@@ -703,7 +855,6 @@ struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
 struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p);
 
 struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
-                                            struct ip_vs_protocol *pp,
                                             const struct ip_vs_iphdr *iph,
                                             unsigned int proto_off,
                                             int inverse);
@@ -719,14 +870,14 @@ extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
 struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p,
                                  const union nf_inet_addr *daddr,
                                  __be16 dport, unsigned flags,
-                                 struct ip_vs_dest *dest);
+                                 struct ip_vs_dest *dest, __u32 fwmark);
 extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
 
 extern const char * ip_vs_state_name(__u16 proto, int state);
 
-extern void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
+extern void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp);
 extern int ip_vs_check_template(struct ip_vs_conn *ct);
-extern void ip_vs_random_dropentry(void);
+extern void ip_vs_random_dropentry(struct net *net);
 extern int ip_vs_conn_init(void);
 extern void ip_vs_conn_cleanup(void);
 
@@ -796,12 +947,12 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
  *      (from ip_vs_app.c)
  */
 #define IP_VS_APP_MAX_PORTS  8
-extern int register_ip_vs_app(struct ip_vs_app *app);
-extern void unregister_ip_vs_app(struct ip_vs_app *app);
+extern int register_ip_vs_app(struct net *net, struct ip_vs_app *app);
+extern void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
 extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
 extern void ip_vs_unbind_app(struct ip_vs_conn *cp);
-extern int
-register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port);
+extern int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app,
+                                 __u16 proto, __u16 port);
 extern int ip_vs_app_inc_get(struct ip_vs_app *inc);
 extern void ip_vs_app_inc_put(struct ip_vs_app *inc);
 
@@ -814,15 +965,27 @@ void ip_vs_bind_pe(struct ip_vs_service *svc, struct ip_vs_pe *pe);
 void ip_vs_unbind_pe(struct ip_vs_service *svc);
 int register_ip_vs_pe(struct ip_vs_pe *pe);
 int unregister_ip_vs_pe(struct ip_vs_pe *pe);
-extern struct ip_vs_pe *ip_vs_pe_get(const char *name);
-extern void ip_vs_pe_put(struct ip_vs_pe *pe);
+struct ip_vs_pe *ip_vs_pe_getbyname(const char *name);
+struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
+
+static inline void ip_vs_pe_get(const struct ip_vs_pe *pe)
+{
+       if (pe && pe->module)
+               __module_get(pe->module);
+}
+
+static inline void ip_vs_pe_put(const struct ip_vs_pe *pe)
+{
+       if (pe && pe->module)
+               module_put(pe->module);
+}
 
 /*
  *     IPVS protocol functions (from ip_vs_proto.c)
  */
 extern int ip_vs_protocol_init(void);
 extern void ip_vs_protocol_cleanup(void);
-extern void ip_vs_protocol_timeout_change(int flags);
+extern void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
 extern int *ip_vs_create_timeout_table(int *table, int size);
 extern int
 ip_vs_set_state_timeout(int *table, int num, const char *const *names,
@@ -852,26 +1015,23 @@ extern struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
 extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
 extern struct ip_vs_conn *
 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
-              struct ip_vs_protocol *pp, int *ignored);
+              struct ip_vs_proto_data *pd, int *ignored);
 extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
-                       struct ip_vs_protocol *pp);
+                       struct ip_vs_proto_data *pd);
+
+extern void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
 
 
 /*
  *      IPVS control data and functions (from ip_vs_ctl.c)
  */
-extern int sysctl_ip_vs_cache_bypass;
-extern int sysctl_ip_vs_expire_nodest_conn;
-extern int sysctl_ip_vs_expire_quiescent_template;
-extern int sysctl_ip_vs_sync_threshold[2];
-extern int sysctl_ip_vs_nat_icmp_send;
-extern int sysctl_ip_vs_conntrack;
-extern int sysctl_ip_vs_snat_reroute;
 extern struct ip_vs_stats ip_vs_stats;
 extern const struct ctl_path net_vs_ctl_path[];
+extern int sysctl_ip_vs_sync_ver;
 
+extern void ip_vs_sync_switch_mode(struct net *net, int mode);
 extern struct ip_vs_service *
-ip_vs_service_get(int af, __u32 fwmark, __u16 protocol,
+ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol,
                  const union nf_inet_addr *vaddr, __be16 vport);
 
 static inline void ip_vs_service_put(struct ip_vs_service *svc)
@@ -880,7 +1040,7 @@ static inline void ip_vs_service_put(struct ip_vs_service *svc)
 }
 
 extern struct ip_vs_dest *
-ip_vs_lookup_real_service(int af, __u16 protocol,
+ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol,
                          const union nf_inet_addr *daddr, __be16 dport);
 
 extern int ip_vs_use_count_inc(void);
@@ -888,8 +1048,9 @@ extern void ip_vs_use_count_dec(void);
 extern int ip_vs_control_init(void);
 extern void ip_vs_control_cleanup(void);
 extern struct ip_vs_dest *
-ip_vs_find_dest(int af, const union nf_inet_addr *daddr, __be16 dport,
-               const union nf_inet_addr *vaddr, __be16 vport, __u16 protocol);
+ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
+               __be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
+               __u16 protocol, __u32 fwmark);
 extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
 
 
@@ -897,14 +1058,12 @@ extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
  *      IPVS sync daemon data and function prototypes
  *      (from ip_vs_sync.c)
  */
-extern volatile int ip_vs_sync_state;
-extern volatile int ip_vs_master_syncid;
-extern volatile int ip_vs_backup_syncid;
-extern char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-extern char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-extern int start_sync_thread(int state, char *mcast_ifn, __u8 syncid);
-extern int stop_sync_thread(int state);
-extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
+extern int start_sync_thread(struct net *net, int state, char *mcast_ifn,
+                            __u8 syncid);
+extern int stop_sync_thread(struct net *net, int state);
+extern void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp);
+extern int ip_vs_sync_init(void);
+extern void ip_vs_sync_cleanup(void);
 
 
 /*
@@ -912,8 +1071,8 @@ extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
  */
 extern int ip_vs_estimator_init(void);
 extern void ip_vs_estimator_cleanup(void);
-extern void ip_vs_new_estimator(struct ip_vs_stats *stats);
-extern void ip_vs_kill_estimator(struct ip_vs_stats *stats);
+extern void ip_vs_new_estimator(struct net *net, struct ip_vs_stats *stats);
+extern void ip_vs_kill_estimator(struct net *net, struct ip_vs_stats *stats);
 extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
 
 /*
@@ -952,14 +1111,14 @@ extern int ip_vs_icmp_xmit_v6
  *     we are loaded. Just set ip_vs_drop_rate to 'n' and
  *     we start to drop 1/rate of the packets
  */
-extern int ip_vs_drop_rate;
-extern int ip_vs_drop_counter;
 
-static __inline__ int ip_vs_todrop(void)
+static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
 {
-       if (!ip_vs_drop_rate) return 0;
-       if (--ip_vs_drop_counter > 0) return 0;
-       ip_vs_drop_counter = ip_vs_drop_rate;
+       if (!ipvs->drop_rate)
+               return 0;
+       if (--ipvs->drop_counter > 0)
+               return 0;
+       ipvs->drop_counter = ipvs->drop_rate;
        return 1;
 }
 
@@ -1047,9 +1206,9 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
  *      Netfilter connection tracking
  *      (from ip_vs_nfct.c)
  */
-static inline int ip_vs_conntrack_enabled(void)
+static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
 {
-       return sysctl_ip_vs_conntrack;
+       return ipvs->sysctl_conntrack;
 }
 
 extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
@@ -1062,7 +1221,7 @@ extern void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);
 
 #else
 
-static inline int ip_vs_conntrack_enabled(void)
+static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
 {
        return 0;
 }
@@ -1084,6 +1243,20 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
 /* CONFIG_IP_VS_NFCT */
 #endif
 
+static inline unsigned int
+ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
+{
+       /*
+        * We assume the overhead of processing an active connection is,
+        * on average, 256 times that of an inactive one. (This factor of
+        * 256 might not be accurate; we may tune it later.) For now we
+        * estimate the overhead with the following formula:
+        *                dest->activeconns*256 + dest->inactconns
+        */
+       return (atomic_read(&dest->activeconns) << 8) +
+               atomic_read(&dest->inactconns);
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _NET_IP_VS_H */
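
As a worked example of ip_vs_dest_conn_overhead() above: a destination with 3 active and 10 inactive connections scores (3 << 8) + 10 = 778. A stand-alone mirror of the shift-based weighting:

#include <stdio.h>

/* mirrors ip_vs_dest_conn_overhead(): active conns weighted 256:1 */
static unsigned int conn_overhead(unsigned int active, unsigned int inactive)
{
	return (active << 8) + inactive;
}

int main(void)
{
	printf("%u\n", conn_overhead(3, 10));	/* 3*256 + 10 = 778 */
	return 0;
}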
index 96e50e0ce3ca396700cd9797b932e391338a3b32..4635a5c809671e25c5ec6efe37861fe0a148650c 100644 (file)
@@ -524,12 +524,16 @@ extern void                       ip6_flush_pending_frames(struct sock *sk);
 extern int                     ip6_dst_lookup(struct sock *sk,
                                               struct dst_entry **dst,
                                               struct flowi *fl);
-extern int                     ip6_dst_blackhole(struct sock *sk,
-                                                 struct dst_entry **dst,
-                                                 struct flowi *fl);
-extern int                     ip6_sk_dst_lookup(struct sock *sk,
-                                                 struct dst_entry **dst,
-                                                 struct flowi *fl);
+extern struct dst_entry *      ip6_dst_lookup_flow(struct sock *sk,
+                                                   struct flowi *fl,
+                                                   const struct in6_addr *final_dst,
+                                                   bool can_sleep);
+extern struct dst_entry *      ip6_sk_dst_lookup_flow(struct sock *sk,
+                                                      struct flowi *fl,
+                                                      const struct in6_addr *final_dst,
+                                                      bool can_sleep);
+extern struct dst_entry *      ip6_blackhole_route(struct net *net,
+                                                   struct dst_entry *orig_dst);
 
 /*
  *     skb processing functions
index 62c0ce2d1dc874a4480ad07a91639795b2c70f64..2b072fa99399c3f3be161170a73861cdee1bb0d4 100644 (file)
@@ -341,6 +341,9 @@ struct ieee80211_bss_conf {
  *     the off-channel channel when a remain-on-channel offload is done
  *     in hardware -- normal packets still flow and are expected to be
  *     handled properly by the device.
+ * @IEEE80211_TX_INTFL_TKIP_MIC_FAILURE: Marks this packet to be used for TKIP
+ *     testing. It will be sent out with an incorrect Michael MIC key to allow
+ *     TKIP countermeasures to be tested.
  *
  * Note: If you have to add new flags to the enumeration, then don't
  *      forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary.
@@ -370,6 +373,7 @@ enum mac80211_tx_control_flags {
        IEEE80211_TX_CTL_LDPC                   = BIT(22),
        IEEE80211_TX_CTL_STBC                   = BIT(23) | BIT(24),
        IEEE80211_TX_CTL_TX_OFFCHAN             = BIT(25),
+       IEEE80211_TX_INTFL_TKIP_MIC_FAILURE     = BIT(26),
 };
 
 #define IEEE80211_TX_CTL_STBC_SHIFT            23
@@ -595,9 +599,10 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  *     the frame.
  * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on
  *     the frame.
- * @RX_FLAG_TSFT: The timestamp passed in the RX status (@mactime field)
- *     is valid. This is useful in monitor mode and necessary for beacon frames
- *     to enable IBSS merging.
+ * @RX_FLAG_MACTIME_MPDU: The timestamp passed in the RX status (@mactime
+ *     field) is valid and contains the time the first symbol of the MPDU
+ *     was received. This is useful in monitor mode and for proper IBSS
+ *     merging.
  * @RX_FLAG_SHORTPRE: Short preamble was used for this frame
  * @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index
  * @RX_FLAG_40MHZ: HT40 (40 MHz) was used
@@ -610,7 +615,7 @@ enum mac80211_rx_flags {
        RX_FLAG_IV_STRIPPED     = 1<<4,
        RX_FLAG_FAILED_FCS_CRC  = 1<<5,
        RX_FLAG_FAILED_PLCP_CRC = 1<<6,
-       RX_FLAG_TSFT            = 1<<7,
+       RX_FLAG_MACTIME_MPDU    = 1<<7,
        RX_FLAG_SHORTPRE        = 1<<8,
        RX_FLAG_HT              = 1<<9,
        RX_FLAG_40MHZ           = 1<<10,
@@ -1069,6 +1074,13 @@ enum ieee80211_tkip_key_type {
  *     to decrypt group addressed frames, then IBSS RSN support is still
  *     possible but software crypto will be used. Advertise the wiphy flag
  *     only in that case.
+ *
+ * @IEEE80211_HW_AP_LINK_PS: When operating in AP mode the device
+ *     autonomously manages the PS status of connected stations. When
+ *     this flag is set mac80211 will not trigger PS mode for connected
+ *     stations based on the PM bit of incoming frames.
+ *     Use ieee80211_sta_ps_transition() to report the PS transitions of
+ *     connected stations to mac80211.
  */
 enum ieee80211_hw_flags {
        IEEE80211_HW_HAS_RATE_CONTROL                   = 1<<0,
@@ -1093,6 +1105,7 @@ enum ieee80211_hw_flags {
        IEEE80211_HW_CONNECTION_MONITOR                 = 1<<19,
        IEEE80211_HW_SUPPORTS_CQM_RSSI                  = 1<<20,
        IEEE80211_HW_SUPPORTS_PER_STA_GTK               = 1<<21,
+       IEEE80211_HW_AP_LINK_PS                         = 1<<22,
 };
 
 /**
@@ -1147,6 +1160,17 @@ enum ieee80211_hw_flags {
  * @napi_weight: weight used for NAPI polling.  You must specify an
  *     appropriate value here if a napi_poll operation is provided
  *     by your driver.
+ *
+ * @max_rx_aggregation_subframes: maximum buffer size (number of
+ *     sub-frames) to be used for A-MPDU block ack receiver
+ *     aggregation.
+ *     This is only relevant if the device has restrictions on the
+ *     number of subframes; if it relies on mac80211 to do reordering
+ *     it shouldn't be set.
+ *
+ * @max_tx_aggregation_subframes: maximum number of subframes in an
+ *     aggregate an HT driver will transmit, used by the peer as a
+ *     hint to size its reorder buffer.
  */
 struct ieee80211_hw {
        struct ieee80211_conf conf;
@@ -1165,6 +1189,8 @@ struct ieee80211_hw {
        u8 max_rates;
        u8 max_report_rates;
        u8 max_rate_tries;
+       u8 max_rx_aggregation_subframes;
+       u8 max_tx_aggregation_subframes;
 };
 
 /**
@@ -1688,7 +1714,9 @@ enum ieee80211_ampdu_mlme_action {
  *     station, AP, IBSS/WDS/mesh peer etc. This callback can sleep.
  *
  * @sta_notify: Notifies low level driver about power state transition of an
- *     associated station, AP,  IBSS/WDS/mesh peer etc. Must be atomic.
+ *     associated station, AP,  IBSS/WDS/mesh peer etc. For a VIF operating
+ *     in AP mode, this callback will not be called when the flag
+ *     %IEEE80211_HW_AP_LINK_PS is set. Must be atomic.
  *
  * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
  *     bursting) for a hardware TX queue.
@@ -1723,6 +1751,10 @@ enum ieee80211_ampdu_mlme_action {
  *     ieee80211_ampdu_mlme_action. Starting sequence number (@ssn)
  *     is the first frame we expect to perform the action on. Notice
  *     that TX/RX_STOP can pass NULL for this parameter.
+ *     The @buf_size parameter is only valid when the action is set to
+ *     %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder
+ *     buffer size (number of subframes) for this session -- aggregates
+ *     containing more subframes than this may not be transmitted to the peer.
  *     Returns a negative error code on failure.
  *     The callback can sleep.
  *
@@ -1767,9 +1799,14 @@ enum ieee80211_ampdu_mlme_action {
  *     ieee80211_remain_on_channel_expired(). This callback may sleep.
  * @cancel_remain_on_channel: Requests that an ongoing off-channel period is
  *     aborted before it expires. This callback may sleep.
+ * @offchannel_tx: Transmit frame on another channel, wait for a response
+ *     and return. Reliable TX status must be reported for the frame. If the
+ *     return value is 1, then the @remain_on_channel will be used with a
+ *     regular transmission (if supported.)
+ * @offchannel_tx_cancel_wait: cancel wait associated with offchannel TX
  */
 struct ieee80211_ops {
-       int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
+       void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
        int (*start)(struct ieee80211_hw *hw);
        void (*stop)(struct ieee80211_hw *hw);
        int (*add_interface)(struct ieee80211_hw *hw,
@@ -1825,7 +1862,8 @@ struct ieee80211_ops {
        int (*ampdu_action)(struct ieee80211_hw *hw,
                            struct ieee80211_vif *vif,
                            enum ieee80211_ampdu_mlme_action action,
-                           struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+                           struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                           u8 buf_size);
        int (*get_survey)(struct ieee80211_hw *hw, int idx,
                struct survey_info *survey);
        void (*rfkill_poll)(struct ieee80211_hw *hw);
@@ -1845,6 +1883,11 @@ struct ieee80211_ops {
                                 enum nl80211_channel_type channel_type,
                                 int duration);
        int (*cancel_remain_on_channel)(struct ieee80211_hw *hw);
+       int (*offchannel_tx)(struct ieee80211_hw *hw, struct sk_buff *skb,
+                            struct ieee80211_channel *chan,
+                            enum nl80211_channel_type channel_type,
+                            unsigned int wait);
+       int (*offchannel_tx_cancel_wait)(struct ieee80211_hw *hw);
 };
 
 /**
@@ -2113,6 +2156,48 @@ static inline void ieee80211_rx_ni(struct ieee80211_hw *hw,
        local_bh_enable();
 }
 
+/**
+ * ieee80211_sta_ps_transition - PS transition for connected sta
+ *
+ * When operating in AP mode with the %IEEE80211_HW_AP_LINK_PS
+ * flag set, use this function to inform mac80211 about a connected station
+ * entering/leaving PS mode.
+ *
+ * This function may not be called in IRQ context or with softirqs enabled.
+ *
+ * Calls to this function for a single hardware device must be synchronized against
+ * each other.
+ *
+ * The function returns -EINVAL when the requested PS mode is already set.
+ *
+ * @sta: currently connected sta
+ * @start: start or stop PS
+ */
+int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start);
+
+/**
+ * ieee80211_sta_ps_transition_ni - PS transition for connected sta
+ *                                  (in process context)
+ *
+ * Like ieee80211_sta_ps_transition() but can be called in process context
+ * (internally disables bottom halves). Concurrent call restriction still
+ * applies.
+ *
+ * @sta: currently connected sta
+ * @start: start or stop PS
+ */
+static inline int ieee80211_sta_ps_transition_ni(struct ieee80211_sta *sta,
+                                                 bool start)
+{
+       int ret;
+
+       local_bh_disable();
+       ret = ieee80211_sta_ps_transition(sta, start);
+       local_bh_enable();
+
+       return ret;
+}
+
 /*
  * The TX headroom reserved by mac80211 for its own tx_status functions.
  * This is enough for the radiotap header.
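
A driver that sets IEEE80211_HW_AP_LINK_PS (added above) reports station power-save changes itself instead of having mac80211 watch the PM bit. A hedged fragment of that report from a driver's process-context RX path (sta and ps_on are whatever the driver's own logic produced):

int ret = ieee80211_sta_ps_transition_ni(sta, ps_on);

if (ret == -EINVAL)
	pr_debug("station already %s power save\n",
		 ps_on ? "in" : "out of");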
index 1bf812b21fb706a8c108c1d2faeb40986f552746..b3b4a34cb2cc9a40f23a6f61c5e8696e625c097f 100644 (file)
@@ -20,6 +20,7 @@
 #include <net/netns/conntrack.h>
 #endif
 #include <net/netns/xfrm.h>
+#include <net/netns/ip_vs.h>
 
 struct proc_dir_entry;
 struct net_device;
@@ -94,6 +95,7 @@ struct net {
 #ifdef CONFIG_XFRM
        struct netns_xfrm       xfrm;
 #endif
+       struct netns_ipvs       *ipvs;
 };
 
 
index e82b7bab3ff355bfe85bb6a0f8504ff3a4ea54d7..22b239c17eaa4509cd24a19146dd5676ff5b989a 100644 (file)
@@ -21,7 +21,6 @@ struct netevent_redirect {
 
 enum netevent_notif_type {
        NETEVENT_NEIGH_UPDATE = 1, /* arg is struct neighbour ptr */
-       NETEVENT_PMTU_UPDATE,      /* arg is struct dst_entry ptr */
        NETEVENT_REDIRECT,         /* arg is struct netevent_redirect ptr */
 };
 
index d85cff10e1693535f326b7467330b985734d2309..d0d13378991e54f46f2dc961329edd9ddc015671 100644 (file)
@@ -50,11 +50,24 @@ union nf_conntrack_expect_proto {
 /* per conntrack: application helper private data */
 union nf_conntrack_help {
        /* insert conntrack helper private data (master) here */
+#if defined(CONFIG_NF_CONNTRACK_FTP) || defined(CONFIG_NF_CONNTRACK_FTP_MODULE)
        struct nf_ct_ftp_master ct_ftp_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_PPTP) || \
+    defined(CONFIG_NF_CONNTRACK_PPTP_MODULE)
        struct nf_ct_pptp_master ct_pptp_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_H323) || \
+    defined(CONFIG_NF_CONNTRACK_H323_MODULE)
        struct nf_ct_h323_master ct_h323_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_SANE) || \
+    defined(CONFIG_NF_CONNTRACK_SANE_MODULE)
        struct nf_ct_sane_master ct_sane_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_SIP) || defined(CONFIG_NF_CONNTRACK_SIP_MODULE)
        struct nf_ct_sip_master ct_sip_info;
+#endif
 };
 
 #include <linux/types.h>
@@ -116,14 +129,14 @@ struct nf_conn {
        u_int32_t secmark;
 #endif
 
-       /* Storage reserved for other modules: */
-       union nf_conntrack_proto proto;
-
        /* Extensions */
        struct nf_ct_ext *ext;
 #ifdef CONFIG_NET_NS
        struct net *ct_net;
 #endif
+
+       /* Storage reserved for other modules, must be the last member */
+       union nf_conntrack_proto proto;
 };
 
 static inline struct nf_conn *
@@ -189,9 +202,9 @@ extern void nf_ct_l3proto_module_put(unsigned short l3proto);
  * Allocate a hashtable of hlist_head (if nulls == 0),
  * or hlist_nulls_head (if nulls == 1)
  */
-extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls);
+extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
 
-extern void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size);
+extern void nf_ct_free_hashtable(void *hash, unsigned int size);
 
 extern struct nf_conntrack_tuple_hash *
 __nf_conntrack_find(struct net *net, u16 zone,
index 349cefedc9f3c869627211b95bbc57e5ecfde341..4283508b3e185882bff18fe267f205df4eda39cc 100644 (file)
@@ -23,12 +23,17 @@ struct nf_conntrack_ecache {
 static inline struct nf_conntrack_ecache *
 nf_ct_ecache_find(const struct nf_conn *ct)
 {
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
        return nf_ct_ext_find(ct, NF_CT_EXT_ECACHE);
+#else
+       return NULL;
+#endif
 }
 
 static inline struct nf_conntrack_ecache *
 nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
 {
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
        struct net *net = nf_ct_net(ct);
        struct nf_conntrack_ecache *e;
 
@@ -45,6 +50,9 @@ nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
                e->expmask = expmask;
        }
        return e;
+#else
+       return NULL;
+#endif
 };
 
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
@@ -59,7 +67,7 @@ struct nf_ct_event_notifier {
        int (*fcn)(unsigned int events, struct nf_ct_event *item);
 };
 
-extern struct nf_ct_event_notifier *nf_conntrack_event_cb;
+extern struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
 extern int nf_conntrack_register_notifier(struct nf_ct_event_notifier *nb);
 extern void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *nb);
 
@@ -156,7 +164,7 @@ struct nf_exp_event_notifier {
        int (*fcn)(unsigned int events, struct nf_exp_event *item);
 };
 
-extern struct nf_exp_event_notifier *nf_expect_event_cb;
+extern struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
 extern int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *nb);
 extern void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *nb);
 
index 0772d296dfdb12b26a4bd8d97fecdd914c088536..2dcf31703acba7a18b9f167866390827054f9d98 100644 (file)
@@ -7,10 +7,19 @@
 
 enum nf_ct_ext_id {
        NF_CT_EXT_HELPER,
+#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE)
        NF_CT_EXT_NAT,
+#endif
        NF_CT_EXT_ACCT,
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
        NF_CT_EXT_ECACHE,
+#endif
+#ifdef CONFIG_NF_CONNTRACK_ZONES
        NF_CT_EXT_ZONE,
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+       NF_CT_EXT_TSTAMP,
+#endif
        NF_CT_EXT_NUM,
 };
 
@@ -19,6 +28,7 @@ enum nf_ct_ext_id {
 #define NF_CT_EXT_ACCT_TYPE struct nf_conn_counter
 #define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache
 #define NF_CT_EXT_ZONE_TYPE struct nf_conntrack_zone
+#define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp
 
 /* Extensions: optional stuff which isn't permanently in struct. */
 struct nf_ct_ext {
index 32c305dbdab6908758bef3df812477cee15531ee..f1c1311adc2cb6fba8d013cc641e995105cdf655 100644 (file)
@@ -63,4 +63,10 @@ static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct)
 extern int nf_conntrack_helper_init(void);
 extern void nf_conntrack_helper_fini(void);
 
+extern int nf_conntrack_broadcast_help(struct sk_buff *skb,
+                                      unsigned int protoff,
+                                      struct nf_conn *ct,
+                                      enum ip_conntrack_info ctinfo,
+                                      unsigned int timeout);
+
 #endif /*_NF_CONNTRACK_HELPER_H*/
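
A minimal sketch (not part of this patch) of how the new nf_conntrack_broadcast_help() export would be called from a conntrack helper's ->help callback; the callback signature is assumed from the usual helper API, and the function name and timeout below are illustrative only:

/* hypothetical broadcast-based helper delegating to the shared routine */
static int example_bcast_help(struct sk_buff *skb, unsigned int protoff,
                              struct nf_conn *ct,
                              enum ip_conntrack_info ctinfo)
{
        /* expect the broadcast reply for ~3 seconds (illustrative timeout) */
        return nf_conntrack_broadcast_help(skb, protoff, ct, ctinfo, 3);
}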
index a7547611e8f17389a2d95df481e4232315531df3..e8010f445ae1318f9a6f1e55d156313aa2af70a3 100644 (file)
@@ -73,7 +73,7 @@ struct nf_conntrack_l3proto {
        struct module *me;
 };
 
-extern struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX];
+extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX];
 
 /* Protocol registration. */
 extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto);
diff --git a/include/net/netfilter/nf_conntrack_timestamp.h b/include/net/netfilter/nf_conntrack_timestamp.h
new file mode 100644 (file)
index 0000000..fc9c82b
--- /dev/null
@@ -0,0 +1,65 @@
+#ifndef _NF_CONNTRACK_TSTAMP_H
+#define _NF_CONNTRACK_TSTAMP_H
+
+#include <net/net_namespace.h>
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+
+struct nf_conn_tstamp {
+       u_int64_t start;
+       u_int64_t stop;
+};
+
+static inline
+struct nf_conn_tstamp *nf_conn_tstamp_find(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+       return nf_ct_ext_find(ct, NF_CT_EXT_TSTAMP);
+#else
+       return NULL;
+#endif
+}
+
+static inline
+struct nf_conn_tstamp *nf_ct_tstamp_ext_add(struct nf_conn *ct, gfp_t gfp)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+       struct net *net = nf_ct_net(ct);
+
+       if (!net->ct.sysctl_tstamp)
+               return NULL;
+
+       return nf_ct_ext_add(ct, NF_CT_EXT_TSTAMP, gfp);
+#else
+       return NULL;
+#endif
+};
+
+static inline bool nf_ct_tstamp_enabled(struct net *net)
+{
+       return net->ct.sysctl_tstamp != 0;
+}
+
+static inline void nf_ct_set_tstamp(struct net *net, bool enable)
+{
+       net->ct.sysctl_tstamp = enable;
+}
+
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+extern int nf_conntrack_tstamp_init(struct net *net);
+extern void nf_conntrack_tstamp_fini(struct net *net);
+#else
+static inline int nf_conntrack_tstamp_init(struct net *net)
+{
+       return 0;
+}
+
+static inline void nf_conntrack_tstamp_fini(struct net *net)
+{
+       return;
+}
+#endif /* CONFIG_NF_CONNTRACK_TIMESTAMP */
+
+#endif /* _NF_CONNTRACK_TSTAMP_H */
index f5f09f032a90fa4fab8c849e9d22302542328398..aff80b190c1231a8eb5cae1b61b34007c833d870 100644 (file)
@@ -56,7 +56,9 @@ struct nf_nat_multi_range_compat {
 /* per conntrack: nat application helper private data */
 union nf_conntrack_nat_help {
        /* insert nat helper private data here */
+#if defined(CONFIG_NF_NAT_PPTP) || defined(CONFIG_NF_NAT_PPTP_MODULE)
        struct nf_nat_pptp nat_pptp_info;
+#endif
 };
 
 struct nf_conn;
@@ -84,7 +86,11 @@ extern int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
 
 static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct)
 {
+#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE)
        return nf_ct_ext_find(ct, NF_CT_EXT_NAT);
+#else
+       return NULL;
+#endif
 }
 
 #else  /* !__KERNEL__: iptables wants this to compile. */
index 33602ab66190c20e8de7cb70d284bae9f9d92e59..3dc7b98effebd49e9079e25c458a479c2448f59a 100644 (file)
@@ -21,9 +21,9 @@ static inline int nf_nat_initialized(struct nf_conn *ct,
                                     enum nf_nat_manip_type manip)
 {
        if (manip == IP_NAT_MANIP_SRC)
-               return test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
+               return ct->status & IPS_SRC_NAT_DONE;
        else
-               return test_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
+               return ct->status & IPS_DST_NAT_DONE;
 }
 
 struct nlattr;
index 373f1a900cf4784abf1f4e71791f501e222dfe3f..8a3906a08f5fc71c8696984b8361f7a11bfbaf04 100644 (file)
@@ -856,18 +856,27 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
 #define NLA_PUT_BE16(skb, attrtype, value) \
        NLA_PUT_TYPE(skb, __be16, attrtype, value)
 
+#define NLA_PUT_NET16(skb, attrtype, value) \
+       NLA_PUT_BE16(skb, attrtype | NLA_F_NET_BYTEORDER, value)
+
 #define NLA_PUT_U32(skb, attrtype, value) \
        NLA_PUT_TYPE(skb, u32, attrtype, value)
 
 #define NLA_PUT_BE32(skb, attrtype, value) \
        NLA_PUT_TYPE(skb, __be32, attrtype, value)
 
+#define NLA_PUT_NET32(skb, attrtype, value) \
+       NLA_PUT_BE32(skb, attrtype | NLA_F_NET_BYTEORDER, value)
+
 #define NLA_PUT_U64(skb, attrtype, value) \
        NLA_PUT_TYPE(skb, u64, attrtype, value)
 
 #define NLA_PUT_BE64(skb, attrtype, value) \
        NLA_PUT_TYPE(skb, __be64, attrtype, value)
 
+#define NLA_PUT_NET64(skb, attrtype, value) \
+       NLA_PUT_BE64(skb, attrtype | NLA_F_NET_BYTEORDER, value)
+
 #define NLA_PUT_STRING(skb, attrtype, value) \
        NLA_PUT(skb, attrtype, strlen(value) + 1, value)
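
A hedged usage sketch (not part of the patch): the NLA_PUT_NET* variants behave like NLA_PUT_BE* but additionally set NLA_F_NET_BYTEORDER on the attribute type; like the other NLA_PUT* macros they jump to a local nla_put_failure label on error. The attribute numbers below are hypothetical.

static int example_fill_info(struct sk_buff *skb, __be32 addr, __be16 port)
{
        NLA_PUT_NET32(skb, 1 /* EXAMPLE_ATTR_ADDR, hypothetical */, addr);
        NLA_PUT_NET16(skb, 2 /* EXAMPLE_ATTR_PORT, hypothetical */, port);
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}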
 
index d4958d4c65748f1f0d31453e39be6a85d00d988f..341eb089349e18654b8beb18172c4ce6c168a9e7 100644 (file)
@@ -21,15 +21,15 @@ struct netns_ct {
        int                     sysctl_events;
        unsigned int            sysctl_events_retry_timeout;
        int                     sysctl_acct;
+       int                     sysctl_tstamp;
        int                     sysctl_checksum;
        unsigned int            sysctl_log_invalid; /* Log invalid packets */
 #ifdef CONFIG_SYSCTL
        struct ctl_table_header *sysctl_header;
        struct ctl_table_header *acct_sysctl_header;
+       struct ctl_table_header *tstamp_sysctl_header;
        struct ctl_table_header *event_sysctl_header;
 #endif
-       int                     hash_vmalloc;
-       int                     expect_vmalloc;
        char                    *slabname;
 };
 #endif
diff --git a/include/net/netns/ip_vs.h b/include/net/netns/ip_vs.h
new file mode 100644 (file)
index 0000000..259ebac
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ *  IP Virtual Server
+ *  Data structure for network namespace
+ *
+ */
+
+#ifndef IP_VS_H_
+#define IP_VS_H_
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/list_nulls.h>
+#include <linux/ip_vs.h>
+#include <asm/atomic.h>
+#include <linux/in.h>
+
+struct ip_vs_stats;
+struct ip_vs_sync_buff;
+struct ctl_table_header;
+
+struct netns_ipvs {
+       int                     gen;            /* Generation */
+       /*
+        *      Hash table: for real service lookups
+        */
+       #define IP_VS_RTAB_BITS 4
+       #define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
+       #define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
+
+       struct list_head        rs_table[IP_VS_RTAB_SIZE];
+       /* ip_vs_app */
+       struct list_head        app_list;
+       struct mutex            app_mutex;
+       struct lock_class_key   app_key;        /* mutex debugging */
+
+       /* ip_vs_proto */
+       #define IP_VS_PROTO_TAB_SIZE    32      /* must be power of 2 */
+       struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
+       /* ip_vs_proto_tcp */
+#ifdef CONFIG_IP_VS_PROTO_TCP
+       #define TCP_APP_TAB_BITS        4
+       #define TCP_APP_TAB_SIZE        (1 << TCP_APP_TAB_BITS)
+       #define TCP_APP_TAB_MASK        (TCP_APP_TAB_SIZE - 1)
+       struct list_head        tcp_apps[TCP_APP_TAB_SIZE];
+       spinlock_t              tcp_app_lock;
+#endif
+       /* ip_vs_proto_udp */
+#ifdef CONFIG_IP_VS_PROTO_UDP
+       #define UDP_APP_TAB_BITS        4
+       #define UDP_APP_TAB_SIZE        (1 << UDP_APP_TAB_BITS)
+       #define UDP_APP_TAB_MASK        (UDP_APP_TAB_SIZE - 1)
+       struct list_head        udp_apps[UDP_APP_TAB_SIZE];
+       spinlock_t              udp_app_lock;
+#endif
+       /* ip_vs_proto_sctp */
+#ifdef CONFIG_IP_VS_PROTO_SCTP
+       #define SCTP_APP_TAB_BITS       4
+       #define SCTP_APP_TAB_SIZE       (1 << SCTP_APP_TAB_BITS)
+       #define SCTP_APP_TAB_MASK       (SCTP_APP_TAB_SIZE - 1)
+       /* Hash table for SCTP application incarnations  */
+       struct list_head        sctp_apps[SCTP_APP_TAB_SIZE];
+       spinlock_t              sctp_app_lock;
+#endif
+       /* ip_vs_conn */
+       atomic_t                conn_count;      /*  connection counter */
+
+       /* ip_vs_ctl */
+       struct ip_vs_stats              *tot_stats;  /* Statistics & est. */
+       struct ip_vs_cpu_stats __percpu *cpustats;   /* Stats per cpu */
+       seqcount_t                      *ustats_seq; /* u64 read retry */
+
+       int                     num_services;    /* no of virtual services */
+       /* 1/rate drop and drop-entry variables */
+       struct delayed_work     defense_work;   /* Work handler */
+       int                     drop_rate;
+       int                     drop_counter;
+       atomic_t                dropentry;
+       /* locks in ctl.c */
+       spinlock_t              dropentry_lock;  /* drop entry handling */
+       spinlock_t              droppacket_lock; /* drop packet handling */
+       spinlock_t              securetcp_lock;  /* state and timeout tables */
+       rwlock_t                rs_lock;         /* real services table */
+       /* semaphore for IPVS sockopts; [gs]etsockopt may sleep. */
+       struct lock_class_key   ctl_key;        /* ctl_mutex debugging */
+       /* Trash for destinations */
+       struct list_head        dest_trash;
+       /* Service counters */
+       atomic_t                ftpsvc_counter;
+       atomic_t                nullsvc_counter;
+
+       /* sys-ctl struct */
+       struct ctl_table_header *sysctl_hdr;
+       struct ctl_table        *sysctl_tbl;
+       /* sysctl variables */
+       int                     sysctl_amemthresh;
+       int                     sysctl_am_droprate;
+       int                     sysctl_drop_entry;
+       int                     sysctl_drop_packet;
+       int                     sysctl_secure_tcp;
+#ifdef CONFIG_IP_VS_NFCT
+       int                     sysctl_conntrack;
+#endif
+       int                     sysctl_snat_reroute;
+       int                     sysctl_sync_ver;
+       int                     sysctl_cache_bypass;
+       int                     sysctl_expire_nodest_conn;
+       int                     sysctl_expire_quiescent_template;
+       int                     sysctl_sync_threshold[2];
+       int                     sysctl_nat_icmp_send;
+
+       /* ip_vs_lblc */
+       int                     sysctl_lblc_expiration;
+       struct ctl_table_header *lblc_ctl_header;
+       struct ctl_table        *lblc_ctl_table;
+       /* ip_vs_lblcr */
+       int                     sysctl_lblcr_expiration;
+       struct ctl_table_header *lblcr_ctl_header;
+       struct ctl_table        *lblcr_ctl_table;
+       /* ip_vs_est */
+       struct list_head        est_list;       /* estimator list */
+       spinlock_t              est_lock;
+       struct timer_list       est_timer;      /* Estimation timer */
+       /* ip_vs_sync */
+       struct list_head        sync_queue;
+       spinlock_t              sync_lock;
+       struct ip_vs_sync_buff  *sync_buff;
+       spinlock_t              sync_buff_lock;
+       struct sockaddr_in      sync_mcast_addr;
+       struct task_struct      *master_thread;
+       struct task_struct      *backup_thread;
+       int                     send_mesg_maxlen;
+       int                     recv_mesg_maxlen;
+       volatile int            sync_state;
+       volatile int            master_syncid;
+       volatile int            backup_syncid;
+       /* multicast interface name */
+       char                    master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+       char                    backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+       /* net name space ptr */
+       struct net              *net;            /* Needed by timer routines */
+};
+
+#endif /* IP_VS_H_ */
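
An illustrative sketch only (not from the patch): per-netns IPVS state such as the real-service hash table and its lock is expected to be set up from a per-namespace init hook; the function name below is hypothetical.

static int example_ipvs_pernet_init(struct netns_ipvs *ipvs)
{
        int idx;

        /* empty the real-service hash buckets for this namespace */
        for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
                INIT_LIST_HEAD(&ipvs->rs_table[idx]);

        rwlock_init(&ipvs->rs_lock);
        atomic_set(&ipvs->conn_count, 0);
        return 0;
}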
index d68c3f12177484f7150cec7a8dd0ae7e91538d7d..e2e2ef57eca2c36233152df7d1eae1ee4ab569c2 100644 (file)
@@ -43,7 +43,6 @@ struct netns_ipv4 {
        struct xt_table         *nat_table;
        struct hlist_head       *nat_bysource;
        unsigned int            nat_htable_size;
-       int                     nat_vmalloced;
 #endif
 
        int sysctl_icmp_echo_ignore_all;
index b60b28c99e8704a3d0a77c1ec1b3095ac7180053..b669fe6dbc3bad2a6d61b7a2b9d54ff83b7e51c8 100644 (file)
@@ -28,7 +28,6 @@ struct pep_sock {
 
        /* XXX: union-ify listening vs connected stuff ? */
        /* Listening socket stuff: */
-       struct hlist_head       ackq;
        struct hlist_head       hlist;
 
        /* Connected socket stuff: */
@@ -45,10 +44,6 @@ struct pep_sock {
        u8                      tx_fc;  /* TX flow control */
        u8                      init_enable;    /* auto-enable at creation */
        u8                      aligned;
-#ifdef CONFIG_PHONET_PIPECTRLR
-       u8                      pipe_state;
-       struct sockaddr_pn      remote_pep;
-#endif
 };
 
 static inline struct pep_sock *pep_sk(struct sock *sk)
@@ -158,6 +153,7 @@ enum {
        PN_LEGACY_FLOW_CONTROL,
        PN_ONE_CREDIT_FLOW_CONTROL,
        PN_MULTI_CREDIT_FLOW_CONTROL,
+       PN_MAX_FLOW_CONTROL,
 };
 
 #define pn_flow_safe(fc) ((fc) >> 1)
@@ -169,21 +165,4 @@ enum {
        PEP_IND_READY,
 };
 
-#ifdef CONFIG_PHONET_PIPECTRLR
-#define PNS_PEP_CONNECT_UTID           0x02
-#define PNS_PIPE_CREATED_IND_UTID      0x04
-#define PNS_PIPE_ENABLE_UTID           0x0A
-#define PNS_PIPE_ENABLED_IND_UTID      0x0C
-#define PNS_PIPE_DISABLE_UTID          0x0F
-#define PNS_PIPE_DISABLED_IND_UTID     0x11
-#define PNS_PEP_DISCONNECT_UTID        0x06
-
-/* Used for tracking state of a pipe */
-enum {
-       PIPE_IDLE,
-       PIPE_DISABLED,
-       PIPE_ENABLED,
-};
-#endif /* CONFIG_PHONET_PIPECTRLR */
-
 #endif
index 5395e09187df46f7d7e7e2403a0c6abeab418c4f..68e509750caa4cb60d54b48d62cb23cf109dd17d 100644 (file)
@@ -36,6 +36,7 @@
 struct pn_sock {
        struct sock     sk;
        u16             sobject;
+       u16             dobject;
        u8              resource;
 };
 
index dc07495bce4cb0dc7807bdaa390d62881fc7d614..6f7eb800974af3fae4c12a7d054d8e61d9e2cf9f 100644 (file)
@@ -38,7 +38,7 @@ struct net_protocol {
        void                    (*err_handler)(struct sk_buff *skb, u32 info);
        int                     (*gso_send_check)(struct sk_buff *skb);
        struct sk_buff         *(*gso_segment)(struct sk_buff *skb,
-                                              int features);
+                                              u32 features);
        struct sk_buff        **(*gro_receive)(struct sk_buff **head,
                                               struct sk_buff *skb);
        int                     (*gro_complete)(struct sk_buff *skb);
@@ -57,7 +57,7 @@ struct inet6_protocol {
 
        int     (*gso_send_check)(struct sk_buff *skb);
        struct sk_buff *(*gso_segment)(struct sk_buff *skb,
-                                      int features);
+                                      u32 features);
        struct sk_buff **(*gro_receive)(struct sk_buff **head,
                                        struct sk_buff *skb);
        int     (*gro_complete)(struct sk_buff *skb);
index 93e10c453f6b8a08cc12dd3a70cb86d6473a4b19..9257f5f17337d1f3f5e208ec21d38dda0500e600 100644 (file)
 
 struct fib_nh;
 struct inet_peer;
+struct fib_info;
 struct rtable {
        struct dst_entry        dst;
 
-       /* Cache lookup keys */
-       struct flowi            fl;
+       /* Lookup key. */
+       __be32                  rt_key_dst;
+       __be32                  rt_key_src;
 
        int                     rt_genid;
        unsigned                rt_flags;
        __u16                   rt_type;
+       __u8                    rt_tos;
 
        __be32                  rt_dst; /* Path destination     */
        __be32                  rt_src; /* Path source          */
        int                     rt_iif;
+       int                     rt_oif;
+       __u32                   rt_mark;
 
        /* Info on neighbour */
        __be32                  rt_gateway;
 
        /* Miscellaneous cached information */
        __be32                  rt_spec_dst; /* RFC1122 specific destination */
+       u32                     rt_peer_genid;
        struct inet_peer        *peer; /* long-living peer info */
+       struct fib_info         *fi; /* for client ref to shared metrics */
 };
 
 static inline bool rt_is_input_route(struct rtable *rt)
 {
-       return rt->fl.iif != 0;
+       return rt->rt_iif != 0;
 }
 
 static inline bool rt_is_output_route(struct rtable *rt)
 {
-       return rt->fl.iif == 0;
+       return rt->rt_iif == 0;
 }
 
 struct ip_rt_acct {
@@ -115,9 +122,15 @@ extern void                ip_rt_redirect(__be32 old_gw, __be32 dst, __be32 new_gw,
                                       __be32 src, struct net_device *dev);
 extern void            rt_cache_flush(struct net *net, int how);
 extern void            rt_cache_flush_batch(struct net *net);
-extern int             __ip_route_output_key(struct net *, struct rtable **, const struct flowi *flp);
-extern int             ip_route_output_key(struct net *, struct rtable **, struct flowi *flp);
-extern int             ip_route_output_flow(struct net *, struct rtable **rp, struct flowi *flp, struct sock *sk, int flags);
+extern struct rtable *__ip_route_output_key(struct net *, const struct flowi *flp);
+extern struct rtable *ip_route_output_flow(struct net *, struct flowi *flp,
+                                          struct sock *sk);
+extern struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig);
+
+static inline struct rtable *ip_route_output_key(struct net *net, struct flowi *flp)
+{
+       return ip_route_output_flow(net, flp, NULL);
+}
 
 extern int ip_route_input_common(struct sk_buff *skb, __be32 dst, __be32 src,
                                 u8 tos, struct net_device *devin, bool noref);
@@ -162,10 +175,10 @@ static inline char rt_tos2priority(u8 tos)
        return ip_tos2prio[IPTOS_TOS(tos)>>1];
 }
 
-static inline int ip_route_connect(struct rtable **rp, __be32 dst,
-                                  __be32 src, u32 tos, int oif, u8 protocol,
-                                  __be16 sport, __be16 dport, struct sock *sk,
-                                  int flags)
+static inline struct rtable *ip_route_connect(__be32 dst, __be32 src, u32 tos,
+                                             int oif, u8 protocol,
+                                             __be16 sport, __be16 dport,
+                                             struct sock *sk, bool can_sleep)
 {
        struct flowi fl = { .oif = oif,
                            .mark = sk->sk_mark,
@@ -175,44 +188,52 @@ static inline int ip_route_connect(struct rtable **rp, __be32 dst,
                            .proto = protocol,
                            .fl_ip_sport = sport,
                            .fl_ip_dport = dport };
-       int err;
        struct net *net = sock_net(sk);
+       struct rtable *rt;
 
        if (inet_sk(sk)->transparent)
                fl.flags |= FLOWI_FLAG_ANYSRC;
+       if (protocol == IPPROTO_TCP)
+               fl.flags |= FLOWI_FLAG_PRECOW_METRICS;
+       if (can_sleep)
+               fl.flags |= FLOWI_FLAG_CAN_SLEEP;
 
        if (!dst || !src) {
-               err = __ip_route_output_key(net, rp, &fl);
-               if (err)
-                       return err;
-               fl.fl4_dst = (*rp)->rt_dst;
-               fl.fl4_src = (*rp)->rt_src;
-               ip_rt_put(*rp);
-               *rp = NULL;
+               rt = __ip_route_output_key(net, &fl);
+               if (IS_ERR(rt))
+                       return rt;
+               fl.fl4_dst = rt->rt_dst;
+               fl.fl4_src = rt->rt_src;
+               ip_rt_put(rt);
        }
        security_sk_classify_flow(sk, &fl);
-       return ip_route_output_flow(net, rp, &fl, sk, flags);
+       return ip_route_output_flow(net, &fl, sk);
 }
 
-static inline int ip_route_newports(struct rtable **rp, u8 protocol,
-                                   __be16 sport, __be16 dport, struct sock *sk)
+static inline struct rtable *ip_route_newports(struct rtable *rt,
+                                              u8 protocol, __be16 orig_sport,
+                                              __be16 orig_dport, __be16 sport,
+                                              __be16 dport, struct sock *sk)
 {
-       if (sport != (*rp)->fl.fl_ip_sport ||
-           dport != (*rp)->fl.fl_ip_dport) {
-               struct flowi fl;
-
-               memcpy(&fl, &(*rp)->fl, sizeof(fl));
-               fl.fl_ip_sport = sport;
-               fl.fl_ip_dport = dport;
-               fl.proto = protocol;
+       if (sport != orig_sport || dport != orig_dport) {
+               struct flowi fl = { .oif = rt->rt_oif,
+                                   .mark = rt->rt_mark,
+                                   .fl4_dst = rt->rt_key_dst,
+                                   .fl4_src = rt->rt_key_src,
+                                   .fl4_tos = rt->rt_tos,
+                                   .proto = protocol,
+                                   .fl_ip_sport = sport,
+                                   .fl_ip_dport = dport };
+
                if (inet_sk(sk)->transparent)
                        fl.flags |= FLOWI_FLAG_ANYSRC;
-               ip_rt_put(*rp);
-               *rp = NULL;
+               if (protocol == IPPROTO_TCP)
+                       fl.flags |= FLOWI_FLAG_PRECOW_METRICS;
+               ip_rt_put(rt);
                security_sk_classify_flow(sk, &fl);
-               return ip_route_output_flow(sock_net(sk), rp, &fl, sk, 0);
+               return ip_route_output_flow(sock_net(sk), &fl, sk);
        }
-       return 0;
+       return rt;
 }
 
 extern void rt_bind_peer(struct rtable *rt, int create);
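
A hedged caller sketch (not part of the patch): with the new calling convention the route is returned directly and errors travel back as ERR_PTR values, so a connect-style caller now looks roughly like this; the function name and the tos value of 0 are illustrative only.

static int example_connect_route(struct sock *sk, __be32 daddr, __be32 saddr,
                                 __be16 sport, __be16 dport)
{
        struct rtable *rt;

        rt = ip_route_connect(daddr, saddr, 0, sk->sk_bound_dev_if,
                              IPPROTO_TCP, sport, dport, sk, true);
        if (IS_ERR(rt))
                return PTR_ERR(rt);     /* error is encoded in the pointer */

        /* ... use rt, then drop the reference ... */
        ip_rt_put(rt);
        return 0;
}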
index 04f8556313d5f52c37ff9722ab7c1f38a2f955af..a9505b6a18e3365249cf663363bac9e29a257266 100644 (file)
@@ -31,10 +31,12 @@ enum qdisc_state_t {
  * following bits are only changed while qdisc lock is held
  */
 enum qdisc___state_t {
-       __QDISC___STATE_RUNNING,
+       __QDISC___STATE_RUNNING = 1,
+       __QDISC___STATE_THROTTLED = 2,
 };
 
 struct qdisc_size_table {
+       struct rcu_head         rcu;
        struct list_head        list;
        struct tc_sizespec      szopts;
        int                     refcnt;
@@ -46,14 +48,13 @@ struct Qdisc {
        struct sk_buff *        (*dequeue)(struct Qdisc *dev);
        unsigned                flags;
 #define TCQ_F_BUILTIN          1
-#define TCQ_F_THROTTLED                2
-#define TCQ_F_INGRESS          4
-#define TCQ_F_CAN_BYPASS       8
-#define TCQ_F_MQROOT           16
+#define TCQ_F_INGRESS          2
+#define TCQ_F_CAN_BYPASS       4
+#define TCQ_F_MQROOT           8
 #define TCQ_F_WARN_NONWC       (1 << 16)
        int                     padded;
        struct Qdisc_ops        *ops;
-       struct qdisc_size_table *stab;
+       struct qdisc_size_table __rcu *stab;
        struct list_head        list;
        u32                     handle;
        u32                     parent;
@@ -78,25 +79,44 @@ struct Qdisc {
        unsigned long           state;
        struct sk_buff_head     q;
        struct gnet_stats_basic_packed bstats;
-       unsigned long           __state;
+       unsigned int            __state;
        struct gnet_stats_queue qstats;
        struct rcu_head         rcu_head;
        spinlock_t              busylock;
+       u32                     limit;
 };
 
-static inline bool qdisc_is_running(struct Qdisc *qdisc)
+static inline bool qdisc_is_running(const struct Qdisc *qdisc)
 {
-       return test_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+       return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
 }
 
 static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 {
-       return !__test_and_set_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+       if (qdisc_is_running(qdisc))
+               return false;
+       qdisc->__state |= __QDISC___STATE_RUNNING;
+       return true;
 }
 
 static inline void qdisc_run_end(struct Qdisc *qdisc)
 {
-       __clear_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+       qdisc->__state &= ~__QDISC___STATE_RUNNING;
+}
+
+static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
+{
+       return (qdisc->__state & __QDISC___STATE_THROTTLED) ? true : false;
+}
+
+static inline void qdisc_throttled(struct Qdisc *qdisc)
+{
+       qdisc->__state |= __QDISC___STATE_THROTTLED;
+}
+
+static inline void qdisc_unthrottled(struct Qdisc *qdisc)
+{
+       qdisc->__state &= ~__QDISC___STATE_THROTTLED;
 }
 
 struct Qdisc_class_ops {
@@ -331,8 +351,8 @@ extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                                 struct Qdisc_ops *ops);
 extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                       struct Qdisc_ops *ops, u32 parentid);
-extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
-                                  struct qdisc_size_table *stab);
+extern void __qdisc_calculate_pkt_len(struct sk_buff *skb,
+                                     const struct qdisc_size_table *stab);
 extern void tcf_destroy(struct tcf_proto *tp);
 extern void tcf_destroy_chain(struct tcf_proto **fl);
 
@@ -411,12 +431,20 @@ enum net_xmit_qdisc_t {
 #define net_xmit_drop_count(e) (1)
 #endif
 
-static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
+                                          const struct Qdisc *sch)
 {
 #ifdef CONFIG_NET_SCHED
-       if (sch->stab)
-               qdisc_calculate_pkt_len(skb, sch->stab);
+       struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);
+
+       if (stab)
+               __qdisc_calculate_pkt_len(skb, stab);
 #endif
+}
+
+static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+       qdisc_calculate_pkt_len(skb, sch);
        return sch->enqueue(skb, sch);
 }
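
A minimal sketch (not from the patch) of how a rate-limiting qdisc that used to set and clear TCQ_F_THROTTLED in sch->flags would migrate to the new __state accessors; the dequeue shape and the token-check helper are illustrative only.

/* hypothetical rate check; a real qdisc would consult its token bucket here */
static bool example_tokens_exhausted(struct Qdisc *sch)
{
        return false;
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb = skb_peek(&sch->q);

        if (!skb)
                return NULL;

        if (example_tokens_exhausted(sch)) {
                qdisc_throttled(sch);   /* was: sch->flags |= TCQ_F_THROTTLED */
                return NULL;
        }

        qdisc_unthrottled(sch);         /* was: sch->flags &= ~TCQ_F_THROTTLED */
        return __skb_dequeue(&sch->q);
}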
 
index bc1cf7d88ccb922bd0093f6284cd3f03fbe00336..da0534d3401cdee0ed17ce84150365b73c3c9327 100644 (file)
@@ -281,7 +281,7 @@ struct sock {
        int                     sk_rcvbuf;
 
        struct sk_filter __rcu  *sk_filter;
-       struct socket_wq        *sk_wq;
+       struct socket_wq __rcu  *sk_wq;
 
 #ifdef CONFIG_NET_DMA
        struct sk_buff_head     sk_async_wait_queue;
@@ -1191,7 +1191,7 @@ extern void sk_filter_release_rcu(struct rcu_head *rcu);
 static inline void sk_filter_release(struct sk_filter *fp)
 {
        if (atomic_dec_and_test(&fp->refcnt))
-               call_rcu_bh(&fp->rcu, sk_filter_release_rcu);
+               call_rcu(&fp->rcu, sk_filter_release_rcu);
 }
 
 static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
@@ -1266,7 +1266,8 @@ static inline void sk_set_socket(struct sock *sk, struct socket *sock)
 
 static inline wait_queue_head_t *sk_sleep(struct sock *sk)
 {
-       return &sk->sk_wq->wait;
+       BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
+       return &rcu_dereference_raw(sk->sk_wq)->wait;
 }
 /* Detach socket from process context.
  * Announce socket dead, detach it from wait queue and inode.
@@ -1287,7 +1288,7 @@ static inline void sock_orphan(struct sock *sk)
 static inline void sock_graft(struct sock *sk, struct socket *parent)
 {
        write_lock_bh(&sk->sk_callback_lock);
-       rcu_assign_pointer(sk->sk_wq, parent->wq);
+       sk->sk_wq = parent->wq;
        parent->sk = sk;
        sk_set_socket(sk, parent);
        security_sock_graft(sk, parent);
index 38509f047382c35b7b5d06211f3feaa0820eacb4..cda30ea354a214072b634ee9c2fa9b7ff23cc216 100644 (file)
@@ -196,6 +196,9 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
 /* TCP thin-stream limits */
 #define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
 
+/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
+#define TCP_INIT_CWND          10
+
 extern struct inet_timewait_death_row tcp_death_row;
 
 /* sysctl variables for tcp */
@@ -799,15 +802,6 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 /* Use define here intentionally to get WARN_ON location shown at the caller */
 #define tcp_verify_left_out(tp)        WARN_ON(tcp_left_out(tp) > tp->packets_out)
 
-/*
- * Convert RFC 3390 larger initial window into an equivalent number of packets.
- * This is based on the numbers specified in RFC 5681, 3.1.
- */
-static inline u32 rfc3390_bytes_to_packets(const u32 smss)
-{
-       return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
-}
-
 extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
 extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
 
@@ -1074,8 +1068,6 @@ static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
        return 1;
 }
 
-#define TCP_CHECK_TIMER(sk) do { } while (0)
-
 static inline void tcp_mib_init(struct net *net)
 {
        /* See RFC 2012 */
@@ -1404,7 +1396,7 @@ extern struct request_sock_ops tcp6_request_sock_ops;
 extern void tcp_v4_destroy_sock(struct sock *sk);
 
 extern int tcp_v4_gso_send_check(struct sk_buff *skb);
-extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features);
 extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
                                        struct sk_buff *skb);
 extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
index bb967dd59bf74fa5abac83964486fabc4398cabd..67ea6fcb3ec063165bf0e4fe69a33834b18a8e9f 100644 (file)
@@ -144,6 +144,17 @@ static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
        return csum;
 }
 
+static inline __wsum udp_csum(struct sk_buff *skb)
+{
+       __wsum csum = csum_partial(skb_transport_header(skb),
+                                  sizeof(struct udphdr), skb->csum);
+
+       for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
+               csum = csum_add(csum, skb->csum);
+       }
+       return csum;
+}
+
 /* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
 static inline void udp_lib_hash(struct sock *sk)
 {
@@ -245,5 +256,5 @@ extern void udp4_proc_exit(void);
 extern void udp_init(void);
 
 extern int udp4_ufo_send_check(struct sk_buff *skb);
-extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features);
+extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features);
 #endif /* _UDP_H */
index afdffe607b2425fd66a2a1c8ae30b3bca1eb1494..673a024c6b2ade7b0b1d60cbf580413e0a0ba67f 100644 (file)
@@ -115,6 +115,18 @@ static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
        return csum;
 }
 
+static inline __wsum udplite_csum(struct sk_buff *skb)
+{
+       struct sock *sk = skb->sk;
+       int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb));
+       const int off = skb_transport_offset(skb);
+       const int len = skb->len - off;
+
+       skb->ip_summed = CHECKSUM_NONE;     /* no HW support for checksumming */
+
+       return skb_checksum(skb, off, min(cscov, len), 0);
+}
+
 extern void    udplite4_register(void);
 extern int     udplite_get_port(struct sock *sk, unsigned short snum,
                        int (*scmp)(const struct sock *, const struct sock *));
index b9f385da758ede5c9286c8801b5bbe28d4f2c128..d5dcf3974636c4417cfd66566bad2fc5762ce7ad 100644 (file)
@@ -36,6 +36,7 @@
 #define XFRM_PROTO_ROUTING     IPPROTO_ROUTING
 #define XFRM_PROTO_DSTOPTS     IPPROTO_DSTOPTS
 
+#define XFRM_ALIGN4(len)       (((len) + 3) & ~3)
 #define XFRM_ALIGN8(len)       (((len) + 7) & ~7)
 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
        MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
@@ -266,25 +267,26 @@ struct xfrm_policy_afinfo {
        struct dst_ops          *dst_ops;
        void                    (*garbage_collect)(struct net *net);
        struct dst_entry        *(*dst_lookup)(struct net *net, int tos,
-                                              xfrm_address_t *saddr,
-                                              xfrm_address_t *daddr);
+                                              const xfrm_address_t *saddr,
+                                              const xfrm_address_t *daddr);
        int                     (*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr);
        void                    (*decode_session)(struct sk_buff *skb,
                                                  struct flowi *fl,
                                                  int reverse);
-       int                     (*get_tos)(struct flowi *fl);
+       int                     (*get_tos)(const struct flowi *fl);
        int                     (*init_path)(struct xfrm_dst *path,
                                             struct dst_entry *dst,
                                             int nfheader_len);
        int                     (*fill_dst)(struct xfrm_dst *xdst,
                                            struct net_device *dev,
-                                           struct flowi *fl);
+                                           const struct flowi *fl);
+       struct dst_entry        *(*blackhole_route)(struct net *net, struct dst_entry *orig);
 };
 
 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
-extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c);
-extern void km_state_notify(struct xfrm_state *x, struct km_event *c);
+extern void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c);
+extern void km_state_notify(struct xfrm_state *x, const struct km_event *c);
 
 struct xfrm_tmpl;
 extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
@@ -299,9 +301,12 @@ struct xfrm_state_afinfo {
        const struct xfrm_type  *type_map[IPPROTO_MAX];
        struct xfrm_mode        *mode_map[XFRM_MODE_MAX];
        int                     (*init_flags)(struct xfrm_state *x);
-       void                    (*init_tempsel)(struct xfrm_selector *sel, struct flowi *fl);
-       void                    (*init_temprop)(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
-                                               xfrm_address_t *daddr, xfrm_address_t *saddr);
+       void                    (*init_tempsel)(struct xfrm_selector *sel,
+                                               const struct flowi *fl);
+       void                    (*init_temprop)(struct xfrm_state *x,
+                                               const struct xfrm_tmpl *tmpl,
+                                               const xfrm_address_t *daddr,
+                                               const xfrm_address_t *saddr);
        int                     (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
        int                     (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
        int                     (*output)(struct sk_buff *skb);
@@ -332,7 +337,8 @@ struct xfrm_type {
        void                    (*destructor)(struct xfrm_state *);
        int                     (*input)(struct xfrm_state *, struct sk_buff *skb);
        int                     (*output)(struct xfrm_state *, struct sk_buff *pskb);
-       int                     (*reject)(struct xfrm_state *, struct sk_buff *, struct flowi *);
+       int                     (*reject)(struct xfrm_state *, struct sk_buff *,
+                                         const struct flowi *);
        int                     (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
        /* Estimate maximal size of result of transformation of a dgram */
        u32                     (*get_mtu)(struct xfrm_state *, int size);
@@ -501,7 +507,7 @@ struct xfrm_policy {
        struct xfrm_tmpl        xfrm_vec[XFRM_MAX_DEPTH];
 };
 
-static inline struct net *xp_net(struct xfrm_policy *xp)
+static inline struct net *xp_net(const struct xfrm_policy *xp)
 {
        return read_pnet(&xp->xp_net);
 }
@@ -545,13 +551,17 @@ struct xfrm_migrate {
 struct xfrm_mgr {
        struct list_head        list;
        char                    *id;
-       int                     (*notify)(struct xfrm_state *x, struct km_event *c);
+       int                     (*notify)(struct xfrm_state *x, const struct km_event *c);
        int                     (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
        struct xfrm_policy      *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
        int                     (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
-       int                     (*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c);
+       int                     (*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
        int                     (*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
-       int                     (*migrate)(struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_bundles, struct xfrm_kmaddress *k);
+       int                     (*migrate)(const struct xfrm_selector *sel,
+                                          u8 dir, u8 type,
+                                          const struct xfrm_migrate *m,
+                                          int num_bundles,
+                                          const struct xfrm_kmaddress *k);
 };
 
 extern int xfrm_register_km(struct xfrm_mgr *km);
@@ -762,10 +772,11 @@ static inline void xfrm_state_hold(struct xfrm_state *x)
        atomic_inc(&x->refcnt);
 }
 
-static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
+static inline bool addr_match(const void *token1, const void *token2,
+                             int prefixlen)
 {
-       __be32 *a1 = token1;
-       __be32 *a2 = token2;
+       const __be32 *a1 = token1;
+       const __be32 *a2 = token2;
        int pdw;
        int pbi;
 
@@ -774,7 +785,7 @@ static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
 
        if (pdw)
                if (memcmp(a1, a2, pdw << 2))
-                       return 0;
+                       return false;
 
        if (pbi) {
                __be32 mask;
@@ -782,14 +793,14 @@ static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
                mask = htonl((0xffffffff) << (32 - pbi));
 
                if ((a1[pdw] ^ a2[pdw]) & mask)
-                       return 0;
+                       return false;
        }
 
-       return 1;
+       return true;
 }
 
 static __inline__
-__be16 xfrm_flowi_sport(struct flowi *fl)
+__be16 xfrm_flowi_sport(const struct flowi *fl)
 {
        __be16 port;
        switch(fl->proto) {
@@ -816,7 +827,7 @@ __be16 xfrm_flowi_sport(struct flowi *fl)
 }
 
 static __inline__
-__be16 xfrm_flowi_dport(struct flowi *fl)
+__be16 xfrm_flowi_dport(const struct flowi *fl)
 {
        __be16 port;
        switch(fl->proto) {
@@ -839,7 +850,8 @@ __be16 xfrm_flowi_dport(struct flowi *fl)
        return port;
 }
 
-extern int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
+extern int xfrm_selector_match(const struct xfrm_selector *sel,
+                              const struct flowi *fl,
                               unsigned short family);
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -947,7 +959,7 @@ secpath_reset(struct sk_buff *skb)
 }
 
 static inline int
-xfrm_addr_any(xfrm_address_t *addr, unsigned short family)
+xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
 {
        switch (family) {
        case AF_INET:
@@ -959,21 +971,21 @@ xfrm_addr_any(xfrm_address_t *addr, unsigned short family)
 }
 
 static inline int
-__xfrm4_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
+__xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
 {
        return  (tmpl->saddr.a4 &&
                 tmpl->saddr.a4 != x->props.saddr.a4);
 }
 
 static inline int
-__xfrm6_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
+__xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
 {
        return  (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
                 ipv6_addr_cmp((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
 }
 
 static inline int
-xfrm_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x, unsigned short family)
+xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family)
 {
        switch (family) {
        case AF_INET:
@@ -1126,7 +1138,7 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
 #endif
 
 static __inline__
-xfrm_address_t *xfrm_flowi_daddr(struct flowi *fl, unsigned short family)
+xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
 {
        switch (family){
        case AF_INET:
@@ -1138,7 +1150,7 @@ xfrm_address_t *xfrm_flowi_daddr(struct flowi *fl, unsigned short family)
 }
 
 static __inline__
-xfrm_address_t *xfrm_flowi_saddr(struct flowi *fl, unsigned short family)
+xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
 {
        switch (family){
        case AF_INET:
@@ -1150,7 +1162,7 @@ xfrm_address_t *xfrm_flowi_saddr(struct flowi *fl, unsigned short family)
 }
 
 static __inline__
-void xfrm_flowi_addr_get(struct flowi *fl,
+void xfrm_flowi_addr_get(const struct flowi *fl,
                         xfrm_address_t *saddr, xfrm_address_t *daddr,
                         unsigned short family)
 {
@@ -1167,8 +1179,8 @@ void xfrm_flowi_addr_get(struct flowi *fl,
 }
 
 static __inline__ int
-__xfrm4_state_addr_check(struct xfrm_state *x,
-                        xfrm_address_t *daddr, xfrm_address_t *saddr)
+__xfrm4_state_addr_check(const struct xfrm_state *x,
+                        const xfrm_address_t *daddr, const xfrm_address_t *saddr)
 {
        if (daddr->a4 == x->id.daddr.a4 &&
            (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
@@ -1177,8 +1189,8 @@ __xfrm4_state_addr_check(struct xfrm_state *x,
 }
 
 static __inline__ int
-__xfrm6_state_addr_check(struct xfrm_state *x,
-                        xfrm_address_t *daddr, xfrm_address_t *saddr)
+__xfrm6_state_addr_check(const struct xfrm_state *x,
+                        const xfrm_address_t *daddr, const xfrm_address_t *saddr)
 {
        if (!ipv6_addr_cmp((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
            (!ipv6_addr_cmp((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr)|| 
@@ -1189,8 +1201,8 @@ __xfrm6_state_addr_check(struct xfrm_state *x,
 }
 
 static __inline__ int
-xfrm_state_addr_check(struct xfrm_state *x,
-                     xfrm_address_t *daddr, xfrm_address_t *saddr,
+xfrm_state_addr_check(const struct xfrm_state *x,
+                     const xfrm_address_t *daddr, const xfrm_address_t *saddr,
                      unsigned short family)
 {
        switch (family) {
@@ -1203,23 +1215,23 @@ xfrm_state_addr_check(struct xfrm_state *x,
 }
 
 static __inline__ int
-xfrm_state_addr_flow_check(struct xfrm_state *x, struct flowi *fl,
+xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl,
                           unsigned short family)
 {
        switch (family) {
        case AF_INET:
                return __xfrm4_state_addr_check(x,
-                                               (xfrm_address_t *)&fl->fl4_dst,
-                                               (xfrm_address_t *)&fl->fl4_src);
+                                               (const xfrm_address_t *)&fl->fl4_dst,
+                                               (const xfrm_address_t *)&fl->fl4_src);
        case AF_INET6:
                return __xfrm6_state_addr_check(x,
-                                               (xfrm_address_t *)&fl->fl6_dst,
-                                               (xfrm_address_t *)&fl->fl6_src);
+                                               (const xfrm_address_t *)&fl->fl6_dst,
+                                               (const xfrm_address_t *)&fl->fl6_src);
        }
        return 0;
 }
 
-static inline int xfrm_state_kern(struct xfrm_state *x)
+static inline int xfrm_state_kern(const struct xfrm_state *x)
 {
        return atomic_read(&x->tunnel_users);
 }
@@ -1323,8 +1335,10 @@ extern int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
                           int (*func)(struct xfrm_state *, int, void*), void *);
 extern void xfrm_state_walk_done(struct xfrm_state_walk *walk);
 extern struct xfrm_state *xfrm_state_alloc(struct net *net);
-extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, 
-                                         struct flowi *fl, struct xfrm_tmpl *tmpl,
+extern struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
+                                         const xfrm_address_t *saddr,
+                                         const struct flowi *fl,
+                                         struct xfrm_tmpl *tmpl,
                                          struct xfrm_policy *pol, int *err,
                                          unsigned short family);
 extern struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
@@ -1337,11 +1351,11 @@ extern void xfrm_state_insert(struct xfrm_state *x);
 extern int xfrm_state_add(struct xfrm_state *x);
 extern int xfrm_state_update(struct xfrm_state *x);
 extern struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
-                                           xfrm_address_t *daddr, __be32 spi,
+                                           const xfrm_address_t *daddr, __be32 spi,
                                            u8 proto, unsigned short family);
 extern struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
-                                                  xfrm_address_t *daddr,
-                                                  xfrm_address_t *saddr,
+                                                  const xfrm_address_t *daddr,
+                                                  const xfrm_address_t *saddr,
                                                   u8 proto,
                                                   unsigned short family);
 #ifdef CONFIG_XFRM_SUB_POLICY
@@ -1468,19 +1482,19 @@ u32 xfrm_get_acqseq(void);
 extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
 struct xfrm_state *xfrm_find_acq(struct net *net, struct xfrm_mark *mark,
                                 u8 mode, u32 reqid, u8 proto,
-                                xfrm_address_t *daddr,
-                                xfrm_address_t *saddr, int create,
+                                const xfrm_address_t *daddr,
+                                const xfrm_address_t *saddr, int create,
                                 unsigned short family);
 extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
 
 #ifdef CONFIG_XFRM_MIGRATE
-extern int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
-                     struct xfrm_migrate *m, int num_bundles,
-                     struct xfrm_kmaddress *k);
+extern int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+                     const struct xfrm_migrate *m, int num_bundles,
+                     const struct xfrm_kmaddress *k);
 extern struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m);
 extern struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
                                              struct xfrm_migrate *m);
-extern int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
+extern int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
                        struct xfrm_migrate *m, int num_bundles,
                        struct xfrm_kmaddress *k);
 #endif
@@ -1500,10 +1514,10 @@ extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
 extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
 extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
 extern struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
-extern struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_aead_get_byname(char *name, int icv_len,
+extern struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
+extern struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
+extern struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
+extern struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
                                                   int probe);
 
 struct hash_desc;
@@ -1511,7 +1525,8 @@ struct scatterlist;
 typedef int (icv_update_fn_t)(struct hash_desc *, struct scatterlist *,
                              unsigned int);
 
-static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b,
+static inline int xfrm_addr_cmp(const xfrm_address_t *a,
+                               const xfrm_address_t *b,
                                int family)
 {
        switch (family) {
@@ -1544,12 +1559,12 @@ static inline int xfrm_aevent_is_on(struct net *net)
 }
 #endif
 
-static inline int xfrm_alg_len(struct xfrm_algo *alg)
+static inline int xfrm_alg_len(const struct xfrm_algo *alg)
 {
        return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
 }
 
-static inline int xfrm_alg_auth_len(struct xfrm_algo_auth *alg)
+static inline int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg)
 {
        return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
 }
@@ -1597,7 +1612,7 @@ static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
        return m->v & m->m;
 }
 
-static inline int xfrm_mark_put(struct sk_buff *skb, struct xfrm_mark *m)
+static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
 {
        if (m->m | m->v)
                NLA_PUT(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
index e4956244ae505e5fcd43df061395b3437a7e21b0..93950031706678daf5597362c63f570ae0d40d3f 100644 (file)
@@ -74,6 +74,8 @@ static int    audit_initialized;
 int            audit_enabled;
 int            audit_ever_enabled;
 
+EXPORT_SYMBOL_GPL(audit_enabled);
+
 /* Default state when kernel boots without any parameters. */
 static int     audit_default;
 
@@ -671,9 +673,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 
        pid  = NETLINK_CREDS(skb)->pid;
        uid  = NETLINK_CREDS(skb)->uid;
-       loginuid = NETLINK_CB(skb).loginuid;
-       sessionid = NETLINK_CB(skb).sessionid;
-       sid  = NETLINK_CB(skb).sid;
+       loginuid = audit_get_loginuid(current);
+       sessionid = audit_get_sessionid(current);
+       security_task_getsecid(current, &sid);
        seq  = nlh->nlmsg_seq;
        data = NLMSG_DATA(nlh);
 
index add2819af71bb2a050907a49c2d228fbf0c1d00d..f8277c80d678bfeaefb74ad02805b0a4a2cebaa2 100644 (file)
@@ -1238,6 +1238,7 @@ static int audit_filter_user_rules(struct netlink_skb_parms *cb,
        for (i = 0; i < rule->field_count; i++) {
                struct audit_field *f = &rule->fields[i];
                int result = 0;
+               u32 sid;
 
                switch (f->type) {
                case AUDIT_PID:
@@ -1250,19 +1251,22 @@ static int audit_filter_user_rules(struct netlink_skb_parms *cb,
                        result = audit_comparator(cb->creds.gid, f->op, f->val);
                        break;
                case AUDIT_LOGINUID:
-                       result = audit_comparator(cb->loginuid, f->op, f->val);
+                       result = audit_comparator(audit_get_loginuid(current),
+                                                 f->op, f->val);
                        break;
                case AUDIT_SUBJ_USER:
                case AUDIT_SUBJ_ROLE:
                case AUDIT_SUBJ_TYPE:
                case AUDIT_SUBJ_SEN:
                case AUDIT_SUBJ_CLR:
-                       if (f->lsm_rule)
-                               result = security_audit_rule_match(cb->sid,
+                       if (f->lsm_rule) {
+                               security_task_getsecid(current, &sid);
+                               result = security_audit_rule_match(sid,
                                                                   f->type,
                                                                   f->op,
                                                                   f->lsm_rule,
                                                                   NULL);
+                       }
                        break;
                }
 
index 9033c1c70828c9a9eed27b2f9177f1c26b60f638..094fafe86c964d062781fb82dd596b53984ddac7 100644 (file)
@@ -134,6 +134,10 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
                irq_set_thread_affinity(desc);
        }
 #endif
+       if (desc->affinity_notify) {
+               kref_get(&desc->affinity_notify->kref);
+               schedule_work(&desc->affinity_notify->work);
+       }
        desc->status |= IRQ_AFFINITY_SET;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
@@ -155,6 +159,79 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 }
 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
 
+static void irq_affinity_notify(struct work_struct *work)
+{
+       struct irq_affinity_notify *notify =
+               container_of(work, struct irq_affinity_notify, work);
+       struct irq_desc *desc = irq_to_desc(notify->irq);
+       cpumask_var_t cpumask;
+       unsigned long flags;
+
+       if (!desc)
+               goto out;
+
+       if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+               goto out;
+
+       raw_spin_lock_irqsave(&desc->lock, flags);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       if (desc->status & IRQ_MOVE_PENDING)
+               cpumask_copy(cpumask, desc->pending_mask);
+       else
+#endif
+               cpumask_copy(cpumask, desc->affinity);
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+       notify->notify(notify, cpumask);
+
+       free_cpumask_var(cpumask);
+out:
+       kref_put(&notify->kref, notify->release);
+}
+
+/**
+ *     irq_set_affinity_notifier - control notification of IRQ affinity changes
+ *     @irq:           Interrupt for which to enable/disable notification
+ *     @notify:        Context for notification, or %NULL to disable
+ *                     notification.  Function pointers must be initialised;
+ *                     the other fields will be initialised by this function.
+ *
+ *     Must be called in process context.  Notification may only be enabled
+ *     after the IRQ is allocated and must be disabled before the IRQ is
+ *     freed using free_irq().
+ */
+int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       struct irq_affinity_notify *old_notify;
+       unsigned long flags;
+
+       /* The release function is promised process context */
+       might_sleep();
+
+       if (!desc)
+               return -EINVAL;
+
+       /* Complete initialisation of *notify */
+       if (notify) {
+               notify->irq = irq;
+               kref_init(&notify->kref);
+               INIT_WORK(&notify->work, irq_affinity_notify);
+       }
+
+       raw_spin_lock_irqsave(&desc->lock, flags);
+       old_notify = desc->affinity_notify;
+       desc->affinity_notify = notify;
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+       if (old_notify)
+               kref_put(&old_notify->kref, old_notify->release);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
+
 #ifndef CONFIG_AUTO_IRQ_AFFINITY
 /*
  * Generic version of the affinity autoselector.
@@ -1004,6 +1081,11 @@ void free_irq(unsigned int irq, void *dev_id)
        if (!desc)
                return;
 
+#ifdef CONFIG_SMP
+       if (WARN_ON(desc->affinity_notify))
+               desc->affinity_notify = NULL;
+#endif
+
        chip_bus_lock(desc);
        kfree(__free_irq(irq, dev_id));
        chip_bus_sync_unlock(desc);
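
For context, a minimal driver-side sketch of the notifier API added above (irq_set_affinity_notifier() plus the notify/release callbacks). The my_* names are illustrative, not part of the patch; as the kernel-doc states, only the two function pointers need to be set by the caller.

/* Illustrative only: consuming the IRQ affinity notifier API above.
 * Everything prefixed my_ is hypothetical driver state. */
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_queue {
	struct irq_affinity_notify notify;
	unsigned int irq;
};

static void my_affinity_notify(struct irq_affinity_notify *notify,
			       const cpumask_t *mask)
{
	struct my_queue *q = container_of(notify, struct my_queue, notify);

	/* called from a workqueue with the current (or pending) affinity;
	 * re-steer per-queue state of q towards 'mask' here */
	(void)q;
}

static void my_affinity_release(struct kref *ref)
{
	/* runs in process context, as promised by irq_affinity_notify() */
	kfree(container_of(ref, struct my_queue, notify.kref));
}

static int my_queue_setup(unsigned int irq)
{
	struct my_queue *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int rc;

	if (!q)
		return -ENOMEM;
	q->irq = irq;
	q->notify.notify = my_affinity_notify;
	q->notify.release = my_affinity_release;
	/* irq, kref and work are filled in by irq_set_affinity_notifier() */
	rc = irq_set_affinity_notifier(irq, &q->notify);
	if (rc)
		kfree(q);
	return rc;
}

Per the kernel-doc, the notifier must be torn down with irq_set_affinity_notifier(irq, NULL) before free_irq(); the WARN_ON() added to free_irq() above catches callers that forget.
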
index 0ee67e08ad3ec2486e4d2c7d11681bee406d61e3..8334342e0d0576434f2b906669e034023a4ef601 100644 (file)
@@ -201,6 +201,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
        bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
        depends on EXPERIMENTAL && BROKEN
 
+config CPU_RMAP
+       bool
+       depends on SMP
+
 #
 # Netlink attribute parsing support is select'ed if needed
 #
index cbb774f7d41d09846732d9f674328e0c9cd05086..b73ba01a818a03cab67ac6fddcd1089145fcf470 100644 (file)
@@ -110,6 +110,8 @@ obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
 
 obj-$(CONFIG_AVERAGE) += average.o
 
+obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
+
 hostprogs-y    := gen_crc32table
 clean-files    := crc32table.h
 
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
new file mode 100644 (file)
index 0000000..987acfa
--- /dev/null
@@ -0,0 +1,269 @@
+/*
+ * cpu_rmap.c: CPU affinity reverse-map support
+ * Copyright 2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/cpu_rmap.h>
+#ifdef CONFIG_GENERIC_HARDIRQS
+#include <linux/interrupt.h>
+#endif
+#include <linux/module.h>
+
+/*
+ * These functions maintain a mapping from CPUs to some ordered set of
+ * objects with CPU affinities.  This can be seen as a reverse-map of
+ * CPU affinity.  However, we do not assume that the object affinities
+ * cover all CPUs in the system.  For those CPUs not directly covered
+ * by object affinities, we attempt to find a nearest object based on
+ * CPU topology.
+ */
+
+/**
+ * alloc_cpu_rmap - allocate CPU affinity reverse-map
+ * @size: Number of objects to be mapped
+ * @flags: Allocation flags e.g. %GFP_KERNEL
+ */
+struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
+{
+       struct cpu_rmap *rmap;
+       unsigned int cpu;
+       size_t obj_offset;
+
+       /* This is a silly number of objects, and we use u16 indices. */
+       if (size > 0xffff)
+               return NULL;
+
+       /* Offset of object pointer array from base structure */
+       obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]),
+                          sizeof(void *));
+
+       rmap = kzalloc(obj_offset + size * sizeof(rmap->obj[0]), flags);
+       if (!rmap)
+               return NULL;
+
+       rmap->obj = (void **)((char *)rmap + obj_offset);
+
+       /* Initially assign CPUs to objects on a rota, since we have
+        * no idea where the objects are.  Use infinite distance, so
+        * any object with known distance is preferable.  Include the
+        * CPUs that are not present/online, since we definitely want
+        * any newly-hotplugged CPUs to have some object assigned.
+        */
+       for_each_possible_cpu(cpu) {
+               rmap->near[cpu].index = cpu % size;
+               rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
+       }
+
+       rmap->size = size;
+       return rmap;
+}
+EXPORT_SYMBOL(alloc_cpu_rmap);
+
+/* Reevaluate nearest object for given CPU, comparing with the given
+ * neighbours at the given distance.
+ */
+static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu,
+                               const struct cpumask *mask, u16 dist)
+{
+       int neigh;
+
+       for_each_cpu(neigh, mask) {
+               if (rmap->near[cpu].dist > dist &&
+                   rmap->near[neigh].dist <= dist) {
+                       rmap->near[cpu].index = rmap->near[neigh].index;
+                       rmap->near[cpu].dist = dist;
+                       return true;
+               }
+       }
+       return false;
+}
+
+#ifdef DEBUG
+static void debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
+{
+       unsigned index;
+       unsigned int cpu;
+
+       pr_info("cpu_rmap %p, %s:\n", rmap, prefix);
+
+       for_each_possible_cpu(cpu) {
+               index = rmap->near[cpu].index;
+               pr_info("cpu %d -> obj %u (distance %u)\n",
+                       cpu, index, rmap->near[cpu].dist);
+       }
+}
+#else
+static inline void
+debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
+{
+}
+#endif
+
+/**
+ * cpu_rmap_add - add object to a rmap
+ * @rmap: CPU rmap allocated with alloc_cpu_rmap()
+ * @obj: Object to add to rmap
+ *
+ * Return index of object.
+ */
+int cpu_rmap_add(struct cpu_rmap *rmap, void *obj)
+{
+       u16 index;
+
+       BUG_ON(rmap->used >= rmap->size);
+       index = rmap->used++;
+       rmap->obj[index] = obj;
+       return index;
+}
+EXPORT_SYMBOL(cpu_rmap_add);
+
+/**
+ * cpu_rmap_update - update CPU rmap following a change of object affinity
+ * @rmap: CPU rmap to update
+ * @index: Index of object whose affinity changed
+ * @affinity: New CPU affinity of object
+ */
+int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
+                   const struct cpumask *affinity)
+{
+       cpumask_var_t update_mask;
+       unsigned int cpu;
+
+       if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL)))
+               return -ENOMEM;
+
+       /* Invalidate distance for all CPUs for which this used to be
+        * the nearest object.  Mark those CPUs for update.
+        */
+       for_each_online_cpu(cpu) {
+               if (rmap->near[cpu].index == index) {
+                       rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
+                       cpumask_set_cpu(cpu, update_mask);
+               }
+       }
+
+       debug_print_rmap(rmap, "after invalidating old distances");
+
+       /* Set distance to 0 for all CPUs in the new affinity mask.
+        * Mark all CPUs within their NUMA nodes for update.
+        */
+       for_each_cpu(cpu, affinity) {
+               rmap->near[cpu].index = index;
+               rmap->near[cpu].dist = 0;
+               cpumask_or(update_mask, update_mask,
+                          cpumask_of_node(cpu_to_node(cpu)));
+       }
+
+       debug_print_rmap(rmap, "after updating neighbours");
+
+       /* Update distances based on topology */
+       for_each_cpu(cpu, update_mask) {
+               if (cpu_rmap_copy_neigh(rmap, cpu,
+                                       topology_thread_cpumask(cpu), 1))
+                       continue;
+               if (cpu_rmap_copy_neigh(rmap, cpu,
+                                       topology_core_cpumask(cpu), 2))
+                       continue;
+               if (cpu_rmap_copy_neigh(rmap, cpu,
+                                       cpumask_of_node(cpu_to_node(cpu)), 3))
+                       continue;
+               /* We could continue into NUMA node distances, but for now
+                * we give up.
+                */
+       }
+
+       debug_print_rmap(rmap, "after copying neighbours");
+
+       free_cpumask_var(update_mask);
+       return 0;
+}
+EXPORT_SYMBOL(cpu_rmap_update);
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+
+/* Glue between IRQ affinity notifiers and CPU rmaps */
+
+struct irq_glue {
+       struct irq_affinity_notify notify;
+       struct cpu_rmap *rmap;
+       u16 index;
+};
+
+/**
+ * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
+ * @rmap: Reverse-map allocated with alloc_irq_cpu_rmap(), or %NULL
+ *
+ * Must be called in process context, before freeing the IRQs, and
+ * without holding any locks required by global workqueue items.
+ */
+void free_irq_cpu_rmap(struct cpu_rmap *rmap)
+{
+       struct irq_glue *glue;
+       u16 index;
+
+       if (!rmap)
+               return;
+
+       for (index = 0; index < rmap->used; index++) {
+               glue = rmap->obj[index];
+               irq_set_affinity_notifier(glue->notify.irq, NULL);
+       }
+       irq_run_affinity_notifiers();
+
+       kfree(rmap);
+}
+EXPORT_SYMBOL(free_irq_cpu_rmap);
+
+static void
+irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
+{
+       struct irq_glue *glue =
+               container_of(notify, struct irq_glue, notify);
+       int rc;
+
+       rc = cpu_rmap_update(glue->rmap, glue->index, mask);
+       if (rc)
+               pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
+}
+
+static void irq_cpu_rmap_release(struct kref *ref)
+{
+       struct irq_glue *glue =
+               container_of(ref, struct irq_glue, notify.kref);
+       kfree(glue);
+}
+
+/**
+ * irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map
+ * @rmap: The reverse-map
+ * @irq: The IRQ number
+ *
+ * This adds an IRQ affinity notifier that will update the reverse-map
+ * automatically.
+ *
+ * Must be called in process context, after the IRQ is allocated but
+ * before it is bound with request_irq().
+ */
+int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
+{
+       struct irq_glue *glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+       int rc;
+
+       if (!glue)
+               return -ENOMEM;
+       glue->notify.notify = irq_cpu_rmap_notify;
+       glue->notify.release = irq_cpu_rmap_release;
+       glue->rmap = rmap;
+       glue->index = cpu_rmap_add(rmap, glue);
+       rc = irq_set_affinity_notifier(irq, &glue->notify);
+       if (rc)
+               kfree(glue);
+       return rc;
+}
+EXPORT_SYMBOL(irq_cpu_rmap_add);
+
+#endif /* CONFIG_GENERIC_HARDIRQS */
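
A minimal sketch, under the assumption of a multiqueue driver with one IRQ per queue, of how the exported helpers above fit together; the my_* identifiers are placeholders and error handling is abbreviated.

/* Illustrative only: maintaining an IRQ reverse-map with the helpers above. */
#include <linux/cpu_rmap.h>

static struct cpu_rmap *my_rmap;

static int my_setup_rmap(unsigned int nqueues, const int *queue_irq)
{
	unsigned int i;
	int rc;

	my_rmap = alloc_cpu_rmap(nqueues, GFP_KERNEL);
	if (!my_rmap)
		return -ENOMEM;

	for (i = 0; i < nqueues; i++) {
		/* registers an affinity notifier per IRQ so the map
		 * follows affinity changes automatically */
		rc = irq_cpu_rmap_add(my_rmap, queue_irq[i]);
		if (rc)
			goto fail;
	}
	return 0;

fail:
	/* unregisters the notifiers and frees the glue objects and map */
	free_irq_cpu_rmap(my_rmap);
	my_rmap = NULL;
	return rc;
}

The lookup side is just rmap->near[cpu].index, which cpu_rmap_update() above keeps pointing at the nearest object by CPU topology.
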
index 6e64f7c6a2e9265b2afecbe6eb8f09c30bef55d6..7850412f52b7e23f817969fea24a560bcbdda4e8 100644 (file)
@@ -327,7 +327,7 @@ static void vlan_sync_address(struct net_device *dev,
 static void vlan_transfer_features(struct net_device *dev,
                                   struct net_device *vlandev)
 {
-       unsigned long old_features = vlandev->features;
+       u32 old_features = vlandev->features;
 
        vlandev->features &= ~dev->vlan_features;
        vlandev->features |= dev->features & dev->vlan_features;
index be737539f34d1be9849a142c2ed176e48c537298..ae610f046de596b0d279f01c0247a68d8aad69c1 100644 (file)
@@ -625,6 +625,19 @@ static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
                rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
        return rc;
 }
+
+static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
+                                   struct scatterlist *sgl, unsigned int sgc)
+{
+       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       const struct net_device_ops *ops = real_dev->netdev_ops;
+       int rc = 0;
+
+       if (ops->ndo_fcoe_ddp_target)
+               rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
+
+       return rc;
+}
 #endif
 
 static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
@@ -858,6 +871,7 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_fcoe_enable        = vlan_dev_fcoe_enable,
        .ndo_fcoe_disable       = vlan_dev_fcoe_disable,
        .ndo_fcoe_get_wwn       = vlan_dev_fcoe_get_wwn,
+       .ndo_fcoe_ddp_target    = vlan_dev_fcoe_ddp_target,
 #endif
 };
 
index 17c5ba7551a55e79c2c38e22a8fe2fdaa8e00979..29a54ccd213d5709839fbdd5e27a11c4ac5b1188 100644 (file)
@@ -59,7 +59,6 @@
                                                 * safely advertise a maxsize
                                                 * of 64k */
 
-#define P9_RDMA_MAX_SGE (P9_RDMA_MAXSIZE >> PAGE_SHIFT)
 /**
  * struct p9_trans_rdma - RDMA transport instance
  *
index 72840626284bc74b4fcd3d6564781ff909fae75f..79cabf1ee68bd2db701d2c790f8f64edc79b94d7 100644 (file)
@@ -221,6 +221,12 @@ config RPS
        depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
        default y
 
+config RFS_ACCEL
+       boolean
+       depends on RPS && GENERIC_HARDIRQS
+       select CPU_RMAP
+       default y
+
 config XPS
        boolean
        depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
index d257da50fcfb92417cad07bfb60e0377b2f2f933..810a1294eddb63aba3359f5e4324805319b7c598 100644 (file)
@@ -520,9 +520,9 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
                unlink_clip_vcc(clip_vcc);
                return 0;
        }
-       error = ip_route_output_key(&init_net, &rt, &fl);
-       if (error)
-               return error;
+       rt = ip_route_output_key(&init_net, &fl);
+       if (IS_ERR(rt))
+               return PTR_ERR(rt);
        neigh = __neigh_lookup(&clip_tbl, &ip, rt->dst.dev, 1);
        ip_rt_put(rt);
        if (!neigh)
index d936aeccd1942025445289bbcbeb2ede4badff10..2de93d00631be2fd92cdbdda2b7ad40d0d2f4e6a 100644 (file)
@@ -1,5 +1,5 @@
 #
-# Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+# Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 #
 # Marek Lindner, Simon Wunderlich
 #
index 3850a3ecf94741a669e5f35d75c1cad7437eec8b..af45d6b2031f978d208aa6cde1dbef6fb93b8b4c 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -35,7 +35,7 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
                               int packet_len,
                               unsigned long send_time,
                               bool directlink,
-                              struct batman_if *if_incoming,
+                              struct hard_iface *if_incoming,
                               struct forw_packet *forw_packet)
 {
        struct batman_packet *batman_packet =
@@ -99,7 +99,7 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
 /* create a new aggregated packet and add this packet to it */
 static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
                                  unsigned long send_time, bool direct_link,
-                                 struct batman_if *if_incoming,
+                                 struct hard_iface *if_incoming,
                                  int own_packet)
 {
        struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
@@ -188,7 +188,7 @@ static void aggregate(struct forw_packet *forw_packet_aggr,
 
 void add_bat_packet_to_list(struct bat_priv *bat_priv,
                            unsigned char *packet_buff, int packet_len,
-                           struct batman_if *if_incoming, char own_packet,
+                           struct hard_iface *if_incoming, char own_packet,
                            unsigned long send_time)
 {
        /**
@@ -247,7 +247,7 @@ void add_bat_packet_to_list(struct bat_priv *bat_priv,
 
 /* unpack the aggregated packets and process them one by one */
 void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
-                            int packet_len, struct batman_if *if_incoming)
+                            int packet_len, struct hard_iface *if_incoming)
 {
        struct batman_packet *batman_packet;
        int buff_pos = 0;
index 71a91b3da913aa1438a44edb9d1295e8d6679e9d..062204289d1f52c1d3749c551f3c1f5d0dbc0deb 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -35,9 +35,9 @@ static inline int aggregated_packet(int buff_pos, int packet_len, int num_hna)
 
 void add_bat_packet_to_list(struct bat_priv *bat_priv,
                            unsigned char *packet_buff, int packet_len,
-                           struct batman_if *if_incoming, char own_packet,
+                           struct hard_iface *if_incoming, char own_packet,
                            unsigned long send_time);
 void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
-                            int packet_len, struct batman_if *if_incoming);
+                            int packet_len, struct hard_iface *if_incoming);
 
 #endif /* _NET_BATMAN_ADV_AGGREGATION_H_ */
index 0ae81d07f1027d720e9b69df9e34fa6b2cbba449..0e9d43509935cfb71e3253a2189be6b1a6f916d4 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -52,7 +52,6 @@ static void emit_log_char(struct debug_log *debug_log, char c)
 
 static int fdebug_log(struct debug_log *debug_log, char *fmt, ...)
 {
-       int printed_len;
        va_list args;
        static char debug_log_buf[256];
        char *p;
@@ -62,8 +61,7 @@ static int fdebug_log(struct debug_log *debug_log, char *fmt, ...)
 
        spin_lock_bh(&debug_log->lock);
        va_start(args, fmt);
-       printed_len = vscnprintf(debug_log_buf, sizeof(debug_log_buf),
-                                fmt, args);
+       vscnprintf(debug_log_buf, sizeof(debug_log_buf), fmt, args);
        va_end(args);
 
        for (p = debug_log_buf; *p != 0; p++)
index 72df532b7d5f4cd39b5be09db22530c5e1c17a62..bc9cda3f01e14179cf11124c441a0eff090c3fc4 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index cd7bb51825f177065e39e5dcdcc4f7c984b7b983..e449bf6353e01de692ea74186c902e80fae8605e 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -441,16 +441,16 @@ static ssize_t show_mesh_iface(struct kobject *kobj, struct attribute *attr,
                               char *buff)
 {
        struct net_device *net_dev = kobj_to_netdev(kobj);
-       struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+       struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
        ssize_t length;
 
-       if (!batman_if)
+       if (!hard_iface)
                return 0;
 
-       length = sprintf(buff, "%s\n", batman_if->if_status == IF_NOT_IN_USE ?
-                        "none" : batman_if->soft_iface->name);
+       length = sprintf(buff, "%s\n", hard_iface->if_status == IF_NOT_IN_USE ?
+                        "none" : hard_iface->soft_iface->name);
 
-       kref_put(&batman_if->refcount, hardif_free_ref);
+       hardif_free_ref(hard_iface);
 
        return length;
 }
@@ -459,11 +459,11 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
                                char *buff, size_t count)
 {
        struct net_device *net_dev = kobj_to_netdev(kobj);
-       struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+       struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
        int status_tmp = -1;
-       int ret;
+       int ret = count;
 
-       if (!batman_if)
+       if (!hard_iface)
                return count;
 
        if (buff[count - 1] == '\n')
@@ -472,7 +472,7 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
        if (strlen(buff) >= IFNAMSIZ) {
                pr_err("Invalid parameter for 'mesh_iface' setting received: "
                       "interface name too long '%s'\n", buff);
-               kref_put(&batman_if->refcount, hardif_free_ref);
+               hardif_free_ref(hard_iface);
                return -EINVAL;
        }
 
@@ -481,30 +481,31 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
        else
                status_tmp = IF_I_WANT_YOU;
 
-       if ((batman_if->if_status == status_tmp) || ((batman_if->soft_iface) &&
-           (strncmp(batman_if->soft_iface->name, buff, IFNAMSIZ) == 0))) {
-               kref_put(&batman_if->refcount, hardif_free_ref);
-               return count;
-       }
+       if (hard_iface->if_status == status_tmp)
+               goto out;
+
+       if ((hard_iface->soft_iface) &&
+           (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
+               goto out;
 
        if (status_tmp == IF_NOT_IN_USE) {
                rtnl_lock();
-               hardif_disable_interface(batman_if);
+               hardif_disable_interface(hard_iface);
                rtnl_unlock();
-               kref_put(&batman_if->refcount, hardif_free_ref);
-               return count;
+               goto out;
        }
 
        /* if the interface already is in use */
-       if (batman_if->if_status != IF_NOT_IN_USE) {
+       if (hard_iface->if_status != IF_NOT_IN_USE) {
                rtnl_lock();
-               hardif_disable_interface(batman_if);
+               hardif_disable_interface(hard_iface);
                rtnl_unlock();
        }
 
-       ret = hardif_enable_interface(batman_if, buff);
-       kref_put(&batman_if->refcount, hardif_free_ref);
+       ret = hardif_enable_interface(hard_iface, buff);
 
+out:
+       hardif_free_ref(hard_iface);
        return ret;
 }
 
@@ -512,13 +513,13 @@ static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
                                 char *buff)
 {
        struct net_device *net_dev = kobj_to_netdev(kobj);
-       struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+       struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
        ssize_t length;
 
-       if (!batman_if)
+       if (!hard_iface)
                return 0;
 
-       switch (batman_if->if_status) {
+       switch (hard_iface->if_status) {
        case IF_TO_BE_REMOVED:
                length = sprintf(buff, "disabling\n");
                break;
@@ -537,7 +538,7 @@ static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
                break;
        }
 
-       kref_put(&batman_if->refcount, hardif_free_ref);
+       hardif_free_ref(hard_iface);
 
        return length;
 }
index 7f186c007b4fc63e910479c334966cf1f901306d..02f1fa7aadfa768f6b2298a3591f2819fb331b46 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index bbcd8f744cdd402bb13ed2a71a969ec9b1220ff6..ad2ca925b3e04dccb8efc3c90a86c11274602245 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
index ac54017601b1b4bb5a78369f5a00ba53e12ae365..769c246d1fc165069dc2cb000516291b06355426 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
index 0065ffb8d96d64b7cce1ed3d00567a652d5a83f1..3cc43558cf9cbb0958d55f4f6ed236f9c3873322 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
 #include <linux/udp.h>
 #include <linux/if_vlan.h>
 
-static void gw_node_free_ref(struct kref *refcount)
+static void gw_node_free_rcu(struct rcu_head *rcu)
 {
        struct gw_node *gw_node;
 
-       gw_node = container_of(refcount, struct gw_node, refcount);
+       gw_node = container_of(rcu, struct gw_node, rcu);
        kfree(gw_node);
 }
 
-static void gw_node_free_rcu(struct rcu_head *rcu)
+static void gw_node_free_ref(struct gw_node *gw_node)
 {
-       struct gw_node *gw_node;
-
-       gw_node = container_of(rcu, struct gw_node, rcu);
-       kref_put(&gw_node->refcount, gw_node_free_ref);
+       if (atomic_dec_and_test(&gw_node->refcount))
+               call_rcu(&gw_node->rcu, gw_node_free_rcu);
 }
 
 void *gw_get_selected(struct bat_priv *bat_priv)
 {
-       struct gw_node *curr_gateway_tmp = bat_priv->curr_gw;
+       struct gw_node *curr_gateway_tmp;
+       struct orig_node *orig_node = NULL;
 
+       rcu_read_lock();
+       curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
        if (!curr_gateway_tmp)
-               return NULL;
+               goto out;
+
+       orig_node = curr_gateway_tmp->orig_node;
+       if (!orig_node)
+               goto out;
 
-       return curr_gateway_tmp->orig_node;
+       if (!atomic_inc_not_zero(&orig_node->refcount))
+               orig_node = NULL;
+
+out:
+       rcu_read_unlock();
+       return orig_node;
 }
 
 void gw_deselect(struct bat_priv *bat_priv)
 {
-       struct gw_node *gw_node = bat_priv->curr_gw;
+       struct gw_node *gw_node;
 
-       bat_priv->curr_gw = NULL;
+       spin_lock_bh(&bat_priv->gw_list_lock);
+       gw_node = rcu_dereference(bat_priv->curr_gw);
+       rcu_assign_pointer(bat_priv->curr_gw, NULL);
+       spin_unlock_bh(&bat_priv->gw_list_lock);
 
        if (gw_node)
-               kref_put(&gw_node->refcount, gw_node_free_ref);
+               gw_node_free_ref(gw_node);
 }
 
-static struct gw_node *gw_select(struct bat_priv *bat_priv,
-                         struct gw_node *new_gw_node)
+static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
 {
-       struct gw_node *curr_gw_node = bat_priv->curr_gw;
+       struct gw_node *curr_gw_node;
 
-       if (new_gw_node)
-               kref_get(&new_gw_node->refcount);
+       if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
+               new_gw_node = NULL;
+
+       spin_lock_bh(&bat_priv->gw_list_lock);
+       curr_gw_node = rcu_dereference(bat_priv->curr_gw);
+       rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
+       spin_unlock_bh(&bat_priv->gw_list_lock);
 
-       bat_priv->curr_gw = new_gw_node;
-       return curr_gw_node;
+       if (curr_gw_node)
+               gw_node_free_ref(curr_gw_node);
 }
 
 void gw_election(struct bat_priv *bat_priv)
 {
        struct hlist_node *node;
-       struct gw_node *gw_node, *curr_gw_tmp = NULL, *old_gw_node = NULL;
+       struct gw_node *gw_node, *curr_gw, *curr_gw_tmp = NULL;
        uint8_t max_tq = 0;
        uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
        int down, up;
@@ -93,19 +110,23 @@ void gw_election(struct bat_priv *bat_priv)
        if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
                return;
 
-       if (bat_priv->curr_gw)
+       rcu_read_lock();
+       curr_gw = rcu_dereference(bat_priv->curr_gw);
+       if (curr_gw) {
+               rcu_read_unlock();
                return;
+       }
 
-       rcu_read_lock();
        if (hlist_empty(&bat_priv->gw_list)) {
-               rcu_read_unlock();
 
-               if (bat_priv->curr_gw) {
+               if (curr_gw) {
+                       rcu_read_unlock();
                        bat_dbg(DBG_BATMAN, bat_priv,
                                "Removing selected gateway - "
                                "no gateway in range\n");
                        gw_deselect(bat_priv);
-               }
+               } else
+                       rcu_read_unlock();
 
                return;
        }
@@ -154,12 +175,12 @@ void gw_election(struct bat_priv *bat_priv)
                        max_gw_factor = tmp_gw_factor;
        }
 
-       if (bat_priv->curr_gw != curr_gw_tmp) {
-               if ((bat_priv->curr_gw) && (!curr_gw_tmp))
+       if (curr_gw != curr_gw_tmp) {
+               if ((curr_gw) && (!curr_gw_tmp))
                        bat_dbg(DBG_BATMAN, bat_priv,
                                "Removing selected gateway - "
                                "no gateway in range\n");
-               else if ((!bat_priv->curr_gw) && (curr_gw_tmp))
+               else if ((!curr_gw) && (curr_gw_tmp))
                        bat_dbg(DBG_BATMAN, bat_priv,
                                "Adding route to gateway %pM "
                                "(gw_flags: %i, tq: %i)\n",
@@ -174,43 +195,43 @@ void gw_election(struct bat_priv *bat_priv)
                                curr_gw_tmp->orig_node->gw_flags,
                                curr_gw_tmp->orig_node->router->tq_avg);
 
-               old_gw_node = gw_select(bat_priv, curr_gw_tmp);
+               gw_select(bat_priv, curr_gw_tmp);
        }
 
        rcu_read_unlock();
-
-       /* the kfree() has to be outside of the rcu lock */
-       if (old_gw_node)
-               kref_put(&old_gw_node->refcount, gw_node_free_ref);
 }
 
 void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
 {
-       struct gw_node *curr_gateway_tmp = bat_priv->curr_gw;
+       struct gw_node *curr_gateway_tmp;
        uint8_t gw_tq_avg, orig_tq_avg;
 
+       rcu_read_lock();
+       curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
        if (!curr_gateway_tmp)
-               return;
+               goto out_rcu;
 
        if (!curr_gateway_tmp->orig_node)
-               goto deselect;
+               goto deselect_rcu;
 
        if (!curr_gateway_tmp->orig_node->router)
-               goto deselect;
+               goto deselect_rcu;
 
        /* this node already is the gateway */
        if (curr_gateway_tmp->orig_node == orig_node)
-               return;
+               goto out_rcu;
 
        if (!orig_node->router)
-               return;
+               goto out_rcu;
 
        gw_tq_avg = curr_gateway_tmp->orig_node->router->tq_avg;
+       rcu_read_unlock();
+
        orig_tq_avg = orig_node->router->tq_avg;
 
        /* the TQ value has to be better */
        if (orig_tq_avg < gw_tq_avg)
-               return;
+               goto out;
 
        /**
         * if the routing class is greater than 3 the value tells us how much
@@ -218,15 +239,23 @@ void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
         **/
        if ((atomic_read(&bat_priv->gw_sel_class) > 3) &&
            (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class)))
-               return;
+               goto out;
 
        bat_dbg(DBG_BATMAN, bat_priv,
                "Restarting gateway selection: better gateway found (tq curr: "
                "%i, tq new: %i)\n",
                gw_tq_avg, orig_tq_avg);
+       goto deselect;
 
+out_rcu:
+       rcu_read_unlock();
+       goto out;
+deselect_rcu:
+       rcu_read_unlock();
 deselect:
        gw_deselect(bat_priv);
+out:
+       return;
 }
 
 static void gw_node_add(struct bat_priv *bat_priv,
@@ -242,7 +271,7 @@ static void gw_node_add(struct bat_priv *bat_priv,
        memset(gw_node, 0, sizeof(struct gw_node));
        INIT_HLIST_NODE(&gw_node->list);
        gw_node->orig_node = orig_node;
-       kref_init(&gw_node->refcount);
+       atomic_set(&gw_node->refcount, 1);
 
        spin_lock_bh(&bat_priv->gw_list_lock);
        hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
@@ -283,7 +312,7 @@ void gw_node_update(struct bat_priv *bat_priv,
                                "Gateway %pM removed from gateway list\n",
                                orig_node->orig);
 
-                       if (gw_node == bat_priv->curr_gw) {
+                       if (gw_node == rcu_dereference(bat_priv->curr_gw)) {
                                rcu_read_unlock();
                                gw_deselect(bat_priv);
                                return;
@@ -321,11 +350,11 @@ void gw_node_purge(struct bat_priv *bat_priv)
                    atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)
                        continue;
 
-               if (bat_priv->curr_gw == gw_node)
+               if (rcu_dereference(bat_priv->curr_gw) == gw_node)
                        gw_deselect(bat_priv);
 
                hlist_del_rcu(&gw_node->list);
-               call_rcu(&gw_node->rcu, gw_node_free_rcu);
+               gw_node_free_ref(gw_node);
        }
 
 
@@ -335,12 +364,16 @@ void gw_node_purge(struct bat_priv *bat_priv)
 static int _write_buffer_text(struct bat_priv *bat_priv,
                              struct seq_file *seq, struct gw_node *gw_node)
 {
-       int down, up;
+       struct gw_node *curr_gw;
+       int down, up, ret;
 
        gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);
 
-       return seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
-                      (bat_priv->curr_gw == gw_node ? "=>" : "  "),
+       rcu_read_lock();
+       curr_gw = rcu_dereference(bat_priv->curr_gw);
+
+       ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
+                      (curr_gw == gw_node ? "=>" : "  "),
                       gw_node->orig_node->orig,
                       gw_node->orig_node->router->tq_avg,
                       gw_node->orig_node->router->addr,
@@ -350,6 +383,9 @@ static int _write_buffer_text(struct bat_priv *bat_priv,
                       (down > 2048 ? "MBit" : "KBit"),
                       (up > 2048 ? up / 1024 : up),
                       (up > 2048 ? "MBit" : "KBit"));
+
+       rcu_read_unlock();
+       return ret;
 }
 
 int gw_client_seq_print_text(struct seq_file *seq, void *offset)
@@ -470,8 +506,12 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
        if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
                return -1;
 
-       if (!bat_priv->curr_gw)
+       rcu_read_lock();
+       if (!rcu_dereference(bat_priv->curr_gw)) {
+               rcu_read_unlock();
                return 0;
+       }
+       rcu_read_unlock();
 
        return 1;
 }
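
The gateway rework above converts gw_node and the curr_gw pointer from kref-based to plain atomic_t reference counting under RCU, presumably because kref offered no increment-unless-zero operation at the time. A generic restatement of the pattern, with illustrative obj/slot names:

/* Illustrative only: the RCU + atomic refcount pattern applied above. */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct obj {
	atomic_t refcount;
	struct rcu_head rcu;
	/* payload ... */
};

static void obj_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct obj, rcu));
}

static void obj_free_ref(struct obj *obj)
{
	/* last reference gone: defer the kfree() past all RCU readers */
	if (atomic_dec_and_test(&obj->refcount))
		call_rcu(&obj->rcu, obj_free_rcu);
}

static struct obj *obj_get(struct obj __rcu **slot)
{
	struct obj *obj;

	rcu_read_lock();
	obj = rcu_dereference(*slot);
	/* refuse the reference if the object is already being torn down */
	if (obj && !atomic_inc_not_zero(&obj->refcount))
		obj = NULL;
	rcu_read_unlock();
	return obj;
}

Writers swap the slot with rcu_assign_pointer() under gw_list_lock and drop the old reference afterwards, exactly as gw_select() and gw_deselect() do above.
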
index 4585e6549844f4a71e90e376728af19a13c7dc56..2aa439124ee338a2772093975a59732ccfee8524 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index b962982f017ee6d13d80d003b2471855935a0e6a..50d3a59a3d73bf25c34ed7db1f37f7bad3a33d7e 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 5e728d0b795997fb256341d73b04c1c19c7b44ef..55e527a489fed08494b051e6c6d294f73b6018bb 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 4f95777ce0803d79cdbcb285b92efcb77b77fb53..b3058e46ee6ba9bb7bdfc7ef96860eedc9bb67f7 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
 
 #include <linux/if_arp.h>
 
-/* protect update critical side of if_list - but not the content */
-static DEFINE_SPINLOCK(if_list_lock);
+/* protect update critical side of hardif_list - but not the content */
+static DEFINE_SPINLOCK(hardif_list_lock);
 
-static void hardif_free_rcu(struct rcu_head *rcu)
+
+static int batman_skb_recv(struct sk_buff *skb,
+                          struct net_device *dev,
+                          struct packet_type *ptype,
+                          struct net_device *orig_dev);
+
+void hardif_free_rcu(struct rcu_head *rcu)
 {
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
 
-       batman_if = container_of(rcu, struct batman_if, rcu);
-       dev_put(batman_if->net_dev);
-       kref_put(&batman_if->refcount, hardif_free_ref);
+       hard_iface = container_of(rcu, struct hard_iface, rcu);
+       dev_put(hard_iface->net_dev);
+       kfree(hard_iface);
 }
 
-struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev)
+struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev)
 {
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(batman_if, &if_list, list) {
-               if (batman_if->net_dev == net_dev)
+       list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+               if (hard_iface->net_dev == net_dev &&
+                   atomic_inc_not_zero(&hard_iface->refcount))
                        goto out;
        }
 
-       batman_if = NULL;
+       hard_iface = NULL;
 
 out:
-       if (batman_if)
-               kref_get(&batman_if->refcount);
-
        rcu_read_unlock();
-       return batman_if;
+       return hard_iface;
 }
 
 static int is_valid_iface(struct net_device *net_dev)
@@ -75,13 +79,8 @@ static int is_valid_iface(struct net_device *net_dev)
                return 0;
 
        /* no batman over batman */
-#ifdef HAVE_NET_DEVICE_OPS
-       if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
-               return 0;
-#else
-       if (net_dev->hard_start_xmit == interface_tx)
+       if (softif_is_valid(net_dev))
                return 0;
-#endif
 
        /* Device is being bridged */
        /* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
@@ -90,27 +89,25 @@ static int is_valid_iface(struct net_device *net_dev)
        return 1;
 }
 
-static struct batman_if *get_active_batman_if(struct net_device *soft_iface)
+static struct hard_iface *hardif_get_active(struct net_device *soft_iface)
 {
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(batman_if, &if_list, list) {
-               if (batman_if->soft_iface != soft_iface)
+       list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+               if (hard_iface->soft_iface != soft_iface)
                        continue;
 
-               if (batman_if->if_status == IF_ACTIVE)
+               if (hard_iface->if_status == IF_ACTIVE &&
+                   atomic_inc_not_zero(&hard_iface->refcount))
                        goto out;
        }
 
-       batman_if = NULL;
+       hard_iface = NULL;
 
 out:
-       if (batman_if)
-               kref_get(&batman_if->refcount);
-
        rcu_read_unlock();
-       return batman_if;
+       return hard_iface;
 }
 
 static void update_primary_addr(struct bat_priv *bat_priv)
@@ -126,24 +123,24 @@ static void update_primary_addr(struct bat_priv *bat_priv)
 }
 
 static void set_primary_if(struct bat_priv *bat_priv,
-                          struct batman_if *batman_if)
+                          struct hard_iface *hard_iface)
 {
        struct batman_packet *batman_packet;
-       struct batman_if *old_if;
+       struct hard_iface *old_if;
 
-       if (batman_if)
-               kref_get(&batman_if->refcount);
+       if (hard_iface && !atomic_inc_not_zero(&hard_iface->refcount))
+               hard_iface = NULL;
 
        old_if = bat_priv->primary_if;
-       bat_priv->primary_if = batman_if;
+       bat_priv->primary_if = hard_iface;
 
        if (old_if)
-               kref_put(&old_if->refcount, hardif_free_ref);
+               hardif_free_ref(old_if);
 
        if (!bat_priv->primary_if)
                return;
 
-       batman_packet = (struct batman_packet *)(batman_if->packet_buff);
+       batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
        batman_packet->flags = PRIMARIES_FIRST_HOP;
        batman_packet->ttl = TTL;
 
@@ -156,42 +153,42 @@ static void set_primary_if(struct bat_priv *bat_priv,
        atomic_set(&bat_priv->hna_local_changed, 1);
 }
 
-static bool hardif_is_iface_up(struct batman_if *batman_if)
+static bool hardif_is_iface_up(struct hard_iface *hard_iface)
 {
-       if (batman_if->net_dev->flags & IFF_UP)
+       if (hard_iface->net_dev->flags & IFF_UP)
                return true;
 
        return false;
 }
 
-static void update_mac_addresses(struct batman_if *batman_if)
+static void update_mac_addresses(struct hard_iface *hard_iface)
 {
-       memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
-              batman_if->net_dev->dev_addr, ETH_ALEN);
-       memcpy(((struct batman_packet *)(batman_if->packet_buff))->prev_sender,
-              batman_if->net_dev->dev_addr, ETH_ALEN);
+       memcpy(((struct batman_packet *)(hard_iface->packet_buff))->orig,
+              hard_iface->net_dev->dev_addr, ETH_ALEN);
+       memcpy(((struct batman_packet *)(hard_iface->packet_buff))->prev_sender,
+              hard_iface->net_dev->dev_addr, ETH_ALEN);
 }
 
 static void check_known_mac_addr(struct net_device *net_dev)
 {
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(batman_if, &if_list, list) {
-               if ((batman_if->if_status != IF_ACTIVE) &&
-                   (batman_if->if_status != IF_TO_BE_ACTIVATED))
+       list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+               if ((hard_iface->if_status != IF_ACTIVE) &&
+                   (hard_iface->if_status != IF_TO_BE_ACTIVATED))
                        continue;
 
-               if (batman_if->net_dev == net_dev)
+               if (hard_iface->net_dev == net_dev)
                        continue;
 
-               if (!compare_orig(batman_if->net_dev->dev_addr,
-                                 net_dev->dev_addr))
+               if (!compare_eth(hard_iface->net_dev->dev_addr,
+                                net_dev->dev_addr))
                        continue;
 
                pr_warning("The newly added mac address (%pM) already exists "
                           "on: %s\n", net_dev->dev_addr,
-                          batman_if->net_dev->name);
+                          hard_iface->net_dev->name);
                pr_warning("It is strongly recommended to keep mac addresses "
                           "unique to avoid problems!\n");
        }
@@ -201,7 +198,7 @@ static void check_known_mac_addr(struct net_device *net_dev)
 int hardif_min_mtu(struct net_device *soft_iface)
 {
        struct bat_priv *bat_priv = netdev_priv(soft_iface);
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
        /* allow big frames if all devices are capable to do so
         * (have MTU > 1500 + BAT_HEADER_LEN) */
        int min_mtu = ETH_DATA_LEN;
@@ -210,15 +207,15 @@ int hardif_min_mtu(struct net_device *soft_iface)
                goto out;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(batman_if, &if_list, list) {
-               if ((batman_if->if_status != IF_ACTIVE) &&
-                   (batman_if->if_status != IF_TO_BE_ACTIVATED))
+       list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+               if ((hard_iface->if_status != IF_ACTIVE) &&
+                   (hard_iface->if_status != IF_TO_BE_ACTIVATED))
                        continue;
 
-               if (batman_if->soft_iface != soft_iface)
+               if (hard_iface->soft_iface != soft_iface)
                        continue;
 
-               min_mtu = min_t(int, batman_if->net_dev->mtu - BAT_HEADER_LEN,
+               min_mtu = min_t(int, hard_iface->net_dev->mtu - BAT_HEADER_LEN,
                                min_mtu);
        }
        rcu_read_unlock();
@@ -236,77 +233,95 @@ void update_min_mtu(struct net_device *soft_iface)
                soft_iface->mtu = min_mtu;
 }
 
-static void hardif_activate_interface(struct batman_if *batman_if)
+static void hardif_activate_interface(struct hard_iface *hard_iface)
 {
        struct bat_priv *bat_priv;
 
-       if (batman_if->if_status != IF_INACTIVE)
+       if (hard_iface->if_status != IF_INACTIVE)
                return;
 
-       bat_priv = netdev_priv(batman_if->soft_iface);
+       bat_priv = netdev_priv(hard_iface->soft_iface);
 
-       update_mac_addresses(batman_if);
-       batman_if->if_status = IF_TO_BE_ACTIVATED;
+       update_mac_addresses(hard_iface);
+       hard_iface->if_status = IF_TO_BE_ACTIVATED;
 
        /**
         * the first active interface becomes our primary interface or
         * the next active interface after the old primary interface was removed
         */
        if (!bat_priv->primary_if)
-               set_primary_if(bat_priv, batman_if);
+               set_primary_if(bat_priv, hard_iface);
 
-       bat_info(batman_if->soft_iface, "Interface activated: %s\n",
-                batman_if->net_dev->name);
+       bat_info(hard_iface->soft_iface, "Interface activated: %s\n",
+                hard_iface->net_dev->name);
 
-       update_min_mtu(batman_if->soft_iface);
+       update_min_mtu(hard_iface->soft_iface);
        return;
 }
 
-static void hardif_deactivate_interface(struct batman_if *batman_if)
+static void hardif_deactivate_interface(struct hard_iface *hard_iface)
 {
-       if ((batman_if->if_status != IF_ACTIVE) &&
-          (batman_if->if_status != IF_TO_BE_ACTIVATED))
+       if ((hard_iface->if_status != IF_ACTIVE) &&
+           (hard_iface->if_status != IF_TO_BE_ACTIVATED))
                return;
 
-       batman_if->if_status = IF_INACTIVE;
+       hard_iface->if_status = IF_INACTIVE;
 
-       bat_info(batman_if->soft_iface, "Interface deactivated: %s\n",
-                batman_if->net_dev->name);
+       bat_info(hard_iface->soft_iface, "Interface deactivated: %s\n",
+                hard_iface->net_dev->name);
 
-       update_min_mtu(batman_if->soft_iface);
+       update_min_mtu(hard_iface->soft_iface);
 }
 
-int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
+int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name)
 {
        struct bat_priv *bat_priv;
        struct batman_packet *batman_packet;
+       struct net_device *soft_iface;
+       int ret;
 
-       if (batman_if->if_status != IF_NOT_IN_USE)
+       if (hard_iface->if_status != IF_NOT_IN_USE)
                goto out;
 
-       batman_if->soft_iface = dev_get_by_name(&init_net, iface_name);
+       if (!atomic_inc_not_zero(&hard_iface->refcount))
+               goto out;
 
-       if (!batman_if->soft_iface) {
-               batman_if->soft_iface = softif_create(iface_name);
+       soft_iface = dev_get_by_name(&init_net, iface_name);
 
-               if (!batman_if->soft_iface)
+       if (!soft_iface) {
+               soft_iface = softif_create(iface_name);
+
+               if (!soft_iface) {
+                       ret = -ENOMEM;
                        goto err;
+               }
 
                /* dev_get_by_name() increases the reference counter for us */
-               dev_hold(batman_if->soft_iface);
+               dev_hold(soft_iface);
+       }
+
+       if (!softif_is_valid(soft_iface)) {
+               pr_err("Can't create batman mesh interface %s: "
+                      "already exists as regular interface\n",
+                      soft_iface->name);
+               dev_put(soft_iface);
+               ret = -EINVAL;
+               goto err;
        }
 
-       bat_priv = netdev_priv(batman_if->soft_iface);
-       batman_if->packet_len = BAT_PACKET_LEN;
-       batman_if->packet_buff = kmalloc(batman_if->packet_len, GFP_ATOMIC);
+       hard_iface->soft_iface = soft_iface;
+       bat_priv = netdev_priv(hard_iface->soft_iface);
+       hard_iface->packet_len = BAT_PACKET_LEN;
+       hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
 
-       if (!batman_if->packet_buff) {
-               bat_err(batman_if->soft_iface, "Can't add interface packet "
-                       "(%s): out of memory\n", batman_if->net_dev->name);
+       if (!hard_iface->packet_buff) {
+               bat_err(hard_iface->soft_iface, "Can't add interface packet "
+                       "(%s): out of memory\n", hard_iface->net_dev->name);
+               ret = -ENOMEM;
                goto err;
        }
 
-       batman_packet = (struct batman_packet *)(batman_if->packet_buff);
+       batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
        batman_packet->packet_type = BAT_PACKET;
        batman_packet->version = COMPAT_VERSION;
        batman_packet->flags = 0;
@@ -314,107 +329,107 @@ int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
        batman_packet->tq = TQ_MAX_VALUE;
        batman_packet->num_hna = 0;
 
-       batman_if->if_num = bat_priv->num_ifaces;
+       hard_iface->if_num = bat_priv->num_ifaces;
        bat_priv->num_ifaces++;
-       batman_if->if_status = IF_INACTIVE;
-       orig_hash_add_if(batman_if, bat_priv->num_ifaces);
+       hard_iface->if_status = IF_INACTIVE;
+       orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
 
-       batman_if->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
-       batman_if->batman_adv_ptype.func = batman_skb_recv;
-       batman_if->batman_adv_ptype.dev = batman_if->net_dev;
-       kref_get(&batman_if->refcount);
-       dev_add_pack(&batman_if->batman_adv_ptype);
+       hard_iface->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
+       hard_iface->batman_adv_ptype.func = batman_skb_recv;
+       hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
+       dev_add_pack(&hard_iface->batman_adv_ptype);
 
-       atomic_set(&batman_if->seqno, 1);
-       atomic_set(&batman_if->frag_seqno, 1);
-       bat_info(batman_if->soft_iface, "Adding interface: %s\n",
-                batman_if->net_dev->name);
+       atomic_set(&hard_iface->seqno, 1);
+       atomic_set(&hard_iface->frag_seqno, 1);
+       bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
+                hard_iface->net_dev->name);
 
-       if (atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
+       if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
                ETH_DATA_LEN + BAT_HEADER_LEN)
-               bat_info(batman_if->soft_iface,
+               bat_info(hard_iface->soft_iface,
                        "The MTU of interface %s is too small (%i) to handle "
                        "the transport of batman-adv packets. Packets going "
                        "over this interface will be fragmented on layer2 "
                        "which could impact the performance. Setting the MTU "
                        "to %zi would solve the problem.\n",
-                       batman_if->net_dev->name, batman_if->net_dev->mtu,
+                       hard_iface->net_dev->name, hard_iface->net_dev->mtu,
                        ETH_DATA_LEN + BAT_HEADER_LEN);
 
-       if (!atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
+       if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
                ETH_DATA_LEN + BAT_HEADER_LEN)
-               bat_info(batman_if->soft_iface,
+               bat_info(hard_iface->soft_iface,
                        "The MTU of interface %s is too small (%i) to handle "
                        "the transport of batman-adv packets. If you experience"
                        " problems getting traffic through try increasing the "
                        "MTU to %zi.\n",
-                       batman_if->net_dev->name, batman_if->net_dev->mtu,
+                       hard_iface->net_dev->name, hard_iface->net_dev->mtu,
                        ETH_DATA_LEN + BAT_HEADER_LEN);
 
-       if (hardif_is_iface_up(batman_if))
-               hardif_activate_interface(batman_if);
+       if (hardif_is_iface_up(hard_iface))
+               hardif_activate_interface(hard_iface);
        else
-               bat_err(batman_if->soft_iface, "Not using interface %s "
+               bat_err(hard_iface->soft_iface, "Not using interface %s "
                        "(retrying later): interface not active\n",
-                       batman_if->net_dev->name);
+                       hard_iface->net_dev->name);
 
        /* begin scheduling originator messages on that interface */
-       schedule_own_packet(batman_if);
+       schedule_own_packet(hard_iface);
 
 out:
        return 0;
 
 err:
-       return -ENOMEM;
+       hardif_free_ref(hard_iface);
+       return ret;
 }
 
-void hardif_disable_interface(struct batman_if *batman_if)
+void hardif_disable_interface(struct hard_iface *hard_iface)
 {
-       struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+       struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 
-       if (batman_if->if_status == IF_ACTIVE)
-               hardif_deactivate_interface(batman_if);
+       if (hard_iface->if_status == IF_ACTIVE)
+               hardif_deactivate_interface(hard_iface);
 
-       if (batman_if->if_status != IF_INACTIVE)
+       if (hard_iface->if_status != IF_INACTIVE)
                return;
 
-       bat_info(batman_if->soft_iface, "Removing interface: %s\n",
-                batman_if->net_dev->name);
-       dev_remove_pack(&batman_if->batman_adv_ptype);
-       kref_put(&batman_if->refcount, hardif_free_ref);
+       bat_info(hard_iface->soft_iface, "Removing interface: %s\n",
+                hard_iface->net_dev->name);
+       dev_remove_pack(&hard_iface->batman_adv_ptype);
 
        bat_priv->num_ifaces--;
-       orig_hash_del_if(batman_if, bat_priv->num_ifaces);
+       orig_hash_del_if(hard_iface, bat_priv->num_ifaces);
 
-       if (batman_if == bat_priv->primary_if) {
-               struct batman_if *new_if;
+       if (hard_iface == bat_priv->primary_if) {
+               struct hard_iface *new_if;
 
-               new_if = get_active_batman_if(batman_if->soft_iface);
+               new_if = hardif_get_active(hard_iface->soft_iface);
                set_primary_if(bat_priv, new_if);
 
                if (new_if)
-                       kref_put(&new_if->refcount, hardif_free_ref);
+                       hardif_free_ref(new_if);
        }
 
-       kfree(batman_if->packet_buff);
-       batman_if->packet_buff = NULL;
-       batman_if->if_status = IF_NOT_IN_USE;
+       kfree(hard_iface->packet_buff);
+       hard_iface->packet_buff = NULL;
+       hard_iface->if_status = IF_NOT_IN_USE;
 
-       /* delete all references to this batman_if */
+       /* delete all references to this hard_iface */
        purge_orig_ref(bat_priv);
-       purge_outstanding_packets(bat_priv, batman_if);
-       dev_put(batman_if->soft_iface);
+       purge_outstanding_packets(bat_priv, hard_iface);
+       dev_put(hard_iface->soft_iface);
 
        /* nobody uses this interface anymore */
        if (!bat_priv->num_ifaces)
-               softif_destroy(batman_if->soft_iface);
+               softif_destroy(hard_iface->soft_iface);
 
-       batman_if->soft_iface = NULL;
+       hard_iface->soft_iface = NULL;
+       hardif_free_ref(hard_iface);
 }
 
-static struct batman_if *hardif_add_interface(struct net_device *net_dev)
+static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
 {
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
        int ret;
 
        ret = is_valid_iface(net_dev);
@@ -423,73 +438,73 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
 
        dev_hold(net_dev);
 
-       batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
-       if (!batman_if) {
+       hard_iface = kmalloc(sizeof(struct hard_iface), GFP_ATOMIC);
+       if (!hard_iface) {
                pr_err("Can't add interface (%s): out of memory\n",
                       net_dev->name);
                goto release_dev;
        }
 
-       ret = sysfs_add_hardif(&batman_if->hardif_obj, net_dev);
+       ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
        if (ret)
                goto free_if;
 
-       batman_if->if_num = -1;
-       batman_if->net_dev = net_dev;
-       batman_if->soft_iface = NULL;
-       batman_if->if_status = IF_NOT_IN_USE;
-       INIT_LIST_HEAD(&batman_if->list);
-       kref_init(&batman_if->refcount);
+       hard_iface->if_num = -1;
+       hard_iface->net_dev = net_dev;
+       hard_iface->soft_iface = NULL;
+       hard_iface->if_status = IF_NOT_IN_USE;
+       INIT_LIST_HEAD(&hard_iface->list);
+       /* extra reference for return */
+       atomic_set(&hard_iface->refcount, 2);
 
-       check_known_mac_addr(batman_if->net_dev);
+       check_known_mac_addr(hard_iface->net_dev);
 
-       spin_lock(&if_list_lock);
-       list_add_tail_rcu(&batman_if->list, &if_list);
-       spin_unlock(&if_list_lock);
+       spin_lock(&hardif_list_lock);
+       list_add_tail_rcu(&hard_iface->list, &hardif_list);
+       spin_unlock(&hardif_list_lock);
 
-       /* extra reference for return */
-       kref_get(&batman_if->refcount);
-       return batman_if;
+       return hard_iface;
 
 free_if:
-       kfree(batman_if);
+       kfree(hard_iface);
 release_dev:
        dev_put(net_dev);
 out:
        return NULL;
 }
 
-static void hardif_remove_interface(struct batman_if *batman_if)
+static void hardif_remove_interface(struct hard_iface *hard_iface)
 {
        /* first deactivate interface */
-       if (batman_if->if_status != IF_NOT_IN_USE)
-               hardif_disable_interface(batman_if);
+       if (hard_iface->if_status != IF_NOT_IN_USE)
+               hardif_disable_interface(hard_iface);
 
-       if (batman_if->if_status != IF_NOT_IN_USE)
+       if (hard_iface->if_status != IF_NOT_IN_USE)
                return;
 
-       batman_if->if_status = IF_TO_BE_REMOVED;
-       sysfs_del_hardif(&batman_if->hardif_obj);
-       call_rcu(&batman_if->rcu, hardif_free_rcu);
+       hard_iface->if_status = IF_TO_BE_REMOVED;
+       sysfs_del_hardif(&hard_iface->hardif_obj);
+       hardif_free_ref(hard_iface);
 }
 
 void hardif_remove_interfaces(void)
 {
-       struct batman_if *batman_if, *batman_if_tmp;
+       struct hard_iface *hard_iface, *hard_iface_tmp;
        struct list_head if_queue;
 
        INIT_LIST_HEAD(&if_queue);
 
-       spin_lock(&if_list_lock);
-       list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list) {
-               list_del_rcu(&batman_if->list);
-               list_add_tail(&batman_if->list, &if_queue);
+       spin_lock(&hardif_list_lock);
+       list_for_each_entry_safe(hard_iface, hard_iface_tmp,
+                                &hardif_list, list) {
+               list_del_rcu(&hard_iface->list);
+               list_add_tail(&hard_iface->list, &if_queue);
        }
-       spin_unlock(&if_list_lock);
+       spin_unlock(&hardif_list_lock);
 
        rtnl_lock();
-       list_for_each_entry_safe(batman_if, batman_if_tmp, &if_queue, list) {
-               hardif_remove_interface(batman_if);
+       list_for_each_entry_safe(hard_iface, hard_iface_tmp, &if_queue, list) {
+               hardif_remove_interface(hard_iface);
        }
        rtnl_unlock();
 }
@@ -498,43 +513,43 @@ static int hard_if_event(struct notifier_block *this,
                         unsigned long event, void *ptr)
 {
        struct net_device *net_dev = (struct net_device *)ptr;
-       struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+       struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
        struct bat_priv *bat_priv;
 
-       if (!batman_if && event == NETDEV_REGISTER)
-               batman_if = hardif_add_interface(net_dev);
+       if (!hard_iface && event == NETDEV_REGISTER)
+               hard_iface = hardif_add_interface(net_dev);
 
-       if (!batman_if)
+       if (!hard_iface)
                goto out;
 
        switch (event) {
        case NETDEV_UP:
-               hardif_activate_interface(batman_if);
+               hardif_activate_interface(hard_iface);
                break;
        case NETDEV_GOING_DOWN:
        case NETDEV_DOWN:
-               hardif_deactivate_interface(batman_if);
+               hardif_deactivate_interface(hard_iface);
                break;
        case NETDEV_UNREGISTER:
-               spin_lock(&if_list_lock);
-               list_del_rcu(&batman_if->list);
-               spin_unlock(&if_list_lock);
+               spin_lock(&hardif_list_lock);
+               list_del_rcu(&hard_iface->list);
+               spin_unlock(&hardif_list_lock);
 
-               hardif_remove_interface(batman_if);
+               hardif_remove_interface(hard_iface);
                break;
        case NETDEV_CHANGEMTU:
-               if (batman_if->soft_iface)
-                       update_min_mtu(batman_if->soft_iface);
+               if (hard_iface->soft_iface)
+                       update_min_mtu(hard_iface->soft_iface);
                break;
        case NETDEV_CHANGEADDR:
-               if (batman_if->if_status == IF_NOT_IN_USE)
+               if (hard_iface->if_status == IF_NOT_IN_USE)
                        goto hardif_put;
 
-               check_known_mac_addr(batman_if->net_dev);
-               update_mac_addresses(batman_if);
+               check_known_mac_addr(hard_iface->net_dev);
+               update_mac_addresses(hard_iface);
 
-               bat_priv = netdev_priv(batman_if->soft_iface);
-               if (batman_if == bat_priv->primary_if)
+               bat_priv = netdev_priv(hard_iface->soft_iface);
+               if (hard_iface == bat_priv->primary_if)
                        update_primary_addr(bat_priv);
                break;
        default:
@@ -542,22 +557,23 @@ static int hard_if_event(struct notifier_block *this,
        };
 
 hardif_put:
-       kref_put(&batman_if->refcount, hardif_free_ref);
+       hardif_free_ref(hard_iface);
 out:
        return NOTIFY_DONE;
 }
 
 /* receive a packet with the batman ethertype coming on a hard
  * interface */
-int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
-       struct packet_type *ptype, struct net_device *orig_dev)
+static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
+                          struct packet_type *ptype,
+                          struct net_device *orig_dev)
 {
        struct bat_priv *bat_priv;
        struct batman_packet *batman_packet;
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
        int ret;
 
-       batman_if = container_of(ptype, struct batman_if, batman_adv_ptype);
+       hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
        skb = skb_share_check(skb, GFP_ATOMIC);
 
        /* skb was released by skb_share_check() */
@@ -573,16 +589,16 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
                                || !skb_mac_header(skb)))
                goto err_free;
 
-       if (!batman_if->soft_iface)
+       if (!hard_iface->soft_iface)
                goto err_free;
 
-       bat_priv = netdev_priv(batman_if->soft_iface);
+       bat_priv = netdev_priv(hard_iface->soft_iface);
 
        if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
                goto err_free;
 
        /* discard frames on inactive interfaces */
-       if (batman_if->if_status != IF_ACTIVE)
+       if (hard_iface->if_status != IF_ACTIVE)
                goto err_free;
 
        batman_packet = (struct batman_packet *)skb->data;
@@ -600,32 +616,32 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
        switch (batman_packet->packet_type) {
                /* batman originator packet */
        case BAT_PACKET:
-               ret = recv_bat_packet(skb, batman_if);
+               ret = recv_bat_packet(skb, hard_iface);
                break;
 
                /* batman icmp packet */
        case BAT_ICMP:
-               ret = recv_icmp_packet(skb, batman_if);
+               ret = recv_icmp_packet(skb, hard_iface);
                break;
 
                /* unicast packet */
        case BAT_UNICAST:
-               ret = recv_unicast_packet(skb, batman_if);
+               ret = recv_unicast_packet(skb, hard_iface);
                break;
 
                /* fragmented unicast packet */
        case BAT_UNICAST_FRAG:
-               ret = recv_ucast_frag_packet(skb, batman_if);
+               ret = recv_ucast_frag_packet(skb, hard_iface);
                break;
 
                /* broadcast packet */
        case BAT_BCAST:
-               ret = recv_bcast_packet(skb, batman_if);
+               ret = recv_bcast_packet(skb, hard_iface);
                break;
 
                /* vis packet */
        case BAT_VIS:
-               ret = recv_vis_packet(skb, batman_if);
+               ret = recv_vis_packet(skb, hard_iface);
                break;
        default:
                ret = NET_RX_DROP;
index 30ec3b8db45983d31e70be621a78fc7c21c133a0..a9ddf36e51c878a64d5b7b6c4a73077d80caee48 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
 
 extern struct notifier_block hard_if_notifier;
 
-struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev);
-int hardif_enable_interface(struct batman_if *batman_if, char *iface_name);
-void hardif_disable_interface(struct batman_if *batman_if);
+struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev);
+int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name);
+void hardif_disable_interface(struct hard_iface *hard_iface);
 void hardif_remove_interfaces(void);
-int batman_skb_recv(struct sk_buff *skb,
-                               struct net_device *dev,
-                               struct packet_type *ptype,
-                               struct net_device *orig_dev);
 int hardif_min_mtu(struct net_device *soft_iface);
 void update_min_mtu(struct net_device *soft_iface);
+void hardif_free_rcu(struct rcu_head *rcu);
 
-static inline void hardif_free_ref(struct kref *refcount)
+static inline void hardif_free_ref(struct hard_iface *hard_iface)
 {
-       struct batman_if *batman_if;
-
-       batman_if = container_of(refcount, struct batman_if, refcount);
-       kfree(batman_if);
+       if (atomic_dec_and_test(&hard_iface->refcount))
+               call_rcu(&hard_iface->rcu, hardif_free_rcu);
 }
 
 #endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */
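
The kref above is replaced by a bare atomic_t combined with RCU-deferred freeing: new references may only be taken with atomic_inc_not_zero(), and each successful acquisition must be balanced by one hardif_free_ref() (hardif_get_active(), used in hardif_disable_interface() earlier in this patch, returns such a pinned interface, hence the hardif_free_ref(new_if) there). A minimal sketch of the acquire side; example_get_active_hardif() is illustrative and not part of the patch:

    /* illustrative only: pin an interface found on the rcu protected list */
    static struct hard_iface *example_get_active_hardif(void)
    {
            struct hard_iface *hard_iface;

            rcu_read_lock();
            list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
                    if (hard_iface->if_status != IF_ACTIVE)
                            continue;

                    /* skip interfaces whose refcount already dropped to zero */
                    if (!atomic_inc_not_zero(&hard_iface->refcount))
                            continue;

                    rcu_read_unlock();
                    return hard_iface;      /* caller must hardif_free_ref() it */
            }
            rcu_read_unlock();

            return NULL;
    }
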
index 26e623eb9def53240a09383bf582ea2e98482726..c5213d8f2cca2f83120aebd1ada7e5d31b4ca2a4 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
@@ -27,13 +27,16 @@ static void hash_init(struct hashtable_t *hash)
 {
        int i;
 
-       for (i = 0 ; i < hash->size; i++)
+       for (i = 0 ; i < hash->size; i++) {
                INIT_HLIST_HEAD(&hash->table[i]);
+               spin_lock_init(&hash->list_locks[i]);
+       }
 }
 
 /* free only the hashtable and the hash itself. */
 void hash_destroy(struct hashtable_t *hash)
 {
+       kfree(hash->list_locks);
        kfree(hash->table);
        kfree(hash);
 }
@@ -43,20 +46,25 @@ struct hashtable_t *hash_new(int size)
 {
        struct hashtable_t *hash;
 
-       hash = kmalloc(sizeof(struct hashtable_t) , GFP_ATOMIC);
-
+       hash = kmalloc(sizeof(struct hashtable_t), GFP_ATOMIC);
        if (!hash)
                return NULL;
 
-       hash->size = size;
        hash->table = kmalloc(sizeof(struct element_t *) * size, GFP_ATOMIC);
+       if (!hash->table)
+               goto free_hash;
 
-       if (!hash->table) {
-               kfree(hash);
-               return NULL;
-       }
+       hash->list_locks = kmalloc(sizeof(spinlock_t) * size, GFP_ATOMIC);
+       if (!hash->list_locks)
+               goto free_table;
 
+       hash->size = size;
        hash_init(hash);
-
        return hash;
+
+free_table:
+       kfree(hash->table);
+free_hash:
+       kfree(hash);
+       return NULL;
 }
index 09216ade16f1a92a8f907af1192deafc902f03d9..434822b27473683bada12866c5ce3b61ece6711e 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
  * compares two elements by their keys;
  * returns non-zero if they are the same
  * and 0 otherwise */
-typedef int (*hashdata_compare_cb)(void *, void *);
+typedef int (*hashdata_compare_cb)(struct hlist_node *, void *);
 
 /* the hashfunction, should return an index
  * based on the key in the data of the first
  * argument and the size the second */
 typedef int (*hashdata_choose_cb)(void *, int);
-typedef void (*hashdata_free_cb)(void *, void *);
-
-struct element_t {
-       void *data;             /* pointer to the data */
-       struct hlist_node hlist;        /* bucket list pointer */
-};
+typedef void (*hashdata_free_cb)(struct hlist_node *, void *);
 
 struct hashtable_t {
-       struct hlist_head *table;   /* the hashtable itself, with the buckets */
+       struct hlist_head *table;   /* the hashtable itself with the buckets */
+       spinlock_t *list_locks;     /* spinlock for each hash list entry */
        int size;                   /* size of hashtable */
 };
 
 /* allocates and clears the hash */
 struct hashtable_t *hash_new(int size);
 
-/* remove element if you already found the element you want to delete and don't
- * need the overhead to find it again with hash_remove().  But usually, you
- * don't want to use this function, as it fiddles with hash-internals. */
-void *hash_remove_element(struct hashtable_t *hash, struct element_t *elem);
-
 /* free only the hashtable and the hash itself. */
 void hash_destroy(struct hashtable_t *hash);
 
@@ -64,21 +55,22 @@ static inline void hash_delete(struct hashtable_t *hash,
                               hashdata_free_cb free_cb, void *arg)
 {
        struct hlist_head *head;
-       struct hlist_node *walk, *safe;
-       struct element_t *bucket;
+       struct hlist_node *node, *node_tmp;
+       spinlock_t *list_lock; /* spinlock to protect write access */
        int i;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
+               list_lock = &hash->list_locks[i];
 
-               hlist_for_each_safe(walk, safe, head) {
-                       bucket = hlist_entry(walk, struct element_t, hlist);
-                       if (free_cb)
-                               free_cb(bucket->data, arg);
+               spin_lock_bh(list_lock);
+               hlist_for_each_safe(node, node_tmp, head) {
+                       hlist_del_rcu(node);
 
-                       hlist_del(walk);
-                       kfree(bucket);
+                       if (free_cb)
+                               free_cb(node, arg);
                }
+               spin_unlock_bh(list_lock);
        }
 
        hash_destroy(hash);
@@ -87,35 +79,41 @@ static inline void hash_delete(struct hashtable_t *hash,
 /* adds data to the hashtable. returns 0 on success, -1 on error */
 static inline int hash_add(struct hashtable_t *hash,
                           hashdata_compare_cb compare,
-                          hashdata_choose_cb choose, void *data)
+                          hashdata_choose_cb choose,
+                          void *data, struct hlist_node *data_node)
 {
        int index;
        struct hlist_head *head;
-       struct hlist_node *walk, *safe;
-       struct element_t *bucket;
+       struct hlist_node *node;
+       spinlock_t *list_lock; /* spinlock to protect write access */
 
        if (!hash)
-               return -1;
+               goto err;
 
        index = choose(data, hash->size);
        head = &hash->table[index];
+       list_lock = &hash->list_locks[index];
+
+       rcu_read_lock();
+       __hlist_for_each_rcu(node, head) {
+               if (!compare(node, data))
+                       continue;
 
-       hlist_for_each_safe(walk, safe, head) {
-               bucket = hlist_entry(walk, struct element_t, hlist);
-               if (compare(bucket->data, data))
-                       return -1;
+               goto err_unlock;
        }
+       rcu_read_unlock();
 
        /* no duplicate found in list, add new element */
-       bucket = kmalloc(sizeof(struct element_t), GFP_ATOMIC);
-
-       if (!bucket)
-               return -1;
-
-       bucket->data = data;
-       hlist_add_head(&bucket->hlist, head);
+       spin_lock_bh(list_lock);
+       hlist_add_head_rcu(data_node, head);
+       spin_unlock_bh(list_lock);
 
        return 0;
+
+err_unlock:
+       rcu_read_unlock();
+err:
+       return -1;
 }
 
 /* removes data from hash, if found. returns pointer to data on success, so you
@@ -127,50 +125,25 @@ static inline void *hash_remove(struct hashtable_t *hash,
                                hashdata_choose_cb choose, void *data)
 {
        size_t index;
-       struct hlist_node *walk;
-       struct element_t *bucket;
+       struct hlist_node *node;
        struct hlist_head *head;
-       void *data_save;
+       void *data_save = NULL;
 
        index = choose(data, hash->size);
        head = &hash->table[index];
 
-       hlist_for_each_entry(bucket, walk, head, hlist) {
-               if (compare(bucket->data, data)) {
-                       data_save = bucket->data;
-                       hlist_del(walk);
-                       kfree(bucket);
-                       return data_save;
-               }
-       }
-
-       return NULL;
-}
-
-/* finds data, based on the key in keydata. returns the found data on success,
- * or NULL on error */
-static inline void *hash_find(struct hashtable_t *hash,
-                             hashdata_compare_cb compare,
-                             hashdata_choose_cb choose, void *keydata)
-{
-       int index;
-       struct hlist_head *head;
-       struct hlist_node *walk;
-       struct element_t *bucket;
-
-       if (!hash)
-               return NULL;
-
-       index = choose(keydata , hash->size);
-       head = &hash->table[index];
+       spin_lock_bh(&hash->list_locks[index]);
+       hlist_for_each(node, head) {
+               if (!compare(node, data))
+                       continue;
 
-       hlist_for_each(walk, head) {
-               bucket = hlist_entry(walk, struct element_t, hlist);
-               if (compare(bucket->data, keydata))
-                       return bucket->data;
+               data_save = node;
+               hlist_del_rcu(node);
+               break;
        }
+       spin_unlock_bh(&hash->list_locks[index]);
 
-       return NULL;
+       return data_save;
 }
 
 #endif /* _NET_BATMAN_ADV_HASH_H_ */
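
With struct element_t gone, each hashed object now embeds its own struct hlist_node, and the compare callback receives that node and recovers the object via container_of() (compare_orig() in originator.h does exactly this, and get_orig_node() passes &orig_node->hash_entry into hash_add()). A condensed sketch of the new calling convention with a hypothetical entry type; note that the patch itself passes the object pointer as the key and compares the object's leading ETH_ALEN bytes, whereas the sketch passes the key field explicitly:

    /* hypothetical hashed object: the list linkage is embedded in the entry */
    struct my_entry {
            struct hlist_node hash_entry;
            uint8_t key[ETH_ALEN];
    };

    /* compare callback: non-zero on a key match, as the duplicate check in
     * hash_add() and the removal loop in hash_remove() expect */
    static int my_compare(struct hlist_node *node, void *key)
    {
            struct my_entry *entry = container_of(node, struct my_entry,
                                                  hash_entry);

            return (memcmp(entry->key, key, ETH_ALEN) == 0);
    }

    /* choose callback: map the key onto a bucket index */
    static int my_choose(void *key, int size)
    {
            return ((uint8_t *)key)[0] % size;
    }

    static int my_insert(struct hashtable_t *hash, struct my_entry *entry)
    {
            /* key for bucket selection and duplicate detection, embedded
             * node for the actual bucket list */
            return hash_add(hash, my_compare, my_choose, entry->key,
                            &entry->hash_entry);
    }
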
index ecf6d7ffab2ededc79c4a4d119163bad9662899c..34ce56c358e5ee750f13395eee1f395d0029979d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -24,7 +24,6 @@
 #include <linux/slab.h>
 #include "icmp_socket.h"
 #include "send.h"
-#include "types.h"
 #include "hash.h"
 #include "originator.h"
 #include "hard-interface.h"
@@ -157,10 +156,9 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
        struct sk_buff *skb;
        struct icmp_packet_rr *icmp_packet;
 
-       struct orig_node *orig_node;
-       struct batman_if *batman_if;
+       struct orig_node *orig_node = NULL;
+       struct neigh_node *neigh_node = NULL;
        size_t packet_len = sizeof(struct icmp_packet);
-       uint8_t dstaddr[ETH_ALEN];
 
        if (len < sizeof(struct icmp_packet)) {
                bat_dbg(DBG_BATMAN, bat_priv,
@@ -220,47 +218,52 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
        if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
                goto dst_unreach;
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-       orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
-                                                  compare_orig, choose_orig,
-                                                  icmp_packet->dst));
+       rcu_read_lock();
+       orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
 
        if (!orig_node)
                goto unlock;
 
-       if (!orig_node->router)
+       neigh_node = orig_node->router;
+
+       if (!neigh_node)
                goto unlock;
 
-       batman_if = orig_node->router->if_incoming;
-       memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
+       if (!atomic_inc_not_zero(&neigh_node->refcount)) {
+               neigh_node = NULL;
+               goto unlock;
+       }
 
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+       rcu_read_unlock();
 
-       if (!batman_if)
+       if (!neigh_node->if_incoming)
                goto dst_unreach;
 
-       if (batman_if->if_status != IF_ACTIVE)
+       if (neigh_node->if_incoming->if_status != IF_ACTIVE)
                goto dst_unreach;
 
        memcpy(icmp_packet->orig,
               bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
 
        if (packet_len == sizeof(struct icmp_packet_rr))
-               memcpy(icmp_packet->rr, batman_if->net_dev->dev_addr, ETH_ALEN);
-
-
-       send_skb_packet(skb, batman_if, dstaddr);
+               memcpy(icmp_packet->rr,
+                      neigh_node->if_incoming->net_dev->dev_addr, ETH_ALEN);
 
+       send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
        goto out;
 
 unlock:
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+       rcu_read_unlock();
 dst_unreach:
        icmp_packet->msg_type = DESTINATION_UNREACHABLE;
        bat_socket_add_packet(socket_client, icmp_packet, packet_len);
 free_skb:
        kfree_skb(skb);
 out:
+       if (neigh_node)
+               neigh_node_free_ref(neigh_node);
+       if (orig_node)
+               orig_node_free_ref(orig_node);
        return len;
 }
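
The rewritten sender path above shows the lookup discipline this series converts to throughout: resolve the originator under rcu_read_lock(), pin the chosen router with atomic_inc_not_zero(), leave the RCU section, and drop both references on every exit path instead of copying the next-hop address out under a global hash lock. A stripped-down sketch of that shape; example_send_to() is illustrative and omits the interface-status checks and skb error handling:

    static int example_send_to(struct bat_priv *bat_priv, uint8_t *dst,
                               struct sk_buff *skb)
    {
            struct orig_node *orig_node = NULL;
            struct neigh_node *neigh_node = NULL;
            int ret = NET_RX_DROP;

            rcu_read_lock();
            orig_node = orig_hash_find(bat_priv, dst);      /* takes a reference */
            if (!orig_node)
                    goto unlock;

            neigh_node = orig_node->router;
            if (!neigh_node || !atomic_inc_not_zero(&neigh_node->refcount)) {
                    neigh_node = NULL;
                    goto unlock;
            }
            rcu_read_unlock();

            send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
            ret = NET_RX_SUCCESS;
            goto out;

    unlock:
            rcu_read_unlock();
    out:
            if (neigh_node)
                    neigh_node_free_ref(neigh_node);
            if (orig_node)
                    orig_node_free_ref(orig_node);
            return ret;
    }
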
 
index bf9b348cde2786cf4469105f2c1979fbf318e293..462b190fa1019bc795e0ce6b648e739d5c0b7b52 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -22,8 +22,6 @@
 #ifndef _NET_BATMAN_ADV_ICMP_SOCKET_H_
 #define _NET_BATMAN_ADV_ICMP_SOCKET_H_
 
-#include "types.h"
-
 #define ICMP_SOCKET "socket"
 
 void bat_socket_init(void);
index b827f6a158cb8df28c2f1dab8f97bff07a6f868c..709b33bbdf438fe9680d3eaa09547ef0e1d90c62 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
 #include "translation-table.h"
 #include "hard-interface.h"
 #include "gateway_client.h"
-#include "types.h"
 #include "vis.h"
 #include "hash.h"
 
-struct list_head if_list;
+struct list_head hardif_list;
 
 unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 
@@ -42,7 +41,7 @@ struct workqueue_struct *bat_event_workqueue;
 
 static int __init batman_init(void)
 {
-       INIT_LIST_HEAD(&if_list);
+       INIT_LIST_HEAD(&hardif_list);
 
        /* the name should not be longer than 10 chars - see
         * http://lwn.net/Articles/23634/ */
@@ -80,7 +79,6 @@ int mesh_init(struct net_device *soft_iface)
 {
        struct bat_priv *bat_priv = netdev_priv(soft_iface);
 
-       spin_lock_init(&bat_priv->orig_hash_lock);
        spin_lock_init(&bat_priv->forw_bat_list_lock);
        spin_lock_init(&bat_priv->forw_bcast_list_lock);
        spin_lock_init(&bat_priv->hna_lhash_lock);
@@ -155,14 +153,14 @@ void dec_module_count(void)
 
 int is_my_mac(uint8_t *addr)
 {
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(batman_if, &if_list, list) {
-               if (batman_if->if_status != IF_ACTIVE)
+       list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+               if (hard_iface->if_status != IF_ACTIVE)
                        continue;
 
-               if (compare_orig(batman_if->net_dev->dev_addr, addr)) {
+               if (compare_eth(hard_iface->net_dev->dev_addr, addr)) {
                        rcu_read_unlock();
                        return 1;
                }
index 65106fb61b8fce83c442d27e1eb848ab8e92ef67..dc248697de71591c317cf7f7b032e743877a3d96 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -22,9 +22,6 @@
 #ifndef _NET_BATMAN_ADV_MAIN_H_
 #define _NET_BATMAN_ADV_MAIN_H_
 
-/* Kernel Programming */
-#define LINUX
-
 #define DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \
                      "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
 #define DRIVER_DESC   "B.A.T.M.A.N. advanced"
@@ -54,7 +51,6 @@
 
 #define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
 
-#define PACKBUFF_SIZE 2000
 #define LOG_BUF_LEN 8192         /* has to be a power of 2 */
 
 #define VIS_INTERVAL 5000      /* 5 seconds */
 #define DBG_ROUTES 2   /* route or hna added / changed / deleted */
 #define DBG_ALL 3
 
-#define LOG_BUF_LEN 8192          /* has to be a power of 2 */
-
 
 /*
  *  Vis
  */
 
-/* #define VIS_SUBCLUSTERS_DISABLED */
-
 /*
  * Kernel headers
  */
 #define REVISION_VERSION_STR " "REVISION_VERSION
 #endif
 
-extern struct list_head if_list;
+extern struct list_head hardif_list;
 
 extern unsigned char broadcast_addr[];
 extern struct workqueue_struct *bat_event_workqueue;
@@ -158,13 +150,6 @@ static inline void bat_dbg(char type __always_unused,
 }
 #endif
 
-#define bat_warning(net_dev, fmt, arg...)                              \
-       do {                                                            \
-               struct net_device *_netdev = (net_dev);                 \
-               struct bat_priv *_batpriv = netdev_priv(_netdev);       \
-               bat_dbg(DBG_ALL, _batpriv, fmt, ## arg);                \
-               pr_warning("%s: " fmt, _netdev->name, ## arg);          \
-       } while (0)
 #define bat_info(net_dev, fmt, arg...)                                 \
        do {                                                            \
                struct net_device *_netdev = (net_dev);                 \
@@ -180,4 +165,14 @@ static inline void bat_dbg(char type __always_unused,
                pr_err("%s: " fmt, _netdev->name, ## arg);              \
        } while (0)
 
+/**
+ * returns 1 if they are the same ethernet addr
+ *
+ * note: can't use compare_ether_addr() as it requires aligned memory
+ */
+static inline int compare_eth(void *data1, void *data2)
+{
+       return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
+}
+
 #endif /* _NET_BATMAN_ADV_MAIN_H_ */
index 6b7fb6b7e6f9b97e3a45b9fce87b7f6ce70faebd..0b9133022d2dd58f3f91d55940667ede6dab9e50 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -44,24 +44,36 @@ int originator_init(struct bat_priv *bat_priv)
        if (bat_priv->orig_hash)
                return 1;
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
        bat_priv->orig_hash = hash_new(1024);
 
        if (!bat_priv->orig_hash)
                goto err;
 
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
        start_purge_timer(bat_priv);
        return 1;
 
 err:
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
        return 0;
 }
 
-struct neigh_node *
-create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
-               uint8_t *neigh, struct batman_if *if_incoming)
+static void neigh_node_free_rcu(struct rcu_head *rcu)
+{
+       struct neigh_node *neigh_node;
+
+       neigh_node = container_of(rcu, struct neigh_node, rcu);
+       kfree(neigh_node);
+}
+
+void neigh_node_free_ref(struct neigh_node *neigh_node)
+{
+       if (atomic_dec_and_test(&neigh_node->refcount))
+               call_rcu(&neigh_node->rcu, neigh_node_free_rcu);
+}
+
+struct neigh_node *create_neighbor(struct orig_node *orig_node,
+                                  struct orig_node *orig_neigh_node,
+                                  uint8_t *neigh,
+                                  struct hard_iface *if_incoming)
 {
        struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
        struct neigh_node *neigh_node;
@@ -73,50 +85,94 @@ create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
        if (!neigh_node)
                return NULL;
 
-       INIT_LIST_HEAD(&neigh_node->list);
+       INIT_HLIST_NODE(&neigh_node->list);
+       INIT_LIST_HEAD(&neigh_node->bonding_list);
 
        memcpy(neigh_node->addr, neigh, ETH_ALEN);
        neigh_node->orig_node = orig_neigh_node;
        neigh_node->if_incoming = if_incoming;
 
-       list_add_tail(&neigh_node->list, &orig_node->neigh_list);
+       /* extra reference for return */
+       atomic_set(&neigh_node->refcount, 2);
+
+       spin_lock_bh(&orig_node->neigh_list_lock);
+       hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
+       spin_unlock_bh(&orig_node->neigh_list_lock);
        return neigh_node;
 }
 
-static void free_orig_node(void *data, void *arg)
+static void orig_node_free_rcu(struct rcu_head *rcu)
 {
-       struct list_head *list_pos, *list_pos_tmp;
-       struct neigh_node *neigh_node;
-       struct orig_node *orig_node = (struct orig_node *)data;
-       struct bat_priv *bat_priv = (struct bat_priv *)arg;
+       struct hlist_node *node, *node_tmp;
+       struct neigh_node *neigh_node, *tmp_neigh_node;
+       struct orig_node *orig_node;
 
-       /* for all neighbors towards this originator ... */
-       list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) {
-               neigh_node = list_entry(list_pos, struct neigh_node, list);
+       orig_node = container_of(rcu, struct orig_node, rcu);
+
+       spin_lock_bh(&orig_node->neigh_list_lock);
+
+       /* for all bonding members ... */
+       list_for_each_entry_safe(neigh_node, tmp_neigh_node,
+                                &orig_node->bond_list, bonding_list) {
+               list_del_rcu(&neigh_node->bonding_list);
+               neigh_node_free_ref(neigh_node);
+       }
 
-               list_del(list_pos);
-               kfree(neigh_node);
+       /* for all neighbors towards this originator ... */
+       hlist_for_each_entry_safe(neigh_node, node, node_tmp,
+                                 &orig_node->neigh_list, list) {
+               hlist_del_rcu(&neigh_node->list);
+               neigh_node_free_ref(neigh_node);
        }
 
+       spin_unlock_bh(&orig_node->neigh_list_lock);
+
        frag_list_free(&orig_node->frag_list);
-       hna_global_del_orig(bat_priv, orig_node, "originator timed out");
+       hna_global_del_orig(orig_node->bat_priv, orig_node,
+                           "originator timed out");
 
        kfree(orig_node->bcast_own);
        kfree(orig_node->bcast_own_sum);
        kfree(orig_node);
 }
 
+void orig_node_free_ref(struct orig_node *orig_node)
+{
+       if (atomic_dec_and_test(&orig_node->refcount))
+               call_rcu(&orig_node->rcu, orig_node_free_rcu);
+}
+
 void originator_free(struct bat_priv *bat_priv)
 {
-       if (!bat_priv->orig_hash)
+       struct hashtable_t *hash = bat_priv->orig_hash;
+       struct hlist_node *node, *node_tmp;
+       struct hlist_head *head;
+       spinlock_t *list_lock; /* spinlock to protect write access */
+       struct orig_node *orig_node;
+       int i;
+
+       if (!hash)
                return;
 
        cancel_delayed_work_sync(&bat_priv->orig_work);
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-       hash_delete(bat_priv->orig_hash, free_orig_node, bat_priv);
        bat_priv->orig_hash = NULL;
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+
+       for (i = 0; i < hash->size; i++) {
+               head = &hash->table[i];
+               list_lock = &hash->list_locks[i];
+
+               spin_lock_bh(list_lock);
+               hlist_for_each_entry_safe(orig_node, node, node_tmp,
+                                         head, hash_entry) {
+
+                       hlist_del_rcu(node);
+                       orig_node_free_ref(orig_node);
+               }
+               spin_unlock_bh(list_lock);
+       }
+
+       hash_destroy(hash);
 }
 
 /* this function finds or creates an originator entry for the given
@@ -127,10 +183,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
        int size;
        int hash_added;
 
-       orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
-                                                  compare_orig, choose_orig,
-                                                  addr));
-
+       orig_node = orig_hash_find(bat_priv, addr);
        if (orig_node)
                return orig_node;
 
@@ -141,8 +194,16 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
        if (!orig_node)
                return NULL;
 
-       INIT_LIST_HEAD(&orig_node->neigh_list);
+       INIT_HLIST_HEAD(&orig_node->neigh_list);
+       INIT_LIST_HEAD(&orig_node->bond_list);
+       spin_lock_init(&orig_node->ogm_cnt_lock);
+       spin_lock_init(&orig_node->bcast_seqno_lock);
+       spin_lock_init(&orig_node->neigh_list_lock);
+
+       /* extra reference for return */
+       atomic_set(&orig_node->refcount, 2);
 
+       orig_node->bat_priv = bat_priv;
        memcpy(orig_node->orig, addr, ETH_ALEN);
        orig_node->router = NULL;
        orig_node->hna_buff = NULL;
@@ -151,6 +212,8 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
        orig_node->batman_seqno_reset = jiffies - 1
                                        - msecs_to_jiffies(RESET_PROTECTION_MS);
 
+       atomic_set(&orig_node->bond_candidates, 0);
+
        size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS;
 
        orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
@@ -166,8 +229,8 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
        if (!orig_node->bcast_own_sum)
                goto free_bcast_own;
 
-       hash_added = hash_add(bat_priv->orig_hash, compare_orig, choose_orig,
-                             orig_node);
+       hash_added = hash_add(bat_priv->orig_hash, compare_orig,
+                             choose_orig, orig_node, &orig_node->hash_entry);
        if (hash_added < 0)
                goto free_bcast_own_sum;
 
@@ -185,23 +248,30 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
                                 struct orig_node *orig_node,
                                 struct neigh_node **best_neigh_node)
 {
-       struct list_head *list_pos, *list_pos_tmp;
+       struct hlist_node *node, *node_tmp;
        struct neigh_node *neigh_node;
        bool neigh_purged = false;
 
        *best_neigh_node = NULL;
 
+       spin_lock_bh(&orig_node->neigh_list_lock);
+
        /* for all neighbors towards this originator ... */
-       list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) {
-               neigh_node = list_entry(list_pos, struct neigh_node, list);
+       hlist_for_each_entry_safe(neigh_node, node, node_tmp,
+                                 &orig_node->neigh_list, list) {
 
                if ((time_after(jiffies,
                        neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
                    (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
+                   (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
                    (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {
 
-                       if (neigh_node->if_incoming->if_status ==
-                                                       IF_TO_BE_REMOVED)
+                       if ((neigh_node->if_incoming->if_status ==
+                                                               IF_INACTIVE) ||
+                           (neigh_node->if_incoming->if_status ==
+                                                       IF_NOT_IN_USE) ||
+                           (neigh_node->if_incoming->if_status ==
+                                                       IF_TO_BE_REMOVED))
                                bat_dbg(DBG_BATMAN, bat_priv,
                                        "neighbor purge: originator %pM, "
                                        "neighbor: %pM, iface: %s\n",
@@ -215,14 +285,18 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
                                        (neigh_node->last_valid / HZ));
 
                        neigh_purged = true;
-                       list_del(list_pos);
-                       kfree(neigh_node);
+
+                       hlist_del_rcu(&neigh_node->list);
+                       bonding_candidate_del(orig_node, neigh_node);
+                       neigh_node_free_ref(neigh_node);
                } else {
                        if ((!*best_neigh_node) ||
                            (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
                                *best_neigh_node = neigh_node;
                }
        }
+
+       spin_unlock_bh(&orig_node->neigh_list_lock);
        return neigh_purged;
 }
 
@@ -245,9 +319,6 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
                                      best_neigh_node,
                                      orig_node->hna_buff,
                                      orig_node->hna_buff_len);
-                       /* update bonding candidates, we could have lost
-                        * some candidates. */
-                       update_bonding_candidates(bat_priv, orig_node);
                }
        }
 
@@ -257,40 +328,38 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
 static void _purge_orig(struct bat_priv *bat_priv)
 {
        struct hashtable_t *hash = bat_priv->orig_hash;
-       struct hlist_node *walk, *safe;
+       struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
-       struct element_t *bucket;
+       spinlock_t *list_lock; /* spinlock to protect write access */
        struct orig_node *orig_node;
        int i;
 
        if (!hash)
                return;
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-
        /* for all origins... */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
+               list_lock = &hash->list_locks[i];
 
-               hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
-                       orig_node = bucket->data;
-
+               spin_lock_bh(list_lock);
+               hlist_for_each_entry_safe(orig_node, node, node_tmp,
+                                         head, hash_entry) {
                        if (purge_orig_node(bat_priv, orig_node)) {
                                if (orig_node->gw_flags)
                                        gw_node_delete(bat_priv, orig_node);
-                               hlist_del(walk);
-                               kfree(bucket);
-                               free_orig_node(orig_node, bat_priv);
+                               hlist_del_rcu(node);
+                               orig_node_free_ref(orig_node);
+                               continue;
                        }
 
                        if (time_after(jiffies, orig_node->last_frag_packet +
                                                msecs_to_jiffies(FRAG_TIMEOUT)))
                                frag_list_free(&orig_node->frag_list);
                }
+               spin_unlock_bh(list_lock);
        }
 
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
-
        gw_node_purge(bat_priv);
        gw_election(bat_priv);
 
@@ -318,9 +387,8 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hashtable_t *hash = bat_priv->orig_hash;
-       struct hlist_node *walk;
+       struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
-       struct element_t *bucket;
        struct orig_node *orig_node;
        struct neigh_node *neigh_node;
        int batman_count = 0;
@@ -348,14 +416,11 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
                   "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
                   "outgoingIF", "Potential nexthops");
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       orig_node = bucket->data;
-
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
                        if (!orig_node->router)
                                continue;
 
@@ -374,8 +439,8 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
                                   neigh_node->addr,
                                   neigh_node->if_incoming->net_dev->name);
 
-                       list_for_each_entry(neigh_node, &orig_node->neigh_list,
-                                           list) {
+                       hlist_for_each_entry_rcu(neigh_node, node_tmp,
+                                                &orig_node->neigh_list, list) {
                                seq_printf(seq, " %pM (%3i)", neigh_node->addr,
                                                neigh_node->tq_avg);
                        }
@@ -383,10 +448,9 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
                        seq_printf(seq, "\n");
                        batman_count++;
                }
+               rcu_read_unlock();
        }
 
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
-
        if (batman_count == 0)
                seq_printf(seq, "No batman nodes in range ...\n");
 
@@ -423,36 +487,36 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
        return 0;
 }
 
-int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
+int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num)
 {
-       struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+       struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct hashtable_t *hash = bat_priv->orig_hash;
-       struct hlist_node *walk;
+       struct hlist_node *node;
        struct hlist_head *head;
-       struct element_t *bucket;
        struct orig_node *orig_node;
-       int i;
+       int i, ret;
 
        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num */
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       orig_node = bucket->data;
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+                       spin_lock_bh(&orig_node->ogm_cnt_lock);
+                       ret = orig_node_add_if(orig_node, max_if_num);
+                       spin_unlock_bh(&orig_node->ogm_cnt_lock);
 
-                       if (orig_node_add_if(orig_node, max_if_num) == -1)
+                       if (ret == -1)
                                goto err;
                }
+               rcu_read_unlock();
        }
 
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
        return 0;
 
 err:
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+       rcu_read_unlock();
        return -ENOMEM;
 }
 
@@ -508,57 +572,55 @@ free_own_sum:
        return 0;
 }
 
-int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
+int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
 {
-       struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+       struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct hashtable_t *hash = bat_priv->orig_hash;
-       struct hlist_node *walk;
+       struct hlist_node *node;
        struct hlist_head *head;
-       struct element_t *bucket;
-       struct batman_if *batman_if_tmp;
+       struct hard_iface *hard_iface_tmp;
        struct orig_node *orig_node;
        int i, ret;
 
        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num */
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       orig_node = bucket->data;
-
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+                       spin_lock_bh(&orig_node->ogm_cnt_lock);
                        ret = orig_node_del_if(orig_node, max_if_num,
-                                       batman_if->if_num);
+                                       hard_iface->if_num);
+                       spin_unlock_bh(&orig_node->ogm_cnt_lock);
 
                        if (ret == -1)
                                goto err;
                }
+               rcu_read_unlock();
        }
 
        /* renumber remaining batman interfaces under the hardif_list rcu read lock */
        rcu_read_lock();
-       list_for_each_entry_rcu(batman_if_tmp, &if_list, list) {
-               if (batman_if_tmp->if_status == IF_NOT_IN_USE)
+       list_for_each_entry_rcu(hard_iface_tmp, &hardif_list, list) {
+               if (hard_iface_tmp->if_status == IF_NOT_IN_USE)
                        continue;
 
-               if (batman_if == batman_if_tmp)
+               if (hard_iface == hard_iface_tmp)
                        continue;
 
-               if (batman_if->soft_iface != batman_if_tmp->soft_iface)
+               if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
                        continue;
 
-               if (batman_if_tmp->if_num > batman_if->if_num)
-                       batman_if_tmp->if_num--;
+               if (hard_iface_tmp->if_num > hard_iface->if_num)
+                       hard_iface_tmp->if_num--;
        }
        rcu_read_unlock();
 
-       batman_if->if_num = -1;
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+       hard_iface->if_num = -1;
        return 0;
 
 err:
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+       rcu_read_unlock();
        return -ENOMEM;
 }
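
One idiom above that is easy to misread: create_neighbor() and get_orig_node() initialise the new refcount to 2 rather than 1, because one reference belongs to the list or hash the object was just linked into (dropped when it is purged) and the second belongs to the caller receiving the pointer. A short illustrative caller; example_note_neighbor() is not part of the patch:

    static void example_note_neighbor(struct orig_node *orig_node,
                                      struct orig_node *orig_neigh_node,
                                      uint8_t *addr,
                                      struct hard_iface *if_incoming)
    {
            struct neigh_node *neigh_node;

            /* one reference stays with orig_node->neigh_list, one comes back */
            neigh_node = create_neighbor(orig_node, orig_neigh_node, addr,
                                         if_incoming);
            if (!neigh_node)
                    return;

            /* ... update tq / last_valid etc. on neigh_node ... */

            neigh_node_free_ref(neigh_node);        /* drop only our reference */
    }
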
index d474ceb2a4eb56193723943c8d7a09e1efa5107b..5cc011057da1a8f2f89ebb7782d9aa79cb6983b2 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
 #ifndef _NET_BATMAN_ADV_ORIGINATOR_H_
 #define _NET_BATMAN_ADV_ORIGINATOR_H_
 
+#include "hash.h"
+
 int originator_init(struct bat_priv *bat_priv);
 void originator_free(struct bat_priv *bat_priv);
 void purge_orig_ref(struct bat_priv *bat_priv);
+void orig_node_free_ref(struct orig_node *orig_node);
 struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr);
-struct neigh_node *
-create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
-               uint8_t *neigh, struct batman_if *if_incoming);
+struct neigh_node *create_neighbor(struct orig_node *orig_node,
+                                  struct orig_node *orig_neigh_node,
+                                  uint8_t *neigh,
+                                  struct hard_iface *if_incoming);
+void neigh_node_free_ref(struct neigh_node *neigh_node);
 int orig_seq_print_text(struct seq_file *seq, void *offset);
-int orig_hash_add_if(struct batman_if *batman_if, int max_if_num);
-int orig_hash_del_if(struct batman_if *batman_if, int max_if_num);
+int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num);
+int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num);
 
 
 /* returns 1 if they are the same originator */
-static inline int compare_orig(void *data1, void *data2)
+static inline int compare_orig(struct hlist_node *node, void *data2)
 {
+       void *data1 = container_of(node, struct orig_node, hash_entry);
+
        return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
 }
 
@@ -61,4 +68,35 @@ static inline int choose_orig(void *data, int32_t size)
        return hash % size;
 }
 
+static inline struct orig_node *orig_hash_find(struct bat_priv *bat_priv,
+                                              void *data)
+{
+       struct hashtable_t *hash = bat_priv->orig_hash;
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct orig_node *orig_node, *orig_node_tmp = NULL;
+       int index;
+
+       if (!hash)
+               return NULL;
+
+       index = choose_orig(data, hash->size);
+       head = &hash->table[index];
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+               if (!compare_eth(orig_node, data))
+                       continue;
+
+               if (!atomic_inc_not_zero(&orig_node->refcount))
+                       continue;
+
+               orig_node_tmp = orig_node;
+               break;
+       }
+       rcu_read_unlock();
+
+       return orig_node_tmp;
+}
+
 #endif /* _NET_BATMAN_ADV_ORIGINATOR_H_ */
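
Unlike the hash_find() it replaces, orig_hash_find() hands back the node with its refcount already raised (the atomic_inc_not_zero() inside the RCU section), so every successful lookup must eventually be balanced by orig_node_free_ref(). A minimal illustrative caller:

    static bool example_orig_is_known(struct bat_priv *bat_priv, uint8_t *addr)
    {
            struct orig_node *orig_node;

            orig_node = orig_hash_find(bat_priv, addr);
            if (!orig_node)
                    return false;

            /* orig_node may be dereferenced safely until the reference is
             * dropped again, even outside any rcu_read_lock() section */
            orig_node_free_ref(orig_node);
            return true;
    }
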
index 2284e8129cb26b8d5554538cd71b7722deea3f13..e7571879af3f28b9460145408223ed5a8bcd252d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -50,6 +50,7 @@
 
 /* fragmentation defines */
 #define UNI_FRAG_HEAD 0x01
+#define UNI_FRAG_LARGETAIL 0x02
 
 struct batman_packet {
        uint8_t  packet_type;
index defd37c9be1fe57e8ebca6330e41feef9a446f0b..5bb6a619afeebe2a10089c74ce51621a970368b4 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 6b0cb9aaeba56edf3a282d34163309dff672c5dd..0395b27418645e91f9184157fd641f9916d6323c 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 8828eddd3f72bc7c704c994bd20a15065ccd7af7..c172f5d0e05a20d54d2fb68c346833eb14645825 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -28,7 +28,6 @@
 #include "icmp_socket.h"
 #include "translation-table.h"
 #include "originator.h"
-#include "types.h"
 #include "ring_buffer.h"
 #include "vis.h"
 #include "aggregation.h"
 #include "gateway_client.h"
 #include "unicast.h"
 
-void slide_own_bcast_window(struct batman_if *batman_if)
+void slide_own_bcast_window(struct hard_iface *hard_iface)
 {
-       struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+       struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct hashtable_t *hash = bat_priv->orig_hash;
-       struct hlist_node *walk;
+       struct hlist_node *node;
        struct hlist_head *head;
-       struct element_t *bucket;
        struct orig_node *orig_node;
        unsigned long *word;
        int i;
        size_t word_index;
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       orig_node = bucket->data;
-                       word_index = batman_if->if_num * NUM_WORDS;
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+                       spin_lock_bh(&orig_node->ogm_cnt_lock);
+                       word_index = hard_iface->if_num * NUM_WORDS;
                        word = &(orig_node->bcast_own[word_index]);
 
                        bit_get_packet(bat_priv, word, 1, 0);
-                       orig_node->bcast_own_sum[batman_if->if_num] =
+                       orig_node->bcast_own_sum[hard_iface->if_num] =
                                bit_packet_count(word);
+                       spin_unlock_bh(&orig_node->ogm_cnt_lock);
                }
+               rcu_read_unlock();
        }
-
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
 }
 
 static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
@@ -90,6 +87,8 @@ static void update_route(struct bat_priv *bat_priv,
                         struct neigh_node *neigh_node,
                         unsigned char *hna_buff, int hna_buff_len)
 {
+       struct neigh_node *neigh_node_tmp;
+
        /* route deleted */
        if ((orig_node->router) && (!neigh_node)) {
 
@@ -116,7 +115,12 @@ static void update_route(struct bat_priv *bat_priv,
                        orig_node->router->addr);
        }
 
+       if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
+               neigh_node = NULL;
+       neigh_node_tmp = orig_node->router;
        orig_node->router = neigh_node;
+       if (neigh_node_tmp)
+               neigh_node_free_ref(neigh_node_tmp);
 }
 
 
@@ -139,73 +143,93 @@ void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
 static int is_bidirectional_neigh(struct orig_node *orig_node,
                                struct orig_node *orig_neigh_node,
                                struct batman_packet *batman_packet,
-                               struct batman_if *if_incoming)
+                               struct hard_iface *if_incoming)
 {
        struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
-       struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
+       struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
+       struct hlist_node *node;
        unsigned char total_count;
+       uint8_t orig_eq_count, neigh_rq_count, tq_own;
+       int tq_asym_penalty, ret = 0;
 
        if (orig_node == orig_neigh_node) {
-               list_for_each_entry(tmp_neigh_node,
-                                   &orig_node->neigh_list,
-                                   list) {
-
-                       if (compare_orig(tmp_neigh_node->addr,
-                                        orig_neigh_node->orig) &&
-                           (tmp_neigh_node->if_incoming == if_incoming))
-                               neigh_node = tmp_neigh_node;
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(tmp_neigh_node, node,
+                                        &orig_node->neigh_list, list) {
+
+                       if (!compare_eth(tmp_neigh_node->addr,
+                                        orig_neigh_node->orig))
+                               continue;
+
+                       if (tmp_neigh_node->if_incoming != if_incoming)
+                               continue;
+
+                       if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
+                               continue;
+
+                       neigh_node = tmp_neigh_node;
                }
+               rcu_read_unlock();
 
                if (!neigh_node)
                        neigh_node = create_neighbor(orig_node,
                                                     orig_neigh_node,
                                                     orig_neigh_node->orig,
                                                     if_incoming);
-               /* create_neighbor failed, return 0 */
                if (!neigh_node)
-                       return 0;
+                       goto out;
 
                neigh_node->last_valid = jiffies;
        } else {
                /* find packet count of corresponding one hop neighbor */
-               list_for_each_entry(tmp_neigh_node,
-                                   &orig_neigh_node->neigh_list, list) {
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(tmp_neigh_node, node,
+                                        &orig_neigh_node->neigh_list, list) {
 
-                       if (compare_orig(tmp_neigh_node->addr,
-                                        orig_neigh_node->orig) &&
-                           (tmp_neigh_node->if_incoming == if_incoming))
-                               neigh_node = tmp_neigh_node;
+                       if (!compare_eth(tmp_neigh_node->addr,
+                                        orig_neigh_node->orig))
+                               continue;
+
+                       if (tmp_neigh_node->if_incoming != if_incoming)
+                               continue;
+
+                       if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
+                               continue;
+
+                       neigh_node = tmp_neigh_node;
                }
+               rcu_read_unlock();
 
                if (!neigh_node)
                        neigh_node = create_neighbor(orig_neigh_node,
                                                     orig_neigh_node,
                                                     orig_neigh_node->orig,
                                                     if_incoming);
-               /* create_neighbor failed, return 0 */
                if (!neigh_node)
-                       return 0;
+                       goto out;
        }
 
        orig_node->last_valid = jiffies;
 
+       spin_lock_bh(&orig_node->ogm_cnt_lock);
+       orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
+       neigh_rq_count = neigh_node->real_packet_count;
+       spin_unlock_bh(&orig_node->ogm_cnt_lock);
+
        /* make sure we don't get a value bigger than 100 % */
-       total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] >
-                      neigh_node->real_packet_count ?
-                      neigh_node->real_packet_count :
-                      orig_neigh_node->bcast_own_sum[if_incoming->if_num]);
+       total_count = (orig_eq_count > neigh_rq_count ?
+                      neigh_rq_count : orig_eq_count);
 
        /* if we have too few packets (too little data) we set tq_own to zero */
        /* if we receive too few packets it is not considered bidirectional */
        if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
-           (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
-               orig_neigh_node->tq_own = 0;
+           (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
+               tq_own = 0;
        else
                /* neigh_node->real_packet_count is never zero as we
                 * only purge old information when getting new
                 * information */
-               orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) /
-                       neigh_node->real_packet_count;
+               tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;
 
        /*
         * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
@@ -213,20 +237,16 @@ static int is_bidirectional_neigh(struct orig_node *orig_node,
         * punishes asymmetric links more.  This will give a value
         * between 0 and TQ_MAX_VALUE
         */
-       orig_neigh_node->tq_asym_penalty =
-               TQ_MAX_VALUE -
-               (TQ_MAX_VALUE *
-                (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
-                (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
-                (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) /
-               (TQ_LOCAL_WINDOW_SIZE *
-                TQ_LOCAL_WINDOW_SIZE *
-                TQ_LOCAL_WINDOW_SIZE);
-
-       batman_packet->tq = ((batman_packet->tq *
-                             orig_neigh_node->tq_own *
-                             orig_neigh_node->tq_asym_penalty) /
-                            (TQ_MAX_VALUE * TQ_MAX_VALUE));
+       tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
+                               (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
+                               (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
+                               (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
+                                       (TQ_LOCAL_WINDOW_SIZE *
+                                        TQ_LOCAL_WINDOW_SIZE *
+                                        TQ_LOCAL_WINDOW_SIZE);
+
+       batman_packet->tq = ((batman_packet->tq * tq_own * tq_asym_penalty) /
+                                               (TQ_MAX_VALUE * TQ_MAX_VALUE));
 
        bat_dbg(DBG_BATMAN, bat_priv,
                "bidirectional: "
@@ -234,34 +254,141 @@ static int is_bidirectional_neigh(struct orig_node *orig_node,
                "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
                "total tq: %3i\n",
                orig_node->orig, orig_neigh_node->orig, total_count,
-               neigh_node->real_packet_count, orig_neigh_node->tq_own,
-               orig_neigh_node->tq_asym_penalty, batman_packet->tq);
+               neigh_rq_count, tq_own, tq_asym_penalty, batman_packet->tq);
 
        /* if link has the minimum required transmission quality
         * consider it bidirectional */
        if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
-               return 1;
+               ret = 1;
 
-       return 0;
+out:
+       if (neigh_node)
+               neigh_node_free_ref(neigh_node);
+       return ret;
+}
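
For readers tracing the TQ arithmetic in this hunk, here is a minimal standalone sketch (plain userspace C) of how tq_own, tq_asym_penalty and the incoming tq are combined. The constant values are assumptions based on the usual batman-adv defaults (TQ_MAX_VALUE = 255, TQ_LOCAL_WINDOW_SIZE = 64, bidirectional minimums of 1); check the driver's headers for the authoritative numbers.

#include <stdio.h>

/* Assumed constants; the driver's packet.h/main.h are authoritative. */
#define TQ_MAX_VALUE                    255
#define TQ_LOCAL_WINDOW_SIZE            64
#define TQ_LOCAL_BIDRECT_SEND_MINIMUM   1
#define TQ_LOCAL_BIDRECT_RECV_MINIMUM   1

/* Combine a received tq with the locally measured link quality,
 * mirroring the arithmetic in is_bidirectional_neigh() above. */
static int combine_tq(int packet_tq, int orig_eq_count, int neigh_rq_count)
{
        int total_count, tq_own, tq_asym_penalty;

        /* never use more own-broadcast echoes than packets received */
        total_count = orig_eq_count > neigh_rq_count ?
                      neigh_rq_count : orig_eq_count;

        if (total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM ||
            neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM)
                tq_own = 0;
        else
                tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;

        /* 1 - ((1 - x)^3), scaled to TQ_MAX_VALUE */
        tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
                        (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
                        (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
                        (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
                        (TQ_LOCAL_WINDOW_SIZE * TQ_LOCAL_WINDOW_SIZE *
                         TQ_LOCAL_WINDOW_SIZE);

        return (packet_tq * tq_own * tq_asym_penalty) /
               (TQ_MAX_VALUE * TQ_MAX_VALUE);
}

int main(void)
{
        /* e.g. 48 of our own OGMs echoed back, 52 OGMs heard from the
         * neighbor, incoming tq of 255 */
        printf("combined tq: %d\n", combine_tq(255, 48, 52));
        return 0;
}

With the example counts this prints a combined tq of 234; the cubic asymmetry term only starts to bite once the neighbor's receive count drops well below the window size.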
+
+/* caller must hold the neigh_list_lock */
+void bonding_candidate_del(struct orig_node *orig_node,
+                          struct neigh_node *neigh_node)
+{
+       /* this neighbor is not part of our candidate list */
+       if (list_empty(&neigh_node->bonding_list))
+               goto out;
+
+       list_del_rcu(&neigh_node->bonding_list);
+       INIT_LIST_HEAD(&neigh_node->bonding_list);
+       neigh_node_free_ref(neigh_node);
+       atomic_dec(&orig_node->bond_candidates);
+
+out:
+       return;
+}
+
+static void bonding_candidate_add(struct orig_node *orig_node,
+                                 struct neigh_node *neigh_node)
+{
+       struct hlist_node *node;
+       struct neigh_node *tmp_neigh_node;
+       uint8_t best_tq, interference_candidate = 0;
+
+       spin_lock_bh(&orig_node->neigh_list_lock);
+
+       /* only consider if it has the same primary address ...  */
+       if (!compare_eth(orig_node->orig,
+                        neigh_node->orig_node->primary_addr))
+               goto candidate_del;
+
+       if (!orig_node->router)
+               goto candidate_del;
+
+       best_tq = orig_node->router->tq_avg;
+
+       /* ... and is good enough to be considered */
+       if (neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
+               goto candidate_del;
+
+       /**
+        * check if we have another candidate with the same mac address or
+        * interface. If we do, we won't select this candidate because of
+        * possible interference.
+        */
+       hlist_for_each_entry_rcu(tmp_neigh_node, node,
+                                &orig_node->neigh_list, list) {
+
+               if (tmp_neigh_node == neigh_node)
+                       continue;
+
+               /* we only care if the other candidate is even
+                * considered as a candidate. */
+               if (list_empty(&tmp_neigh_node->bonding_list))
+                       continue;
+
+               if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
+                   (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
+                       interference_candidate = 1;
+                       break;
+               }
+       }
+
+       /* don't care further if it is an interference candidate */
+       if (interference_candidate)
+               goto candidate_del;
+
+       /* this neighbor already is part of our candidate list */
+       if (!list_empty(&neigh_node->bonding_list))
+               goto out;
+
+       if (!atomic_inc_not_zero(&neigh_node->refcount))
+               goto out;
+
+       list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
+       atomic_inc(&orig_node->bond_candidates);
+       goto out;
+
+candidate_del:
+       bonding_candidate_del(orig_node, neigh_node);
+
+out:
+       spin_unlock_bh(&orig_node->neigh_list_lock);
+       return;
+}
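
Stripped of the RCU list walk and the locking, the interference test above says: a neighbor may only join the bonding candidate list if no already listed candidate shares its MAC address or its incoming interface. A rough userspace sketch, with a hypothetical cand struct and an array standing in for the neigh_list:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Hypothetical stand-in for a neighbor entry; not the driver's struct. */
struct cand {
        unsigned char addr[ETH_ALEN];
        int if_index;                   /* stands in for if_incoming */
};

/* True if new_cand shares a MAC address or an incoming interface with
 * any already selected candidate -- the same test the loop in
 * bonding_candidate_add() performs. */
static bool is_interference_candidate(const struct cand *new_cand,
                                      const struct cand *selected, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (selected[i].if_index == new_cand->if_index ||
                    memcmp(selected[i].addr, new_cand->addr, ETH_ALEN) == 0)
                        return true;
        }
        return false;
}

int main(void)
{
        struct cand selected[] = { { { 0xaa, 0xbb, 0xcc, 0, 0, 1 }, 2 } };
        struct cand new_cand   = { { 0xaa, 0xbb, 0xcc, 0, 0, 2 }, 2 };

        /* same incoming interface -> interference, prints 1 */
        printf("%d\n", is_interference_candidate(&new_cand, selected, 1));
        return 0;
}

Rejecting such candidates avoids "bonding" two links that would in practice contend for the same medium.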
+
+/* copy primary address for bonding */
+static void bonding_save_primary(struct orig_node *orig_node,
+                                struct orig_node *orig_neigh_node,
+                                struct batman_packet *batman_packet)
+{
+       if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
+               return;
+
+       memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
 }
 
 static void update_orig(struct bat_priv *bat_priv,
                        struct orig_node *orig_node,
                        struct ethhdr *ethhdr,
                        struct batman_packet *batman_packet,
-                       struct batman_if *if_incoming,
+                       struct hard_iface *if_incoming,
                        unsigned char *hna_buff, int hna_buff_len,
                        char is_duplicate)
 {
        struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
+       struct orig_node *orig_node_tmp;
+       struct hlist_node *node;
        int tmp_hna_buff_len;
+       uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
 
        bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
                "Searching and updating originator entry of received packet\n");
 
-       list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
-               if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
-                   (tmp_neigh_node->if_incoming == if_incoming)) {
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(tmp_neigh_node, node,
+                                &orig_node->neigh_list, list) {
+               if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
+                   (tmp_neigh_node->if_incoming == if_incoming) &&
+                    atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
+                       if (neigh_node)
+                               neigh_node_free_ref(neigh_node);
                        neigh_node = tmp_neigh_node;
                        continue;
                }
@@ -280,16 +407,20 @@ static void update_orig(struct bat_priv *bat_priv,
 
                orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
                if (!orig_tmp)
-                       return;
+                       goto unlock;
 
                neigh_node = create_neighbor(orig_node, orig_tmp,
                                             ethhdr->h_source, if_incoming);
+
+               orig_node_free_ref(orig_tmp);
                if (!neigh_node)
-                       return;
+                       goto unlock;
        } else
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Updating existing last-hop neighbor of originator\n");
 
+       rcu_read_unlock();
+
        orig_node->flags = batman_packet->flags;
        neigh_node->last_valid = jiffies;
 
@@ -303,6 +434,8 @@ static void update_orig(struct bat_priv *bat_priv,
                neigh_node->last_ttl = batman_packet->ttl;
        }
 
+       bonding_candidate_add(orig_node, neigh_node);
+
        tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
                            batman_packet->num_hna * ETH_ALEN : hna_buff_len);
 
@@ -319,10 +452,22 @@ static void update_orig(struct bat_priv *bat_priv,
        /* if the TQ is the same and the link is not more symmetric we
         * won't consider it either */
        if ((orig_node->router) &&
-            ((neigh_node->tq_avg == orig_node->router->tq_avg) &&
-            (orig_node->router->orig_node->bcast_own_sum[if_incoming->if_num]
-             >= neigh_node->orig_node->bcast_own_sum[if_incoming->if_num])))
-               goto update_hna;
+            (neigh_node->tq_avg == orig_node->router->tq_avg)) {
+               orig_node_tmp = orig_node->router->orig_node;
+               spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
+               bcast_own_sum_orig =
+                       orig_node_tmp->bcast_own_sum[if_incoming->if_num];
+               spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
+
+               orig_node_tmp = neigh_node->orig_node;
+               spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
+               bcast_own_sum_neigh =
+                       orig_node_tmp->bcast_own_sum[if_incoming->if_num];
+               spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
+
+               if (bcast_own_sum_orig >= bcast_own_sum_neigh)
+                       goto update_hna;
+       }
 
        update_routes(bat_priv, orig_node, neigh_node,
                      hna_buff, tmp_hna_buff_len);
@@ -343,6 +488,14 @@ update_gw:
            (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
            (atomic_read(&bat_priv->gw_sel_class) > 2))
                gw_check_election(bat_priv, orig_node);
+
+       goto out;
+
+unlock:
+       rcu_read_unlock();
+out:
+       if (neigh_node)
+               neigh_node_free_ref(neigh_node);
 }
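
The pattern update_orig() and the other hunks in this file now follow -- look an object up under rcu_read_lock(), keep it only if atomic_inc_not_zero() on its refcount succeeds, and drop the reference on every exit path -- is what lets the old coarse orig_hash_lock around these walks go away. A userspace analogue using C11 atomics (not the kernel API) looks roughly like this:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if the object is not already on its way to
 * being freed (refcount already dropped to zero) -- the userspace
 * equivalent of the atomic_inc_not_zero() calls in the hunks above. */
static bool ref_get_unless_zero(atomic_int *refcount)
{
        int old = atomic_load(refcount);

        while (old != 0) {
                if (atomic_compare_exchange_weak(refcount, &old, old + 1))
                        return true;
                /* 'old' was reloaded by the failed CAS; retry */
        }
        return false;
}

/* Drop a reference; the last holder would free the object here. */
static bool ref_put(atomic_int *refcount)
{
        return atomic_fetch_sub(refcount, 1) == 1;
}

int main(void)
{
        atomic_int refcount = 1;

        if (ref_get_unless_zero(&refcount))
                printf("got reference, count now %d\n",
                       atomic_load(&refcount));

        ref_put(&refcount);             /* our temporary reference */
        if (ref_put(&refcount))         /* original reference -> last put */
                printf("last reference dropped, object may be freed\n");
        return 0;
}

The "unless zero" part matters: a refcount that has already hit zero means the object is being torn down, so bumping it back to one would hand out a pointer to memory that is about to be freed.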
 
 /* checks whether the host restarted and is in the protection time.
@@ -380,34 +533,38 @@ static int window_protected(struct bat_priv *bat_priv,
  */
 static char count_real_packets(struct ethhdr *ethhdr,
                               struct batman_packet *batman_packet,
-                              struct batman_if *if_incoming)
+                              struct hard_iface *if_incoming)
 {
        struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
        struct orig_node *orig_node;
        struct neigh_node *tmp_neigh_node;
+       struct hlist_node *node;
        char is_duplicate = 0;
        int32_t seq_diff;
        int need_update = 0;
-       int set_mark;
+       int set_mark, ret = -1;
 
        orig_node = get_orig_node(bat_priv, batman_packet->orig);
        if (!orig_node)
                return 0;
 
+       spin_lock_bh(&orig_node->ogm_cnt_lock);
        seq_diff = batman_packet->seqno - orig_node->last_real_seqno;
 
        /* signalize caller that the packet is to be dropped. */
        if (window_protected(bat_priv, seq_diff,
                             &orig_node->batman_seqno_reset))
-               return -1;
+               goto out;
 
-       list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(tmp_neigh_node, node,
+                                &orig_node->neigh_list, list) {
 
                is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
                                               orig_node->last_real_seqno,
                                               batman_packet->seqno);
 
-               if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
+               if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
                    (tmp_neigh_node->if_incoming == if_incoming))
                        set_mark = 1;
                else
@@ -421,6 +578,7 @@ static char count_real_packets(struct ethhdr *ethhdr,
                tmp_neigh_node->real_packet_count =
                        bit_packet_count(tmp_neigh_node->real_bits);
        }
+       rcu_read_unlock();
 
        if (need_update) {
                bat_dbg(DBG_BATMAN, bat_priv,
@@ -429,123 +587,21 @@ static char count_real_packets(struct ethhdr *ethhdr,
                orig_node->last_real_seqno = batman_packet->seqno;
        }
 
-       return is_duplicate;
-}
-
-/* copy primary address for bonding */
-static void mark_bonding_address(struct bat_priv *bat_priv,
-                                struct orig_node *orig_node,
-                                struct orig_node *orig_neigh_node,
-                                struct batman_packet *batman_packet)
+       ret = is_duplicate;
 
-{
-       if (batman_packet->flags & PRIMARIES_FIRST_HOP)
-               memcpy(orig_neigh_node->primary_addr,
-                      orig_node->orig, ETH_ALEN);
-
-       return;
-}
-
-/* mark possible bond.candidates in the neighbor list */
-void update_bonding_candidates(struct bat_priv *bat_priv,
-                              struct orig_node *orig_node)
-{
-       int candidates;
-       int interference_candidate;
-       int best_tq;
-       struct neigh_node *tmp_neigh_node, *tmp_neigh_node2;
-       struct neigh_node *first_candidate, *last_candidate;
-
-       /* update the candidates for this originator */
-       if (!orig_node->router) {
-               orig_node->bond.candidates = 0;
-               return;
-       }
-
-       best_tq = orig_node->router->tq_avg;
-
-       /* update bond.candidates */
-
-       candidates = 0;
-
-       /* mark other nodes which also received "PRIMARIES FIRST HOP" packets
-        * as "bonding partner" */
-
-       /* first, zero the list */
-       list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
-               tmp_neigh_node->next_bond_candidate = NULL;
-       }
-
-       first_candidate = NULL;
-       last_candidate = NULL;
-       list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
-
-               /* only consider if it has the same primary address ...  */
-               if (memcmp(orig_node->orig,
-                               tmp_neigh_node->orig_node->primary_addr,
-                               ETH_ALEN) != 0)
-                       continue;
-
-               /* ... and is good enough to be considered */
-               if (tmp_neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
-                       continue;
-
-               /* check if we have another candidate with the same
-                * mac address or interface. If we do, we won't
-                * select this candidate because of possible interference. */
-
-               interference_candidate = 0;
-               list_for_each_entry(tmp_neigh_node2,
-                               &orig_node->neigh_list, list) {
-
-                       if (tmp_neigh_node2 == tmp_neigh_node)
-                               continue;
-
-                       /* we only care if the other candidate is even
-                        * considered as candidate. */
-                       if (!tmp_neigh_node2->next_bond_candidate)
-                               continue;
-
-
-                       if ((tmp_neigh_node->if_incoming ==
-                               tmp_neigh_node2->if_incoming)
-                               || (memcmp(tmp_neigh_node->addr,
-                               tmp_neigh_node2->addr, ETH_ALEN) == 0)) {
-
-                               interference_candidate = 1;
-                               break;
-                       }
-               }
-               /* don't care further if it is an interference candidate */
-               if (interference_candidate)
-                       continue;
-
-               if (!first_candidate) {
-                       first_candidate = tmp_neigh_node;
-                       tmp_neigh_node->next_bond_candidate = first_candidate;
-               } else
-                       tmp_neigh_node->next_bond_candidate = last_candidate;
-
-               last_candidate = tmp_neigh_node;
-
-               candidates++;
-       }
-
-       if (candidates > 0) {
-               first_candidate->next_bond_candidate = last_candidate;
-               orig_node->bond.selected = first_candidate;
-       }
-
-       orig_node->bond.candidates = candidates;
+out:
+       spin_unlock_bh(&orig_node->ogm_cnt_lock);
+       orig_node_free_ref(orig_node);
+       return ret;
 }
 
 void receive_bat_packet(struct ethhdr *ethhdr,
-                               struct batman_packet *batman_packet,
-                               unsigned char *hna_buff, int hna_buff_len,
-                               struct batman_if *if_incoming)
+                       struct batman_packet *batman_packet,
+                       unsigned char *hna_buff, int hna_buff_len,
+                       struct hard_iface *if_incoming)
 {
        struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
        struct orig_node *orig_neigh_node, *orig_node;
        char has_directlink_flag;
        char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
@@ -573,8 +629,8 @@ void receive_bat_packet(struct ethhdr *ethhdr,
 
        has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
 
-       is_single_hop_neigh = (compare_orig(ethhdr->h_source,
-                                           batman_packet->orig) ? 1 : 0);
+       is_single_hop_neigh = (compare_eth(ethhdr->h_source,
+                                          batman_packet->orig) ? 1 : 0);
 
        bat_dbg(DBG_BATMAN, bat_priv,
                "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
@@ -587,26 +643,26 @@ void receive_bat_packet(struct ethhdr *ethhdr,
                has_directlink_flag);
 
        rcu_read_lock();
-       list_for_each_entry_rcu(batman_if, &if_list, list) {
-               if (batman_if->if_status != IF_ACTIVE)
+       list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+               if (hard_iface->if_status != IF_ACTIVE)
                        continue;
 
-               if (batman_if->soft_iface != if_incoming->soft_iface)
+               if (hard_iface->soft_iface != if_incoming->soft_iface)
                        continue;
 
-               if (compare_orig(ethhdr->h_source,
-                                batman_if->net_dev->dev_addr))
+               if (compare_eth(ethhdr->h_source,
+                               hard_iface->net_dev->dev_addr))
                        is_my_addr = 1;
 
-               if (compare_orig(batman_packet->orig,
-                                batman_if->net_dev->dev_addr))
+               if (compare_eth(batman_packet->orig,
+                               hard_iface->net_dev->dev_addr))
                        is_my_orig = 1;
 
-               if (compare_orig(batman_packet->prev_sender,
-                                batman_if->net_dev->dev_addr))
+               if (compare_eth(batman_packet->prev_sender,
+                               hard_iface->net_dev->dev_addr))
                        is_my_oldorig = 1;
 
-               if (compare_orig(ethhdr->h_source, broadcast_addr))
+               if (compare_eth(ethhdr->h_source, broadcast_addr))
                        is_broadcast = 1;
        }
        rcu_read_unlock();
@@ -638,7 +694,6 @@ void receive_bat_packet(struct ethhdr *ethhdr,
                int offset;
 
                orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
-
                if (!orig_neigh_node)
                        return;
 
@@ -647,18 +702,22 @@ void receive_bat_packet(struct ethhdr *ethhdr,
                /* if received seqno equals last send seqno save new
                 * seqno for bidirectional check */
                if (has_directlink_flag &&
-                   compare_orig(if_incoming->net_dev->dev_addr,
-                                batman_packet->orig) &&
+                   compare_eth(if_incoming->net_dev->dev_addr,
+                               batman_packet->orig) &&
                    (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
                        offset = if_incoming->if_num * NUM_WORDS;
+
+                       spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
                        word = &(orig_neigh_node->bcast_own[offset]);
                        bit_mark(word, 0);
                        orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
                                bit_packet_count(word);
+                       spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
                }
 
                bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
                        "originator packet from myself (via neighbor)\n");
+               orig_node_free_ref(orig_neigh_node);
                return;
        }
 
@@ -679,27 +738,27 @@ void receive_bat_packet(struct ethhdr *ethhdr,
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: packet within seqno protection time "
                        "(sender: %pM)\n", ethhdr->h_source);
-               return;
+               goto out;
        }
 
        if (batman_packet->tq == 0) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: originator packet with tq equal 0\n");
-               return;
+               goto out;
        }
 
        /* avoid temporary routing loops */
        if ((orig_node->router) &&
            (orig_node->router->orig_node->router) &&
-           (compare_orig(orig_node->router->addr,
-                         batman_packet->prev_sender)) &&
-           !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) &&
-           (compare_orig(orig_node->router->addr,
-                         orig_node->router->orig_node->router->addr))) {
+           (compare_eth(orig_node->router->addr,
+                        batman_packet->prev_sender)) &&
+           !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
+           (compare_eth(orig_node->router->addr,
+                        orig_node->router->orig_node->router->addr))) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: ignoring all rebroadcast packets that "
                        "may make me loop (sender: %pM)\n", ethhdr->h_source);
-               return;
+               goto out;
        }
 
        /* if sender is a direct neighbor the sender mac equals
@@ -708,19 +767,21 @@ void receive_bat_packet(struct ethhdr *ethhdr,
                           orig_node :
                           get_orig_node(bat_priv, ethhdr->h_source));
        if (!orig_neigh_node)
-               return;
+               goto out;
 
        /* drop packet if sender is not a direct neighbor and if we
         * don't route towards it */
        if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: OGM via unknown neighbor!\n");
-               return;
+               goto out_neigh;
        }
 
        is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
                                                batman_packet, if_incoming);
 
+       bonding_save_primary(orig_node, orig_neigh_node, batman_packet);
+
        /* update ranking if it is not a duplicate or has the same
         * seqno and similar ttl as the non-duplicate */
        if (is_bidirectional &&
@@ -730,10 +791,6 @@ void receive_bat_packet(struct ethhdr *ethhdr,
                update_orig(bat_priv, orig_node, ethhdr, batman_packet,
                            if_incoming, hna_buff, hna_buff_len, is_duplicate);
 
-       mark_bonding_address(bat_priv, orig_node,
-                            orig_neigh_node, batman_packet);
-       update_bonding_candidates(bat_priv, orig_node);
-
        /* is single hop (direct) neighbor */
        if (is_single_hop_neigh) {
 
@@ -743,31 +800,36 @@ void receive_bat_packet(struct ethhdr *ethhdr,
 
                bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
                        "rebroadcast neighbor packet with direct link flag\n");
-               return;
+               goto out_neigh;
        }
 
        /* multihop originator */
        if (!is_bidirectional) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: not received via bidirectional link\n");
-               return;
+               goto out_neigh;
        }
 
        if (is_duplicate) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: duplicate packet received\n");
-               return;
+               goto out_neigh;
        }
 
        bat_dbg(DBG_BATMAN, bat_priv,
                "Forwarding packet: rebroadcast originator packet\n");
        schedule_forward_packet(orig_node, ethhdr, batman_packet,
                                0, hna_buff_len, if_incoming);
+
+out_neigh:
+       if ((orig_neigh_node) && (!is_single_hop_neigh))
+               orig_node_free_ref(orig_neigh_node);
+out:
+       orig_node_free_ref(orig_node);
 }
 
-int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
+int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
 {
-       struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
        struct ethhdr *ethhdr;
 
        /* drop packet if it doesn't have the necessary minimum size */
@@ -794,12 +856,10 @@ int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
 
        ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
        receive_aggr_bat_packet(ethhdr,
                                skb->data,
                                skb_headlen(skb),
-                               batman_if);
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+                               hard_iface);
 
        kfree_skb(skb);
        return NET_RX_SUCCESS;
@@ -808,135 +868,144 @@ int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
 static int recv_my_icmp_packet(struct bat_priv *bat_priv,
                               struct sk_buff *skb, size_t icmp_len)
 {
-       struct orig_node *orig_node;
+       struct orig_node *orig_node = NULL;
+       struct neigh_node *neigh_node = NULL;
        struct icmp_packet_rr *icmp_packet;
-       struct ethhdr *ethhdr;
-       struct batman_if *batman_if;
-       int ret;
-       uint8_t dstaddr[ETH_ALEN];
+       int ret = NET_RX_DROP;
 
        icmp_packet = (struct icmp_packet_rr *)skb->data;
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
        /* add data to device queue */
        if (icmp_packet->msg_type != ECHO_REQUEST) {
                bat_socket_receive_packet(icmp_packet, icmp_len);
-               return NET_RX_DROP;
+               goto out;
        }
 
        if (!bat_priv->primary_if)
-               return NET_RX_DROP;
+               goto out;
 
        /* answer echo request (ping) */
        /* get routing information */
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-       orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
-                                                  compare_orig, choose_orig,
-                                                  icmp_packet->orig));
-       ret = NET_RX_DROP;
-
-       if ((orig_node) && (orig_node->router)) {
-
-               /* don't lock while sending the packets ... we therefore
-                * copy the required data before sending */
-               batman_if = orig_node->router->if_incoming;
-               memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-               spin_unlock_bh(&bat_priv->orig_hash_lock);
-
-               /* create a copy of the skb, if needed, to modify it. */
-               if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
-                       return NET_RX_DROP;
+       rcu_read_lock();
+       orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
 
-               icmp_packet = (struct icmp_packet_rr *)skb->data;
-               ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       if (!orig_node)
+               goto unlock;
 
-               memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
-               memcpy(icmp_packet->orig,
-                      bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
-               icmp_packet->msg_type = ECHO_REPLY;
-               icmp_packet->ttl = TTL;
+       neigh_node = orig_node->router;
 
-               send_skb_packet(skb, batman_if, dstaddr);
-               ret = NET_RX_SUCCESS;
+       if (!neigh_node)
+               goto unlock;
 
-       } else
-               spin_unlock_bh(&bat_priv->orig_hash_lock);
+       if (!atomic_inc_not_zero(&neigh_node->refcount)) {
+               neigh_node = NULL;
+               goto unlock;
+       }
+
+       rcu_read_unlock();
+
+       /* create a copy of the skb, if needed, to modify it. */
+       if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+               goto out;
+
+       icmp_packet = (struct icmp_packet_rr *)skb->data;
+
+       memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
+       memcpy(icmp_packet->orig,
+               bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+       icmp_packet->msg_type = ECHO_REPLY;
+       icmp_packet->ttl = TTL;
 
+       send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+       ret = NET_RX_SUCCESS;
+       goto out;
+
+unlock:
+       rcu_read_unlock();
+out:
+       if (neigh_node)
+               neigh_node_free_ref(neigh_node);
+       if (orig_node)
+               orig_node_free_ref(orig_node);
        return ret;
 }
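
The echo-reply path above only rewrites the header in place before sending the skb back out via the chosen neighbor: the destination becomes the original sender, the origin becomes our primary MAC, the message type flips to ECHO_REPLY and the TTL is reset. A compact sketch with a stand-in header struct (field layout, type numbering and the TTL value of 50 are assumptions, not the on-wire format):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define TTL      50                             /* assumed default */

enum { ECHO_REPLY = 0, ECHO_REQUEST = 8 };      /* assumed numbering */

struct icmp_hdr {                       /* stand-in for struct icmp_packet */
        unsigned char dst[ETH_ALEN];
        unsigned char orig[ETH_ALEN];
        unsigned char msg_type;
        unsigned char ttl;
};

static void turn_into_echo_reply(struct icmp_hdr *p,
                                 const unsigned char *my_addr)
{
        memcpy(p->dst, p->orig, ETH_ALEN);      /* reply goes back to sender */
        memcpy(p->orig, my_addr, ETH_ALEN);     /* we are now the originator */
        p->msg_type = ECHO_REPLY;
        p->ttl = TTL;
}

int main(void)
{
        unsigned char me[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };
        struct icmp_hdr p = { { 0x02, 0, 0, 0, 0, 0x01 },
                              { 0x02, 0, 0, 0, 0, 0x02 },
                              ECHO_REQUEST, 1 };

        turn_into_echo_reply(&p, me);
        printf("msg_type=%d ttl=%d dst ends in %02x\n",
               p.msg_type, p.ttl, p.dst[5]);
        return 0;
}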
 
 static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
-                                 struct sk_buff *skb, size_t icmp_len)
+                                 struct sk_buff *skb)
 {
-       struct orig_node *orig_node;
+       struct orig_node *orig_node = NULL;
+       struct neigh_node *neigh_node = NULL;
        struct icmp_packet *icmp_packet;
-       struct ethhdr *ethhdr;
-       struct batman_if *batman_if;
-       int ret;
-       uint8_t dstaddr[ETH_ALEN];
+       int ret = NET_RX_DROP;
 
        icmp_packet = (struct icmp_packet *)skb->data;
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
        /* send TTL exceeded if packet is an echo request (traceroute) */
        if (icmp_packet->msg_type != ECHO_REQUEST) {
                pr_debug("Warning - can't forward icmp packet from %pM to "
                         "%pM: ttl exceeded\n", icmp_packet->orig,
                         icmp_packet->dst);
-               return NET_RX_DROP;
+               goto out;
        }
 
        if (!bat_priv->primary_if)
-               return NET_RX_DROP;
+               goto out;
 
        /* get routing information */
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-       orig_node = ((struct orig_node *)
-                    hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
-                              icmp_packet->orig));
-       ret = NET_RX_DROP;
-
-       if ((orig_node) && (orig_node->router)) {
-
-               /* don't lock while sending the packets ... we therefore
-                * copy the required data before sending */
-               batman_if = orig_node->router->if_incoming;
-               memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-               spin_unlock_bh(&bat_priv->orig_hash_lock);
-
-               /* create a copy of the skb, if needed, to modify it. */
-               if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
-                       return NET_RX_DROP;
+       rcu_read_lock();
+       orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
 
-               icmp_packet = (struct icmp_packet *) skb->data;
-               ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       if (!orig_node)
+               goto unlock;
 
-               memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
-               memcpy(icmp_packet->orig,
-                      bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
-               icmp_packet->msg_type = TTL_EXCEEDED;
-               icmp_packet->ttl = TTL;
+       neigh_node = orig_node->router;
 
-               send_skb_packet(skb, batman_if, dstaddr);
-               ret = NET_RX_SUCCESS;
+       if (!neigh_node)
+               goto unlock;
 
-       } else
-               spin_unlock_bh(&bat_priv->orig_hash_lock);
+       if (!atomic_inc_not_zero(&neigh_node->refcount)) {
+               neigh_node = NULL;
+               goto unlock;
+       }
 
+       rcu_read_unlock();
+
+       /* create a copy of the skb, if needed, to modify it. */
+       if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+               goto out;
+
+       icmp_packet = (struct icmp_packet *)skb->data;
+
+       memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
+       memcpy(icmp_packet->orig,
+               bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+       icmp_packet->msg_type = TTL_EXCEEDED;
+       icmp_packet->ttl = TTL;
+
+       send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+       ret = NET_RX_SUCCESS;
+       goto out;
+
+unlock:
+       rcu_read_unlock();
+out:
+       if (neigh_node)
+               neigh_node_free_ref(neigh_node);
+       if (orig_node)
+               orig_node_free_ref(orig_node);
        return ret;
 }
 
 
-int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
+int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        struct icmp_packet_rr *icmp_packet;
        struct ethhdr *ethhdr;
-       struct orig_node *orig_node;
-       struct batman_if *batman_if;
+       struct orig_node *orig_node = NULL;
+       struct neigh_node *neigh_node = NULL;
        int hdr_size = sizeof(struct icmp_packet);
-       int ret;
-       uint8_t dstaddr[ETH_ALEN];
+       int ret = NET_RX_DROP;
 
        /**
         * we truncate all incoming icmp packets if they don't match our size
@@ -946,21 +1015,21 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
 
        /* drop packet if it doesn't have the necessary minimum size */
        if (unlikely(!pskb_may_pull(skb, hdr_size)))
-               return NET_RX_DROP;
+               goto out;
 
        ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
        /* packet with unicast indication but broadcast recipient */
        if (is_broadcast_ether_addr(ethhdr->h_dest))
-               return NET_RX_DROP;
+               goto out;
 
        /* packet with broadcast sender address */
        if (is_broadcast_ether_addr(ethhdr->h_source))
-               return NET_RX_DROP;
+               goto out;
 
        /* not for me */
        if (!is_my_mac(ethhdr->h_dest))
-               return NET_RX_DROP;
+               goto out;
 
        icmp_packet = (struct icmp_packet_rr *)skb->data;
 
@@ -978,53 +1047,61 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
 
        /* TTL exceeded */
        if (icmp_packet->ttl < 2)
-               return recv_icmp_ttl_exceeded(bat_priv, skb, hdr_size);
-
-       ret = NET_RX_DROP;
+               return recv_icmp_ttl_exceeded(bat_priv, skb);
 
        /* get routing information */
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-       orig_node = ((struct orig_node *)
-                    hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
-                              icmp_packet->dst));
+       rcu_read_lock();
+       orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
 
-       if ((orig_node) && (orig_node->router)) {
+       if (!orig_node)
+               goto unlock;
 
-               /* don't lock while sending the packets ... we therefore
-                * copy the required data before sending */
-               batman_if = orig_node->router->if_incoming;
-               memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-               spin_unlock_bh(&bat_priv->orig_hash_lock);
+       neigh_node = orig_node->router;
 
-               /* create a copy of the skb, if needed, to modify it. */
-               if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
-                       return NET_RX_DROP;
+       if (!neigh_node)
+               goto unlock;
 
-               icmp_packet = (struct icmp_packet_rr *)skb->data;
-               ethhdr = (struct ethhdr *)skb_mac_header(skb);
+       if (!atomic_inc_not_zero(&neigh_node->refcount)) {
+               neigh_node = NULL;
+               goto unlock;
+       }
 
-               /* decrement ttl */
-               icmp_packet->ttl--;
+       rcu_read_unlock();
 
-               /* route it */
-               send_skb_packet(skb, batman_if, dstaddr);
-               ret = NET_RX_SUCCESS;
+       /* create a copy of the skb, if needed, to modify it. */
+       if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+               goto out;
 
-       } else
-               spin_unlock_bh(&bat_priv->orig_hash_lock);
+       icmp_packet = (struct icmp_packet_rr *)skb->data;
+
+       /* decrement ttl */
+       icmp_packet->ttl--;
 
+       /* route it */
+       send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+       ret = NET_RX_SUCCESS;
+       goto out;
+
+unlock:
+       rcu_read_unlock();
+out:
+       if (neigh_node)
+               neigh_node_free_ref(neigh_node);
+       if (orig_node)
+               orig_node_free_ref(orig_node);
        return ret;
 }
 
 /* find a suitable router for this originator, and use
- * bonding if possible. */
+ * bonding if possible. Increases the found neighbor's
+ * refcount. */
 struct neigh_node *find_router(struct bat_priv *bat_priv,
                               struct orig_node *orig_node,
-                              struct batman_if *recv_if)
+                              struct hard_iface *recv_if)
 {
        struct orig_node *primary_orig_node;
        struct orig_node *router_orig;
-       struct neigh_node *router, *first_candidate, *best_router;
+       struct neigh_node *router, *first_candidate, *tmp_neigh_node;
        static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
        int bonding_enabled;
 
@@ -1036,78 +1113,128 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
 
        /* without bonding, the first node should
         * always choose the default router. */
-
        bonding_enabled = atomic_read(&bat_priv->bonding);
 
-       if ((!recv_if) && (!bonding_enabled))
-               return orig_node->router;
-
+       rcu_read_lock();
+       /* select default router to output */
+       router = orig_node->router;
        router_orig = orig_node->router->orig_node;
+       if (!router_orig || !atomic_inc_not_zero(&router->refcount)) {
+               rcu_read_unlock();
+               return NULL;
+       }
+
+       if ((!recv_if) && (!bonding_enabled))
+               goto return_router;
 
        /* if we have something in the primary_addr, we can search
         * for a potential bonding candidate. */
-       if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0)
-               return orig_node->router;
+       if (compare_eth(router_orig->primary_addr, zero_mac))
+               goto return_router;
 
        /* find the orig_node which has the primary interface. might
         * even be the same as our router_orig in many cases */
 
-       if (memcmp(router_orig->primary_addr,
-                               router_orig->orig, ETH_ALEN) == 0) {
+       if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
                primary_orig_node = router_orig;
        } else {
-               primary_orig_node = hash_find(bat_priv->orig_hash, compare_orig,
-                                              choose_orig,
-                                              router_orig->primary_addr);
-
+               primary_orig_node = orig_hash_find(bat_priv,
+                                                  router_orig->primary_addr);
                if (!primary_orig_node)
-                       return orig_node->router;
+                       goto return_router;
+
+               orig_node_free_ref(primary_orig_node);
        }
 
        /* with less than 2 candidates, we can't do any
         * bonding and prefer the original router. */
-
-       if (primary_orig_node->bond.candidates < 2)
-               return orig_node->router;
+       if (atomic_read(&primary_orig_node->bond_candidates) < 2)
+               goto return_router;
 
 
        /* all nodes in between should choose a candidate which
         * is not on the interface where the packet came
         * in. */
-       first_candidate = primary_orig_node->bond.selected;
-       router = first_candidate;
+
+       neigh_node_free_ref(router);
+       first_candidate = NULL;
+       router = NULL;
 
        if (bonding_enabled) {
                /* in the bonding case, send the packets in a round
                 * robin fashion over the remaining interfaces. */
-               do {
+
+               list_for_each_entry_rcu(tmp_neigh_node,
+                               &primary_orig_node->bond_list, bonding_list) {
+                       if (!first_candidate)
+                               first_candidate = tmp_neigh_node;
                        /* recv_if == NULL on the first node. */
-                       if (router->if_incoming != recv_if)
+                       if (tmp_neigh_node->if_incoming != recv_if &&
+                           atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
+                               router = tmp_neigh_node;
                                break;
+                       }
+               }
+
+               /* use the first candidate if nothing was found. */
+               if (!router && first_candidate &&
+                   atomic_inc_not_zero(&first_candidate->refcount))
+                       router = first_candidate;
 
-                       router = router->next_bond_candidate;
-               } while (router != first_candidate);
+               if (!router) {
+                       rcu_read_unlock();
+                       return NULL;
+               }
 
-               primary_orig_node->bond.selected = router->next_bond_candidate;
+               /* selected should point to the next element
+                * after the current router */
+               spin_lock_bh(&primary_orig_node->neigh_list_lock);
+               /* this is a list_move(), which unfortunately
+                * does not exist as an rcu version */
+               list_del_rcu(&primary_orig_node->bond_list);
+               list_add_rcu(&primary_orig_node->bond_list,
+                               &router->bonding_list);
+               spin_unlock_bh(&primary_orig_node->neigh_list_lock);
 
        } else {
                /* if bonding is disabled, use the best of the
                 * remaining candidates which are not using
                 * this interface. */
-               best_router = first_candidate;
+               list_for_each_entry_rcu(tmp_neigh_node,
+                       &primary_orig_node->bond_list, bonding_list) {
+                       if (!first_candidate)
+                               first_candidate = tmp_neigh_node;
 
-               do {
                        /* recv_if == NULL on the first node. */
-                       if ((router->if_incoming != recv_if) &&
-                               (router->tq_avg > best_router->tq_avg))
-                                       best_router = router;
+                       if (tmp_neigh_node->if_incoming == recv_if)
+                               continue;
 
-                       router = router->next_bond_candidate;
-               } while (router != first_candidate);
+                       if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
+                               continue;
 
-               router = best_router;
-       }
+                       /* if we don't have a router yet
+                        * or this one is better, choose it. */
+                       if ((!router) ||
+                           (tmp_neigh_node->tq_avg > router->tq_avg)) {
+                               /* decrement refcount of
+                                * previously selected router */
+                               if (router)
+                                       neigh_node_free_ref(router);
+
+                               router = tmp_neigh_node;
+                               atomic_inc_not_zero(&router->refcount);
+                       }
+
+                       neigh_node_free_ref(tmp_neigh_node);
+               }
 
+               /* use the first candidate if nothing was found. */
+               if (!router && first_candidate &&
+                   atomic_inc_not_zero(&first_candidate->refcount))
+                       router = first_candidate;
+       }
+return_router:
+       rcu_read_unlock();
        return router;
 }
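
The bonding branch of find_router() above, with RCU and refcounting removed, is a plain round-robin: continue after the previously chosen candidate, skip entries learned on the interface the packet arrived on, and remember the pick for the next packet. A toy version over an array (hypothetical types, not the driver's bond_list handling):

#include <stdio.h>

struct candidate {
        const char *name;
        int if_index;           /* interface the candidate was learned on */
};

/* Round-robin pick: start after the last used slot, skip candidates on
 * 'recv_if' (the interface the packet came in on), wrap around once. */
static int pick_bonding_candidate(const struct candidate *cands, int n,
                                  int *last_used, int recv_if)
{
        int i, idx;

        for (i = 1; i <= n; i++) {
                idx = (*last_used + i) % n;
                if (cands[idx].if_index != recv_if) {
                        *last_used = idx;
                        return idx;
                }
        }
        return *last_used;      /* nothing better; keep the old choice */
}

int main(void)
{
        struct candidate cands[] = {
                { "neigh-a", 1 }, { "neigh-b", 2 }, { "neigh-c", 1 },
        };
        int last = 0, i;

        for (i = 0; i < 4; i++)
                printf("send %d via %s\n", i,
                       cands[pick_bonding_candidate(cands, 3, &last, 2)].name);
        return 0;
}

In the driver the "remember the pick" step is the list_del_rcu()/list_add_rcu() pair, which rotates bond_list so the next lookup starts behind the router just used.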
 
@@ -1136,17 +1263,14 @@ static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
        return 0;
 }
 
-int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
-                        int hdr_size)
+int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-       struct orig_node *orig_node;
-       struct neigh_node *router;
-       struct batman_if *batman_if;
-       uint8_t dstaddr[ETH_ALEN];
+       struct orig_node *orig_node = NULL;
+       struct neigh_node *neigh_node = NULL;
        struct unicast_packet *unicast_packet;
        struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
-       int ret;
+       int ret = NET_RX_DROP;
        struct sk_buff *new_skb;
 
        unicast_packet = (struct unicast_packet *)skb->data;
@@ -1156,53 +1280,51 @@ int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
                pr_debug("Warning - can't forward unicast packet from %pM to "
                         "%pM: ttl exceeded\n", ethhdr->h_source,
                         unicast_packet->dest);
-               return NET_RX_DROP;
+               goto out;
        }
 
        /* get routing information */
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-       orig_node = ((struct orig_node *)
-                    hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
-                              unicast_packet->dest));
-
-       router = find_router(bat_priv, orig_node, recv_if);
+       rcu_read_lock();
+       orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
 
-       if (!router) {
-               spin_unlock_bh(&bat_priv->orig_hash_lock);
-               return NET_RX_DROP;
-       }
+       if (!orig_node)
+               goto unlock;
 
-       /* don't lock while sending the packets ... we therefore
-        * copy the required data before sending */
+       rcu_read_unlock();
 
-       batman_if = router->if_incoming;
-       memcpy(dstaddr, router->addr, ETH_ALEN);
+       /* find_router() increases neigh_nodes refcount if found. */
+       neigh_node = find_router(bat_priv, orig_node, recv_if);
 
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+       if (!neigh_node)
+               goto out;
 
        /* create a copy of the skb, if needed, to modify it. */
        if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
-               return NET_RX_DROP;
+               goto out;
 
        unicast_packet = (struct unicast_packet *)skb->data;
 
        if (unicast_packet->packet_type == BAT_UNICAST &&
            atomic_read(&bat_priv->fragmentation) &&
-           skb->len > batman_if->net_dev->mtu)
-               return frag_send_skb(skb, bat_priv, batman_if,
-                                    dstaddr);
+           skb->len > neigh_node->if_incoming->net_dev->mtu) {
+               ret = frag_send_skb(skb, bat_priv,
+                                   neigh_node->if_incoming, neigh_node->addr);
+               goto out;
+       }
 
        if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
-           2 * skb->len - hdr_size <= batman_if->net_dev->mtu) {
+           frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {
 
                ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
 
                if (ret == NET_RX_DROP)
-                       return NET_RX_DROP;
+                       goto out;
 
                /* packet was buffered for late merge */
-               if (!new_skb)
-                       return NET_RX_SUCCESS;
+               if (!new_skb) {
+                       ret = NET_RX_SUCCESS;
+                       goto out;
+               }
 
                skb = new_skb;
                unicast_packet = (struct unicast_packet *)skb->data;
@@ -1212,12 +1334,21 @@ int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
        unicast_packet->ttl--;
 
        /* route it */
-       send_skb_packet(skb, batman_if, dstaddr);
+       send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+       ret = NET_RX_SUCCESS;
+       goto out;
 
-       return NET_RX_SUCCESS;
+unlock:
+       rcu_read_unlock();
+out:
+       if (neigh_node)
+               neigh_node_free_ref(neigh_node);
+       if (orig_node)
+               orig_node_free_ref(orig_node);
+       return ret;
 }
 
-int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
+int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
        struct unicast_packet *unicast_packet;
        int hdr_size = sizeof(struct unicast_packet);
@@ -1233,10 +1364,10 @@ int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
                return NET_RX_SUCCESS;
        }
 
-       return route_unicast_packet(skb, recv_if, hdr_size);
+       return route_unicast_packet(skb, recv_if);
 }
 
-int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
+int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        struct unicast_frag_packet *unicast_packet;
@@ -1266,89 +1397,96 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
                return NET_RX_SUCCESS;
        }
 
-       return route_unicast_packet(skb, recv_if, hdr_size);
+       return route_unicast_packet(skb, recv_if);
 }
 
 
-int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
+int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-       struct orig_node *orig_node;
+       struct orig_node *orig_node = NULL;
        struct bcast_packet *bcast_packet;
        struct ethhdr *ethhdr;
        int hdr_size = sizeof(struct bcast_packet);
+       int ret = NET_RX_DROP;
        int32_t seq_diff;
 
        /* drop packet if it doesn't have the necessary minimum size */
        if (unlikely(!pskb_may_pull(skb, hdr_size)))
-               return NET_RX_DROP;
+               goto out;
 
        ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
        /* packet with broadcast indication but unicast recipient */
        if (!is_broadcast_ether_addr(ethhdr->h_dest))
-               return NET_RX_DROP;
+               goto out;
 
        /* packet with broadcast sender address */
        if (is_broadcast_ether_addr(ethhdr->h_source))
-               return NET_RX_DROP;
+               goto out;
 
        /* ignore broadcasts sent by myself */
        if (is_my_mac(ethhdr->h_source))
-               return NET_RX_DROP;
+               goto out;
 
        bcast_packet = (struct bcast_packet *)skb->data;
 
        /* ignore broadcasts originated by myself */
        if (is_my_mac(bcast_packet->orig))
-               return NET_RX_DROP;
+               goto out;
 
        if (bcast_packet->ttl < 2)
-               return NET_RX_DROP;
+               goto out;
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-       orig_node = ((struct orig_node *)
-                    hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
-                              bcast_packet->orig));
+       rcu_read_lock();
+       orig_node = orig_hash_find(bat_priv, bcast_packet->orig);
 
-       if (!orig_node) {
-               spin_unlock_bh(&bat_priv->orig_hash_lock);
-               return NET_RX_DROP;
-       }
+       if (!orig_node)
+               goto rcu_unlock;
+
+       rcu_read_unlock();
+
+       spin_lock_bh(&orig_node->bcast_seqno_lock);
 
        /* check whether the packet is a duplicate */
-       if (get_bit_status(orig_node->bcast_bits,
-                          orig_node->last_bcast_seqno,
-                          ntohl(bcast_packet->seqno))) {
-               spin_unlock_bh(&bat_priv->orig_hash_lock);
-               return NET_RX_DROP;
-       }
+       if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno,
+                          ntohl(bcast_packet->seqno)))
+               goto spin_unlock;
 
        seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
 
        /* check whether the packet is old and the host just restarted. */
        if (window_protected(bat_priv, seq_diff,
-                            &orig_node->bcast_seqno_reset)) {
-               spin_unlock_bh(&bat_priv->orig_hash_lock);
-               return NET_RX_DROP;
-       }
+                            &orig_node->bcast_seqno_reset))
+               goto spin_unlock;
 
        /* mark broadcast in flood history, update window position
         * if required. */
        if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
                orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
 
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+       spin_unlock_bh(&orig_node->bcast_seqno_lock);
+
        /* rebroadcast packet */
        add_bcast_packet_to_list(bat_priv, skb);
 
        /* broadcast for me */
        interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
+       ret = NET_RX_SUCCESS;
+       goto out;
 
-       return NET_RX_SUCCESS;
+rcu_unlock:
+       rcu_read_unlock();
+       goto out;
+spin_unlock:
+       spin_unlock_bh(&orig_node->bcast_seqno_lock);
+out:
+       if (orig_node)
+               orig_node_free_ref(orig_node);
+       return ret;
 }
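
The broadcast path now serializes only on the originator's own bcast_seqno_lock; the sequence-number logic it protects (get_bit_status(), window_protected(), bit_get_packet()) is conceptually a fixed-size bitmap of recently seen seqnos plus a cap on how far the seqno may jump. A much simplified illustration with a 64-bit window -- the EXPECTED_SEQNO_RANGE value and the exact window handling here are assumptions, not the driver's bit operations:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WINDOW_SIZE             64      /* bits of history we keep */
#define EXPECTED_SEQNO_RANGE    65536   /* assumed protection limit */

struct seqno_window {
        uint64_t bits;          /* bit i set => (last_seqno - i) was seen */
        uint32_t last_seqno;
};

/* Returns true if the packet should be accepted (and records it),
 * false if it is a duplicate or falls outside the protection window. */
static bool seqno_accept(struct seqno_window *w, uint32_t seqno)
{
        int32_t diff = (int32_t)(seqno - w->last_seqno);

        /* "window protected": too old or too big a jump -> drop */
        if (diff <= -WINDOW_SIZE || diff >= EXPECTED_SEQNO_RANGE)
                return false;

        if (diff <= 0) {
                /* not newer than the newest seqno: duplicate? */
                if (w->bits & (UINT64_C(1) << -diff))
                        return false;
                w->bits |= UINT64_C(1) << -diff;
                return true;
        }

        /* newer: slide the window forward */
        w->bits = (diff >= WINDOW_SIZE) ? 0 : w->bits << diff;
        w->bits |= 1;
        w->last_seqno = seqno;
        return true;
}

int main(void)
{
        struct seqno_window w = { 0, 0 };
        int a, b, c;

        a = seqno_accept(&w, 1);        /* new seqno      -> accept    */
        b = seqno_accept(&w, 1);        /* replayed seqno -> duplicate */
        c = seqno_accept(&w, 5);        /* newer seqno    -> accept    */
        printf("%d %d %d\n", a, b, c);
        return 0;
}

Seqno 1 is accepted, the replayed seqno 1 is flagged as a duplicate, and seqno 5 slides the window forward -- the same accept/duplicate/slide behaviour recv_bcast_packet() relies on.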
 
-int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
+int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
        struct vis_packet *vis_packet;
        struct ethhdr *ethhdr;
index f108f230bfdbb88476a55a8278b8001f725f0620..b5a064c88a4f132025faa45c0b5ef17ad32b001a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
 #ifndef _NET_BATMAN_ADV_ROUTING_H_
 #define _NET_BATMAN_ADV_ROUTING_H_
 
-#include "types.h"
-
-void slide_own_bcast_window(struct batman_if *batman_if);
+void slide_own_bcast_window(struct hard_iface *hard_iface);
 void receive_bat_packet(struct ethhdr *ethhdr,
                                struct batman_packet *batman_packet,
                                unsigned char *hna_buff, int hna_buff_len,
-                               struct batman_if *if_incoming);
+                               struct hard_iface *if_incoming);
 void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
                   struct neigh_node *neigh_node, unsigned char *hna_buff,
                   int hna_buff_len);
-int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
-                        int hdr_size);
-int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
+int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_bat_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 struct neigh_node *find_router(struct bat_priv *bat_priv,
-               struct orig_node *orig_node, struct batman_if *recv_if);
-void update_bonding_candidates(struct bat_priv *bat_priv,
-                              struct orig_node *orig_node);
+                              struct orig_node *orig_node,
+                              struct hard_iface *recv_if);
+void bonding_candidate_del(struct orig_node *orig_node,
+                          struct neigh_node *neigh_node);
 
 #endif /* _NET_BATMAN_ADV_ROUTING_H_ */
index b89b9f7709ae7d30f6929a405d2e74be82856343..d49e54d932af7e302fd8f862bfccfb0727708386 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -25,7 +25,6 @@
 #include "translation-table.h"
 #include "soft-interface.h"
 #include "hard-interface.h"
-#include "types.h"
 #include "vis.h"
 #include "aggregation.h"
 #include "gateway_common.h"
@@ -49,7 +48,7 @@ static unsigned long own_send_time(struct bat_priv *bat_priv)
 }
 
 /* when do we schedule a forwarded packet to be sent */
-static unsigned long forward_send_time(struct bat_priv *bat_priv)
+static unsigned long forward_send_time(void)
 {
        return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
 }
@@ -57,20 +56,20 @@ static unsigned long forward_send_time(struct bat_priv *bat_priv)
 /* send out an already prepared packet to the given address via the
  * specified batman interface */
 int send_skb_packet(struct sk_buff *skb,
-                               struct batman_if *batman_if,
+                               struct hard_iface *hard_iface,
                                uint8_t *dst_addr)
 {
        struct ethhdr *ethhdr;
 
-       if (batman_if->if_status != IF_ACTIVE)
+       if (hard_iface->if_status != IF_ACTIVE)
                goto send_skb_err;
 
-       if (unlikely(!batman_if->net_dev))
+       if (unlikely(!hard_iface->net_dev))
                goto send_skb_err;
 
-       if (!(batman_if->net_dev->flags & IFF_UP)) {
+       if (!(hard_iface->net_dev->flags & IFF_UP)) {
                pr_warning("Interface %s is not up - can't send packet via "
-                          "that interface!\n", batman_if->net_dev->name);
+                          "that interface!\n", hard_iface->net_dev->name);
                goto send_skb_err;
        }
 
@@ -81,7 +80,7 @@ int send_skb_packet(struct sk_buff *skb,
        skb_reset_mac_header(skb);
 
        ethhdr = (struct ethhdr *) skb_mac_header(skb);
-       memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
+       memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
        memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
        ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
 
@@ -89,7 +88,7 @@ int send_skb_packet(struct sk_buff *skb,
        skb->priority = TC_PRIO_CONTROL;
        skb->protocol = __constant_htons(ETH_P_BATMAN);
 
-       skb->dev = batman_if->net_dev;
+       skb->dev = hard_iface->net_dev;
 
        /* dev_queue_xmit() returns a negative result on error.  However on
         * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
@@ -103,16 +102,16 @@ send_skb_err:
 
 /* Send a packet to a given interface */
 static void send_packet_to_if(struct forw_packet *forw_packet,
-                             struct batman_if *batman_if)
+                             struct hard_iface *hard_iface)
 {
-       struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+       struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        char *fwd_str;
        uint8_t packet_num;
        int16_t buff_pos;
        struct batman_packet *batman_packet;
        struct sk_buff *skb;
 
-       if (batman_if->if_status != IF_ACTIVE)
+       if (hard_iface->if_status != IF_ACTIVE)
                return;
 
        packet_num = 0;
@@ -127,7 +126,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
                /* we might have aggregated direct link packets with an
                 * ordinary base packet */
                if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
-                   (forw_packet->if_incoming == batman_if))
+                   (forw_packet->if_incoming == hard_iface))
                        batman_packet->flags |= DIRECTLINK;
                else
                        batman_packet->flags &= ~DIRECTLINK;
@@ -143,7 +142,8 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
                        batman_packet->tq, batman_packet->ttl,
                        (batman_packet->flags & DIRECTLINK ?
                         "on" : "off"),
-                       batman_if->net_dev->name, batman_if->net_dev->dev_addr);
+                       hard_iface->net_dev->name,
+                       hard_iface->net_dev->dev_addr);
 
                buff_pos += sizeof(struct batman_packet) +
                        (batman_packet->num_hna * ETH_ALEN);
@@ -155,13 +155,13 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
        /* create clone because function is called more than once */
        skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
        if (skb)
-               send_skb_packet(skb, batman_if, broadcast_addr);
+               send_skb_packet(skb, hard_iface, broadcast_addr);
 }
 
 /* send a batman packet */
 static void send_packet(struct forw_packet *forw_packet)
 {
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
        struct net_device *soft_iface;
        struct bat_priv *bat_priv;
        struct batman_packet *batman_packet =
@@ -205,17 +205,17 @@ static void send_packet(struct forw_packet *forw_packet)
 
        /* broadcast on every interface */
        rcu_read_lock();
-       list_for_each_entry_rcu(batman_if, &if_list, list) {
-               if (batman_if->soft_iface != soft_iface)
+       list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+               if (hard_iface->soft_iface != soft_iface)
                        continue;
 
-               send_packet_to_if(forw_packet, batman_if);
+               send_packet_to_if(forw_packet, hard_iface);
        }
        rcu_read_unlock();
 }
 
 static void rebuild_batman_packet(struct bat_priv *bat_priv,
-                                 struct batman_if *batman_if)
+                                 struct hard_iface *hard_iface)
 {
        int new_len;
        unsigned char *new_buff;
@@ -227,7 +227,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
 
        /* keep old buffer if kmalloc should fail */
        if (new_buff) {
-               memcpy(new_buff, batman_if->packet_buff,
+               memcpy(new_buff, hard_iface->packet_buff,
                       sizeof(struct batman_packet));
                batman_packet = (struct batman_packet *)new_buff;
 
@@ -235,21 +235,21 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
                                new_buff + sizeof(struct batman_packet),
                                new_len - sizeof(struct batman_packet));
 
-               kfree(batman_if->packet_buff);
-               batman_if->packet_buff = new_buff;
-               batman_if->packet_len = new_len;
+               kfree(hard_iface->packet_buff);
+               hard_iface->packet_buff = new_buff;
+               hard_iface->packet_len = new_len;
        }
 }
 
-void schedule_own_packet(struct batman_if *batman_if)
+void schedule_own_packet(struct hard_iface *hard_iface)
 {
-       struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+       struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        unsigned long send_time;
        struct batman_packet *batman_packet;
        int vis_server;
 
-       if ((batman_if->if_status == IF_NOT_IN_USE) ||
-           (batman_if->if_status == IF_TO_BE_REMOVED))
+       if ((hard_iface->if_status == IF_NOT_IN_USE) ||
+           (hard_iface->if_status == IF_TO_BE_REMOVED))
                return;
 
        vis_server = atomic_read(&bat_priv->vis_mode);
@@ -261,51 +261,51 @@ void schedule_own_packet(struct batman_if *batman_if)
         * outdated packets (especially uninitialized mac addresses) in the
         * packet queue
         */
-       if (batman_if->if_status == IF_TO_BE_ACTIVATED)
-               batman_if->if_status = IF_ACTIVE;
+       if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
+               hard_iface->if_status = IF_ACTIVE;
 
        /* if local hna has changed and interface is a primary interface */
        if ((atomic_read(&bat_priv->hna_local_changed)) &&
-           (batman_if == bat_priv->primary_if))
-               rebuild_batman_packet(bat_priv, batman_if);
+           (hard_iface == bat_priv->primary_if))
+               rebuild_batman_packet(bat_priv, hard_iface);
 
        /**
         * NOTE: packet_buff might just have been re-allocated in
         * rebuild_batman_packet()
         */
-       batman_packet = (struct batman_packet *)batman_if->packet_buff;
+       batman_packet = (struct batman_packet *)hard_iface->packet_buff;
 
        /* change sequence number to network order */
        batman_packet->seqno =
-               htonl((uint32_t)atomic_read(&batman_if->seqno));
+               htonl((uint32_t)atomic_read(&hard_iface->seqno));
 
        if (vis_server == VIS_TYPE_SERVER_SYNC)
                batman_packet->flags |= VIS_SERVER;
        else
                batman_packet->flags &= ~VIS_SERVER;
 
-       if ((batman_if == bat_priv->primary_if) &&
+       if ((hard_iface == bat_priv->primary_if) &&
            (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
                batman_packet->gw_flags =
                                (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
        else
                batman_packet->gw_flags = 0;
 
-       atomic_inc(&batman_if->seqno);
+       atomic_inc(&hard_iface->seqno);
 
-       slide_own_bcast_window(batman_if);
+       slide_own_bcast_window(hard_iface);
        send_time = own_send_time(bat_priv);
        add_bat_packet_to_list(bat_priv,
-                              batman_if->packet_buff,
-                              batman_if->packet_len,
-                              batman_if, 1, send_time);
+                              hard_iface->packet_buff,
+                              hard_iface->packet_len,
+                              hard_iface, 1, send_time);
 }
 
 void schedule_forward_packet(struct orig_node *orig_node,
                             struct ethhdr *ethhdr,
                             struct batman_packet *batman_packet,
                             uint8_t directlink, int hna_buff_len,
-                            struct batman_if *if_incoming)
+                            struct hard_iface *if_incoming)
 {
        struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
        unsigned char in_tq, in_ttl, tq_avg = 0;
@@ -327,7 +327,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
        if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {
 
                /* rebroadcast ogm of best ranking neighbor as is */
-               if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) {
+               if (!compare_eth(orig_node->router->addr, ethhdr->h_source)) {
                        batman_packet->tq = orig_node->router->tq_avg;
 
                        if (orig_node->router->last_ttl)
@@ -356,7 +356,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
        else
                batman_packet->flags &= ~DIRECTLINK;
 
-       send_time = forward_send_time(bat_priv);
+       send_time = forward_send_time();
        add_bat_packet_to_list(bat_priv,
                               (unsigned char *)batman_packet,
                               sizeof(struct batman_packet) + hna_buff_len,
@@ -444,7 +444,7 @@ out:
 
 static void send_outstanding_bcast_packet(struct work_struct *work)
 {
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct forw_packet *forw_packet =
@@ -462,14 +462,14 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
 
        /* rebroadcast packet */
        rcu_read_lock();
-       list_for_each_entry_rcu(batman_if, &if_list, list) {
-               if (batman_if->soft_iface != soft_iface)
+       list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+               if (hard_iface->soft_iface != soft_iface)
                        continue;
 
                /* send a copy of the saved skb */
                skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
                if (skb1)
-                       send_skb_packet(skb1, batman_if, broadcast_addr);
+                       send_skb_packet(skb1, hard_iface, broadcast_addr);
        }
        rcu_read_unlock();
 
@@ -522,15 +522,15 @@ out:
 }
 
 void purge_outstanding_packets(struct bat_priv *bat_priv,
-                              struct batman_if *batman_if)
+                              struct hard_iface *hard_iface)
 {
        struct forw_packet *forw_packet;
        struct hlist_node *tmp_node, *safe_tmp_node;
 
-       if (batman_if)
+       if (hard_iface)
                bat_dbg(DBG_BATMAN, bat_priv,
                        "purge_outstanding_packets(): %s\n",
-                       batman_if->net_dev->name);
+                       hard_iface->net_dev->name);
        else
                bat_dbg(DBG_BATMAN, bat_priv,
                        "purge_outstanding_packets()\n");
@@ -544,8 +544,8 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
                 * if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
-               if ((batman_if) &&
-                   (forw_packet->if_incoming != batman_if))
+               if ((hard_iface) &&
+                   (forw_packet->if_incoming != hard_iface))
                        continue;
 
                spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
@@ -568,8 +568,8 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
                 * if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
-               if ((batman_if) &&
-                   (forw_packet->if_incoming != batman_if))
+               if ((hard_iface) &&
+                   (forw_packet->if_incoming != hard_iface))
                        continue;
 
                spin_unlock_bh(&bat_priv->forw_bat_list_lock);
index c4cefa8e4f85d99388427d175e16e8c7d499bff6..7b2ff19c05e77f6ea814649b019c2a05163f607e 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
 #ifndef _NET_BATMAN_ADV_SEND_H_
 #define _NET_BATMAN_ADV_SEND_H_
 
-#include "types.h"
-
 int send_skb_packet(struct sk_buff *skb,
-                               struct batman_if *batman_if,
+                               struct hard_iface *hard_iface,
                                uint8_t *dst_addr);
-void schedule_own_packet(struct batman_if *batman_if);
+void schedule_own_packet(struct hard_iface *hard_iface);
 void schedule_forward_packet(struct orig_node *orig_node,
                             struct ethhdr *ethhdr,
                             struct batman_packet *batman_packet,
                             uint8_t directlink, int hna_buff_len,
-                            struct batman_if *if_outgoing);
+                            struct hard_iface *if_outgoing);
 int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb);
 void send_outstanding_bat_packet(struct work_struct *work);
 void purge_outstanding_packets(struct bat_priv *bat_priv,
-                              struct batman_if *batman_if);
+                              struct hard_iface *hard_iface);
 
 #endif /* _NET_BATMAN_ADV_SEND_H_ */
index e89ede192ed0c445364ecd0ff794ba6c71055e35..9ed26140a2696aa19d87534e15b4609d5901b66b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
 #include "send.h"
 #include "bat_debugfs.h"
 #include "translation-table.h"
-#include "types.h"
 #include "hash.h"
 #include "gateway_common.h"
 #include "gateway_client.h"
-#include "send.h"
 #include "bat_sysfs.h"
 #include <linux/slab.h>
 #include <linux/ethtool.h>
 #include <linux/etherdevice.h>
 #include <linux/if_vlan.h>
 #include "unicast.h"
-#include "routing.h"
 
 
 static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
@@ -79,20 +76,18 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len)
        return 0;
 }
 
-static void softif_neigh_free_ref(struct kref *refcount)
+static void softif_neigh_free_rcu(struct rcu_head *rcu)
 {
        struct softif_neigh *softif_neigh;
 
-       softif_neigh = container_of(refcount, struct softif_neigh, refcount);
+       softif_neigh = container_of(rcu, struct softif_neigh, rcu);
        kfree(softif_neigh);
 }
 
-static void softif_neigh_free_rcu(struct rcu_head *rcu)
+static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
 {
-       struct softif_neigh *softif_neigh;
-
-       softif_neigh = container_of(rcu, struct softif_neigh, rcu);
-       kref_put(&softif_neigh->refcount, softif_neigh_free_ref);
+       if (atomic_dec_and_test(&softif_neigh->refcount))
+               call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu);
 }
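
The hunk above inverts the old kref scheme: softif_neigh now carries a plain atomic_t, the final softif_neigh_free_ref() call hands the object to call_rcu(), and (further down in this file) lookups only take a reference through atomic_inc_not_zero(), so a dying entry can no longer be revived. A rough userspace analogue of that reference-count shape, using C11 atomics and an immediate free where the kernel defers via RCU (all names below are illustrative):

#include <stdatomic.h>
#include <stdlib.h>

struct neigh {
	atomic_int refcount;
	/* payload would live here */
};

/* kernel: softif_neigh_free_rcu() runs from call_rcu(); here we free directly */
static void neigh_free(struct neigh *n)
{
	free(n);
}

/* kernel: atomic_dec_and_test() followed by call_rcu() */
static void neigh_put(struct neigh *n)
{
	if (atomic_fetch_sub(&n->refcount, 1) == 1)
		neigh_free(n);
}

/* kernel: atomic_inc_not_zero(); refuses to resurrect a zero refcount */
static int neigh_get(struct neigh *n)
{
	int old = atomic_load(&n->refcount);

	while (old != 0)
		if (atomic_compare_exchange_weak(&n->refcount, &old, old + 1))
			return 1;
	return 0;
}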
 
 void softif_neigh_purge(struct bat_priv *bat_priv)
@@ -119,11 +114,10 @@ void softif_neigh_purge(struct bat_priv *bat_priv)
                                 softif_neigh->addr, softif_neigh->vid);
                        softif_neigh_tmp = bat_priv->softif_neigh;
                        bat_priv->softif_neigh = NULL;
-                       kref_put(&softif_neigh_tmp->refcount,
-                                softif_neigh_free_ref);
+                       softif_neigh_free_ref(softif_neigh_tmp);
                }
 
-               call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu);
+               softif_neigh_free_ref(softif_neigh);
        }
 
        spin_unlock_bh(&bat_priv->softif_neigh_lock);
@@ -138,14 +132,17 @@ static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
        rcu_read_lock();
        hlist_for_each_entry_rcu(softif_neigh, node,
                                 &bat_priv->softif_neigh_list, list) {
-               if (memcmp(softif_neigh->addr, addr, ETH_ALEN) != 0)
+               if (!compare_eth(softif_neigh->addr, addr))
                        continue;
 
                if (softif_neigh->vid != vid)
                        continue;
 
+               if (!atomic_inc_not_zero(&softif_neigh->refcount))
+                       continue;
+
                softif_neigh->last_seen = jiffies;
-               goto found;
+               goto out;
        }
 
        softif_neigh = kzalloc(sizeof(struct softif_neigh), GFP_ATOMIC);
@@ -155,15 +152,14 @@ static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
        memcpy(softif_neigh->addr, addr, ETH_ALEN);
        softif_neigh->vid = vid;
        softif_neigh->last_seen = jiffies;
-       kref_init(&softif_neigh->refcount);
+       /* initialize with 2 - caller decrements counter by one */
+       atomic_set(&softif_neigh->refcount, 2);
 
        INIT_HLIST_NODE(&softif_neigh->list);
        spin_lock_bh(&bat_priv->softif_neigh_lock);
        hlist_add_head_rcu(&softif_neigh->list, &bat_priv->softif_neigh_list);
        spin_unlock_bh(&bat_priv->softif_neigh_lock);
 
-found:
-       kref_get(&softif_neigh->refcount);
 out:
        rcu_read_unlock();
        return softif_neigh;
@@ -175,8 +171,6 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct softif_neigh *softif_neigh;
        struct hlist_node *node;
-       size_t buf_size, pos;
-       char *buff;
 
        if (!bat_priv->primary_if) {
                return seq_printf(seq, "BATMAN mesh %s disabled - "
@@ -186,33 +180,15 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
 
        seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name);
 
-       buf_size = 1;
-       /* Estimate length for: "   xx:xx:xx:xx:xx:xx\n" */
        rcu_read_lock();
        hlist_for_each_entry_rcu(softif_neigh, node,
                                 &bat_priv->softif_neigh_list, list)
-               buf_size += 30;
-       rcu_read_unlock();
-
-       buff = kmalloc(buf_size, GFP_ATOMIC);
-       if (!buff)
-               return -ENOMEM;
-
-       buff[0] = '\0';
-       pos = 0;
-
-       rcu_read_lock();
-       hlist_for_each_entry_rcu(softif_neigh, node,
-                                &bat_priv->softif_neigh_list, list) {
-               pos += snprintf(buff + pos, 31, "%s %pM (vid: %d)\n",
+               seq_printf(seq, "%s %pM (vid: %d)\n",
                                bat_priv->softif_neigh == softif_neigh
                                ? "=>" : "  ", softif_neigh->addr,
                                softif_neigh->vid);
-       }
        rcu_read_unlock();
 
-       seq_printf(seq, "%s", buff);
-       kfree(buff);
        return 0;
 }
 
@@ -267,7 +243,7 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
                         softif_neigh->addr, softif_neigh->vid);
                softif_neigh_tmp = bat_priv->softif_neigh;
                bat_priv->softif_neigh = softif_neigh;
-               kref_put(&softif_neigh_tmp->refcount, softif_neigh_free_ref);
+               softif_neigh_free_ref(softif_neigh_tmp);
                /* we need to hold the additional reference */
                goto err;
        }
@@ -285,7 +261,7 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
        }
 
 out:
-       kref_put(&softif_neigh->refcount, softif_neigh_free_ref);
+       softif_neigh_free_ref(softif_neigh);
 err:
        kfree_skb(skb);
        return;
@@ -438,7 +414,7 @@ end:
 }
 
 void interface_rx(struct net_device *soft_iface,
-                 struct sk_buff *skb, struct batman_if *recv_if,
+                 struct sk_buff *skb, struct hard_iface *recv_if,
                  int hdr_size)
 {
        struct bat_priv *bat_priv = netdev_priv(soft_iface);
@@ -486,7 +462,7 @@ void interface_rx(struct net_device *soft_iface,
 
                memcpy(unicast_packet->dest,
                       bat_priv->softif_neigh->addr, ETH_ALEN);
-               ret = route_unicast_packet(skb, recv_if, hdr_size);
+               ret = route_unicast_packet(skb, recv_if);
                if (ret == NET_RX_DROP)
                        goto dropped;
 
@@ -646,6 +622,19 @@ void softif_destroy(struct net_device *soft_iface)
        unregister_netdevice(soft_iface);
 }
 
+int softif_is_valid(struct net_device *net_dev)
+{
+#ifdef HAVE_NET_DEVICE_OPS
+       if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
+               return 1;
+#else
+       if (net_dev->hard_start_xmit == interface_tx)
+               return 1;
+#endif
+
+       return 0;
+}
+
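
softif_is_valid() above identifies a batman-adv soft interface purely by checking whether the device's transmit handler is interface_tx(), which avoids keeping any extra per-device flag. A tiny plain-C sketch of the same comparison, just to make the design choice explicit (struct toy_ops, toy_xmit() and toy_is_ours() are illustrative):

#include <stdbool.h>
#include <stddef.h>

struct toy_ops {
	int (*start_xmit)(void *skb, void *dev);
};

static int toy_xmit(void *skb, void *dev) { (void)skb; (void)dev; return 0; }

/* recognise "our" device by the transmit handler it was registered with */
static bool toy_is_ours(const struct toy_ops *ops)
{
	return ops && ops->start_xmit == toy_xmit;
}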
 /* ethtool */
 static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
index 02b77334d10d5889ba72077aa6db29b4e1c8dfb3..4789b6f2a0b3f9cdcd393d380d48ea8f95618b62 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -27,9 +27,10 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset);
 void softif_neigh_purge(struct bat_priv *bat_priv);
 int interface_tx(struct sk_buff *skb, struct net_device *soft_iface);
 void interface_rx(struct net_device *soft_iface,
-                 struct sk_buff *skb, struct batman_if *recv_if,
+                 struct sk_buff *skb, struct hard_iface *recv_if,
                  int hdr_size);
 struct net_device *softif_create(char *name);
 void softif_destroy(struct net_device *soft_iface);
+int softif_is_valid(struct net_device *net_dev);
 
 #endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
index a633b5a435e2ca21f4ef6062cbf62a2c1f8619c8..8d15b48d1692455c7a71c34c8d1031fc426bca14 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -22,7 +22,6 @@
 #include "main.h"
 #include "translation-table.h"
 #include "soft-interface.h"
-#include "types.h"
 #include "hash.h"
 #include "originator.h"
 
@@ -31,12 +30,85 @@ static void _hna_global_del_orig(struct bat_priv *bat_priv,
                                 struct hna_global_entry *hna_global_entry,
                                 char *message);
 
+/* returns 1 if they are the same mac addr */
+static int compare_lhna(struct hlist_node *node, void *data2)
+{
+       void *data1 = container_of(node, struct hna_local_entry, hash_entry);
+
+       return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
+}
+
+/* returns 1 if they are the same mac addr */
+static int compare_ghna(struct hlist_node *node, void *data2)
+{
+       void *data1 = container_of(node, struct hna_global_entry, hash_entry);
+
+       return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
+}
+
 static void hna_local_start_timer(struct bat_priv *bat_priv)
 {
        INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge);
        queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ);
 }
 
+static struct hna_local_entry *hna_local_hash_find(struct bat_priv *bat_priv,
+                                                  void *data)
+{
+       struct hashtable_t *hash = bat_priv->hna_local_hash;
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct hna_local_entry *hna_local_entry, *hna_local_entry_tmp = NULL;
+       int index;
+
+       if (!hash)
+               return NULL;
+
+       index = choose_orig(data, hash->size);
+       head = &hash->table[index];
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(hna_local_entry, node, head, hash_entry) {
+               if (!compare_eth(hna_local_entry, data))
+                       continue;
+
+               hna_local_entry_tmp = hna_local_entry;
+               break;
+       }
+       rcu_read_unlock();
+
+       return hna_local_entry_tmp;
+}
+
+static struct hna_global_entry *hna_global_hash_find(struct bat_priv *bat_priv,
+                                                    void *data)
+{
+       struct hashtable_t *hash = bat_priv->hna_global_hash;
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct hna_global_entry *hna_global_entry;
+       struct hna_global_entry *hna_global_entry_tmp = NULL;
+       int index;
+
+       if (!hash)
+               return NULL;
+
+       index = choose_orig(data, hash->size);
+       head = &hash->table[index];
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(hna_global_entry, node, head, hash_entry) {
+               if (!compare_eth(hna_global_entry, data))
+                       continue;
+
+               hna_global_entry_tmp = hna_global_entry;
+               break;
+       }
+       rcu_read_unlock();
+
+       return hna_global_entry_tmp;
+}
+
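
The new hna_local_hash_find()/hna_global_hash_find() helpers above replace the generic hash_find() calls: the key is folded into a bucket index with choose_orig(), and only that bucket is walked under rcu_read_lock(), comparing MAC addresses with compare_eth(). A lock-free userspace sketch of the same bucket-walk shape (struct entry, choose_bucket() and table_find() are illustrative stand-ins, not batman-adv types):

#include <stdint.h>
#include <string.h>
#include <stddef.h>

#define ETH_ALEN 6

struct entry {
	uint8_t addr[ETH_ALEN];
	struct entry *next;          /* singly linked bucket chain */
};

struct table {
	struct entry **buckets;
	int size;
};

/* stand-in for choose_orig(): fold the MAC address into a bucket index */
static int choose_bucket(const uint8_t *addr, int size)
{
	uint32_t h = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		h = h * 31 + addr[i];
	return h % size;
}

/* walk only the bucket the key hashes to, comparing full MAC addresses */
static struct entry *table_find(const struct table *t, const uint8_t *addr)
{
	struct entry *e;

	for (e = t->buckets[choose_bucket(addr, t->size)]; e; e = e->next)
		if (memcmp(e->addr, addr, ETH_ALEN) == 0)
			return e;
	return NULL;
}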
 int hna_local_init(struct bat_priv *bat_priv)
 {
        if (bat_priv->hna_local_hash)
@@ -61,10 +133,7 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
        int required_bytes;
 
        spin_lock_bh(&bat_priv->hna_lhash_lock);
-       hna_local_entry =
-               ((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash,
-                                                    compare_orig, choose_orig,
-                                                    addr));
+       hna_local_entry = hna_local_hash_find(bat_priv, addr);
        spin_unlock_bh(&bat_priv->hna_lhash_lock);
 
        if (hna_local_entry) {
@@ -100,15 +169,15 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
        hna_local_entry->last_seen = jiffies;
 
        /* the batman interface mac address should never be purged */
-       if (compare_orig(addr, soft_iface->dev_addr))
+       if (compare_eth(addr, soft_iface->dev_addr))
                hna_local_entry->never_purge = 1;
        else
                hna_local_entry->never_purge = 0;
 
        spin_lock_bh(&bat_priv->hna_lhash_lock);
 
-       hash_add(bat_priv->hna_local_hash, compare_orig, choose_orig,
-                hna_local_entry);
+       hash_add(bat_priv->hna_local_hash, compare_lhna, choose_orig,
+                hna_local_entry, &hna_local_entry->hash_entry);
        bat_priv->num_local_hna++;
        atomic_set(&bat_priv->hna_local_changed, 1);
 
@@ -117,9 +186,7 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
        /* remove address from global hash if present */
        spin_lock_bh(&bat_priv->hna_ghash_lock);
 
-       hna_global_entry = ((struct hna_global_entry *)
-                               hash_find(bat_priv->hna_global_hash,
-                                         compare_orig, choose_orig, addr));
+       hna_global_entry = hna_global_hash_find(bat_priv, addr);
 
        if (hna_global_entry)
                _hna_global_del_orig(bat_priv, hna_global_entry,
@@ -133,28 +200,27 @@ int hna_local_fill_buffer(struct bat_priv *bat_priv,
 {
        struct hashtable_t *hash = bat_priv->hna_local_hash;
        struct hna_local_entry *hna_local_entry;
-       struct element_t *bucket;
-       int i;
-       struct hlist_node *walk;
+       struct hlist_node *node;
        struct hlist_head *head;
-       int count = 0;
+       int i, count = 0;
 
        spin_lock_bh(&bat_priv->hna_lhash_lock);
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(hna_local_entry, node,
+                                        head, hash_entry) {
                        if (buff_len < (count + 1) * ETH_ALEN)
                                break;
 
-                       hna_local_entry = bucket->data;
                        memcpy(buff + (count * ETH_ALEN), hna_local_entry->addr,
                               ETH_ALEN);
 
                        count++;
                }
+               rcu_read_unlock();
        }
 
        /* if we did not get all new local hnas see you next time  ;-) */
@@ -171,12 +237,11 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hashtable_t *hash = bat_priv->hna_local_hash;
        struct hna_local_entry *hna_local_entry;
-       int i;
-       struct hlist_node *walk;
+       struct hlist_node *node;
        struct hlist_head *head;
-       struct element_t *bucket;
        size_t buf_size, pos;
        char *buff;
+       int i;
 
        if (!bat_priv->primary_if) {
                return seq_printf(seq, "BATMAN mesh %s disabled - "
@@ -195,8 +260,10 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each(walk, head)
+               rcu_read_lock();
+               __hlist_for_each_rcu(node, head)
                        buf_size += 21;
+               rcu_read_unlock();
        }
 
        buff = kmalloc(buf_size, GFP_ATOMIC);
@@ -204,18 +271,20 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
                spin_unlock_bh(&bat_priv->hna_lhash_lock);
                return -ENOMEM;
        }
+
        buff[0] = '\0';
        pos = 0;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       hna_local_entry = bucket->data;
-
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(hna_local_entry, node,
+                                        head, hash_entry) {
                        pos += snprintf(buff + pos, 22, " * %pM\n",
                                        hna_local_entry->addr);
                }
+               rcu_read_unlock();
        }
 
        spin_unlock_bh(&bat_priv->hna_lhash_lock);
@@ -225,9 +294,10 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
        return 0;
 }
 
-static void _hna_local_del(void *data, void *arg)
+static void _hna_local_del(struct hlist_node *node, void *arg)
 {
        struct bat_priv *bat_priv = (struct bat_priv *)arg;
+       void *data = container_of(node, struct hna_local_entry, hash_entry);
 
        kfree(data);
        bat_priv->num_local_hna--;
@@ -241,9 +311,9 @@ static void hna_local_del(struct bat_priv *bat_priv,
        bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
                hna_local_entry->addr, message);
 
-       hash_remove(bat_priv->hna_local_hash, compare_orig, choose_orig,
+       hash_remove(bat_priv->hna_local_hash, compare_lhna, choose_orig,
                    hna_local_entry->addr);
-       _hna_local_del(hna_local_entry, bat_priv);
+       _hna_local_del(&hna_local_entry->hash_entry, bat_priv);
 }
 
 void hna_local_remove(struct bat_priv *bat_priv,
@@ -253,9 +323,7 @@ void hna_local_remove(struct bat_priv *bat_priv,
 
        spin_lock_bh(&bat_priv->hna_lhash_lock);
 
-       hna_local_entry = (struct hna_local_entry *)
-               hash_find(bat_priv->hna_local_hash, compare_orig, choose_orig,
-                         addr);
+       hna_local_entry = hna_local_hash_find(bat_priv, addr);
 
        if (hna_local_entry)
                hna_local_del(bat_priv, hna_local_entry, message);
@@ -271,27 +339,29 @@ static void hna_local_purge(struct work_struct *work)
                container_of(delayed_work, struct bat_priv, hna_work);
        struct hashtable_t *hash = bat_priv->hna_local_hash;
        struct hna_local_entry *hna_local_entry;
-       int i;
-       struct hlist_node *walk, *safe;
+       struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
-       struct element_t *bucket;
        unsigned long timeout;
+       int i;
 
        spin_lock_bh(&bat_priv->hna_lhash_lock);
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
-                       hna_local_entry = bucket->data;
+               hlist_for_each_entry_safe(hna_local_entry, node, node_tmp,
+                                         head, hash_entry) {
+                       if (hna_local_entry->never_purge)
+                               continue;
 
                        timeout = hna_local_entry->last_seen;
                        timeout += LOCAL_HNA_TIMEOUT * HZ;
 
-                       if ((!hna_local_entry->never_purge) &&
-                           time_after(jiffies, timeout))
-                               hna_local_del(bat_priv, hna_local_entry,
-                                       "address timed out");
+                       if (time_before(jiffies, timeout))
+                               continue;
+
+                       hna_local_del(bat_priv, hna_local_entry,
+                                     "address timed out");
                }
        }
 
@@ -335,9 +405,7 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
                spin_lock_bh(&bat_priv->hna_ghash_lock);
 
                hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
-               hna_global_entry = (struct hna_global_entry *)
-                       hash_find(bat_priv->hna_global_hash, compare_orig,
-                                 choose_orig, hna_ptr);
+               hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);
 
                if (!hna_global_entry) {
                        spin_unlock_bh(&bat_priv->hna_ghash_lock);
@@ -357,8 +425,9 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
                                hna_global_entry->addr, orig_node->orig);
 
                        spin_lock_bh(&bat_priv->hna_ghash_lock);
-                       hash_add(bat_priv->hna_global_hash, compare_orig,
-                                choose_orig, hna_global_entry);
+                       hash_add(bat_priv->hna_global_hash, compare_ghna,
+                                choose_orig, hna_global_entry,
+                                &hna_global_entry->hash_entry);
 
                }
 
@@ -369,9 +438,7 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
                spin_lock_bh(&bat_priv->hna_lhash_lock);
 
                hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
-               hna_local_entry = (struct hna_local_entry *)
-                       hash_find(bat_priv->hna_local_hash, compare_orig,
-                                 choose_orig, hna_ptr);
+               hna_local_entry = hna_local_hash_find(bat_priv, hna_ptr);
 
                if (hna_local_entry)
                        hna_local_del(bat_priv, hna_local_entry,
@@ -401,12 +468,11 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hashtable_t *hash = bat_priv->hna_global_hash;
        struct hna_global_entry *hna_global_entry;
-       int i;
-       struct hlist_node *walk;
+       struct hlist_node *node;
        struct hlist_head *head;
-       struct element_t *bucket;
        size_t buf_size, pos;
        char *buff;
+       int i;
 
        if (!bat_priv->primary_if) {
                return seq_printf(seq, "BATMAN mesh %s disabled - "
@@ -424,8 +490,10 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each(walk, head)
+               rcu_read_lock();
+               __hlist_for_each_rcu(node, head)
                        buf_size += 43;
+               rcu_read_unlock();
        }
 
        buff = kmalloc(buf_size, GFP_ATOMIC);
@@ -439,14 +507,15 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       hna_global_entry = bucket->data;
-
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(hna_global_entry, node,
+                                        head, hash_entry) {
                        pos += snprintf(buff + pos, 44,
                                        " * %pM via %pM\n",
                                        hna_global_entry->addr,
                                        hna_global_entry->orig_node->orig);
                }
+               rcu_read_unlock();
        }
 
        spin_unlock_bh(&bat_priv->hna_ghash_lock);
@@ -465,7 +534,7 @@ static void _hna_global_del_orig(struct bat_priv *bat_priv,
                hna_global_entry->addr, hna_global_entry->orig_node->orig,
                message);
 
-       hash_remove(bat_priv->hna_global_hash, compare_orig, choose_orig,
+       hash_remove(bat_priv->hna_global_hash, compare_ghna, choose_orig,
                    hna_global_entry->addr);
        kfree(hna_global_entry);
 }
@@ -484,9 +553,7 @@ void hna_global_del_orig(struct bat_priv *bat_priv,
 
        while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
                hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
-               hna_global_entry = (struct hna_global_entry *)
-                       hash_find(bat_priv->hna_global_hash, compare_orig,
-                                 choose_orig, hna_ptr);
+               hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);
 
                if ((hna_global_entry) &&
                    (hna_global_entry->orig_node == orig_node))
@@ -503,8 +570,10 @@ void hna_global_del_orig(struct bat_priv *bat_priv,
        orig_node->hna_buff = NULL;
 }
 
-static void hna_global_del(void *data, void *arg)
+static void hna_global_del(struct hlist_node *node, void *arg)
 {
+       void *data = container_of(node, struct hna_global_entry, hash_entry);
+
        kfree(data);
 }
 
@@ -520,15 +589,20 @@ void hna_global_free(struct bat_priv *bat_priv)
 struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
 {
        struct hna_global_entry *hna_global_entry;
+       struct orig_node *orig_node = NULL;
 
        spin_lock_bh(&bat_priv->hna_ghash_lock);
-       hna_global_entry = (struct hna_global_entry *)
-                               hash_find(bat_priv->hna_global_hash,
-                                         compare_orig, choose_orig, addr);
-       spin_unlock_bh(&bat_priv->hna_ghash_lock);
+       hna_global_entry = hna_global_hash_find(bat_priv, addr);
 
        if (!hna_global_entry)
-               return NULL;
+               goto out;
 
-       return hna_global_entry->orig_node;
+       if (!atomic_inc_not_zero(&hna_global_entry->orig_node->refcount))
+               goto out;
+
+       orig_node = hna_global_entry->orig_node;
+
+out:
+       spin_unlock_bh(&bat_priv->hna_ghash_lock);
+       return orig_node;
 }
index 10c4c5c319b623ba0354e5907b322ce05dd3b64e..f19931ca1457530e10c679354b479e4eb2425675 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -22,8 +22,6 @@
 #ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
 #define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
 
-#include "types.h"
-
 int hna_local_init(struct bat_priv *bat_priv);
 void hna_local_add(struct net_device *soft_iface, uint8_t *addr);
 void hna_local_remove(struct bat_priv *bat_priv,
index bf3f6f5a12c4466c0ca86140650fe3d195d450ec..83445cf0cc9f35472dbd6c331f3f823b38f41f92 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -33,7 +33,7 @@
         sizeof(struct bcast_packet))))
 
 
-struct batman_if {
+struct hard_iface {
        struct list_head list;
        int16_t if_num;
        char if_status;
@@ -43,7 +43,7 @@ struct batman_if {
        unsigned char *packet_buff;
        int packet_len;
        struct kobject *hardif_obj;
-       struct kref refcount;
+       atomic_t refcount;
        struct packet_type batman_adv_ptype;
        struct net_device *soft_iface;
        struct rcu_head rcu;
@@ -70,8 +70,6 @@ struct orig_node {
        struct neigh_node *router;
        unsigned long *bcast_own;
        uint8_t *bcast_own_sum;
-       uint8_t tq_own;
-       int tq_asym_penalty;
        unsigned long last_valid;
        unsigned long bcast_seqno_reset;
        unsigned long batman_seqno_reset;
@@ -83,20 +81,28 @@ struct orig_node {
        uint8_t last_ttl;
        unsigned long bcast_bits[NUM_WORDS];
        uint32_t last_bcast_seqno;
-       struct list_head neigh_list;
+       struct hlist_head neigh_list;
        struct list_head frag_list;
+       spinlock_t neigh_list_lock; /* protects neighbor list */
+       atomic_t refcount;
+       struct rcu_head rcu;
+       struct hlist_node hash_entry;
+       struct bat_priv *bat_priv;
        unsigned long last_frag_packet;
-       struct {
-               uint8_t candidates;
-               struct neigh_node *selected;
-       } bond;
+       spinlock_t ogm_cnt_lock; /* protects: bcast_own, bcast_own_sum,
+                                 * neigh_node->real_bits,
+                                 * neigh_node->real_packet_count */
+       spinlock_t bcast_seqno_lock; /* protects bcast_bits,
+                                     *  last_bcast_seqno */
+       atomic_t bond_candidates;
+       struct list_head bond_list;
 };
 
 struct gw_node {
        struct hlist_node list;
        struct orig_node *orig_node;
        unsigned long deleted;
-       struct kref refcount;
+       atomic_t refcount;
        struct rcu_head rcu;
 };
 
@@ -105,18 +111,20 @@ struct gw_node {
  *     @last_valid: when last packet via this neighbor was received
  */
 struct neigh_node {
-       struct list_head list;
+       struct hlist_node list;
        uint8_t addr[ETH_ALEN];
        uint8_t real_packet_count;
        uint8_t tq_recv[TQ_GLOBAL_WINDOW_SIZE];
        uint8_t tq_index;
        uint8_t tq_avg;
        uint8_t last_ttl;
-       struct neigh_node *next_bond_candidate;
+       struct list_head bonding_list;
        unsigned long last_valid;
        unsigned long real_bits[NUM_WORDS];
+       atomic_t refcount;
+       struct rcu_head rcu;
        struct orig_node *orig_node;
-       struct batman_if *if_incoming;
+       struct hard_iface *if_incoming;
 };
 
 
@@ -140,7 +148,7 @@ struct bat_priv {
        struct hlist_head softif_neigh_list;
        struct softif_neigh *softif_neigh;
        struct debug_log *debug_log;
-       struct batman_if *primary_if;
+       struct hard_iface *primary_if;
        struct kobject *mesh_obj;
        struct dentry *debug_dir;
        struct hlist_head forw_bat_list;
@@ -151,12 +159,11 @@ struct bat_priv {
        struct hashtable_t *hna_local_hash;
        struct hashtable_t *hna_global_hash;
        struct hashtable_t *vis_hash;
-       spinlock_t orig_hash_lock; /* protects orig_hash */
        spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
        spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */
        spinlock_t hna_lhash_lock; /* protects hna_local_hash */
        spinlock_t hna_ghash_lock; /* protects hna_global_hash */
-       spinlock_t gw_list_lock; /* protects gw_list */
+       spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
        spinlock_t vis_hash_lock; /* protects vis_hash */
        spinlock_t vis_list_lock; /* protects vis_info::recv_list */
        spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */
@@ -165,7 +172,7 @@ struct bat_priv {
        struct delayed_work hna_work;
        struct delayed_work orig_work;
        struct delayed_work vis_work;
-       struct gw_node *curr_gw;
+       struct gw_node __rcu *curr_gw;  /* rcu protected pointer */
        struct vis_info *my_vis_info;
 };
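
Marking curr_gw as __rcu documents that readers dereference it under rcu_read_lock() while writers replace it under gw_list_lock (whose comment above was widened accordingly). A hedged fragment showing how such an annotated pointer is usually handled with the standard RCU accessors; this is the generic kernel idiom, not a quote of the batman-adv gateway code, and gw_node, old_gw, new_gw and use_gateway() are placeholders:

/* reader side: only valid inside an RCU read-side critical section */
rcu_read_lock();
gw_node = rcu_dereference(bat_priv->curr_gw);
if (gw_node)
	use_gateway(gw_node);   /* placeholder for the actual reader work */
rcu_read_unlock();

/* writer side: publish a new gateway while holding the protecting lock */
spin_lock_bh(&bat_priv->gw_list_lock);
old_gw = rcu_dereference_protected(bat_priv->curr_gw,
				   lockdep_is_held(&bat_priv->gw_list_lock));
rcu_assign_pointer(bat_priv->curr_gw, new_gw);
spin_unlock_bh(&bat_priv->gw_list_lock);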
 
@@ -188,11 +195,13 @@ struct hna_local_entry {
        uint8_t addr[ETH_ALEN];
        unsigned long last_seen;
        char never_purge;
+       struct hlist_node hash_entry;
 };
 
 struct hna_global_entry {
        uint8_t addr[ETH_ALEN];
        struct orig_node *orig_node;
+       struct hlist_node hash_entry;
 };
 
 /**
@@ -208,7 +217,7 @@ struct forw_packet {
        uint32_t direct_link_flags;
        uint8_t num_packets;
        struct delayed_work delayed_work;
-       struct batman_if *if_incoming;
+       struct hard_iface *if_incoming;
 };
 
 /* While scanning for vis-entries of a particular vis-originator
@@ -242,6 +251,7 @@ struct vis_info {
                             * from.  we should not reply to them. */
        struct list_head send_list;
        struct kref refcount;
+       struct hlist_node hash_entry;
        struct bat_priv *bat_priv;
        /* this packet might be part of the vis send queue. */
        struct sk_buff *skb_packet;
@@ -264,7 +274,7 @@ struct softif_neigh {
        uint8_t addr[ETH_ALEN];
        unsigned long last_seen;
        short vid;
-       struct kref refcount;
+       atomic_t refcount;
        struct rcu_head rcu;
 };
 
index d1a611322549d925ac9123bd02e66367cd6d1da6..19f84bd443af3fa3d13ac5c0e6b4f2bab3aca092 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
  *
  * Andreas Langer
  *
@@ -39,8 +39,8 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
                (struct unicast_frag_packet *)skb->data;
        struct sk_buff *tmp_skb;
        struct unicast_packet *unicast_packet;
-       int hdr_len = sizeof(struct unicast_packet),
-           uni_diff = sizeof(struct unicast_frag_packet) - hdr_len;
+       int hdr_len = sizeof(struct unicast_packet);
+       int uni_diff = sizeof(struct unicast_frag_packet) - hdr_len;
 
        /* set skb to the first part and tmp_skb to the second part */
        if (up->flags & UNI_FRAG_HEAD) {
@@ -183,15 +183,10 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
                (struct unicast_frag_packet *)skb->data;
 
        *new_skb = NULL;
-       spin_lock_bh(&bat_priv->orig_hash_lock);
-       orig_node = ((struct orig_node *)
-                   hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
-                             unicast_packet->orig));
 
-       if (!orig_node) {
-               pr_debug("couldn't find originator in orig_hash\n");
+       orig_node = orig_hash_find(bat_priv, unicast_packet->orig);
+       if (!orig_node)
                goto out;
-       }
 
        orig_node->last_frag_packet = jiffies;
 
@@ -215,21 +210,24 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
        /* if not, merge failed */
        if (*new_skb)
                ret = NET_RX_SUCCESS;
-out:
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
 
+out:
+       if (orig_node)
+               orig_node_free_ref(orig_node);
        return ret;
 }
 
 int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
-                 struct batman_if *batman_if, uint8_t dstaddr[])
+                 struct hard_iface *hard_iface, uint8_t dstaddr[])
 {
        struct unicast_packet tmp_uc, *unicast_packet;
        struct sk_buff *frag_skb;
        struct unicast_frag_packet *frag1, *frag2;
        int uc_hdr_len = sizeof(struct unicast_packet);
        int ucf_hdr_len = sizeof(struct unicast_frag_packet);
-       int data_len = skb->len;
+       int data_len = skb->len - uc_hdr_len;
+       int large_tail = 0;
+       uint16_t seqno;
 
        if (!bat_priv->primary_if)
                goto dropped;
@@ -237,10 +235,11 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
        frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
        if (!frag_skb)
                goto dropped;
+       skb_reserve(frag_skb, ucf_hdr_len);
 
        unicast_packet = (struct unicast_packet *) skb->data;
        memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
-       skb_split(skb, frag_skb, data_len / 2);
+       skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);
 
        if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
            my_skb_head_push(frag_skb, ucf_hdr_len) < 0)
@@ -258,16 +257,18 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
        memcpy(frag1->orig, bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
        memcpy(frag2, frag1, sizeof(struct unicast_frag_packet));
 
-       frag1->flags |= UNI_FRAG_HEAD;
-       frag2->flags &= ~UNI_FRAG_HEAD;
+       if (data_len & 1)
+               large_tail = UNI_FRAG_LARGETAIL;
+
+       frag1->flags = UNI_FRAG_HEAD | large_tail;
+       frag2->flags = large_tail;
 
-       frag1->seqno = htons((uint16_t)atomic_inc_return(
-                            &batman_if->frag_seqno));
-       frag2->seqno = htons((uint16_t)atomic_inc_return(
-                            &batman_if->frag_seqno));
+       seqno = atomic_add_return(2, &hard_iface->frag_seqno);
+       frag1->seqno = htons(seqno - 1);
+       frag2->seqno = htons(seqno);
 
-       send_skb_packet(skb, batman_if, dstaddr);
-       send_skb_packet(frag_skb, batman_if, dstaddr);
+       send_skb_packet(skb, hard_iface, dstaddr);
+       send_skb_packet(frag_skb, hard_iface, dstaddr);
        return NET_RX_SUCCESS;
 
 drop_frag:
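
Three details in the fragmentation rework above are easy to miss: data_len now excludes the unicast header, a single atomic_add_return(2, ...) hands both fragments consecutive sequence numbers, and an odd payload length sets UNI_FRAG_LARGETAIL on both halves so the receiver knows the tail is one byte longer than the head. A small runnable sketch of that bookkeeping (flag values and lengths are made up for illustration; network byte-order conversion is omitted):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define FRAG_HEAD      0x01   /* illustrative flag values, not packet.h's */
#define FRAG_LARGETAIL 0x02

int main(void)
{
	atomic_uint frag_seqno;
	int data_len = 1401;                 /* payload without unicast header */
	uint16_t seqno, flags1 = FRAG_HEAD, flags2 = 0;

	atomic_init(&frag_seqno, 100);       /* per-interface counter */

	/* one atomic add of 2 yields a consecutive pair of sequence numbers */
	seqno = (uint16_t)(atomic_fetch_add(&frag_seqno, 2) + 2);

	/* an odd payload leaves the tail one byte larger after skb_split() */
	if (data_len & 1) {
		flags1 |= FRAG_LARGETAIL;
		flags2 |= FRAG_LARGETAIL;
	}

	printf("frag1: seqno=%u flags=0x%x payload=%d\n",
	       (unsigned)(seqno - 1), (unsigned)flags1, data_len / 2);
	printf("frag2: seqno=%u flags=0x%x payload=%d\n",
	       (unsigned)seqno, (unsigned)flags2, data_len - data_len / 2);
	return 0;
}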
@@ -282,44 +283,36 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
        struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
        struct unicast_packet *unicast_packet;
        struct orig_node *orig_node;
-       struct batman_if *batman_if;
-       struct neigh_node *router;
+       struct neigh_node *neigh_node;
        int data_len = skb->len;
-       uint8_t dstaddr[6];
-
-       spin_lock_bh(&bat_priv->orig_hash_lock);
+       int ret = 1;
 
        /* get routing information */
-       if (is_multicast_ether_addr(ethhdr->h_dest))
+       if (is_multicast_ether_addr(ethhdr->h_dest)) {
                orig_node = (struct orig_node *)gw_get_selected(bat_priv);
-       else
-               orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
-                                                          compare_orig,
-                                                          choose_orig,
-                                                          ethhdr->h_dest));
-
-       /* check for hna host */
-       if (!orig_node)
-               orig_node = transtable_search(bat_priv, ethhdr->h_dest);
-
-       router = find_router(bat_priv, orig_node, NULL);
-
-       if (!router)
-               goto unlock;
+               if (orig_node)
+                       goto find_router;
+       }
 
-       /* don't lock while sending the packets ... we therefore
-               * copy the required data before sending */
+       /* check for hna host - increases orig_node refcount */
+       orig_node = transtable_search(bat_priv, ethhdr->h_dest);
 
-       batman_if = router->if_incoming;
-       memcpy(dstaddr, router->addr, ETH_ALEN);
+find_router:
+       /**
+        * find_router():
+        *  - if orig_node is NULL it returns NULL
+        *  - increases the neigh_node's refcount if found.
+        */
+       neigh_node = find_router(bat_priv, orig_node, NULL);
 
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+       if (!neigh_node)
+               goto out;
 
-       if (batman_if->if_status != IF_ACTIVE)
-               goto dropped;
+       if (neigh_node->if_incoming->if_status != IF_ACTIVE)
+               goto out;
 
        if (my_skb_head_push(skb, sizeof(struct unicast_packet)) < 0)
-               goto dropped;
+               goto out;
 
        unicast_packet = (struct unicast_packet *)skb->data;
 
@@ -333,18 +326,24 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
 
        if (atomic_read(&bat_priv->fragmentation) &&
            data_len + sizeof(struct unicast_packet) >
-           batman_if->net_dev->mtu) {
+                               neigh_node->if_incoming->net_dev->mtu) {
                /* send frag skb decreases ttl */
                unicast_packet->ttl++;
-               return frag_send_skb(skb, bat_priv, batman_if,
-                                    dstaddr);
+               ret = frag_send_skb(skb, bat_priv,
+                                   neigh_node->if_incoming, neigh_node->addr);
+               goto out;
        }
-       send_skb_packet(skb, batman_if, dstaddr);
-       return 0;
 
-unlock:
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
-dropped:
-       kfree_skb(skb);
-       return 1;
+       send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+       ret = 0;
+       goto out;
+
+out:
+       if (neigh_node)
+               neigh_node_free_ref(neigh_node);
+       if (orig_node)
+               orig_node_free_ref(orig_node);
+       if (ret == 1)
+               kfree_skb(skb);
+       return ret;
 }
index e32b7867a9a4a08828cb534b7e7e76b7777da74b..16ad7a9242b54f44d2c98ca8f75d893c6de79929 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
  *
  * Andreas Langer
  *
@@ -22,6 +22,8 @@
 #ifndef _NET_BATMAN_ADV_UNICAST_H_
 #define _NET_BATMAN_ADV_UNICAST_H_
 
+#include "packet.h"
+
 #define FRAG_TIMEOUT 10000     /* purge frag list entries after time in ms */
 #define FRAG_BUFFER_SIZE 6     /* number of list elements in buffer */
 
@@ -30,6 +32,27 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
 void frag_list_free(struct list_head *head);
 int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv);
 int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
-                 struct batman_if *batman_if, uint8_t dstaddr[]);
+                 struct hard_iface *hard_iface, uint8_t dstaddr[]);
+
+static inline int frag_can_reassemble(struct sk_buff *skb, int mtu)
+{
+       struct unicast_frag_packet *unicast_packet;
+       int uneven_correction = 0;
+       unsigned int merged_size;
+
+       unicast_packet = (struct unicast_frag_packet *)skb->data;
+
+       if (unicast_packet->flags & UNI_FRAG_LARGETAIL) {
+               if (unicast_packet->flags & UNI_FRAG_HEAD)
+                       uneven_correction = 1;
+               else
+                       uneven_correction = -1;
+       }
+
+       merged_size = (skb->len - sizeof(struct unicast_frag_packet)) * 2;
+       merged_size += sizeof(struct unicast_packet) + uneven_correction;
+
+       return merged_size <= mtu;
+}
 
 #endif /* _NET_BATMAN_ADV_UNICAST_H_ */
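
frag_can_reassemble() above predicts whether two fragments still fit the MTU once merged: the per-fragment payload is doubled, the smaller unicast header replaces the fragment header, and one byte is added or removed when UNI_FRAG_LARGETAIL marks an uneven split. A standalone check of that arithmetic with illustrative header sizes (the real ones come from packet.h):

#include <stdio.h>

/* Illustrative header sizes only; the real ones come from packet.h. */
#define FRAG_HDR_LEN    30
#define UNICAST_HDR_LEN 28

/* Mirrors frag_can_reassemble(): skb_len is one fragment incl. its header. */
static int can_reassemble(unsigned int skb_len, int largetail, int head,
                          unsigned int mtu)
{
        int uneven_correction = 0;
        unsigned int merged_size;

        if (largetail)
                uneven_correction = head ? 1 : -1;

        merged_size = (skb_len - FRAG_HDR_LEN) * 2;
        merged_size += UNICAST_HDR_LEN + uneven_correction;

        return merged_size <= mtu;
}

int main(void)
{
        /* evenly split packet: two 700-byte fragments against a 1500 MTU */
        printf("even split:  %d\n", can_reassemble(700, 0, 0, 1500));
        /* uneven split: the larger head fragment no longer fits when merged */
        printf("uneven head: %d\n", can_reassemble(770, 1, 1, 1500));
        return 0;
}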
index de1022cacaf7ee570a08b2d584862b7e11d5bd19..f90212f420828f9b59ad8924b021ef23644da66b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2008-2011 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
  *
@@ -68,15 +68,16 @@ static void free_info(struct kref *ref)
 }
 
 /* Compare two vis packets, used by the hashing algorithm */
-static int vis_info_cmp(void *data1, void *data2)
+static int vis_info_cmp(struct hlist_node *node, void *data2)
 {
        struct vis_info *d1, *d2;
        struct vis_packet *p1, *p2;
-       d1 = data1;
+
+       d1 = container_of(node, struct vis_info, hash_entry);
        d2 = data2;
        p1 = (struct vis_packet *)d1->skb_packet->data;
        p2 = (struct vis_packet *)d2->skb_packet->data;
-       return compare_orig(p1->vis_orig, p2->vis_orig);
+       return compare_eth(p1->vis_orig, p2->vis_orig);
 }
 
 /* hash function to choose an entry in a hash table of given size */
@@ -104,6 +105,34 @@ static int vis_info_choose(void *data, int size)
        return hash % size;
 }
 
+static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
+                                     void *data)
+{
+       struct hashtable_t *hash = bat_priv->vis_hash;
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct vis_info *vis_info, *vis_info_tmp = NULL;
+       int index;
+
+       if (!hash)
+               return NULL;
+
+       index = vis_info_choose(data, hash->size);
+       head = &hash->table[index];
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) {
+               if (!vis_info_cmp(node, data))
+                       continue;
+
+               vis_info_tmp = vis_info;
+               break;
+       }
+       rcu_read_unlock();
+
+       return vis_info_tmp;
+}
+
 /* insert interface to the list of interfaces of one originator, if it
  * does not already exist in the list */
 static void vis_data_insert_interface(const uint8_t *interface,
@@ -114,7 +143,7 @@ static void vis_data_insert_interface(const uint8_t *interface,
        struct hlist_node *pos;
 
        hlist_for_each_entry(entry, pos, if_list, list) {
-               if (compare_orig(entry->addr, (void *)interface))
+               if (compare_eth(entry->addr, (void *)interface))
                        return;
        }
 
@@ -166,7 +195,7 @@ static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
        /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
        if (primary && entry->quality == 0)
                return sprintf(buff, "HNA %pM, ", entry->dest);
-       else if (compare_orig(entry->src, src))
+       else if (compare_eth(entry->src, src))
                return sprintf(buff, "TQ %pM %d, ", entry->dest,
                               entry->quality);
 
@@ -175,9 +204,8 @@ static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
 
 int vis_seq_print_text(struct seq_file *seq, void *offset)
 {
-       struct hlist_node *walk;
+       struct hlist_node *node;
        struct hlist_head *head;
-       struct element_t *bucket;
        struct vis_info *info;
        struct vis_packet *packet;
        struct vis_info_entry *entries;
@@ -203,8 +231,8 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       info = bucket->data;
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(info, node, head, hash_entry) {
                        packet = (struct vis_packet *)info->skb_packet->data;
                        entries = (struct vis_info_entry *)
                                ((char *)packet + sizeof(struct vis_packet));
@@ -213,7 +241,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
                                if (entries[j].quality == 0)
                                        continue;
                                compare =
-                                compare_orig(entries[j].src, packet->vis_orig);
+                                compare_eth(entries[j].src, packet->vis_orig);
                                vis_data_insert_interface(entries[j].src,
                                                          &vis_if_list,
                                                          compare);
@@ -223,7 +251,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
                                buf_size += 18 + 26 * packet->entries;
 
                                /* add primary/secondary records */
-                               if (compare_orig(entry->addr, packet->vis_orig))
+                               if (compare_eth(entry->addr, packet->vis_orig))
                                        buf_size +=
                                          vis_data_count_prim_sec(&vis_if_list);
 
@@ -236,6 +264,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
                                kfree(entry);
                        }
                }
+               rcu_read_unlock();
        }
 
        buff = kmalloc(buf_size, GFP_ATOMIC);
@@ -249,8 +278,8 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       info = bucket->data;
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(info, node, head, hash_entry) {
                        packet = (struct vis_packet *)info->skb_packet->data;
                        entries = (struct vis_info_entry *)
                                ((char *)packet + sizeof(struct vis_packet));
@@ -259,7 +288,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
                                if (entries[j].quality == 0)
                                        continue;
                                compare =
-                                compare_orig(entries[j].src, packet->vis_orig);
+                                compare_eth(entries[j].src, packet->vis_orig);
                                vis_data_insert_interface(entries[j].src,
                                                          &vis_if_list,
                                                          compare);
@@ -277,7 +306,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
                                                        entry->primary);
 
                                /* add primary/secondary records */
-                               if (compare_orig(entry->addr, packet->vis_orig))
+                               if (compare_eth(entry->addr, packet->vis_orig))
                                        buff_pos +=
                                         vis_data_read_prim_sec(buff + buff_pos,
                                                                &vis_if_list);
@@ -291,6 +320,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
                                kfree(entry);
                        }
                }
+               rcu_read_unlock();
        }
 
        spin_unlock_bh(&bat_priv->vis_hash_lock);
@@ -345,7 +375,7 @@ static int recv_list_is_in(struct bat_priv *bat_priv,
 
        spin_lock_bh(&bat_priv->vis_list_lock);
        list_for_each_entry(entry, recv_list, list) {
-               if (memcmp(entry->mac, mac, ETH_ALEN) == 0) {
+               if (compare_eth(entry->mac, mac)) {
                        spin_unlock_bh(&bat_priv->vis_list_lock);
                        return 1;
                }
@@ -381,8 +411,7 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
                                                     sizeof(struct vis_packet));
 
        memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
-       old_info = hash_find(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
-                            &search_elem);
+       old_info = vis_hash_find(bat_priv, &search_elem);
        kfree_skb(search_elem.skb_packet);
 
        if (old_info) {
@@ -442,7 +471,7 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
 
        /* try to add it */
        hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
-                             info);
+                             info, &info->hash_entry);
        if (hash_added < 0) {
                /* did not work (for some reason) */
                kref_put(&info->refcount, free_info);
@@ -529,9 +558,8 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
                                struct vis_info *info)
 {
        struct hashtable_t *hash = bat_priv->orig_hash;
-       struct hlist_node *walk;
+       struct hlist_node *node;
        struct hlist_head *head;
-       struct element_t *bucket;
        struct orig_node *orig_node;
        struct vis_packet *packet;
        int best_tq = -1, i;
@@ -541,16 +569,17 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       orig_node = bucket->data;
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
                        if ((orig_node) && (orig_node->router) &&
-                       (orig_node->flags & VIS_SERVER) &&
-                       (orig_node->router->tq_avg > best_tq)) {
+                           (orig_node->flags & VIS_SERVER) &&
+                           (orig_node->router->tq_avg > best_tq)) {
                                best_tq = orig_node->router->tq_avg;
                                memcpy(packet->target_orig, orig_node->orig,
                                       ETH_ALEN);
                        }
                }
+               rcu_read_unlock();
        }
 
        return best_tq;
@@ -573,9 +602,8 @@ static bool vis_packet_full(struct vis_info *info)
 static int generate_vis_packet(struct bat_priv *bat_priv)
 {
        struct hashtable_t *hash = bat_priv->orig_hash;
-       struct hlist_node *walk;
+       struct hlist_node *node;
        struct hlist_head *head;
-       struct element_t *bucket;
        struct orig_node *orig_node;
        struct neigh_node *neigh_node;
        struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
@@ -587,7 +615,6 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
        info->first_seen = jiffies;
        packet->vis_type = atomic_read(&bat_priv->vis_mode);
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
        memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
        packet->ttl = TTL;
        packet->seqno = htonl(ntohl(packet->seqno) + 1);
@@ -597,23 +624,21 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
        if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
                best_tq = find_best_vis_server(bat_priv, info);
 
-               if (best_tq < 0) {
-                       spin_unlock_bh(&bat_priv->orig_hash_lock);
+               if (best_tq < 0)
                        return -1;
-               }
        }
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       orig_node = bucket->data;
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
                        neigh_node = orig_node->router;
 
                        if (!neigh_node)
                                continue;
 
-                       if (!compare_orig(neigh_node->addr, orig_node->orig))
+                       if (!compare_eth(neigh_node->addr, orig_node->orig))
                                continue;
 
                        if (neigh_node->if_incoming->if_status != IF_ACTIVE)
@@ -632,23 +657,19 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
                        entry->quality = neigh_node->tq_avg;
                        packet->entries++;
 
-                       if (vis_packet_full(info)) {
-                               spin_unlock_bh(&bat_priv->orig_hash_lock);
-                               return 0;
-                       }
+                       if (vis_packet_full(info))
+                               goto unlock;
                }
+               rcu_read_unlock();
        }
 
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
-
        hash = bat_priv->hna_local_hash;
 
        spin_lock_bh(&bat_priv->hna_lhash_lock);
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       hna_local_entry = bucket->data;
+               hlist_for_each_entry(hna_local_entry, node, head, hash_entry) {
                        entry = (struct vis_info_entry *)
                                        skb_put(info->skb_packet,
                                                sizeof(*entry));
@@ -666,6 +687,10 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
 
        spin_unlock_bh(&bat_priv->hna_lhash_lock);
        return 0;
+
+unlock:
+       rcu_read_unlock();
+       return 0;
 }
 
 /* free old vis packets. Must be called with this vis_hash_lock
@@ -674,25 +699,22 @@ static void purge_vis_packets(struct bat_priv *bat_priv)
 {
        int i;
        struct hashtable_t *hash = bat_priv->vis_hash;
-       struct hlist_node *walk, *safe;
+       struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
-       struct element_t *bucket;
        struct vis_info *info;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
-                       info = bucket->data;
-
+               hlist_for_each_entry_safe(info, node, node_tmp,
+                                         head, hash_entry) {
                        /* never purge own data. */
                        if (info == bat_priv->my_vis_info)
                                continue;
 
                        if (time_after(jiffies,
                                       info->first_seen + VIS_TIMEOUT * HZ)) {
-                               hlist_del(walk);
-                               kfree(bucket);
+                               hlist_del(node);
                                send_list_del(info);
                                kref_put(&info->refcount, free_info);
                        }
@@ -704,27 +726,24 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
                                 struct vis_info *info)
 {
        struct hashtable_t *hash = bat_priv->orig_hash;
-       struct hlist_node *walk;
+       struct hlist_node *node;
        struct hlist_head *head;
-       struct element_t *bucket;
        struct orig_node *orig_node;
        struct vis_packet *packet;
        struct sk_buff *skb;
-       struct batman_if *batman_if;
+       struct hard_iface *hard_iface;
        uint8_t dstaddr[ETH_ALEN];
        int i;
 
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
        packet = (struct vis_packet *)info->skb_packet->data;
 
        /* send to all routers in range. */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry(bucket, walk, head, hlist) {
-                       orig_node = bucket->data;
-
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
                        /* if it's a vis server and reachable, send it. */
                        if ((!orig_node) || (!orig_node->router))
                                continue;
@@ -737,54 +756,61 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
                                continue;
 
                        memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
-                       batman_if = orig_node->router->if_incoming;
+                       hard_iface = orig_node->router->if_incoming;
                        memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-                       spin_unlock_bh(&bat_priv->orig_hash_lock);
 
                        skb = skb_clone(info->skb_packet, GFP_ATOMIC);
                        if (skb)
-                               send_skb_packet(skb, batman_if, dstaddr);
+                               send_skb_packet(skb, hard_iface, dstaddr);
 
-                       spin_lock_bh(&bat_priv->orig_hash_lock);
                }
-
+               rcu_read_unlock();
        }
-
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
 }
 
 static void unicast_vis_packet(struct bat_priv *bat_priv,
                               struct vis_info *info)
 {
        struct orig_node *orig_node;
+       struct neigh_node *neigh_node = NULL;
        struct sk_buff *skb;
        struct vis_packet *packet;
-       struct batman_if *batman_if;
-       uint8_t dstaddr[ETH_ALEN];
 
-       spin_lock_bh(&bat_priv->orig_hash_lock);
        packet = (struct vis_packet *)info->skb_packet->data;
-       orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
-                                                  compare_orig, choose_orig,
-                                                  packet->target_orig));
 
-       if ((!orig_node) || (!orig_node->router))
-               goto out;
+       rcu_read_lock();
+       orig_node = orig_hash_find(bat_priv, packet->target_orig);
 
-       /* don't lock while sending the packets ... we therefore
-        * copy the required data before sending */
-       batman_if = orig_node->router->if_incoming;
-       memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+       if (!orig_node)
+               goto unlock;
+
+       neigh_node = orig_node->router;
+
+       if (!neigh_node)
+               goto unlock;
+
+       if (!atomic_inc_not_zero(&neigh_node->refcount)) {
+               neigh_node = NULL;
+               goto unlock;
+       }
+
+       rcu_read_unlock();
 
        skb = skb_clone(info->skb_packet, GFP_ATOMIC);
        if (skb)
-               send_skb_packet(skb, batman_if, dstaddr);
+               send_skb_packet(skb, neigh_node->if_incoming,
+                               neigh_node->addr);
 
-       return;
+       goto out;
 
+unlock:
+       rcu_read_unlock();
 out:
-       spin_unlock_bh(&bat_priv->orig_hash_lock);
+       if (neigh_node)
+               neigh_node_free_ref(neigh_node);
+       if (orig_node)
+               orig_node_free_ref(orig_node);
+       return;
 }
 
 /* only send one vis packet. called from send_vis_packets() */
@@ -896,7 +922,8 @@ int vis_init(struct bat_priv *bat_priv)
        INIT_LIST_HEAD(&bat_priv->vis_send_list);
 
        hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
-                             bat_priv->my_vis_info);
+                             bat_priv->my_vis_info,
+                             &bat_priv->my_vis_info->hash_entry);
        if (hash_added < 0) {
                pr_err("Can't add own vis packet into hash\n");
                /* not in hash, need to remove it manually. */
@@ -918,10 +945,11 @@ err:
 }
 
 /* Decrease the reference count on a hash item info */
-static void free_info_ref(void *data, void *arg)
+static void free_info_ref(struct hlist_node *node, void *arg)
 {
-       struct vis_info *info = data;
+       struct vis_info *info;
 
+       info = container_of(node, struct vis_info, hash_entry);
        send_list_del(info);
        kref_put(&info->refcount, free_info);
 }
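
The vis.c conversion above removes the element_t buckets: struct vis_info now embeds its own hash_entry, so a lookup such as vis_hash_find() walks the bucket under rcu_read_lock() and recovers the containing object with container_of(). A compact plain-C sketch of the embedded-node/container_of idea (singly linked bucket, no RCU, invented types):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct hlist_node { struct hlist_node *next; };

struct vis_entry {
        unsigned char orig[6];
        struct hlist_node hash_entry;   /* embedded node, no separate bucket */
};

static struct vis_entry *bucket_find(struct hlist_node *head,
                                     const unsigned char *orig)
{
        struct hlist_node *node;

        for (node = head; node; node = node->next) {
                struct vis_entry *e =
                        container_of(node, struct vis_entry, hash_entry);

                if (memcmp(e->orig, orig, 6) == 0)
                        return e;
        }
        return NULL;
}

int main(void)
{
        struct vis_entry a = { .orig = { 0x02, 0, 0, 0, 0, 0x01 } };
        struct vis_entry b = { .orig = { 0x02, 0, 0, 0, 0, 0x02 } };

        a.hash_entry.next = &b.hash_entry;      /* two-entry bucket */
        b.hash_entry.next = NULL;

        printf("found b at %p (b is %p)\n",
               (void *)bucket_find(&a.hash_entry, b.orig), (void *)&b);
        return 0;
}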
index 2c3b33089a9bf24b4b00d2d718f828c7806dfb09..31b820d07f232ac474911e2cd159a54fed73612c 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2008-2011 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
index ed371684c133d184e9d68f87b85ea171f6107a13..6ae5ec50858744daed011d49041e89030e921d60 100644 (file)
@@ -27,31 +27,27 @@ menuconfig BT
          compile it as module (bluetooth).
 
          To use Linux Bluetooth subsystem, you will need several user-space
-         utilities like hciconfig and hcid.  These utilities and updates to
-         Bluetooth kernel modules are provided in the BlueZ packages.
-         For more information, see <http://www.bluez.org/>.
+         utilities like hciconfig and bluetoothd.  These utilities and updates
+         to Bluetooth kernel modules are provided in the BlueZ packages.  For
+         more information, see <http://www.bluez.org/>.
+
+if BT != n
 
 config BT_L2CAP
-       tristate "L2CAP protocol support"
-       depends on BT
+       bool "L2CAP protocol support"
        select CRC16
        help
          L2CAP (Logical Link Control and Adaptation Protocol) provides
          connection oriented and connection-less data transport.  L2CAP
          support is required for most Bluetooth applications.
 
-         Say Y here to compile L2CAP support into the kernel or say M to
-         compile it as module (l2cap).
-
 config BT_SCO
-       tristate "SCO links support"
-       depends on BT
+       bool "SCO links support"
        help
          SCO link provides voice transport over Bluetooth.  SCO support is
          required for voice applications like Headset and Audio.
 
-         Say Y here to compile SCO support into the kernel or say M to
-         compile it as module (sco).
+endif
 
 source "net/bluetooth/rfcomm/Kconfig"
 
index 250f954f0213eb0cbb8ac9042d9ee558f49f414d..f04fe9a9d6342bbded032e652c80115d998ab959 100644 (file)
@@ -3,11 +3,11 @@
 #
 
 obj-$(CONFIG_BT)       += bluetooth.o
-obj-$(CONFIG_BT_L2CAP) += l2cap.o
-obj-$(CONFIG_BT_SCO)   += sco.o
 obj-$(CONFIG_BT_RFCOMM)        += rfcomm/
 obj-$(CONFIG_BT_BNEP)  += bnep/
 obj-$(CONFIG_BT_CMTP)  += cmtp/
 obj-$(CONFIG_BT_HIDP)  += hidp/
 
 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o
+bluetooth-$(CONFIG_BT_L2CAP)   += l2cap_core.o l2cap_sock.o
+bluetooth-$(CONFIG_BT_SCO)     += sco.o
index c4cf3f595004d3c549e3bfb66e2f138732130c6c..88af9eb9aa48b0239463f155adee871047984fe2 100644 (file)
@@ -40,7 +40,7 @@
 
 #include <net/bluetooth/bluetooth.h>
 
-#define VERSION "2.15"
+#define VERSION "2.16"
 
 /* Bluetooth sockets */
 #define BT_MAX_PROTO   8
@@ -199,14 +199,15 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
 
        BT_DBG("parent %p", parent);
 
+       local_bh_disable();
        list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
                sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
 
-               lock_sock(sk);
+               bh_lock_sock(sk);
 
                /* FIXME: Is this check still needed */
                if (sk->sk_state == BT_CLOSED) {
-                       release_sock(sk);
+                       bh_unlock_sock(sk);
                        bt_accept_unlink(sk);
                        continue;
                }
@@ -216,12 +217,16 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
                        bt_accept_unlink(sk);
                        if (newsock)
                                sock_graft(sk, newsock);
-                       release_sock(sk);
+
+                       bh_unlock_sock(sk);
+                       local_bh_enable();
                        return sk;
                }
 
-               release_sock(sk);
+               bh_unlock_sock(sk);
        }
+       local_bh_enable();
+
        return NULL;
 }
 EXPORT_SYMBOL(bt_accept_dequeue);
@@ -240,7 +245,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;
 
-       if (!(skb = skb_recv_datagram(sk, flags, noblock, &err))) {
+       skb = skb_recv_datagram(sk, flags, noblock, &err);
+       if (!skb) {
                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        return 0;
                return err;
@@ -323,7 +329,8 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                        if (copied >= target)
                                break;
 
-                       if ((err = sock_error(sk)) != 0)
+                       err = sock_error(sk);
+                       if (err)
                                break;
                        if (sk->sk_shutdown & RCV_SHUTDOWN)
                                break;
@@ -390,7 +397,7 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
        return 0;
 }
 
-unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait)
+unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
        struct sock *sk = sock->sk;
        unsigned int mask = 0;
@@ -538,13 +545,41 @@ static int __init bt_init(void)
 
        BT_INFO("HCI device and connection manager initialized");
 
-       hci_sock_init();
+       err = hci_sock_init();
+       if (err < 0)
+               goto error;
+
+       err = l2cap_init();
+       if (err < 0) {
+               hci_sock_cleanup();
+               goto sock_err;
+       }
+
+       err = sco_init();
+       if (err < 0) {
+               l2cap_exit();
+               goto sock_err;
+       }
 
        return 0;
+
+sock_err:
+       hci_sock_cleanup();
+
+error:
+       sock_unregister(PF_BLUETOOTH);
+       bt_sysfs_cleanup();
+
+       return err;
 }
 
 static void __exit bt_exit(void)
 {
+
+       sco_exit();
+
+       l2cap_exit();
+
        hci_sock_cleanup();
 
        sock_unregister(PF_BLUETOOTH);
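
bt_init() above now brings up the HCI socket layer, L2CAP and SCO in sequence and unwinds the earlier stages when a later one fails. A small standalone illustration of that staged-init/goto-unwind shape, with stand-in init and exit functions (hypothetical, not the Bluetooth API):

#include <stdio.h>

static int init_a(void) { puts("A up");   return 0; }
static int init_b(void) { puts("B up");   return 0; }
static int init_c(void) { puts("C fail"); return -1; }  /* simulate failure */

static void exit_a(void) { puts("A down"); }
static void exit_b(void) { puts("B down"); }

static int subsys_init(void)
{
        int err;

        err = init_a();
        if (err < 0)
                goto err_a;

        err = init_b();
        if (err < 0)
                goto err_b;

        err = init_c();
        if (err < 0)
                goto err_c;

        return 0;

err_c:
        exit_b();       /* undo the stages in reverse order */
err_b:
        exit_a();
err_a:
        return err;
}

int main(void)
{
        printf("subsys_init() -> %d\n", subsys_init());
        return 0;
}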
index 5868597534e529b4c33d1e603cbe38bed740a4ad..03d4d1245d583164e9a623caece340684300a848 100644 (file)
@@ -708,8 +708,6 @@ static int __init bnep_init(void)
 {
        char flt[50] = "";
 
-       l2cap_load();
-
 #ifdef CONFIG_BT_BNEP_PROTO_FILTER
        strcat(flt, "protocol ");
 #endif
index 2862f53b66b15b8b680322b32c5196f502adc7f0..d935da71ab3b57ff7c46962f339b257f39e57952 100644 (file)
@@ -88,6 +88,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
                        sockfd_put(nsock);
                        return -EBADFD;
                }
+               ca.device[sizeof(ca.device)-1] = 0;
 
                err = bnep_add_connection(&ca, nsock);
                if (!err) {
index 3487cfe74aecad8d69f007db8e23537226ff3d3a..67cff810c77d19c4755634d7cf4f3093a68f0d9a 100644 (file)
@@ -155,7 +155,8 @@ static void cmtp_send_interopmsg(struct cmtp_session *session,
 
        BT_DBG("session %p subcmd 0x%02x appl %d msgnum %d", session, subcmd, appl, msgnum);
 
-       if (!(skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC))) {
+       skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC);
+       if (!skb) {
                BT_ERR("Can't allocate memory for interoperability packet");
                return;
        }
index 8e5f292529accd5784482c72a2090816a1ab7df6..964ea9126f9fc5ac2b6a84647ecbc2ecefc8b2ed 100644 (file)
@@ -115,7 +115,8 @@ static inline void cmtp_add_msgpart(struct cmtp_session *session, int id, const
 
        size = (skb) ? skb->len + count : count;
 
-       if (!(nskb = alloc_skb(size, GFP_ATOMIC))) {
+       nskb = alloc_skb(size, GFP_ATOMIC);
+       if (!nskb) {
                BT_ERR("Can't allocate memory for CAPI message");
                return;
        }
@@ -216,7 +217,8 @@ static void cmtp_process_transmit(struct cmtp_session *session)
 
        BT_DBG("session %p", session);
 
-       if (!(nskb = alloc_skb(session->mtu, GFP_ATOMIC))) {
+       nskb = alloc_skb(session->mtu, GFP_ATOMIC);
+       if (!nskb) {
                BT_ERR("Can't allocate memory for new frame");
                return;
        }
@@ -224,7 +226,8 @@ static void cmtp_process_transmit(struct cmtp_session *session)
        while ((skb = skb_dequeue(&session->transmit))) {
                struct cmtp_scb *scb = (void *) skb->cb;
 
-               if ((tail = (session->mtu - nskb->len)) < 5) {
+               tail = session->mtu - nskb->len;
+               if (tail < 5) {
                        cmtp_send_frame(session, nskb->data, nskb->len);
                        skb_trim(nskb, 0);
                        tail = session->mtu;
@@ -466,8 +469,6 @@ int cmtp_get_conninfo(struct cmtp_conninfo *ci)
 
 static int __init cmtp_init(void)
 {
-       l2cap_load();
-
        BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION);
 
        cmtp_init_sockets();
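
The cmtp hunks above are checkpatch-style cleanups that pull the allocation out of the if condition. The same transformation in isolation (alloc_buf() is a stand-in for alloc_skb()):

#include <stdio.h>
#include <stdlib.h>

static void *alloc_buf(size_t len)
{
        return malloc(len);
}

int main(void)
{
        void *nskb;

        /*
         * old style:  if (!(nskb = alloc_buf(512))) ...
         * new style:  assign first, then test on its own line
         */
        nskb = alloc_buf(512);
        if (!nskb) {
                fprintf(stderr, "Can't allocate memory for new frame\n");
                return 1;
        }

        puts("allocated");
        free(nskb);
        return 0;
}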
index 99cd8d9d891b475c57925be6ae54a88240f7adc8..a050a69849012d9870bdb71cb9b120df11b18f3b 100644 (file)
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
+static void hci_le_connect(struct hci_conn *conn)
+{
+       struct hci_dev *hdev = conn->hdev;
+       struct hci_cp_le_create_conn cp;
+
+       conn->state = BT_CONNECT;
+       conn->out = 1;
+       conn->link_mode |= HCI_LM_MASTER;
+
+       memset(&cp, 0, sizeof(cp));
+       cp.scan_interval = cpu_to_le16(0x0004);
+       cp.scan_window = cpu_to_le16(0x0004);
+       bacpy(&cp.peer_addr, &conn->dst);
+       cp.conn_interval_min = cpu_to_le16(0x0008);
+       cp.conn_interval_max = cpu_to_le16(0x0100);
+       cp.supervision_timeout = cpu_to_le16(0x0064);
+       cp.min_ce_len = cpu_to_le16(0x0001);
+       cp.max_ce_len = cpu_to_le16(0x0001);
+
+       hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
+}
+
+static void hci_le_connect_cancel(struct hci_conn *conn)
+{
+       hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
+}
+
 void hci_acl_connect(struct hci_conn *conn)
 {
        struct hci_dev *hdev = conn->hdev;
@@ -156,6 +183,26 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
        hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
 }
 
+void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
+                                       u16 latency, u16 to_multiplier)
+{
+       struct hci_cp_le_conn_update cp;
+       struct hci_dev *hdev = conn->hdev;
+
+       memset(&cp, 0, sizeof(cp));
+
+       cp.handle               = cpu_to_le16(conn->handle);
+       cp.conn_interval_min    = cpu_to_le16(min);
+       cp.conn_interval_max    = cpu_to_le16(max);
+       cp.conn_latency         = cpu_to_le16(latency);
+       cp.supervision_timeout  = cpu_to_le16(to_multiplier);
+       cp.min_ce_len           = cpu_to_le16(0x0001);
+       cp.max_ce_len           = cpu_to_le16(0x0001);
+
+       hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
+}
+EXPORT_SYMBOL(hci_le_conn_update);
+
 /* Device _must_ be locked */
 void hci_sco_setup(struct hci_conn *conn, __u8 status)
 {
@@ -193,8 +240,12 @@ static void hci_conn_timeout(unsigned long arg)
        switch (conn->state) {
        case BT_CONNECT:
        case BT_CONNECT2:
-               if (conn->type == ACL_LINK && conn->out)
-                       hci_acl_connect_cancel(conn);
+               if (conn->out) {
+                       if (conn->type == ACL_LINK)
+                               hci_acl_connect_cancel(conn);
+                       else if (conn->type == LE_LINK)
+                               hci_le_connect_cancel(conn);
+               }
                break;
        case BT_CONFIG:
        case BT_CONNECTED:
@@ -234,6 +285,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
        conn->mode  = HCI_CM_ACTIVE;
        conn->state = BT_OPEN;
        conn->auth_type = HCI_AT_GENERAL_BONDING;
+       conn->io_capability = hdev->io_capability;
 
        conn->power_save = 1;
        conn->disc_timeout = HCI_DISCONN_TIMEOUT;
@@ -295,6 +347,11 @@ int hci_conn_del(struct hci_conn *conn)
 
                /* Unacked frames */
                hdev->acl_cnt += conn->sent;
+       } else if (conn->type == LE_LINK) {
+               if (hdev->le_pkts)
+                       hdev->le_cnt += conn->sent;
+               else
+                       hdev->acl_cnt += conn->sent;
        } else {
                struct hci_conn *acl = conn->link;
                if (acl) {
@@ -360,15 +417,30 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
 }
 EXPORT_SYMBOL(hci_get_route);
 
-/* Create SCO or ACL connection.
+/* Create SCO, ACL or LE connection.
  * Device _must_ be locked */
 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
 {
        struct hci_conn *acl;
        struct hci_conn *sco;
+       struct hci_conn *le;
 
        BT_DBG("%s dst %s", hdev->name, batostr(dst));
 
+       if (type == LE_LINK) {
+               le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+               if (!le)
+                       le = hci_conn_add(hdev, LE_LINK, dst);
+               if (!le)
+                       return NULL;
+               if (le->state == BT_OPEN)
+                       hci_le_connect(le);
+
+               hci_conn_hold(le);
+
+               return le;
+       }
+
        acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
        if (!acl) {
                acl = hci_conn_add(hdev, ACL_LINK, dst);
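
The LE branch added to hci_connect() above is a lookup-or-create: reuse the connection object the hash already holds for the destination, otherwise add one, start the LE Create Connection procedure only while the object is still in BT_OPEN, and take a reference for the caller either way. A reduced userspace sketch of that shape (list, states and names are invented for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum conn_state { ST_OPEN, ST_CONNECT, ST_CONNECTED };

struct conn {
        char dst[18];
        enum conn_state state;
        int refs;
        struct conn *next;
};

static struct conn *conn_list;

static struct conn *conn_lookup(const char *dst)
{
        struct conn *c;

        for (c = conn_list; c; c = c->next)
                if (strcmp(c->dst, dst) == 0)
                        return c;
        return NULL;
}

static struct conn *conn_add(const char *dst)
{
        struct conn *c = calloc(1, sizeof(*c));

        if (!c)
                return NULL;
        snprintf(c->dst, sizeof(c->dst), "%s", dst);
        c->state = ST_OPEN;
        c->next = conn_list;
        conn_list = c;
        return c;
}

static struct conn *connect_le(const char *dst)
{
        struct conn *le = conn_lookup(dst);

        if (!le)
                le = conn_add(dst);
        if (!le)
                return NULL;

        if (le->state == ST_OPEN) {
                /* a real stack would send the Create Connection command here */
                le->state = ST_CONNECT;
        }

        le->refs++;             /* caller now holds a reference */
        return le;
}

int main(void)
{
        struct conn *a = connect_le("00:11:22:33:44:55");
        struct conn *b = connect_le("00:11:22:33:44:55");

        if (!a || !b)
                return 1;
        printf("same object: %d, refs=%d, state=%d\n", a == b, b->refs, b->state);
        return 0;
}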
index 9c4541bc488ab9b2a74dda2809929f842b9b97e6..b372fb8bcdcfdbbcbb7c190ce9b12baff125c12d 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/interrupt.h>
 #include <linux/notifier.h>
 #include <linux/rfkill.h>
+#include <linux/timer.h>
 #include <net/sock.h>
 
 #include <asm/system.h>
@@ -50,6 +51,8 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
+#define AUTO_OFF_TIMEOUT 2000
+
 static void hci_cmd_task(unsigned long arg);
 static void hci_rx_task(unsigned long arg);
 static void hci_tx_task(unsigned long arg);
@@ -95,11 +98,10 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
 {
        BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
 
-       /* If the request has set req_last_cmd (typical for multi-HCI
-        * command requests) check if the completed command matches
-        * this, and if not just return. Single HCI command requests
-        * typically leave req_last_cmd as 0 */
-       if (hdev->req_last_cmd && cmd != hdev->req_last_cmd)
+       /* If this is the init phase check if the completed command matches
+        * the last init command, and if not just return.
+        */
+       if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
                return;
 
        if (hdev->req_status == HCI_REQ_PEND) {
@@ -122,7 +124,7 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
 
 /* Execute request and wait for completion. */
 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
-                               unsigned long opt, __u32 timeout)
+                                       unsigned long opt, __u32 timeout)
 {
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;
@@ -156,7 +158,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
                break;
        }
 
-       hdev->req_last_cmd = hdev->req_status = hdev->req_result = 0;
+       hdev->req_status = hdev->req_result = 0;
 
        BT_DBG("%s end: err %d", hdev->name, err);
 
@@ -164,7 +166,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
 }
 
 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
-                               unsigned long opt, __u32 timeout)
+                                       unsigned long opt, __u32 timeout)
 {
        int ret;
 
@@ -189,6 +191,7 @@ static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
 
 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
 {
+       struct hci_cp_delete_stored_link_key cp;
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;
@@ -252,15 +255,21 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
 
-       /* Page timeout ~20 secs */
-       param = cpu_to_le16(0x8000);
-       hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
-
        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
 
-       hdev->req_last_cmd = HCI_OP_WRITE_CA_TIMEOUT;
+       bacpy(&cp.bdaddr, BDADDR_ANY);
+       cp.delete_all = 1;
+       hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
+}
+
+static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
+{
+       BT_DBG("%s", hdev->name);
+
+       /* Read LE buffer size */
+       hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
 }
 
 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
@@ -429,7 +438,8 @@ int hci_inquiry(void __user *arg)
        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;
 
-       if (!(hdev = hci_dev_get(ir.dev_id)))
+       hdev = hci_dev_get(ir.dev_id);
+       if (!hdev)
                return -ENODEV;
 
        hci_dev_lock_bh(hdev);
@@ -455,7 +465,7 @@ int hci_inquiry(void __user *arg)
        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
-       buf = kmalloc(sizeof(struct inquiry_info) *max_rsp, GFP_KERNEL);
+       buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
@@ -489,7 +499,8 @@ int hci_dev_open(__u16 dev)
        struct hci_dev *hdev;
        int ret = 0;
 
-       if (!(hdev = hci_dev_get(dev)))
+       hdev = hci_dev_get(dev);
+       if (!hdev)
                return -ENODEV;
 
        BT_DBG("%s %p", hdev->name, hdev);
@@ -521,11 +532,15 @@ int hci_dev_open(__u16 dev)
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
+               hdev->init_last_cmd = 0;
 
-               //__hci_request(hdev, hci_reset_req, 0, HZ);
                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
 
+               if (lmp_le_capable(hdev))
+                       ret = __hci_request(hdev, hci_le_init_req, 0,
+                                       msecs_to_jiffies(HCI_INIT_TIMEOUT));
+
                clear_bit(HCI_INIT, &hdev->flags);
        }
 
@@ -533,6 +548,8 @@ int hci_dev_open(__u16 dev)
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
+               if (!test_bit(HCI_SETUP, &hdev->flags))
+                       mgmt_powered(hdev->id, 1);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
@@ -606,6 +623,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 
        /* Drop last sent command */
        if (hdev->sent_cmd) {
+               del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }
@@ -614,6 +632,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
         * and no tasks are scheduled. */
        hdev->close(hdev);
 
+       mgmt_powered(hdev->id, 0);
+
        /* Clear flags */
        hdev->flags = 0;
 
@@ -664,7 +684,7 @@ int hci_dev_reset(__u16 dev)
                hdev->flush(hdev);
 
        atomic_set(&hdev->cmd_cnt, 1);
-       hdev->acl_cnt = 0; hdev->sco_cnt = 0;
+       hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
 
        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
@@ -793,9 +813,17 @@ int hci_get_dev_list(void __user *arg)
        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;
+
                hdev = list_entry(p, struct hci_dev, list);
+
+               hci_del_off_timer(hdev);
+
+               if (!test_bit(HCI_MGMT, &hdev->flags))
+                       set_bit(HCI_PAIRABLE, &hdev->flags);
+
                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
+
                if (++n >= dev_num)
                        break;
        }
@@ -823,6 +851,11 @@ int hci_get_dev_info(void __user *arg)
        if (!hdev)
                return -ENODEV;
 
+       hci_del_off_timer(hdev);
+
+       if (!test_bit(HCI_MGMT, &hdev->flags))
+               set_bit(HCI_PAIRABLE, &hdev->flags);
+
        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
@@ -891,6 +924,159 @@ void hci_free_dev(struct hci_dev *hdev)
 }
 EXPORT_SYMBOL(hci_free_dev);
 
+static void hci_power_on(struct work_struct *work)
+{
+       struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
+
+       BT_DBG("%s", hdev->name);
+
+       if (hci_dev_open(hdev->id) < 0)
+               return;
+
+       if (test_bit(HCI_AUTO_OFF, &hdev->flags))
+               mod_timer(&hdev->off_timer,
+                               jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
+
+       if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
+               mgmt_index_added(hdev->id);
+}
+
+static void hci_power_off(struct work_struct *work)
+{
+       struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
+
+       BT_DBG("%s", hdev->name);
+
+       hci_dev_close(hdev->id);
+}
+
+static void hci_auto_off(unsigned long data)
+{
+       struct hci_dev *hdev = (struct hci_dev *) data;
+
+       BT_DBG("%s", hdev->name);
+
+       clear_bit(HCI_AUTO_OFF, &hdev->flags);
+
+       queue_work(hdev->workqueue, &hdev->power_off);
+}
+
+void hci_del_off_timer(struct hci_dev *hdev)
+{
+       BT_DBG("%s", hdev->name);
+
+       clear_bit(HCI_AUTO_OFF, &hdev->flags);
+       del_timer(&hdev->off_timer);
+}
+
+int hci_uuids_clear(struct hci_dev *hdev)
+{
+       struct list_head *p, *n;
+
+       list_for_each_safe(p, n, &hdev->uuids) {
+               struct bt_uuid *uuid;
+
+               uuid = list_entry(p, struct bt_uuid, list);
+
+               list_del(p);
+               kfree(uuid);
+       }
+
+       return 0;
+}
+
+int hci_link_keys_clear(struct hci_dev *hdev)
+{
+       struct list_head *p, *n;
+
+       list_for_each_safe(p, n, &hdev->link_keys) {
+               struct link_key *key;
+
+               key = list_entry(p, struct link_key, list);
+
+               list_del(p);
+               kfree(key);
+       }
+
+       return 0;
+}
+
+struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+       struct list_head *p;
+
+       list_for_each(p, &hdev->link_keys) {
+               struct link_key *k;
+
+               k = list_entry(p, struct link_key, list);
+
+               if (bacmp(bdaddr, &k->bdaddr) == 0)
+                       return k;
+       }
+
+       return NULL;
+}
+
+int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
+                                               u8 *val, u8 type, u8 pin_len)
+{
+       struct link_key *key, *old_key;
+       u8 old_key_type;
+
+       old_key = hci_find_link_key(hdev, bdaddr);
+       if (old_key) {
+               old_key_type = old_key->type;
+               key = old_key;
+       } else {
+               old_key_type = 0xff;
+               key = kzalloc(sizeof(*key), GFP_ATOMIC);
+               if (!key)
+                       return -ENOMEM;
+               list_add(&key->list, &hdev->link_keys);
+       }
+
+       BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
+
+       bacpy(&key->bdaddr, bdaddr);
+       memcpy(key->val, val, 16);
+       key->type = type;
+       key->pin_len = pin_len;
+
+       if (new_key)
+               mgmt_new_key(hdev->id, key, old_key_type);
+
+       if (type == 0x06)
+               key->type = old_key_type;
+
+       return 0;
+}
+
+int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+       struct link_key *key;
+
+       key = hci_find_link_key(hdev, bdaddr);
+       if (!key)
+               return -ENOENT;
+
+       BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
+
+       list_del(&key->list);
+       kfree(key);
+
+       return 0;
+}
+
+/* HCI command timer function */
+static void hci_cmd_timer(unsigned long arg)
+{
+       struct hci_dev *hdev = (void *) arg;
+
+       BT_ERR("%s command tx timeout", hdev->name);
+       atomic_set(&hdev->cmd_cnt, 1);
+       tasklet_schedule(&hdev->cmd_task);
+}
+
 /* Register HCI device */
 int hci_register_dev(struct hci_dev *hdev)
 {
@@ -923,6 +1109,7 @@ int hci_register_dev(struct hci_dev *hdev)
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
+       hdev->io_capability = 0x03; /* No Input No Output */
 
        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
@@ -936,6 +1123,8 @@ int hci_register_dev(struct hci_dev *hdev)
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);
 
+       setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
+
        for (i = 0; i < NUM_REASSEMBLY; i++)
                hdev->reassembly[i] = NULL;
 
@@ -948,6 +1137,14 @@ int hci_register_dev(struct hci_dev *hdev)
 
        INIT_LIST_HEAD(&hdev->blacklist);
 
+       INIT_LIST_HEAD(&hdev->uuids);
+
+       INIT_LIST_HEAD(&hdev->link_keys);
+
+       INIT_WORK(&hdev->power_on, hci_power_on);
+       INIT_WORK(&hdev->power_off, hci_power_off);
+       setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
+
        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
 
        atomic_set(&hdev->promisc, 0);
@@ -969,7 +1166,10 @@ int hci_register_dev(struct hci_dev *hdev)
                }
        }
 
-       mgmt_index_added(hdev->id);
+       set_bit(HCI_AUTO_OFF, &hdev->flags);
+       set_bit(HCI_SETUP, &hdev->flags);
+       queue_work(hdev->workqueue, &hdev->power_on);
+
        hci_notify(hdev, HCI_DEV_REG);
 
        return id;
@@ -999,7 +1199,10 @@ int hci_unregister_dev(struct hci_dev *hdev)
        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);
 
-       mgmt_index_removed(hdev->id);
+       if (!test_bit(HCI_INIT, &hdev->flags) &&
+                                       !test_bit(HCI_SETUP, &hdev->flags))
+               mgmt_index_removed(hdev->id);
+
        hci_notify(hdev, HCI_DEV_UNREG);
 
        if (hdev->rfkill) {
@@ -1009,10 +1212,14 @@ int hci_unregister_dev(struct hci_dev *hdev)
 
        hci_unregister_sysfs(hdev);
 
+       hci_del_off_timer(hdev);
+
        destroy_workqueue(hdev->workqueue);
 
        hci_dev_lock_bh(hdev);
        hci_blacklist_clear(hdev);
+       hci_uuids_clear(hdev);
+       hci_link_keys_clear(hdev);
        hci_dev_unlock_bh(hdev);
 
        __hci_dev_put(hdev);
@@ -1313,7 +1520,7 @@ static int hci_send_frame(struct sk_buff *skb)
                /* Time stamp */
                __net_timestamp(skb);
 
-               hci_send_to_sock(hdev, skb);
+               hci_send_to_sock(hdev, skb, NULL);
        }
 
        /* Get rid of skb owner, prior to sending to the driver. */
@@ -1349,6 +1556,9 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;
 
+       if (test_bit(HCI_INIT, &hdev->flags))
+               hdev->init_last_cmd = opcode;
+
        skb_queue_tail(&hdev->cmd_q, skb);
        tasklet_schedule(&hdev->cmd_task);
 
@@ -1395,7 +1605,7 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
 
        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
-       hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
+       hci_add_acl_hdr(skb, conn->handle, flags);
 
        list = skb_shinfo(skb)->frag_list;
        if (!list) {
@@ -1413,12 +1623,15 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
                spin_lock_bh(&conn->data_q.lock);
 
                __skb_queue_tail(&conn->data_q, skb);
+
+               flags &= ~ACL_START;
+               flags |= ACL_CONT;
                do {
                        skb = list; list = list->next;
 
                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
-                       hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
+                       hci_add_acl_hdr(skb, conn->handle, flags);
 
                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
 
@@ -1486,8 +1699,25 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
        }
 
        if (conn) {
-               int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
-               int q = cnt / num;
+               int cnt, q;
+
+               switch (conn->type) {
+               case ACL_LINK:
+                       cnt = hdev->acl_cnt;
+                       break;
+               case SCO_LINK:
+               case ESCO_LINK:
+                       cnt = hdev->sco_cnt;
+                       break;
+               case LE_LINK:
+                       cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
+                       break;
+               default:
+                       cnt = 0;
+                       BT_ERR("Unknown link type");
+               }
+
+               q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;
@@ -1496,19 +1726,19 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
        return conn;
 }
 
-static inline void hci_acl_tx_to(struct hci_dev *hdev)
+static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
 {
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;
 
-       BT_ERR("%s ACL tx timeout", hdev->name);
+       BT_ERR("%s link tx timeout", hdev->name);
 
        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
-               if (c->type == ACL_LINK && c->sent) {
-                       BT_ERR("%s killing stalled ACL connection %s",
+               if (c->type == type && c->sent) {
+                       BT_ERR("%s killing stalled connection %s",
                                hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
                }
@@ -1527,7 +1757,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
-                       hci_acl_tx_to(hdev);
+                       hci_link_tx_to(hdev, ACL_LINK);
        }
 
        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
@@ -1586,6 +1816,40 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
        }
 }
 
+static inline void hci_sched_le(struct hci_dev *hdev)
+{
+       struct hci_conn *conn;
+       struct sk_buff *skb;
+       int quote, cnt;
+
+       BT_DBG("%s", hdev->name);
+
+       if (!test_bit(HCI_RAW, &hdev->flags)) {
+               /* LE tx timeout must be longer than maximum
+                * link supervision timeout (40.9 seconds) */
+               if (!hdev->le_cnt && hdev->le_pkts &&
+                               time_after(jiffies, hdev->le_last_tx + HZ * 45))
+                       hci_link_tx_to(hdev, LE_LINK);
+       }
+
+       cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
+       while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
+               while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
+                       BT_DBG("skb %p len %d", skb, skb->len);
+
+                       hci_send_frame(skb);
+                       hdev->le_last_tx = jiffies;
+
+                       cnt--;
+                       conn->sent++;
+               }
+       }
+       if (hdev->le_pkts)
+               hdev->le_cnt = cnt;
+       else
+               hdev->acl_cnt = cnt;
+}
+
 static void hci_tx_task(unsigned long arg)
 {
        struct hci_dev *hdev = (struct hci_dev *) arg;
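
hci_sched_le() above reuses the buffer-quota logic of hci_low_sent(): the free LE buffers (or the ACL pool on controllers without a separate LE pool) are divided by the number of connections with queued data, and every connection gets at least one frame per round. The division in isolation, as a rough standalone rendering of the formula rather than the scheduler itself:

#include <stdio.h>

/* Split "cnt" free controller buffers across "num" connections that have
 * queued data, guaranteeing at least one frame per round. */
static int tx_quote(int cnt, int num)
{
        int q = cnt / num;      /* num > 0: at least one connection queued */

        return q ? q : 1;
}

int main(void)
{
        printf("8 buffers, 3 links -> quota %d\n", tx_quote(8, 3));
        printf("1 buffer,  4 links -> quota %d\n", tx_quote(1, 4));
        return 0;
}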
@@ -1593,7 +1857,8 @@ static void hci_tx_task(unsigned long arg)
 
        read_lock(&hci_task_lock);
 
-       BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
+       BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
+               hdev->sco_cnt, hdev->le_cnt);
 
        /* Schedule queues and send stuff to HCI driver */
 
@@ -1603,6 +1868,8 @@ static void hci_tx_task(unsigned long arg)
 
        hci_sched_esco(hdev);
 
+       hci_sched_le(hdev);
+
        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);
@@ -1700,7 +1967,7 @@ static void hci_rx_task(unsigned long arg)
        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
-                       hci_send_to_sock(hdev, skb);
+                       hci_send_to_sock(hdev, skb, NULL);
                }
 
                if (test_bit(HCI_RAW, &hdev->flags)) {
@@ -1750,20 +2017,20 @@ static void hci_cmd_task(unsigned long arg)
 
        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
 
-       if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
-               BT_ERR("%s command tx timeout", hdev->name);
-               atomic_set(&hdev->cmd_cnt, 1);
-       }
-
        /* Send queued commands */
-       if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
+       if (atomic_read(&hdev->cmd_cnt)) {
+               skb = skb_dequeue(&hdev->cmd_q);
+               if (!skb)
+                       return;
+
                kfree_skb(hdev->sent_cmd);
 
                hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
-                       hdev->cmd_last_tx = jiffies;
+                       mod_timer(&hdev->cmd_timer,
+                                 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        tasklet_schedule(&hdev->cmd_task);
index a290854fdaa6ac68bba2443a51763723d5be7f62..98b5764e43156e88d6f4daa725c44d40195dbea9 100644 (file)
@@ -274,15 +274,24 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
 
        if (!status) {
                __u8 param = *((__u8 *) sent);
+               int old_pscan, old_iscan;
 
-               clear_bit(HCI_PSCAN, &hdev->flags);
-               clear_bit(HCI_ISCAN, &hdev->flags);
+               old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
+               old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
 
-               if (param & SCAN_INQUIRY)
+               if (param & SCAN_INQUIRY) {
                        set_bit(HCI_ISCAN, &hdev->flags);
+                       if (!old_iscan)
+                               mgmt_discoverable(hdev->id, 1);
+               } else if (old_iscan)
+                       mgmt_discoverable(hdev->id, 0);
 
-               if (param & SCAN_PAGE)
+               if (param & SCAN_PAGE) {
                        set_bit(HCI_PSCAN, &hdev->flags);
+                       if (!old_pscan)
+                               mgmt_connectable(hdev->id, 1);
+               } else if (old_pscan)
+                       mgmt_connectable(hdev->id, 0);
        }
 
        hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
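In the Write Scan Enable handler above, SCAN_INQUIRY maps to the mgmt discoverable property and SCAN_PAGE to connectable, and test_and_clear_bit() makes the mgmt events fire only on real state transitions rather than on every command completion. The same edge-detection idea in a standalone form (SCAN_* values as defined in the HCI headers, 0x01 and 0x02):

	#include <stdio.h>

	#define SCAN_INQUIRY 0x01	/* inquiry scan -> discoverable */
	#define SCAN_PAGE    0x02	/* page scan    -> connectable  */

	/* Report only the transitions, mirroring the test_and_clear_bit()
	 * pattern above.  'state' holds the previously reported bits. */
	static void report(unsigned int *state, unsigned int param)
	{
		unsigned int changed = *state ^ param;

		if (changed & SCAN_INQUIRY)
			printf("discoverable -> %d\n", !!(param & SCAN_INQUIRY));
		if (changed & SCAN_PAGE)
			printf("connectable  -> %d\n", !!(param & SCAN_PAGE));

		*state = param;
	}

	int main(void)
	{
		unsigned int state = 0;

		report(&state, SCAN_PAGE);			/* connectable -> 1  */
		report(&state, SCAN_PAGE);			/* no change, silent */
		report(&state, SCAN_PAGE | SCAN_INQUIRY);	/* discoverable -> 1 */
		return 0;
	}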
@@ -415,6 +424,115 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
        hdev->ssp_mode = *((__u8 *) sent);
 }
 
+static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
+{
+       if (hdev->features[6] & LMP_EXT_INQ)
+               return 2;
+
+       if (hdev->features[3] & LMP_RSSI_INQ)
+               return 1;
+
+       if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
+                                               hdev->lmp_subver == 0x0757)
+               return 1;
+
+       if (hdev->manufacturer == 15) {
+               if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
+                       return 1;
+               if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
+                       return 1;
+               if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
+                       return 1;
+       }
+
+       if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
+                                               hdev->lmp_subver == 0x1805)
+               return 1;
+
+       return 0;
+}
+
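For reference, the values returned above are the Write Inquiry Mode parameter; the manufacturer/revision checks whitelist a few controllers that handle RSSI inquiry results without advertising LMP_RSSI_INQ. Per the HCI specification the parameter means:

	/* Write Inquiry Mode parameter values (per the HCI specification). */
	enum inquiry_result_mode {
		INQUIRY_MODE_STANDARD	= 0x00,	/* Inquiry Result           */
		INQUIRY_MODE_RSSI	= 0x01,	/* Inquiry Result with RSSI */
		INQUIRY_MODE_EXTENDED	= 0x02,	/* Extended Inquiry Result  */
	};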
+static void hci_setup_inquiry_mode(struct hci_dev *hdev)
+{
+       u8 mode;
+
+       mode = hci_get_inquiry_mode(hdev);
+
+       hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
+}
+
+static void hci_setup_event_mask(struct hci_dev *hdev)
+{
+       /* The second byte is 0xff instead of 0x9f (two reserved bits
+        * disabled) since a Broadcom 1.2 dongle doesn't respond to the
+        * command otherwise */
+       u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
+
+       /* Events for 1.2 and newer controllers */
+       if (hdev->lmp_ver > 1) {
+               events[4] |= 0x01; /* Flow Specification Complete */
+               events[4] |= 0x02; /* Inquiry Result with RSSI */
+               events[4] |= 0x04; /* Read Remote Extended Features Complete */
+               events[5] |= 0x08; /* Synchronous Connection Complete */
+               events[5] |= 0x10; /* Synchronous Connection Changed */
+       }
+
+       if (hdev->features[3] & LMP_RSSI_INQ)
+               events[4] |= 0x02; /* Inquiry Result with RSSI */
+
+       if (hdev->features[5] & LMP_SNIFF_SUBR)
+               events[5] |= 0x20; /* Sniff Subrating */
+
+       if (hdev->features[5] & LMP_PAUSE_ENC)
+               events[5] |= 0x80; /* Encryption Key Refresh Complete */
+
+       if (hdev->features[6] & LMP_EXT_INQ)
+               events[5] |= 0x40; /* Extended Inquiry Result */
+
+       if (hdev->features[6] & LMP_NO_FLUSH)
+               events[7] |= 0x01; /* Enhanced Flush Complete */
+
+       if (hdev->features[7] & LMP_LSTO)
+               events[6] |= 0x80; /* Link Supervision Timeout Changed */
+
+       if (hdev->features[6] & LMP_SIMPLE_PAIR) {
+               events[6] |= 0x01;      /* IO Capability Request */
+               events[6] |= 0x02;      /* IO Capability Response */
+               events[6] |= 0x04;      /* User Confirmation Request */
+               events[6] |= 0x08;      /* User Passkey Request */
+               events[6] |= 0x10;      /* Remote OOB Data Request */
+               events[6] |= 0x20;      /* Simple Pairing Complete */
+               events[7] |= 0x04;      /* User Passkey Notification */
+               events[7] |= 0x08;      /* Keypress Notification */
+               events[7] |= 0x10;      /* Remote Host Supported
+                                        * Features Notification */
+       }
+
+       if (hdev->features[4] & LMP_LE)
+               events[7] |= 0x20;      /* LE Meta-Event */
+
+       hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
+}
+
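The magic numbers in hci_setup_event_mask() follow the Set Event Mask layout: bit (event_code - 1) of the 8-byte little-endian mask enables the event, i.e. byte (event_code - 1) / 8, bit (event_code - 1) % 8. A quick standalone check for a few of the events enabled above:

	#include <stdio.h>

	/* Set Event Mask: bit (event_code - 1) of the 8-byte mask enables
	 * the event with that code. */
	static void mask_bit(unsigned int event_code, const char *name)
	{
		unsigned int byte = (event_code - 1) / 8;
		unsigned int bit  = (event_code - 1) % 8;

		printf("%-28s events[%u] |= 0x%02x\n", name, byte, 1u << bit);
	}

	int main(void)
	{
		mask_bit(0x22, "Inquiry Result with RSSI");	/* events[4] |= 0x02 */
		mask_bit(0x2e, "Sniff Subrating");		/* events[5] |= 0x20 */
		mask_bit(0x31, "IO Capability Request");	/* events[6] |= 0x01 */
		mask_bit(0x3e, "LE Meta-Event");		/* events[7] |= 0x20 */
		return 0;
	}

Compiled and run, it reproduces the events[4] |= 0x02, events[5] |= 0x20, events[6] |= 0x01 and events[7] |= 0x20 assignments above.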
+static void hci_setup(struct hci_dev *hdev)
+{
+       hci_setup_event_mask(hdev);
+
+       if (hdev->lmp_ver > 1)
+               hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
+
+       if (hdev->features[6] & LMP_SIMPLE_PAIR) {
+               u8 mode = 0x01;
+               hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
+       }
+
+       if (hdev->features[3] & LMP_RSSI_INQ)
+               hci_setup_inquiry_mode(hdev);
+
+       if (hdev->features[7] & LMP_INQ_TX_PWR)
+               hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
+}
+
 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_rp_read_local_version *rp = (void *) skb->data;
@@ -426,11 +544,34 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
 
        hdev->hci_ver = rp->hci_ver;
        hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
+       hdev->lmp_ver = rp->lmp_ver;
        hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
+       hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
 
        BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
                                        hdev->manufacturer,
                                        hdev->hci_ver, hdev->hci_rev);
+
+       if (test_bit(HCI_INIT, &hdev->flags))
+               hci_setup(hdev);
+}
+
+static void hci_setup_link_policy(struct hci_dev *hdev)
+{
+       u16 link_policy = 0;
+
+       if (hdev->features[0] & LMP_RSWITCH)
+               link_policy |= HCI_LP_RSWITCH;
+       if (hdev->features[0] & LMP_HOLD)
+               link_policy |= HCI_LP_HOLD;
+       if (hdev->features[0] & LMP_SNIFF)
+               link_policy |= HCI_LP_SNIFF;
+       if (hdev->features[1] & LMP_PARK)
+               link_policy |= HCI_LP_PARK;
+
+       link_policy = cpu_to_le16(link_policy);
+       hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
+                                       sizeof(link_policy), &link_policy);
 }
 
 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
@@ -440,9 +581,15 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb
        BT_DBG("%s status 0x%x", hdev->name, rp->status);
 
        if (rp->status)
-               return;
+               goto done;
 
        memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
+
+       if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
+               hci_setup_link_policy(hdev);
+
+done:
+       hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
 }
 
 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
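The commands[5] & 0x10 test reads the Supported Commands bitmap returned by Read Local Supported Commands; octet 5, bit 4 is the slot the HCI command table assigns to Write Default Link Policy Settings, so the default policy is only programmed on controllers that implement that command. A minimal helper expressing the same test (helper name is illustrative, not part of the patch):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Supported Commands bitmap test as used above. */
	static bool hci_cmd_supported(const uint8_t *commands, unsigned int octet,
				      unsigned int bit)
	{
		return commands[octet] & (1u << bit);
	}

	int main(void)
	{
		uint8_t commands[64] = { 0 };

		commands[5] = 0x10;	/* pretend the controller sets the bit */
		printf("write default link policy: %s\n",
		       hci_cmd_supported(commands, 5, 4) ? "yes" : "no");
		return 0;
	}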
@@ -548,6 +695,107 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
        hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
 }
 
+static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
+                                                       struct sk_buff *skb)
+{
+       __u8 status = *((__u8 *) skb->data);
+
+       BT_DBG("%s status 0x%x", hdev->name, status);
+
+       hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
+}
+
+static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       __u8 status = *((__u8 *) skb->data);
+
+       BT_DBG("%s status 0x%x", hdev->name, status);
+
+       hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
+}
+
+static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
+                                                       struct sk_buff *skb)
+{
+       __u8 status = *((__u8 *) skb->data);
+
+       BT_DBG("%s status 0x%x", hdev->name, status);
+
+       hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
+}
+
+static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
+                                                       struct sk_buff *skb)
+{
+       __u8 status = *((__u8 *) skb->data);
+
+       BT_DBG("%s status 0x%x", hdev->name, status);
+
+       hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
+}
+
+static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       __u8 status = *((__u8 *) skb->data);
+
+       BT_DBG("%s status 0x%x", hdev->name, status);
+
+       hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
+}
+
+static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_rp_pin_code_reply *rp = (void *) skb->data;
+       struct hci_cp_pin_code_reply *cp;
+       struct hci_conn *conn;
+
+       BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+       if (test_bit(HCI_MGMT, &hdev->flags))
+               mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
+
+       if (rp->status != 0)
+               return;
+
+       cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
+       if (!cp)
+               return;
+
+       conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+       if (conn)
+               conn->pin_length = cp->pin_len;
+}
+
+static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
+
+       BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+       if (test_bit(HCI_MGMT, &hdev->flags))
+               mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
+                                                               rp->status);
+}
+
+static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
+                                      struct sk_buff *skb)
+{
+       struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
+
+       BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+       if (rp->status)
+               return;
+
+       hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
+       hdev->le_pkts = rp->le_max_pkt;
+
+       hdev->le_cnt = hdev->le_pkts;
+
+       BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
+
+       hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
+}
+
 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
 {
        BT_DBG("%s status 0x%x", hdev->name, status);
@@ -622,11 +870,14 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
        hci_dev_lock(hdev);
 
        acl = hci_conn_hash_lookup_handle(hdev, handle);
-       if (acl && (sco = acl->link)) {
-               sco->state = BT_CLOSED;
+       if (acl) {
+               sco = acl->link;
+               if (sco) {
+                       sco->state = BT_CLOSED;
 
-               hci_proto_connect_cfm(sco, status);
-               hci_conn_del(sco);
+                       hci_proto_connect_cfm(sco, status);
+                       hci_conn_del(sco);
+               }
        }
 
        hci_dev_unlock(hdev);
@@ -687,7 +938,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
 }
 
 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
-                                               struct hci_conn *conn)
+                                                       struct hci_conn *conn)
 {
        if (conn->state != BT_CONFIG || !conn->out)
                return 0;
@@ -808,11 +1059,14 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
        hci_dev_lock(hdev);
 
        acl = hci_conn_hash_lookup_handle(hdev, handle);
-       if (acl && (sco = acl->link)) {
-               sco->state = BT_CLOSED;
+       if (acl) {
+               sco = acl->link;
+               if (sco) {
+                       sco->state = BT_CLOSED;
 
-               hci_proto_connect_cfm(sco, status);
-               hci_conn_del(sco);
+                       hci_proto_connect_cfm(sco, status);
+                       hci_conn_del(sco);
+               }
        }
 
        hci_dev_unlock(hdev);
@@ -872,6 +1126,43 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
        hci_dev_unlock(hdev);
 }
 
+static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
+{
+       struct hci_cp_le_create_conn *cp;
+       struct hci_conn *conn;
+
+       BT_DBG("%s status 0x%x", hdev->name, status);
+
+       cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
+       if (!cp)
+               return;
+
+       hci_dev_lock(hdev);
+
+       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
+
+       BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
+               conn);
+
+       if (status) {
+               if (conn && conn->state == BT_CONNECT) {
+                       conn->state = BT_CLOSED;
+                       hci_proto_connect_cfm(conn, status);
+                       hci_conn_del(conn);
+               }
+       } else {
+               if (!conn) {
+                       conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
+                       if (conn)
+                               conn->out = 1;
+                       else
+                               BT_ERR("No memory for new connection");
+               }
+       }
+
+       hci_dev_unlock(hdev);
+}
+
 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        __u8 status = *((__u8 *) skb->data);
@@ -942,6 +1233,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
                        conn->state = BT_CONFIG;
                        hci_conn_hold(conn);
                        conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+                       mgmt_connected(hdev->id, &ev->bdaddr);
                } else
                        conn->state = BT_CONNECTED;
 
@@ -970,8 +1262,11 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
                        hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
                                                        sizeof(cp), &cp);
                }
-       } else
+       } else {
                conn->state = BT_CLOSED;
+               if (conn->type == ACL_LINK)
+                       mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
+       }
 
        if (conn->type == ACL_LINK)
                hci_sco_setup(conn, ev->status);
@@ -998,7 +1293,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
 
        mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
 
-       if ((mask & HCI_LM_ACCEPT) && !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
+       if ((mask & HCI_LM_ACCEPT) &&
+                       !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
                /* Connection accepted */
                struct inquiry_entry *ie;
                struct hci_conn *conn;
@@ -1068,19 +1364,26 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
 
        BT_DBG("%s status %d", hdev->name, ev->status);
 
-       if (ev->status)
+       if (ev->status) {
+               mgmt_disconnect_failed(hdev->id);
                return;
+       }
 
        hci_dev_lock(hdev);
 
        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
-       if (conn) {
-               conn->state = BT_CLOSED;
+       if (!conn)
+               goto unlock;
 
-               hci_proto_disconn_cfm(conn, ev->reason);
-               hci_conn_del(conn);
-       }
+       conn->state = BT_CLOSED;
+
+       if (conn->type == ACL_LINK)
+               mgmt_disconnected(hdev->id, &conn->dst);
 
+       hci_proto_disconn_cfm(conn, ev->reason);
+       hci_conn_del(conn);
+
+unlock:
        hci_dev_unlock(hdev);
 }
 
@@ -1393,11 +1696,46 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
                hci_cc_write_ca_timeout(hdev, skb);
                break;
 
+       case HCI_OP_DELETE_STORED_LINK_KEY:
+               hci_cc_delete_stored_link_key(hdev, skb);
+               break;
+
+       case HCI_OP_SET_EVENT_MASK:
+               hci_cc_set_event_mask(hdev, skb);
+               break;
+
+       case HCI_OP_WRITE_INQUIRY_MODE:
+               hci_cc_write_inquiry_mode(hdev, skb);
+               break;
+
+       case HCI_OP_READ_INQ_RSP_TX_POWER:
+               hci_cc_read_inq_rsp_tx_power(hdev, skb);
+               break;
+
+       case HCI_OP_SET_EVENT_FLT:
+               hci_cc_set_event_flt(hdev, skb);
+               break;
+
+       case HCI_OP_PIN_CODE_REPLY:
+               hci_cc_pin_code_reply(hdev, skb);
+               break;
+
+       case HCI_OP_PIN_CODE_NEG_REPLY:
+               hci_cc_pin_code_neg_reply(hdev, skb);
+               break;
+
+       case HCI_OP_LE_READ_BUFFER_SIZE:
+               hci_cc_le_read_buffer_size(hdev, skb);
+               break;
+
        default:
                BT_DBG("%s opcode 0x%x", hdev->name, opcode);
                break;
        }
 
+       if (ev->opcode != HCI_OP_NOP)
+               del_timer(&hdev->cmd_timer);
+
        if (ev->ncmd) {
                atomic_set(&hdev->cmd_cnt, 1);
                if (!skb_queue_empty(&hdev->cmd_q))
@@ -1459,11 +1797,23 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_cs_exit_sniff_mode(hdev, ev->status);
                break;
 
+       case HCI_OP_DISCONNECT:
+               if (ev->status != 0)
+                       mgmt_disconnect_failed(hdev->id);
+               break;
+
+       case HCI_OP_LE_CREATE_CONN:
+               hci_cs_le_create_conn(hdev, ev->status);
+               break;
+
        default:
                BT_DBG("%s opcode 0x%x", hdev->name, opcode);
                break;
        }
 
+       if (ev->opcode != HCI_OP_NOP)
+               del_timer(&hdev->cmd_timer);
+
        if (ev->ncmd) {
                atomic_set(&hdev->cmd_cnt, 1);
                if (!skb_queue_empty(&hdev->cmd_q))
@@ -1529,6 +1879,16 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
                                hdev->acl_cnt += count;
                                if (hdev->acl_cnt > hdev->acl_pkts)
                                        hdev->acl_cnt = hdev->acl_pkts;
+                       } else if (conn->type == LE_LINK) {
+                               if (hdev->le_pkts) {
+                                       hdev->le_cnt += count;
+                                       if (hdev->le_cnt > hdev->le_pkts)
+                                               hdev->le_cnt = hdev->le_pkts;
+                               } else {
+                                       hdev->acl_cnt += count;
+                                       if (hdev->acl_cnt > hdev->acl_pkts)
+                                               hdev->acl_cnt = hdev->acl_pkts;
+                               }
                        } else {
                                hdev->sco_cnt += count;
                                if (hdev->sco_cnt > hdev->sco_pkts)
@@ -1586,18 +1946,72 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
                hci_conn_put(conn);
        }
 
+       if (!test_bit(HCI_PAIRABLE, &hdev->flags))
+               hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
+                                       sizeof(ev->bdaddr), &ev->bdaddr);
+
+       if (test_bit(HCI_MGMT, &hdev->flags))
+               mgmt_pin_code_request(hdev->id, &ev->bdaddr);
+
        hci_dev_unlock(hdev);
 }
 
 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
+       struct hci_ev_link_key_req *ev = (void *) skb->data;
+       struct hci_cp_link_key_reply cp;
+       struct hci_conn *conn;
+       struct link_key *key;
+
        BT_DBG("%s", hdev->name);
+
+       if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
+               return;
+
+       hci_dev_lock(hdev);
+
+       key = hci_find_link_key(hdev, &ev->bdaddr);
+       if (!key) {
+               BT_DBG("%s link key not found for %s", hdev->name,
+                                                       batostr(&ev->bdaddr));
+               goto not_found;
+       }
+
+       BT_DBG("%s found key type %u for %s", hdev->name, key->type,
+                                                       batostr(&ev->bdaddr));
+
+       if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) && key->type == 0x03) {
+               BT_DBG("%s ignoring debug key", hdev->name);
+               goto not_found;
+       }
+
+       conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+
+       if (key->type == 0x04 && conn && conn->auth_type != 0xff &&
+                                               (conn->auth_type & 0x01)) {
+               BT_DBG("%s ignoring unauthenticated key", hdev->name);
+               goto not_found;
+       }
+
+       bacpy(&cp.bdaddr, &ev->bdaddr);
+       memcpy(cp.link_key, key->val, 16);
+
+       hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
+
+       hci_dev_unlock(hdev);
+
+       return;
+
+not_found:
+       hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
+       hci_dev_unlock(hdev);
 }
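The two key types filtered out above follow the Link Key Notification type codes: 0x03 is a debug combination key (only accepted when HCI_DEBUG_KEYS is set) and 0x04 an unauthenticated combination key, which is refused when the connection's auth_type requests MITM protection (odd values). For reference, the assumed codes are:

	/* Link key types as used in the checks above (per the Link Key
	 * Notification event definition). */
	enum bt_link_key_type {
		KEY_COMBINATION		= 0x00,
		KEY_LOCAL_UNIT		= 0x01,
		KEY_REMOTE_UNIT		= 0x02,
		KEY_DEBUG_COMBINATION	= 0x03,	/* needs HCI_DEBUG_KEYS   */
		KEY_UNAUTH_COMBINATION	= 0x04,	/* refused for MITM links */
		KEY_AUTH_COMBINATION	= 0x05,
		KEY_CHANGED_COMBINATION	= 0x06,
	};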
 
 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_link_key_notify *ev = (void *) skb->data;
        struct hci_conn *conn;
+       u8 pin_len = 0;
 
        BT_DBG("%s", hdev->name);
 
@@ -1607,9 +2021,14 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
        if (conn) {
                hci_conn_hold(conn);
                conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+               pin_len = conn->pin_length;
                hci_conn_put(conn);
        }
 
+       if (test_bit(HCI_LINK_KEYS, &hdev->flags))
+               hci_add_link_key(hdev, 1, &ev->bdaddr, ev->link_key,
+                                                       ev->key_type, pin_len);
+
        hci_dev_unlock(hdev);
 }
 
@@ -1683,7 +2102,8 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
        hci_dev_lock(hdev);
 
        if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
-               struct inquiry_info_with_rssi_and_pscan_mode *info = (void *) (skb->data + 1);
+               struct inquiry_info_with_rssi_and_pscan_mode *info;
+               info = (void *) (skb->data + 1);
 
                for (; num_rsp; num_rsp--) {
                        bacpy(&data.bdaddr, &info->bdaddr);
@@ -1824,17 +2244,8 @@ static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buf
 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_sniff_subrate *ev = (void *) skb->data;
-       struct hci_conn *conn;
 
        BT_DBG("%s status %d", hdev->name, ev->status);
-
-       hci_dev_lock(hdev);
-
-       conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
-       if (conn) {
-       }
-
-       hci_dev_unlock(hdev);
 }
 
 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1852,12 +2263,12 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
 
        for (; num_rsp; num_rsp--) {
                bacpy(&data.bdaddr, &info->bdaddr);
-               data.pscan_rep_mode     = info->pscan_rep_mode;
-               data.pscan_period_mode  = info->pscan_period_mode;
-               data.pscan_mode         = 0x00;
+               data.pscan_rep_mode     = info->pscan_rep_mode;
+               data.pscan_period_mode  = info->pscan_period_mode;
+               data.pscan_mode         = 0x00;
                memcpy(data.dev_class, info->dev_class, 3);
-               data.clock_offset       = info->clock_offset;
-               data.rssi               = info->rssi;
+               data.clock_offset       = info->clock_offset;
+               data.rssi               = info->rssi;
                data.ssp_mode           = 0x01;
                info++;
                hci_inquiry_cache_update(hdev, &data);
@@ -1866,6 +2277,25 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
        hci_dev_unlock(hdev);
 }
 
+static inline u8 hci_get_auth_req(struct hci_conn *conn)
+{
+       /* If remote requests dedicated bonding follow that lead */
+       if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
+               /* If both remote and local IO capabilities allow MITM
+                * protection then require it, otherwise don't */
+               if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
+                       return 0x02;
+               else
+                       return 0x03;
+       }
+
+       /* If remote requests no-bonding follow that lead */
+       if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
+               return 0x00;
+
+       return conn->auth_type;
+}
+
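hci_get_auth_req() follows the SSP authentication-requirements encoding: 0x00/0x01 no bonding, 0x02/0x03 dedicated bonding, 0x04/0x05 general bonding, with the low bit requesting MITM protection. Since a NoInputNoOutput (0x03) IO capability on either side makes MITM impossible, the dedicated-bonding branch drops from 0x03 to 0x02. A standalone sketch of the same decision (constants spelled out here as assumptions, not taken from the patch):

	#include <stdio.h>
	#include <stdint.h>

	#define IO_NO_INPUT_NO_OUTPUT	0x03	/* IO capability            */
	#define AUTH_NO_BONDING		0x00
	#define AUTH_DEDICATED_BONDING	0x02	/* | 0x01 == same with MITM */

	/* Same decision as hci_get_auth_req(): honour the remote side's
	 * bonding request, but only ask for MITM when both IO capabilities
	 * can support it. */
	static uint8_t auth_req(uint8_t remote_auth, uint8_t remote_cap,
				uint8_t local_cap, uint8_t local_auth)
	{
		if (remote_auth == 0x02 || remote_auth == 0x03) {
			if (remote_cap == IO_NO_INPUT_NO_OUTPUT ||
			    local_cap == IO_NO_INPUT_NO_OUTPUT)
				return AUTH_DEDICATED_BONDING;
			return AUTH_DEDICATED_BONDING | 0x01;
		}

		if (remote_auth == 0x00 || remote_auth == 0x01)
			return AUTH_NO_BONDING;

		return local_auth;
	}

	int main(void)
	{
		printf("0x%02x\n", auth_req(0x03, 0x01, 0x01, 0x04));	/* 0x03 */
		printf("0x%02x\n", auth_req(0x03, 0x03, 0x01, 0x04));	/* 0x02 */
		printf("0x%02x\n", auth_req(0x01, 0x01, 0x01, 0x04));	/* 0x00 */
		return 0;
	}

The three calls print 0x03, 0x02 and 0x00, matching the dedicated-bonding-with-MITM, MITM-impossible and no-bonding branches above.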
 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_io_capa_request *ev = (void *) skb->data;
@@ -1876,9 +2306,59 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
        hci_dev_lock(hdev);
 
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
-       if (conn)
-               hci_conn_hold(conn);
+       if (!conn)
+               goto unlock;
+
+       hci_conn_hold(conn);
+
+       if (!test_bit(HCI_MGMT, &hdev->flags))
+               goto unlock;
+
+       if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
+                       (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
+               struct hci_cp_io_capability_reply cp;
+
+               bacpy(&cp.bdaddr, &ev->bdaddr);
+               cp.capability = conn->io_capability;
+               cp.oob_data = 0;
+               cp.authentication = hci_get_auth_req(conn);
+
+               hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
+                                                       sizeof(cp), &cp);
+       } else {
+               struct hci_cp_io_capability_neg_reply cp;
+
+               bacpy(&cp.bdaddr, &ev->bdaddr);
+               cp.reason = 0x18; /* Pairing not allowed */
+
+               hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
+                                                       sizeof(cp), &cp);
+       }
+
+unlock:
+       hci_dev_unlock(hdev);
+}
+
+static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_ev_io_capa_reply *ev = (void *) skb->data;
+       struct hci_conn *conn;
+
+       BT_DBG("%s", hdev->name);
+
+       hci_dev_lock(hdev);
 
+       conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+       if (!conn)
+               goto unlock;
+
+       hci_conn_hold(conn);
+
+       conn->remote_cap = ev->capability;
+       conn->remote_oob = ev->oob_data;
+       conn->remote_auth = ev->authentication;
+
+unlock:
        hci_dev_unlock(hdev);
 }
 
@@ -1914,6 +2394,60 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_
        hci_dev_unlock(hdev);
 }
 
+static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_ev_le_conn_complete *ev = (void *) skb->data;
+       struct hci_conn *conn;
+
+       BT_DBG("%s status %d", hdev->name, ev->status);
+
+       hci_dev_lock(hdev);
+
+       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
+       if (!conn) {
+               conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
+               if (!conn) {
+                       BT_ERR("No memory for new connection");
+                       hci_dev_unlock(hdev);
+                       return;
+               }
+       }
+
+       if (ev->status) {
+               hci_proto_connect_cfm(conn, ev->status);
+               conn->state = BT_CLOSED;
+               hci_conn_del(conn);
+               goto unlock;
+       }
+
+       conn->handle = __le16_to_cpu(ev->handle);
+       conn->state = BT_CONNECTED;
+
+       hci_conn_hold_device(conn);
+       hci_conn_add_sysfs(conn);
+
+       hci_proto_connect_cfm(conn, ev->status);
+
+unlock:
+       hci_dev_unlock(hdev);
+}
+
+static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_ev_le_meta *le_ev = (void *) skb->data;
+
+       skb_pull(skb, sizeof(*le_ev));
+
+       switch (le_ev->subevent) {
+       case HCI_EV_LE_CONN_COMPLETE:
+               hci_le_conn_complete_evt(hdev, skb);
+               break;
+
+       default:
+               break;
+       }
+}
+
 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_event_hdr *hdr = (void *) skb->data;
@@ -2042,6 +2576,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
                hci_io_capa_request_evt(hdev, skb);
                break;
 
+       case HCI_EV_IO_CAPA_REPLY:
+               hci_io_capa_reply_evt(hdev, skb);
+               break;
+
        case HCI_EV_SIMPLE_PAIR_COMPLETE:
                hci_simple_pair_complete_evt(hdev, skb);
                break;
@@ -2050,6 +2588,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
                hci_remote_host_features_evt(hdev, skb);
                break;
 
+       case HCI_EV_LE_META:
+               hci_le_meta_evt(hdev, skb);
+               break;
+
        default:
                BT_DBG("%s event 0x%x", hdev->name, event);
                break;
@@ -2083,6 +2625,6 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
 
        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        skb->dev = (void *) hdev;
-       hci_send_to_sock(hdev, skb);
+       hci_send_to_sock(hdev, skb, NULL);
        kfree_skb(skb);
 }
index 29827c77f6ce20ab2eb83dce428e47533303d465..d50e9613660887ad101cbdf6000db4b023cefa1d 100644 (file)
@@ -85,7 +85,8 @@ static struct bt_sock_list hci_sk_list = {
 };
 
 /* Send frame to RAW socket */
-void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
+void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
+                                                       struct sock *skip_sk)
 {
        struct sock *sk;
        struct hlist_node *node;
@@ -97,6 +98,9 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
                struct hci_filter *flt;
                struct sk_buff *nskb;
 
+               if (sk == skip_sk)
+                       continue;
+
                if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
                        continue;
 
index 5fce3d6d07b4acc2a656b367121b06478c92061e..3c838a65a75a279eb1c776a650ef74b5d6ef0671 100644 (file)
@@ -11,7 +11,7 @@
 
 static struct class *bt_class;
 
-struct dentry *bt_debugfs = NULL;
+struct dentry *bt_debugfs;
 EXPORT_SYMBOL_GPL(bt_debugfs);
 
 static inline char *link_typetostr(int type)
@@ -51,8 +51,8 @@ static ssize_t show_link_features(struct device *dev, struct device_attribute *a
                                conn->features[6], conn->features[7]);
 }
 
-#define LINK_ATTR(_name,_mode,_show,_store) \
-struct device_attribute link_attr_##_name = __ATTR(_name,_mode,_show,_store)
+#define LINK_ATTR(_name, _mode, _show, _store) \
+struct device_attribute link_attr_##_name = __ATTR(_name, _mode, _show, _store)
 
 static LINK_ATTR(type, S_IRUGO, show_link_type, NULL);
 static LINK_ATTR(address, S_IRUGO, show_link_address, NULL);
@@ -461,6 +461,56 @@ static const struct file_operations blacklist_fops = {
        .llseek         = seq_lseek,
        .release        = single_release,
 };
+
+static void print_bt_uuid(struct seq_file *f, u8 *uuid)
+{
+       u32 data0, data4;
+       u16 data1, data2, data3, data5;
+
+       memcpy(&data0, &uuid[0], 4);
+       memcpy(&data1, &uuid[4], 2);
+       memcpy(&data2, &uuid[6], 2);
+       memcpy(&data3, &uuid[8], 2);
+       memcpy(&data4, &uuid[10], 4);
+       memcpy(&data5, &uuid[14], 2);
+
+       seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
+                               ntohl(data0), ntohs(data1), ntohs(data2),
+                               ntohs(data3), ntohl(data4), ntohs(data5));
+}
+
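print_bt_uuid() treats the stored 16-byte UUID as big-endian and renders it in the canonical 8-4-4-4-12 form for the new uuids entry under the per-adapter debugfs directory (typically /sys/kernel/debug/bluetooth/hciX/). A userspace equivalent, fed the well-known Serial Port Profile UUID as sample input:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>

	/* Format a big-endian 128-bit UUID the same way as print_bt_uuid(). */
	static void print_uuid(const uint8_t *uuid)
	{
		uint32_t d0, d4;
		uint16_t d1, d2, d3, d5;

		memcpy(&d0, &uuid[0], 4);
		memcpy(&d1, &uuid[4], 2);
		memcpy(&d2, &uuid[6], 2);
		memcpy(&d3, &uuid[8], 2);
		memcpy(&d4, &uuid[10], 4);
		memcpy(&d5, &uuid[14], 2);

		printf("%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
		       ntohl(d0), ntohs(d1), ntohs(d2),
		       ntohs(d3), ntohl(d4), ntohs(d5));
	}

	int main(void)
	{
		/* Serial Port Profile: 00001101-0000-1000-8000-00805f9b34fb */
		static const uint8_t spp[16] = {
			0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x10, 0x00,
			0x80, 0x00, 0x00, 0x80, 0x5f, 0x9b, 0x34, 0xfb
		};

		print_uuid(spp);
		return 0;
	}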
+static int uuids_show(struct seq_file *f, void *p)
+{
+       struct hci_dev *hdev = f->private;
+       struct list_head *l;
+
+       hci_dev_lock_bh(hdev);
+
+       list_for_each(l, &hdev->uuids) {
+               struct bt_uuid *uuid;
+
+               uuid = list_entry(l, struct bt_uuid, list);
+
+               print_bt_uuid(f, uuid->uuid);
+       }
+
+       hci_dev_unlock_bh(hdev);
+
+       return 0;
+}
+
+static int uuids_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, uuids_show, inode->i_private);
+}
+
+static const struct file_operations uuids_fops = {
+       .open           = uuids_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 int hci_register_sysfs(struct hci_dev *hdev)
 {
        struct device *dev = &hdev->dev;
@@ -493,6 +543,8 @@ int hci_register_sysfs(struct hci_dev *hdev)
        debugfs_create_file("blacklist", 0444, hdev->debugfs,
                                                hdev, &blacklist_fops);
 
+       debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
+
        return 0;
 }
 
index 29544c21f4b52b33a34b87a8621d6150cd9b2455..2429ca2d7b06d1b5a8807dd8a9871299b0f0e1ca 100644 (file)
@@ -157,7 +157,8 @@ static int hidp_queue_event(struct hidp_session *session, struct input_dev *dev,
 
        session->leds = newleds;
 
-       if (!(skb = alloc_skb(3, GFP_ATOMIC))) {
+       skb = alloc_skb(3, GFP_ATOMIC);
+       if (!skb) {
                BT_ERR("Can't allocate memory for new frame");
                return -ENOMEM;
        }
@@ -250,7 +251,8 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
 
        BT_DBG("session %p data %p size %d", session, data, size);
 
-       if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
+       skb = alloc_skb(size + 1, GFP_ATOMIC);
+       if (!skb) {
                BT_ERR("Can't allocate memory for new frame");
                return -ENOMEM;
        }
@@ -283,7 +285,8 @@ static int hidp_queue_report(struct hidp_session *session,
 
        BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size);
 
-       if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
+       skb = alloc_skb(size + 1, GFP_ATOMIC);
+       if (!skb) {
                BT_ERR("Can't allocate memory for new frame");
                return -ENOMEM;
        }
@@ -1016,8 +1019,6 @@ static int __init hidp_init(void)
 {
        int ret;
 
-       l2cap_load();
-
        BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION);
 
        ret = hid_register_driver(&hidp_driver);
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
deleted file mode 100644 (file)
index 675614e..0000000
+++ /dev/null
@@ -1,4930 +0,0 @@
-/*
-   BlueZ - Bluetooth protocol stack for Linux
-   Copyright (C) 2000-2001 Qualcomm Incorporated
-   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
-   Copyright (C) 2010 Google Inc.
-
-   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
-
-   This program is free software; you can redistribute it and/or modify
-   it under the terms of the GNU General Public License version 2 as
-   published by the Free Software Foundation;
-
-   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
-   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
-   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
-   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
-   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
-   SOFTWARE IS DISCLAIMED.
-*/
-
-/* Bluetooth L2CAP core and sockets. */
-
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/socket.h>
-#include <linux/skbuff.h>
-#include <linux/list.h>
-#include <linux/device.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-#include <linux/crc16.h>
-#include <net/sock.h>
-
-#include <asm/system.h>
-#include <asm/unaligned.h>
-
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/l2cap.h>
-
-#define VERSION "2.15"
-
-static int disable_ertm;
-
-static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
-static u8 l2cap_fixed_chan[8] = { 0x02, };
-
-static const struct proto_ops l2cap_sock_ops;
-
-static struct workqueue_struct *_busy_wq;
-
-static struct bt_sock_list l2cap_sk_list = {
-       .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
-};
-
-static void l2cap_busy_work(struct work_struct *work);
-
-static void __l2cap_sock_close(struct sock *sk, int reason);
-static void l2cap_sock_close(struct sock *sk);
-static void l2cap_sock_kill(struct sock *sk);
-
-static int l2cap_build_conf_req(struct sock *sk, void *data);
-static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
-                               u8 code, u8 ident, u16 dlen, void *data);
-
-static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
-
-/* ---- L2CAP timers ---- */
-static void l2cap_sock_set_timer(struct sock *sk, long timeout)
-{
-       BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
-       sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
-}
-
-static void l2cap_sock_clear_timer(struct sock *sk)
-{
-       BT_DBG("sock %p state %d", sk, sk->sk_state);
-       sk_stop_timer(sk, &sk->sk_timer);
-}
-
-static void l2cap_sock_timeout(unsigned long arg)
-{
-       struct sock *sk = (struct sock *) arg;
-       int reason;
-
-       BT_DBG("sock %p state %d", sk, sk->sk_state);
-
-       bh_lock_sock(sk);
-
-       if (sock_owned_by_user(sk)) {
-               /* sk is owned by user. Try again later */
-               l2cap_sock_set_timer(sk, HZ / 5);
-               bh_unlock_sock(sk);
-               sock_put(sk);
-               return;
-       }
-
-       if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
-               reason = ECONNREFUSED;
-       else if (sk->sk_state == BT_CONNECT &&
-                               l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
-               reason = ECONNREFUSED;
-       else
-               reason = ETIMEDOUT;
-
-       __l2cap_sock_close(sk, reason);
-
-       bh_unlock_sock(sk);
-
-       l2cap_sock_kill(sk);
-       sock_put(sk);
-}
-
-/* ---- L2CAP channels ---- */
-static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
-{
-       struct sock *s;
-       for (s = l->head; s; s = l2cap_pi(s)->next_c) {
-               if (l2cap_pi(s)->dcid == cid)
-                       break;
-       }
-       return s;
-}
-
-static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
-{
-       struct sock *s;
-       for (s = l->head; s; s = l2cap_pi(s)->next_c) {
-               if (l2cap_pi(s)->scid == cid)
-                       break;
-       }
-       return s;
-}
-
-/* Find channel with given SCID.
- * Returns locked socket */
-static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
-{
-       struct sock *s;
-       read_lock(&l->lock);
-       s = __l2cap_get_chan_by_scid(l, cid);
-       if (s)
-               bh_lock_sock(s);
-       read_unlock(&l->lock);
-       return s;
-}
-
-static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
-{
-       struct sock *s;
-       for (s = l->head; s; s = l2cap_pi(s)->next_c) {
-               if (l2cap_pi(s)->ident == ident)
-                       break;
-       }
-       return s;
-}
-
-static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
-{
-       struct sock *s;
-       read_lock(&l->lock);
-       s = __l2cap_get_chan_by_ident(l, ident);
-       if (s)
-               bh_lock_sock(s);
-       read_unlock(&l->lock);
-       return s;
-}
-
-static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
-{
-       u16 cid = L2CAP_CID_DYN_START;
-
-       for (; cid < L2CAP_CID_DYN_END; cid++) {
-               if (!__l2cap_get_chan_by_scid(l, cid))
-                       return cid;
-       }
-
-       return 0;
-}
-
-static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
-{
-       sock_hold(sk);
-
-       if (l->head)
-               l2cap_pi(l->head)->prev_c = sk;
-
-       l2cap_pi(sk)->next_c = l->head;
-       l2cap_pi(sk)->prev_c = NULL;
-       l->head = sk;
-}
-
-static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
-{
-       struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
-
-       write_lock_bh(&l->lock);
-       if (sk == l->head)
-               l->head = next;
-
-       if (next)
-               l2cap_pi(next)->prev_c = prev;
-       if (prev)
-               l2cap_pi(prev)->next_c = next;
-       write_unlock_bh(&l->lock);
-
-       __sock_put(sk);
-}
-
-static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
-{
-       struct l2cap_chan_list *l = &conn->chan_list;
-
-       BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
-                       l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
-
-       conn->disc_reason = 0x13;
-
-       l2cap_pi(sk)->conn = conn;
-
-       if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
-               /* Alloc CID for connection-oriented socket */
-               l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
-       } else if (sk->sk_type == SOCK_DGRAM) {
-               /* Connectionless socket */
-               l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
-               l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
-               l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
-       } else {
-               /* Raw socket can send/recv signalling messages only */
-               l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
-               l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
-               l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
-       }
-
-       __l2cap_chan_link(l, sk);
-
-       if (parent)
-               bt_accept_enqueue(parent, sk);
-}
-
-/* Delete channel.
- * Must be called on the locked socket. */
-static void l2cap_chan_del(struct sock *sk, int err)
-{
-       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-       struct sock *parent = bt_sk(sk)->parent;
-
-       l2cap_sock_clear_timer(sk);
-
-       BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
-
-       if (conn) {
-               /* Unlink from channel list */
-               l2cap_chan_unlink(&conn->chan_list, sk);
-               l2cap_pi(sk)->conn = NULL;
-               hci_conn_put(conn->hcon);
-       }
-
-       sk->sk_state = BT_CLOSED;
-       sock_set_flag(sk, SOCK_ZAPPED);
-
-       if (err)
-               sk->sk_err = err;
-
-       if (parent) {
-               bt_accept_unlink(sk);
-               parent->sk_data_ready(parent, 0);
-       } else
-               sk->sk_state_change(sk);
-
-       skb_queue_purge(TX_QUEUE(sk));
-
-       if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
-               struct srej_list *l, *tmp;
-
-               del_timer(&l2cap_pi(sk)->retrans_timer);
-               del_timer(&l2cap_pi(sk)->monitor_timer);
-               del_timer(&l2cap_pi(sk)->ack_timer);
-
-               skb_queue_purge(SREJ_QUEUE(sk));
-               skb_queue_purge(BUSY_QUEUE(sk));
-
-               list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
-                       list_del(&l->list);
-                       kfree(l);
-               }
-       }
-}
-
-static inline u8 l2cap_get_auth_type(struct sock *sk)
-{
-       if (sk->sk_type == SOCK_RAW) {
-               switch (l2cap_pi(sk)->sec_level) {
-               case BT_SECURITY_HIGH:
-                       return HCI_AT_DEDICATED_BONDING_MITM;
-               case BT_SECURITY_MEDIUM:
-                       return HCI_AT_DEDICATED_BONDING;
-               default:
-                       return HCI_AT_NO_BONDING;
-               }
-       } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
-               if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
-                       l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
-
-               if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
-                       return HCI_AT_NO_BONDING_MITM;
-               else
-                       return HCI_AT_NO_BONDING;
-       } else {
-               switch (l2cap_pi(sk)->sec_level) {
-               case BT_SECURITY_HIGH:
-                       return HCI_AT_GENERAL_BONDING_MITM;
-               case BT_SECURITY_MEDIUM:
-                       return HCI_AT_GENERAL_BONDING;
-               default:
-                       return HCI_AT_NO_BONDING;
-               }
-       }
-}
-
-/* Service level security */
-static inline int l2cap_check_security(struct sock *sk)
-{
-       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-       __u8 auth_type;
-
-       auth_type = l2cap_get_auth_type(sk);
-
-       return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
-                                                               auth_type);
-}
-
-static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
-{
-       u8 id;
-
-       /* Get next available identificator.
-        *    1 - 128 are used by kernel.
-        *  129 - 199 are reserved.
-        *  200 - 254 are used by utilities like l2ping, etc.
-        */
-
-       spin_lock_bh(&conn->lock);
-
-       if (++conn->tx_ident > 128)
-               conn->tx_ident = 1;
-
-       id = conn->tx_ident;
-
-       spin_unlock_bh(&conn->lock);
-
-       return id;
-}
-
-static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
-{
-       struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
-
-       BT_DBG("code 0x%2.2x", code);
-
-       if (!skb)
-               return;
-
-       hci_send_acl(conn->hcon, skb, 0);
-}
-
-static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
-{
-       struct sk_buff *skb;
-       struct l2cap_hdr *lh;
-       struct l2cap_conn *conn = pi->conn;
-       struct sock *sk = (struct sock *)pi;
-       int count, hlen = L2CAP_HDR_SIZE + 2;
-
-       if (sk->sk_state != BT_CONNECTED)
-               return;
-
-       if (pi->fcs == L2CAP_FCS_CRC16)
-               hlen += 2;
-
-       BT_DBG("pi %p, control 0x%2.2x", pi, control);
-
-       count = min_t(unsigned int, conn->mtu, hlen);
-       control |= L2CAP_CTRL_FRAME_TYPE;
-
-       if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
-               control |= L2CAP_CTRL_FINAL;
-               pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
-       }
-
-       if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
-               control |= L2CAP_CTRL_POLL;
-               pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
-       }
-
-       skb = bt_skb_alloc(count, GFP_ATOMIC);
-       if (!skb)
-               return;
-
-       lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
-       lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
-       lh->cid = cpu_to_le16(pi->dcid);
-       put_unaligned_le16(control, skb_put(skb, 2));
-
-       if (pi->fcs == L2CAP_FCS_CRC16) {
-               u16 fcs = crc16(0, (u8 *)lh, count - 2);
-               put_unaligned_le16(fcs, skb_put(skb, 2));
-       }
-
-       hci_send_acl(pi->conn->hcon, skb, 0);
-}
-
-static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
-{
-       if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
-               control |= L2CAP_SUPER_RCV_NOT_READY;
-               pi->conn_state |= L2CAP_CONN_RNR_SENT;
-       } else
-               control |= L2CAP_SUPER_RCV_READY;
-
-       control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-
-       l2cap_send_sframe(pi, control);
-}
-
-static inline int __l2cap_no_conn_pending(struct sock *sk)
-{
-       return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
-}
-
-static void l2cap_do_start(struct sock *sk)
-{
-       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-
-       if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
-               if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
-                       return;
-
-               if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
-                       struct l2cap_conn_req req;
-                       req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
-                       req.psm  = l2cap_pi(sk)->psm;
-
-                       l2cap_pi(sk)->ident = l2cap_get_ident(conn);
-                       l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
-
-                       l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
-                                       L2CAP_CONN_REQ, sizeof(req), &req);
-               }
-       } else {
-               struct l2cap_info_req req;
-               req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
-
-               conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
-               conn->info_ident = l2cap_get_ident(conn);
-
-               mod_timer(&conn->info_timer, jiffies +
-                                       msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
-
-               l2cap_send_cmd(conn, conn->info_ident,
-                                       L2CAP_INFO_REQ, sizeof(req), &req);
-       }
-}
-
-static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
-{
-       u32 local_feat_mask = l2cap_feat_mask;
-       if (!disable_ertm)
-               local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
-
-       switch (mode) {
-       case L2CAP_MODE_ERTM:
-               return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
-       case L2CAP_MODE_STREAMING:
-               return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
-       default:
-               return 0x00;
-       }
-}
-
-static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
-{
-       struct l2cap_disconn_req req;
-
-       if (!conn)
-               return;
-
-       skb_queue_purge(TX_QUEUE(sk));
-
-       if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
-               del_timer(&l2cap_pi(sk)->retrans_timer);
-               del_timer(&l2cap_pi(sk)->monitor_timer);
-               del_timer(&l2cap_pi(sk)->ack_timer);
-       }
-
-       req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
-       req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
-       l2cap_send_cmd(conn, l2cap_get_ident(conn),
-                       L2CAP_DISCONN_REQ, sizeof(req), &req);
-
-       sk->sk_state = BT_DISCONN;
-       sk->sk_err = err;
-}
-
-/* ---- L2CAP connections ---- */
-static void l2cap_conn_start(struct l2cap_conn *conn)
-{
-       struct l2cap_chan_list *l = &conn->chan_list;
-       struct sock_del_list del, *tmp1, *tmp2;
-       struct sock *sk;
-
-       BT_DBG("conn %p", conn);
-
-       INIT_LIST_HEAD(&del.list);
-
-       read_lock(&l->lock);
-
-       for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
-               bh_lock_sock(sk);
-
-               if (sk->sk_type != SOCK_SEQPACKET &&
-                               sk->sk_type != SOCK_STREAM) {
-                       bh_unlock_sock(sk);
-                       continue;
-               }
-
-               if (sk->sk_state == BT_CONNECT) {
-                       struct l2cap_conn_req req;
-
-                       if (!l2cap_check_security(sk) ||
-                                       !__l2cap_no_conn_pending(sk)) {
-                               bh_unlock_sock(sk);
-                               continue;
-                       }
-
-                       if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
-                                       conn->feat_mask)
-                                       && l2cap_pi(sk)->conf_state &
-                                       L2CAP_CONF_STATE2_DEVICE) {
-                               tmp1 = kzalloc(sizeof(struct sock_del_list),
-                                               GFP_ATOMIC);
-                               tmp1->sk = sk;
-                               list_add_tail(&tmp1->list, &del.list);
-                               bh_unlock_sock(sk);
-                               continue;
-                       }
-
-                       req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
-                       req.psm  = l2cap_pi(sk)->psm;
-
-                       l2cap_pi(sk)->ident = l2cap_get_ident(conn);
-                       l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
-
-                       l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
-                               L2CAP_CONN_REQ, sizeof(req), &req);
-
-               } else if (sk->sk_state == BT_CONNECT2) {
-                       struct l2cap_conn_rsp rsp;
-                       char buf[128];
-                       rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
-                       rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
-
-                       if (l2cap_check_security(sk)) {
-                               if (bt_sk(sk)->defer_setup) {
-                                       struct sock *parent = bt_sk(sk)->parent;
-                                       rsp.result = cpu_to_le16(L2CAP_CR_PEND);
-                                       rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
-                                       parent->sk_data_ready(parent, 0);
-
-                               } else {
-                                       sk->sk_state = BT_CONFIG;
-                                       rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
-                                       rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-                               }
-                       } else {
-                               rsp.result = cpu_to_le16(L2CAP_CR_PEND);
-                               rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
-                       }
-
-                       l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
-                                       L2CAP_CONN_RSP, sizeof(rsp), &rsp);
-
-                       if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
-                                       rsp.result != L2CAP_CR_SUCCESS) {
-                               bh_unlock_sock(sk);
-                               continue;
-                       }
-
-                       l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
-                       l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-                                               l2cap_build_conf_req(sk, buf), buf);
-                       l2cap_pi(sk)->num_conf_req++;
-               }
-
-               bh_unlock_sock(sk);
-       }
-
-       read_unlock(&l->lock);
-
-       list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
-               bh_lock_sock(tmp1->sk);
-               __l2cap_sock_close(tmp1->sk, ECONNRESET);
-               bh_unlock_sock(tmp1->sk);
-               list_del(&tmp1->list);
-               kfree(tmp1);
-       }
-}
-
-static void l2cap_conn_ready(struct l2cap_conn *conn)
-{
-       struct l2cap_chan_list *l = &conn->chan_list;
-       struct sock *sk;
-
-       BT_DBG("conn %p", conn);
-
-       read_lock(&l->lock);
-
-       for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
-               bh_lock_sock(sk);
-
-               if (sk->sk_type != SOCK_SEQPACKET &&
-                               sk->sk_type != SOCK_STREAM) {
-                       l2cap_sock_clear_timer(sk);
-                       sk->sk_state = BT_CONNECTED;
-                       sk->sk_state_change(sk);
-               } else if (sk->sk_state == BT_CONNECT)
-                       l2cap_do_start(sk);
-
-               bh_unlock_sock(sk);
-       }
-
-       read_unlock(&l->lock);
-}
-
-/* Notify sockets that we cannot guaranty reliability anymore */
-static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
-{
-       struct l2cap_chan_list *l = &conn->chan_list;
-       struct sock *sk;
-
-       BT_DBG("conn %p", conn);
-
-       read_lock(&l->lock);
-
-       for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
-               if (l2cap_pi(sk)->force_reliable)
-                       sk->sk_err = err;
-       }
-
-       read_unlock(&l->lock);
-}
-
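-/* Information request timed out: mark the feature mask exchange as
- * complete and start any channels that were waiting for it.
- */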
-static void l2cap_info_timeout(unsigned long arg)
-{
-       struct l2cap_conn *conn = (void *) arg;
-
-       conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
-       conn->info_ident = 0;
-
-       l2cap_conn_start(conn);
-}
-
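-/* Allocate and initialise the L2CAP connection state for an ACL link
- * the first time it is needed; reuse the existing one on later calls.
- */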
-static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
-{
-       struct l2cap_conn *conn = hcon->l2cap_data;
-
-       if (conn || status)
-               return conn;
-
-       conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
-       if (!conn)
-               return NULL;
-
-       hcon->l2cap_data = conn;
-       conn->hcon = hcon;
-
-       BT_DBG("hcon %p conn %p", hcon, conn);
-
-       conn->mtu = hcon->hdev->acl_mtu;
-       conn->src = &hcon->hdev->bdaddr;
-       conn->dst = &hcon->dst;
-
-       conn->feat_mask = 0;
-
-       spin_lock_init(&conn->lock);
-       rwlock_init(&conn->chan_list.lock);
-
-       setup_timer(&conn->info_timer, l2cap_info_timeout,
-                                               (unsigned long) conn);
-
-       conn->disc_reason = 0x13;
-
-       return conn;
-}
-
-static void l2cap_conn_del(struct hci_conn *hcon, int err)
-{
-       struct l2cap_conn *conn = hcon->l2cap_data;
-       struct sock *sk;
-
-       if (!conn)
-               return;
-
-       BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
-
-       kfree_skb(conn->rx_skb);
-
-       /* Kill channels */
-       while ((sk = conn->chan_list.head)) {
-               bh_lock_sock(sk);
-               l2cap_chan_del(sk, err);
-               bh_unlock_sock(sk);
-               l2cap_sock_kill(sk);
-       }
-
-       if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
-               del_timer_sync(&conn->info_timer);
-
-       hcon->l2cap_data = NULL;
-       kfree(conn);
-}
-
-static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
-{
-       struct l2cap_chan_list *l = &conn->chan_list;
-       write_lock_bh(&l->lock);
-       __l2cap_chan_add(conn, sk, parent);
-       write_unlock_bh(&l->lock);
-}
-
-/* ---- Socket interface ---- */
-static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
-{
-       struct sock *sk;
-       struct hlist_node *node;
-       sk_for_each(sk, node, &l2cap_sk_list.head)
-               if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
-                       goto found;
-       sk = NULL;
-found:
-       return sk;
-}
-
-/* Find socket with psm and source bdaddr.
- * Returns closest match.
- */
-static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
-{
-       struct sock *sk = NULL, *sk1 = NULL;
-       struct hlist_node *node;
-
-       read_lock(&l2cap_sk_list.lock);
-
-       sk_for_each(sk, node, &l2cap_sk_list.head) {
-               if (state && sk->sk_state != state)
-                       continue;
-
-               if (l2cap_pi(sk)->psm == psm) {
-                       /* Exact match. */
-                       if (!bacmp(&bt_sk(sk)->src, src))
-                               break;
-
-                       /* Closest match */
-                       if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
-                               sk1 = sk;
-               }
-       }
-
-       read_unlock(&l2cap_sk_list.lock);
-
-       return node ? sk : sk1;
-}
-
-static void l2cap_sock_destruct(struct sock *sk)
-{
-       BT_DBG("sk %p", sk);
-
-       skb_queue_purge(&sk->sk_receive_queue);
-       skb_queue_purge(&sk->sk_write_queue);
-}
-
-static void l2cap_sock_cleanup_listen(struct sock *parent)
-{
-       struct sock *sk;
-
-       BT_DBG("parent %p", parent);
-
-       /* Close not yet accepted channels */
-       while ((sk = bt_accept_dequeue(parent, NULL)))
-               l2cap_sock_close(sk);
-
-       parent->sk_state = BT_CLOSED;
-       sock_set_flag(parent, SOCK_ZAPPED);
-}
-
-/* Kill socket (only if zapped and orphan)
- * Must be called on unlocked socket.
- */
-static void l2cap_sock_kill(struct sock *sk)
-{
-       if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
-               return;
-
-       BT_DBG("sk %p state %d", sk, sk->sk_state);
-
-       /* Kill poor orphan */
-       bt_sock_unlink(&l2cap_sk_list, sk);
-       sock_set_flag(sk, SOCK_DEAD);
-       sock_put(sk);
-}
-
-static void __l2cap_sock_close(struct sock *sk, int reason)
-{
-       BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
-
-       switch (sk->sk_state) {
-       case BT_LISTEN:
-               l2cap_sock_cleanup_listen(sk);
-               break;
-
-       case BT_CONNECTED:
-       case BT_CONFIG:
-               if (sk->sk_type == SOCK_SEQPACKET ||
-                               sk->sk_type == SOCK_STREAM) {
-                       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-
-                       l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
-                       l2cap_send_disconn_req(conn, sk, reason);
-               } else
-                       l2cap_chan_del(sk, reason);
-               break;
-
-       case BT_CONNECT2:
-               if (sk->sk_type == SOCK_SEQPACKET ||
-                               sk->sk_type == SOCK_STREAM) {
-                       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-                       struct l2cap_conn_rsp rsp;
-                       __u16 result;
-
-                       if (bt_sk(sk)->defer_setup)
-                               result = L2CAP_CR_SEC_BLOCK;
-                       else
-                               result = L2CAP_CR_BAD_PSM;
-                       sk->sk_state = BT_DISCONN;
-
-                       rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
-                       rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
-                       rsp.result = cpu_to_le16(result);
-                       rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-                       l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
-                                       L2CAP_CONN_RSP, sizeof(rsp), &rsp);
-               } else
-                       l2cap_chan_del(sk, reason);
-               break;
-
-       case BT_CONNECT:
-       case BT_DISCONN:
-               l2cap_chan_del(sk, reason);
-               break;
-
-       default:
-               sock_set_flag(sk, SOCK_ZAPPED);
-               break;
-       }
-}
-
-/* Must be called on unlocked socket. */
-static void l2cap_sock_close(struct sock *sk)
-{
-       l2cap_sock_clear_timer(sk);
-       lock_sock(sk);
-       __l2cap_sock_close(sk, ECONNRESET);
-       release_sock(sk);
-       l2cap_sock_kill(sk);
-}
-
-static void l2cap_sock_init(struct sock *sk, struct sock *parent)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-
-       BT_DBG("sk %p", sk);
-
-       if (parent) {
-               sk->sk_type = parent->sk_type;
-               bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
-
-               pi->imtu = l2cap_pi(parent)->imtu;
-               pi->omtu = l2cap_pi(parent)->omtu;
-               pi->conf_state = l2cap_pi(parent)->conf_state;
-               pi->mode = l2cap_pi(parent)->mode;
-               pi->fcs  = l2cap_pi(parent)->fcs;
-               pi->max_tx = l2cap_pi(parent)->max_tx;
-               pi->tx_win = l2cap_pi(parent)->tx_win;
-               pi->sec_level = l2cap_pi(parent)->sec_level;
-               pi->role_switch = l2cap_pi(parent)->role_switch;
-               pi->force_reliable = l2cap_pi(parent)->force_reliable;
-       } else {
-               pi->imtu = L2CAP_DEFAULT_MTU;
-               pi->omtu = 0;
-               if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
-                       pi->mode = L2CAP_MODE_ERTM;
-                       pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
-               } else {
-                       pi->mode = L2CAP_MODE_BASIC;
-               }
-               pi->max_tx = L2CAP_DEFAULT_MAX_TX;
-               pi->fcs  = L2CAP_FCS_CRC16;
-               pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
-               pi->sec_level = BT_SECURITY_LOW;
-               pi->role_switch = 0;
-               pi->force_reliable = 0;
-       }
-
-       /* Default config options */
-       pi->conf_len = 0;
-       pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
-       skb_queue_head_init(TX_QUEUE(sk));
-       skb_queue_head_init(SREJ_QUEUE(sk));
-       skb_queue_head_init(BUSY_QUEUE(sk));
-       INIT_LIST_HEAD(SREJ_LIST(sk));
-}
-
-static struct proto l2cap_proto = {
-       .name           = "L2CAP",
-       .owner          = THIS_MODULE,
-       .obj_size       = sizeof(struct l2cap_pinfo)
-};
-
-static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
-{
-       struct sock *sk;
-
-       sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
-       if (!sk)
-               return NULL;
-
-       sock_init_data(sock, sk);
-       INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
-
-       sk->sk_destruct = l2cap_sock_destruct;
-       sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
-
-       sock_reset_flag(sk, SOCK_ZAPPED);
-
-       sk->sk_protocol = proto;
-       sk->sk_state = BT_OPEN;
-
-       setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
-
-       bt_sock_link(&l2cap_sk_list, sk);
-       return sk;
-}
-
-static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
-                            int kern)
-{
-       struct sock *sk;
-
-       BT_DBG("sock %p", sock);
-
-       sock->state = SS_UNCONNECTED;
-
-       if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
-                       sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
-               return -ESOCKTNOSUPPORT;
-
-       if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
-               return -EPERM;
-
-       sock->ops = &l2cap_sock_ops;
-
-       sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
-       if (!sk)
-               return -ENOMEM;
-
-       l2cap_sock_init(sk, NULL);
-       return 0;
-}
-
-static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
-{
-       struct sock *sk = sock->sk;
-       struct sockaddr_l2 la;
-       int len, err = 0;
-
-       BT_DBG("sk %p", sk);
-
-       if (!addr || addr->sa_family != AF_BLUETOOTH)
-               return -EINVAL;
-
-       memset(&la, 0, sizeof(la));
-       len = min_t(unsigned int, sizeof(la), alen);
-       memcpy(&la, addr, len);
-
-       if (la.l2_cid)
-               return -EINVAL;
-
-       lock_sock(sk);
-
-       if (sk->sk_state != BT_OPEN) {
-               err = -EBADFD;
-               goto done;
-       }
-
-       if (la.l2_psm) {
-               __u16 psm = __le16_to_cpu(la.l2_psm);
-
-               /* PSM must be odd and lsb of upper byte must be 0 */
-               if ((psm & 0x0101) != 0x0001) {
-                       err = -EINVAL;
-                       goto done;
-               }
-
-               /* Restrict usage of well-known PSMs */
-               if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
-                       err = -EACCES;
-                       goto done;
-               }
-       }
-
-       write_lock_bh(&l2cap_sk_list.lock);
-
-       if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
-               err = -EADDRINUSE;
-       } else {
-               /* Save source address */
-               bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
-               l2cap_pi(sk)->psm   = la.l2_psm;
-               l2cap_pi(sk)->sport = la.l2_psm;
-               sk->sk_state = BT_BOUND;
-
-               if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
-                                       __le16_to_cpu(la.l2_psm) == 0x0003)
-                       l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
-       }
-
-       write_unlock_bh(&l2cap_sk_list.lock);
-
-done:
-       release_sock(sk);
-       return err;
-}
-
-static int l2cap_do_connect(struct sock *sk)
-{
-       bdaddr_t *src = &bt_sk(sk)->src;
-       bdaddr_t *dst = &bt_sk(sk)->dst;
-       struct l2cap_conn *conn;
-       struct hci_conn *hcon;
-       struct hci_dev *hdev;
-       __u8 auth_type;
-       int err;
-
-       BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
-                                                       l2cap_pi(sk)->psm);
-
-       hdev = hci_get_route(dst, src);
-       if (!hdev)
-               return -EHOSTUNREACH;
-
-       hci_dev_lock_bh(hdev);
-
-       err = -ENOMEM;
-
-       auth_type = l2cap_get_auth_type(sk);
-
-       hcon = hci_connect(hdev, ACL_LINK, dst,
-                                       l2cap_pi(sk)->sec_level, auth_type);
-       if (!hcon)
-               goto done;
-
-       conn = l2cap_conn_add(hcon, 0);
-       if (!conn) {
-               hci_conn_put(hcon);
-               goto done;
-       }
-
-       err = 0;
-
-       /* Update source addr of the socket */
-       bacpy(src, conn->src);
-
-       l2cap_chan_add(conn, sk, NULL);
-
-       sk->sk_state = BT_CONNECT;
-       l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
-
-       if (hcon->state == BT_CONNECTED) {
-               if (sk->sk_type != SOCK_SEQPACKET &&
-                               sk->sk_type != SOCK_STREAM) {
-                       l2cap_sock_clear_timer(sk);
-                       if (l2cap_check_security(sk))
-                               sk->sk_state = BT_CONNECTED;
-               } else
-                       l2cap_do_start(sk);
-       }
-
-done:
-       hci_dev_unlock_bh(hdev);
-       hci_dev_put(hdev);
-       return err;
-}
-
-static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
-{
-       struct sock *sk = sock->sk;
-       struct sockaddr_l2 la;
-       int len, err = 0;
-
-       BT_DBG("sk %p", sk);
-
-       if (!addr || alen < sizeof(addr->sa_family) ||
-           addr->sa_family != AF_BLUETOOTH)
-               return -EINVAL;
-
-       memset(&la, 0, sizeof(la));
-       len = min_t(unsigned int, sizeof(la), alen);
-       memcpy(&la, addr, len);
-
-       if (la.l2_cid)
-               return -EINVAL;
-
-       lock_sock(sk);
-
-       if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
-                       && !la.l2_psm) {
-               err = -EINVAL;
-               goto done;
-       }
-
-       switch (l2cap_pi(sk)->mode) {
-       case L2CAP_MODE_BASIC:
-               break;
-       case L2CAP_MODE_ERTM:
-       case L2CAP_MODE_STREAMING:
-               if (!disable_ertm)
-                       break;
-               /* fall through */
-       default:
-               err = -ENOTSUPP;
-               goto done;
-       }
-
-       switch (sk->sk_state) {
-       case BT_CONNECT:
-       case BT_CONNECT2:
-       case BT_CONFIG:
-               /* Already connecting */
-               goto wait;
-
-       case BT_CONNECTED:
-               /* Already connected */
-               err = -EISCONN;
-               goto done;
-
-       case BT_OPEN:
-       case BT_BOUND:
-               /* Can connect */
-               break;
-
-       default:
-               err = -EBADFD;
-               goto done;
-       }
-
-       /* PSM must be odd and lsb of upper byte must be 0 */
-       if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
-               sk->sk_type != SOCK_RAW) {
-               err = -EINVAL;
-               goto done;
-       }
-
-       /* Set destination address and psm */
-       bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
-       l2cap_pi(sk)->psm = la.l2_psm;
-
-       err = l2cap_do_connect(sk);
-       if (err)
-               goto done;
-
-wait:
-       err = bt_sock_wait_state(sk, BT_CONNECTED,
-                       sock_sndtimeo(sk, flags & O_NONBLOCK));
-done:
-       release_sock(sk);
-       return err;
-}
-
-static int l2cap_sock_listen(struct socket *sock, int backlog)
-{
-       struct sock *sk = sock->sk;
-       int err = 0;
-
-       BT_DBG("sk %p backlog %d", sk, backlog);
-
-       lock_sock(sk);
-
-       if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
-                       || sk->sk_state != BT_BOUND) {
-               err = -EBADFD;
-               goto done;
-       }
-
-       switch (l2cap_pi(sk)->mode) {
-       case L2CAP_MODE_BASIC:
-               break;
-       case L2CAP_MODE_ERTM:
-       case L2CAP_MODE_STREAMING:
-               if (!disable_ertm)
-                       break;
-               /* fall through */
-       default:
-               err = -ENOTSUPP;
-               goto done;
-       }
-
-       if (!l2cap_pi(sk)->psm) {
-               bdaddr_t *src = &bt_sk(sk)->src;
-               u16 psm;
-
-               err = -EINVAL;
-
-               write_lock_bh(&l2cap_sk_list.lock);
-
-               for (psm = 0x1001; psm < 0x1100; psm += 2)
-                       if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
-                               l2cap_pi(sk)->psm   = cpu_to_le16(psm);
-                               l2cap_pi(sk)->sport = cpu_to_le16(psm);
-                               err = 0;
-                               break;
-                       }
-
-               write_unlock_bh(&l2cap_sk_list.lock);
-
-               if (err < 0)
-                       goto done;
-       }
-
-       sk->sk_max_ack_backlog = backlog;
-       sk->sk_ack_backlog = 0;
-       sk->sk_state = BT_LISTEN;
-
-done:
-       release_sock(sk);
-       return err;
-}
-
-static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
-{
-       DECLARE_WAITQUEUE(wait, current);
-       struct sock *sk = sock->sk, *nsk;
-       long timeo;
-       int err = 0;
-
-       lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
-
-       if (sk->sk_state != BT_LISTEN) {
-               err = -EBADFD;
-               goto done;
-       }
-
-       timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
-
-       BT_DBG("sk %p timeo %ld", sk, timeo);
-
-       /* Wait for an incoming connection. (wake-one). */
-       add_wait_queue_exclusive(sk_sleep(sk), &wait);
-       while (!(nsk = bt_accept_dequeue(sk, newsock))) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               if (!timeo) {
-                       err = -EAGAIN;
-                       break;
-               }
-
-               release_sock(sk);
-               timeo = schedule_timeout(timeo);
-               lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
-
-               if (sk->sk_state != BT_LISTEN) {
-                       err = -EBADFD;
-                       break;
-               }
-
-               if (signal_pending(current)) {
-                       err = sock_intr_errno(timeo);
-                       break;
-               }
-       }
-       set_current_state(TASK_RUNNING);
-       remove_wait_queue(sk_sleep(sk), &wait);
-
-       if (err)
-               goto done;
-
-       newsock->state = SS_CONNECTED;
-
-       BT_DBG("new socket %p", nsk);
-
-done:
-       release_sock(sk);
-       return err;
-}
-
-static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
-{
-       struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
-       struct sock *sk = sock->sk;
-
-       BT_DBG("sock %p, sk %p", sock, sk);
-
-       addr->sa_family = AF_BLUETOOTH;
-       *len = sizeof(struct sockaddr_l2);
-
-       if (peer) {
-               la->l2_psm = l2cap_pi(sk)->psm;
-               bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
-               la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
-       } else {
-               la->l2_psm = l2cap_pi(sk)->sport;
-               bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
-               la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
-       }
-
-       return 0;
-}
-
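-/* Wait, in HZ/5 slices, until all outstanding ERTM I-frames have been
- * acknowledged or the socket reports an error or a pending signal.
- */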
-static int __l2cap_wait_ack(struct sock *sk)
-{
-       DECLARE_WAITQUEUE(wait, current);
-       int err = 0;
-       int timeo = HZ/5;
-
-       add_wait_queue(sk_sleep(sk), &wait);
-       while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
-               set_current_state(TASK_INTERRUPTIBLE);
-
-               if (!timeo)
-                       timeo = HZ/5;
-
-               if (signal_pending(current)) {
-                       err = sock_intr_errno(timeo);
-                       break;
-               }
-
-               release_sock(sk);
-               timeo = schedule_timeout(timeo);
-               lock_sock(sk);
-
-               err = sock_error(sk);
-               if (err)
-                       break;
-       }
-       set_current_state(TASK_RUNNING);
-       remove_wait_queue(sk_sleep(sk), &wait);
-       return err;
-}
-
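-/* Monitor timer expired while waiting for the peer to answer a poll:
- * disconnect once remote_max_tx retries are used up, otherwise poll
- * again with an RR/RNR frame carrying the P bit.
- */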
-static void l2cap_monitor_timeout(unsigned long arg)
-{
-       struct sock *sk = (void *) arg;
-
-       BT_DBG("sk %p", sk);
-
-       bh_lock_sock(sk);
-       if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
-               l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
-               bh_unlock_sock(sk);
-               return;
-       }
-
-       l2cap_pi(sk)->retry_count++;
-       __mod_monitor_timer();
-
-       l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
-       bh_unlock_sock(sk);
-}
-
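-/* Retransmission timer expired: enter the WAIT_F state and poll the
- * peer with an RR/RNR frame carrying the P bit.
- */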
-static void l2cap_retrans_timeout(unsigned long arg)
-{
-       struct sock *sk = (void *) arg;
-
-       BT_DBG("sk %p", sk);
-
-       bh_lock_sock(sk);
-       l2cap_pi(sk)->retry_count = 1;
-       __mod_monitor_timer();
-
-       l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
-
-       l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
-       bh_unlock_sock(sk);
-}
-
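-/* Free transmitted I-frames up to (but not including) the expected
- * acknowledgement sequence and stop the retransmission timer once
- * nothing is left unacknowledged.
- */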
-static void l2cap_drop_acked_frames(struct sock *sk)
-{
-       struct sk_buff *skb;
-
-       while ((skb = skb_peek(TX_QUEUE(sk))) &&
-                       l2cap_pi(sk)->unacked_frames) {
-               if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
-                       break;
-
-               skb = skb_dequeue(TX_QUEUE(sk));
-               kfree_skb(skb);
-
-               l2cap_pi(sk)->unacked_frames--;
-       }
-
-       if (!l2cap_pi(sk)->unacked_frames)
-               del_timer(&l2cap_pi(sk)->retrans_timer);
-}
-
-static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-
-       BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
-
-       hci_send_acl(pi->conn->hcon, skb, 0);
-}
-
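-/* Streaming mode transmit: stamp each queued PDU with the next TxSeq
- * (modulo 64), fill in the CRC16 FCS when enabled and hand it straight
- * to the controller; no retransmission state is kept.
- */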
-static void l2cap_streaming_send(struct sock *sk)
-{
-       struct sk_buff *skb;
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       u16 control, fcs;
-
-       while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
-               control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
-               control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
-               put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
-
-               if (pi->fcs == L2CAP_FCS_CRC16) {
-                       fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
-                       put_unaligned_le16(fcs, skb->data + skb->len - 2);
-               }
-
-               l2cap_do_send(sk, skb);
-
-               pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
-       }
-}
-
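-/* Retransmit the I-frame with the given TxSeq: clone it from the TX
- * queue, refresh the ReqSeq and F bit in its control field, recompute
- * the FCS and send it again.
- */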
-static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       struct sk_buff *skb, *tx_skb;
-       u16 control, fcs;
-
-       skb = skb_peek(TX_QUEUE(sk));
-       if (!skb)
-               return;
-
-       do {
-               if (bt_cb(skb)->tx_seq == tx_seq)
-                       break;
-
-               if (skb_queue_is_last(TX_QUEUE(sk), skb))
-                       return;
-
-       } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
-
-       if (pi->remote_max_tx &&
-                       bt_cb(skb)->retries == pi->remote_max_tx) {
-               l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
-               return;
-       }
-
-       tx_skb = skb_clone(skb, GFP_ATOMIC);
-       bt_cb(skb)->retries++;
-       control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
-
-       if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
-               control |= L2CAP_CTRL_FINAL;
-               pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
-       }
-
-       control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
-                       | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
-
-       put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
-
-       if (pi->fcs == L2CAP_FCS_CRC16) {
-               fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
-               put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
-       }
-
-       l2cap_do_send(sk, tx_skb);
-}
-
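-/* ERTM transmit path: send I-frames from sk_send_head while the
- * transmit window has room, updating TxSeq, ReqSeq, the F bit, the
- * FCS and the retransmission timer for each frame sent.
- */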
-static int l2cap_ertm_send(struct sock *sk)
-{
-       struct sk_buff *skb, *tx_skb;
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       u16 control, fcs;
-       int nsent = 0;
-
-       if (sk->sk_state != BT_CONNECTED)
-               return -ENOTCONN;
-
-       while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
-
-               if (pi->remote_max_tx &&
-                               bt_cb(skb)->retries == pi->remote_max_tx) {
-                       l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
-                       break;
-               }
-
-               tx_skb = skb_clone(skb, GFP_ATOMIC);
-
-               bt_cb(skb)->retries++;
-
-               control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
-               control &= L2CAP_CTRL_SAR;
-
-               if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
-                       control |= L2CAP_CTRL_FINAL;
-                       pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
-               }
-               control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
-                               | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
-               put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
-
-               if (pi->fcs == L2CAP_FCS_CRC16) {
-                       fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
-                       put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
-               }
-
-               l2cap_do_send(sk, tx_skb);
-
-               __mod_retrans_timer();
-
-               bt_cb(skb)->tx_seq = pi->next_tx_seq;
-               pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
-
-               pi->unacked_frames++;
-               pi->frames_sent++;
-
-               if (skb_queue_is_last(TX_QUEUE(sk), skb))
-                       sk->sk_send_head = NULL;
-               else
-                       sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
-
-               nsent++;
-       }
-
-       return nsent;
-}
-
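-/* Rewind sk_send_head to the start of the TX queue and resend
- * everything from the expected acknowledgement sequence onwards.
- */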
-static int l2cap_retransmit_frames(struct sock *sk)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       int ret;
-
-       if (!skb_queue_empty(TX_QUEUE(sk)))
-               sk->sk_send_head = TX_QUEUE(sk)->next;
-
-       pi->next_tx_seq = pi->expected_ack_seq;
-       ret = l2cap_ertm_send(sk);
-       return ret;
-}
-
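-/* Acknowledge received I-frames: report local busy with an RNR, try
- * to piggyback the ack on pending data, and fall back to an RR
- * S-frame if nothing was sent.
- */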
-static void l2cap_send_ack(struct l2cap_pinfo *pi)
-{
-       struct sock *sk = (struct sock *)pi;
-       u16 control = 0;
-
-       control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-
-       if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
-               control |= L2CAP_SUPER_RCV_NOT_READY;
-               pi->conn_state |= L2CAP_CONN_RNR_SENT;
-               l2cap_send_sframe(pi, control);
-               return;
-       }
-
-       if (l2cap_ertm_send(sk) > 0)
-               return;
-
-       control |= L2CAP_SUPER_RCV_READY;
-       l2cap_send_sframe(pi, control);
-}
-
-static void l2cap_send_srejtail(struct sock *sk)
-{
-       struct srej_list *tail;
-       u16 control;
-
-       control = L2CAP_SUPER_SELECT_REJECT;
-       control |= L2CAP_CTRL_FINAL;
-
-       tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
-       control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-
-       l2cap_send_sframe(l2cap_pi(sk), control);
-}
-
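-/* Copy user data from the iovec into skb, chaining extra MTU-sized
- * fragments onto frag_list as needed; returns the number of bytes
- * copied or a negative error.
- */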
-static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
-{
-       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-       struct sk_buff **frag;
-       int err, sent = 0;
-
-       if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
-               return -EFAULT;
-
-       sent += count;
-       len  -= count;
-
-       /* Continuation fragments (no L2CAP header) */
-       frag = &skb_shinfo(skb)->frag_list;
-       while (len) {
-               count = min_t(unsigned int, conn->mtu, len);
-
-               *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
-               if (!*frag)
-                       return err;
-               if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
-                       return -EFAULT;
-
-               sent += count;
-               len  -= count;
-
-               frag = &(*frag)->next;
-       }
-
-       return sent;
-}
-
-static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
-{
-       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-       struct sk_buff *skb;
-       int err, count, hlen = L2CAP_HDR_SIZE + 2;
-       struct l2cap_hdr *lh;
-
-       BT_DBG("sk %p len %d", sk, (int)len);
-
-       count = min_t(unsigned int, (conn->mtu - hlen), len);
-       skb = bt_skb_send_alloc(sk, count + hlen,
-                       msg->msg_flags & MSG_DONTWAIT, &err);
-       if (!skb)
-               return ERR_PTR(err);
-
-       /* Create L2CAP header */
-       lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
-       lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
-       lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
-       put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
-
-       err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
-       if (unlikely(err < 0)) {
-               kfree_skb(skb);
-               return ERR_PTR(err);
-       }
-       return skb;
-}
-
-static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
-{
-       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-       struct sk_buff *skb;
-       int err, count, hlen = L2CAP_HDR_SIZE;
-       struct l2cap_hdr *lh;
-
-       BT_DBG("sk %p len %d", sk, (int)len);
-
-       count = min_t(unsigned int, (conn->mtu - hlen), len);
-       skb = bt_skb_send_alloc(sk, count + hlen,
-                       msg->msg_flags & MSG_DONTWAIT, &err);
-       if (!skb)
-               return ERR_PTR(err);
-
-       /* Create L2CAP header */
-       lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
-       lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
-       lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
-
-       err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
-       if (unlikely(err < 0)) {
-               kfree_skb(skb);
-               return ERR_PTR(err);
-       }
-       return skb;
-}
-
-static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
-{
-       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-       struct sk_buff *skb;
-       int err, count, hlen = L2CAP_HDR_SIZE + 2;
-       struct l2cap_hdr *lh;
-
-       BT_DBG("sk %p len %d", sk, (int)len);
-
-       if (!conn)
-               return ERR_PTR(-ENOTCONN);
-
-       if (sdulen)
-               hlen += 2;
-
-       if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
-               hlen += 2;
-
-       count = min_t(unsigned int, (conn->mtu - hlen), len);
-       skb = bt_skb_send_alloc(sk, count + hlen,
-                       msg->msg_flags & MSG_DONTWAIT, &err);
-       if (!skb)
-               return ERR_PTR(err);
-
-       /* Create L2CAP header */
-       lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
-       lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
-       lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
-       put_unaligned_le16(control, skb_put(skb, 2));
-       if (sdulen)
-               put_unaligned_le16(sdulen, skb_put(skb, 2));
-
-       err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
-       if (unlikely(err < 0)) {
-               kfree_skb(skb);
-               return ERR_PTR(err);
-       }
-
-       if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
-               put_unaligned_le16(0, skb_put(skb, 2));
-
-       bt_cb(skb)->retries = 0;
-       return skb;
-}
-
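-/* Segment an SDU larger than the remote MPS into a START, CONTINUE,
- * END sequence of I-frames and splice them onto the TX queue.
- */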
-static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       struct sk_buff *skb;
-       struct sk_buff_head sar_queue;
-       u16 control;
-       size_t size = 0;
-
-       skb_queue_head_init(&sar_queue);
-       control = L2CAP_SDU_START;
-       skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
-       if (IS_ERR(skb))
-               return PTR_ERR(skb);
-
-       __skb_queue_tail(&sar_queue, skb);
-       len -= pi->remote_mps;
-       size += pi->remote_mps;
-
-       while (len > 0) {
-               size_t buflen;
-
-               if (len > pi->remote_mps) {
-                       control = L2CAP_SDU_CONTINUE;
-                       buflen = pi->remote_mps;
-               } else {
-                       control = L2CAP_SDU_END;
-                       buflen = len;
-               }
-
-               skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
-               if (IS_ERR(skb)) {
-                       skb_queue_purge(&sar_queue);
-                       return PTR_ERR(skb);
-               }
-
-               __skb_queue_tail(&sar_queue, skb);
-               len -= buflen;
-               size += buflen;
-       }
-       skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
-       if (sk->sk_send_head == NULL)
-               sk->sk_send_head = sar_queue.next;
-
-       return size;
-}
-
-static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
-{
-       struct sock *sk = sock->sk;
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       struct sk_buff *skb;
-       u16 control;
-       int err;
-
-       BT_DBG("sock %p, sk %p", sock, sk);
-
-       err = sock_error(sk);
-       if (err)
-               return err;
-
-       if (msg->msg_flags & MSG_OOB)
-               return -EOPNOTSUPP;
-
-       lock_sock(sk);
-
-       if (sk->sk_state != BT_CONNECTED) {
-               err = -ENOTCONN;
-               goto done;
-       }
-
-       /* Connectionless channel */
-       if (sk->sk_type == SOCK_DGRAM) {
-               skb = l2cap_create_connless_pdu(sk, msg, len);
-               if (IS_ERR(skb)) {
-                       err = PTR_ERR(skb);
-               } else {
-                       l2cap_do_send(sk, skb);
-                       err = len;
-               }
-               goto done;
-       }
-
-       switch (pi->mode) {
-       case L2CAP_MODE_BASIC:
-               /* Check outgoing MTU */
-               if (len > pi->omtu) {
-                       err = -EMSGSIZE;
-                       goto done;
-               }
-
-               /* Create a basic PDU */
-               skb = l2cap_create_basic_pdu(sk, msg, len);
-               if (IS_ERR(skb)) {
-                       err = PTR_ERR(skb);
-                       goto done;
-               }
-
-               l2cap_do_send(sk, skb);
-               err = len;
-               break;
-
-       case L2CAP_MODE_ERTM:
-       case L2CAP_MODE_STREAMING:
-               /* Entire SDU fits into one PDU */
-               if (len <= pi->remote_mps) {
-                       control = L2CAP_SDU_UNSEGMENTED;
-                       skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
-                       if (IS_ERR(skb)) {
-                               err = PTR_ERR(skb);
-                               goto done;
-                       }
-                       __skb_queue_tail(TX_QUEUE(sk), skb);
-
-                       if (sk->sk_send_head == NULL)
-                               sk->sk_send_head = skb;
-
-               } else {
-                       /* Segment SDU into multiple PDUs */
-                       err = l2cap_sar_segment_sdu(sk, msg, len);
-                       if (err < 0)
-                               goto done;
-               }
-
-               if (pi->mode == L2CAP_MODE_STREAMING) {
-                       l2cap_streaming_send(sk);
-               } else {
-                       if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
-                                       (pi->conn_state & L2CAP_CONN_WAIT_F)) {
-                               err = len;
-                               break;
-                       }
-                       err = l2cap_ertm_send(sk);
-               }
-
-               if (err >= 0)
-                       err = len;
-               break;
-
-       default:
-               BT_DBG("bad mode %1.1x", pi->mode);
-               err = -EBADFD;
-       }
-
-done:
-       release_sock(sk);
-       return err;
-}
-
-static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
-{
-       struct sock *sk = sock->sk;
-
-       lock_sock(sk);
-
-       if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
-               struct l2cap_conn_rsp rsp;
-               struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-               u8 buf[128];
-
-               sk->sk_state = BT_CONFIG;
-
-               rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
-               rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
-               rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
-               rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-               l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
-                                       L2CAP_CONN_RSP, sizeof(rsp), &rsp);
-
-               if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
-                       release_sock(sk);
-                       return 0;
-               }
-
-               l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
-               l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-                               l2cap_build_conf_req(sk, buf), buf);
-               l2cap_pi(sk)->num_conf_req++;
-
-               release_sock(sk);
-               return 0;
-       }
-
-       release_sock(sk);
-
-       if (sock->type == SOCK_STREAM)
-               return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
-
-       return bt_sock_recvmsg(iocb, sock, msg, len, flags);
-}
-
-static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
-{
-       struct sock *sk = sock->sk;
-       struct l2cap_options opts;
-       int len, err = 0;
-       u32 opt;
-
-       BT_DBG("sk %p", sk);
-
-       lock_sock(sk);
-
-       switch (optname) {
-       case L2CAP_OPTIONS:
-               if (sk->sk_state == BT_CONNECTED) {
-                       err = -EINVAL;
-                       break;
-               }
-
-               opts.imtu     = l2cap_pi(sk)->imtu;
-               opts.omtu     = l2cap_pi(sk)->omtu;
-               opts.flush_to = l2cap_pi(sk)->flush_to;
-               opts.mode     = l2cap_pi(sk)->mode;
-               opts.fcs      = l2cap_pi(sk)->fcs;
-               opts.max_tx   = l2cap_pi(sk)->max_tx;
-               opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
-
-               len = min_t(unsigned int, sizeof(opts), optlen);
-               if (copy_from_user((char *) &opts, optval, len)) {
-                       err = -EFAULT;
-                       break;
-               }
-
-               if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
-                       err = -EINVAL;
-                       break;
-               }
-
-               l2cap_pi(sk)->mode = opts.mode;
-               switch (l2cap_pi(sk)->mode) {
-               case L2CAP_MODE_BASIC:
-                       l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
-                       break;
-               case L2CAP_MODE_ERTM:
-               case L2CAP_MODE_STREAMING:
-                       if (!disable_ertm)
-                               break;
-                       /* fall through */
-               default:
-                       err = -EINVAL;
-                       break;
-               }
-
-               l2cap_pi(sk)->imtu = opts.imtu;
-               l2cap_pi(sk)->omtu = opts.omtu;
-               l2cap_pi(sk)->fcs  = opts.fcs;
-               l2cap_pi(sk)->max_tx = opts.max_tx;
-               l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
-               break;
-
-       case L2CAP_LM:
-               if (get_user(opt, (u32 __user *) optval)) {
-                       err = -EFAULT;
-                       break;
-               }
-
-               if (opt & L2CAP_LM_AUTH)
-                       l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
-               if (opt & L2CAP_LM_ENCRYPT)
-                       l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
-               if (opt & L2CAP_LM_SECURE)
-                       l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
-
-               l2cap_pi(sk)->role_switch    = (opt & L2CAP_LM_MASTER);
-               l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
-               break;
-
-       default:
-               err = -ENOPROTOOPT;
-               break;
-       }
-
-       release_sock(sk);
-       return err;
-}
-
-static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
-{
-       struct sock *sk = sock->sk;
-       struct bt_security sec;
-       int len, err = 0;
-       u32 opt;
-
-       BT_DBG("sk %p", sk);
-
-       if (level == SOL_L2CAP)
-               return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
-
-       if (level != SOL_BLUETOOTH)
-               return -ENOPROTOOPT;
-
-       lock_sock(sk);
-
-       switch (optname) {
-       case BT_SECURITY:
-               if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
-                               && sk->sk_type != SOCK_RAW) {
-                       err = -EINVAL;
-                       break;
-               }
-
-               sec.level = BT_SECURITY_LOW;
-
-               len = min_t(unsigned int, sizeof(sec), optlen);
-               if (copy_from_user((char *) &sec, optval, len)) {
-                       err = -EFAULT;
-                       break;
-               }
-
-               if (sec.level < BT_SECURITY_LOW ||
-                                       sec.level > BT_SECURITY_HIGH) {
-                       err = -EINVAL;
-                       break;
-               }
-
-               l2cap_pi(sk)->sec_level = sec.level;
-               break;
-
-       case BT_DEFER_SETUP:
-               if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
-                       err = -EINVAL;
-                       break;
-               }
-
-               if (get_user(opt, (u32 __user *) optval)) {
-                       err = -EFAULT;
-                       break;
-               }
-
-               bt_sk(sk)->defer_setup = opt;
-               break;
-
-       default:
-               err = -ENOPROTOOPT;
-               break;
-       }
-
-       release_sock(sk);
-       return err;
-}
-
-static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
-{
-       struct sock *sk = sock->sk;
-       struct l2cap_options opts;
-       struct l2cap_conninfo cinfo;
-       int len, err = 0;
-       u32 opt;
-
-       BT_DBG("sk %p", sk);
-
-       if (get_user(len, optlen))
-               return -EFAULT;
-
-       lock_sock(sk);
-
-       switch (optname) {
-       case L2CAP_OPTIONS:
-               opts.imtu     = l2cap_pi(sk)->imtu;
-               opts.omtu     = l2cap_pi(sk)->omtu;
-               opts.flush_to = l2cap_pi(sk)->flush_to;
-               opts.mode     = l2cap_pi(sk)->mode;
-               opts.fcs      = l2cap_pi(sk)->fcs;
-               opts.max_tx   = l2cap_pi(sk)->max_tx;
-               opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
-
-               len = min_t(unsigned int, len, sizeof(opts));
-               if (copy_to_user(optval, (char *) &opts, len))
-                       err = -EFAULT;
-
-               break;
-
-       case L2CAP_LM:
-               switch (l2cap_pi(sk)->sec_level) {
-               case BT_SECURITY_LOW:
-                       opt = L2CAP_LM_AUTH;
-                       break;
-               case BT_SECURITY_MEDIUM:
-                       opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
-                       break;
-               case BT_SECURITY_HIGH:
-                       opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
-                                                       L2CAP_LM_SECURE;
-                       break;
-               default:
-                       opt = 0;
-                       break;
-               }
-
-               if (l2cap_pi(sk)->role_switch)
-                       opt |= L2CAP_LM_MASTER;
-
-               if (l2cap_pi(sk)->force_reliable)
-                       opt |= L2CAP_LM_RELIABLE;
-
-               if (put_user(opt, (u32 __user *) optval))
-                       err = -EFAULT;
-               break;
-
-       case L2CAP_CONNINFO:
-               if (sk->sk_state != BT_CONNECTED &&
-                                       !(sk->sk_state == BT_CONNECT2 &&
-                                               bt_sk(sk)->defer_setup)) {
-                       err = -ENOTCONN;
-                       break;
-               }
-
-               cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
-               memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
-
-               len = min_t(unsigned int, len, sizeof(cinfo));
-               if (copy_to_user(optval, (char *) &cinfo, len))
-                       err = -EFAULT;
-
-               break;
-
-       default:
-               err = -ENOPROTOOPT;
-               break;
-       }
-
-       release_sock(sk);
-       return err;
-}
-
-static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
-{
-       struct sock *sk = sock->sk;
-       struct bt_security sec;
-       int len, err = 0;
-
-       BT_DBG("sk %p", sk);
-
-       if (level == SOL_L2CAP)
-               return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
-
-       if (level != SOL_BLUETOOTH)
-               return -ENOPROTOOPT;
-
-       if (get_user(len, optlen))
-               return -EFAULT;
-
-       lock_sock(sk);
-
-       switch (optname) {
-       case BT_SECURITY:
-               if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
-                               && sk->sk_type != SOCK_RAW) {
-                       err = -EINVAL;
-                       break;
-               }
-
-               sec.level = l2cap_pi(sk)->sec_level;
-
-               len = min_t(unsigned int, len, sizeof(sec));
-               if (copy_to_user(optval, (char *) &sec, len))
-                       err = -EFAULT;
-
-               break;
-
-       case BT_DEFER_SETUP:
-               if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
-                       err = -EINVAL;
-                       break;
-               }
-
-               if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
-                       err = -EFAULT;
-
-               break;
-
-       default:
-               err = -ENOPROTOOPT;
-               break;
-       }
-
-       release_sock(sk);
-       return err;
-}
-
-static int l2cap_sock_shutdown(struct socket *sock, int how)
-{
-       struct sock *sk = sock->sk;
-       int err = 0;
-
-       BT_DBG("sock %p, sk %p", sock, sk);
-
-       if (!sk)
-               return 0;
-
-       lock_sock(sk);
-       if (!sk->sk_shutdown) {
-               if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
-                       err = __l2cap_wait_ack(sk);
-
-               sk->sk_shutdown = SHUTDOWN_MASK;
-               l2cap_sock_clear_timer(sk);
-               __l2cap_sock_close(sk, 0);
-
-               if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
-                       err = bt_sock_wait_state(sk, BT_CLOSED,
-                                                       sk->sk_lingertime);
-       }
-
-       if (!err && sk->sk_err)
-               err = -sk->sk_err;
-
-       release_sock(sk);
-       return err;
-}
-
-static int l2cap_sock_release(struct socket *sock)
-{
-       struct sock *sk = sock->sk;
-       int err;
-
-       BT_DBG("sock %p, sk %p", sock, sk);
-
-       if (!sk)
-               return 0;
-
-       err = l2cap_sock_shutdown(sock, 2);
-
-       sock_orphan(sk);
-       l2cap_sock_kill(sk);
-       return err;
-}
-
-static void l2cap_chan_ready(struct sock *sk)
-{
-       struct sock *parent = bt_sk(sk)->parent;
-
-       BT_DBG("sk %p, parent %p", sk, parent);
-
-       l2cap_pi(sk)->conf_state = 0;
-       l2cap_sock_clear_timer(sk);
-
-       if (!parent) {
-               /* Outgoing channel.
-                * Wake up socket sleeping on connect.
-                */
-               sk->sk_state = BT_CONNECTED;
-               sk->sk_state_change(sk);
-       } else {
-               /* Incoming channel.
-                * Wake up socket sleeping on accept.
-                */
-               parent->sk_data_ready(parent, 0);
-       }
-}
-
-/* Copy frame to all raw sockets on that connection */
-static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
-{
-       struct l2cap_chan_list *l = &conn->chan_list;
-       struct sk_buff *nskb;
-       struct sock *sk;
-
-       BT_DBG("conn %p", conn);
-
-       read_lock(&l->lock);
-       for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
-               if (sk->sk_type != SOCK_RAW)
-                       continue;
-
-               /* Don't send frame to the socket it came from */
-               if (skb->sk == sk)
-                       continue;
-               nskb = skb_clone(skb, GFP_ATOMIC);
-               if (!nskb)
-                       continue;
-
-               if (sock_queue_rcv_skb(sk, nskb))
-                       kfree_skb(nskb);
-       }
-       read_unlock(&l->lock);
-}
-
-/* ---- L2CAP signalling commands ---- */
-static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
-                               u8 code, u8 ident, u16 dlen, void *data)
-{
-       struct sk_buff *skb, **frag;
-       struct l2cap_cmd_hdr *cmd;
-       struct l2cap_hdr *lh;
-       int len, count;
-
-       BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
-                       conn, code, ident, dlen);
-
-       len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
-       count = min_t(unsigned int, conn->mtu, len);
-
-       skb = bt_skb_alloc(count, GFP_ATOMIC);
-       if (!skb)
-               return NULL;
-
-       lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
-       lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
-       lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
-
-       cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
-       cmd->code  = code;
-       cmd->ident = ident;
-       cmd->len   = cpu_to_le16(dlen);
-
-       if (dlen) {
-               count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
-               memcpy(skb_put(skb, count), data, count);
-               data += count;
-       }
-
-       len -= skb->len;
-
-       /* Continuation fragments (no L2CAP header) */
-       frag = &skb_shinfo(skb)->frag_list;
-       while (len) {
-               count = min_t(unsigned int, conn->mtu, len);
-
-               *frag = bt_skb_alloc(count, GFP_ATOMIC);
-               if (!*frag)
-                       goto fail;
-
-               memcpy(skb_put(*frag, count), data, count);
-
-               len  -= count;
-               data += count;
-
-               frag = &(*frag)->next;
-       }
-
-       return skb;
-
-fail:
-       kfree_skb(skb);
-       return NULL;
-}
-
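-/* Parse a single configuration option from the request buffer and
- * advance *ptr past it; 1, 2 and 4 byte values are returned inline,
- * longer ones by pointer.
- */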
-static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
-{
-       struct l2cap_conf_opt *opt = *ptr;
-       int len;
-
-       len = L2CAP_CONF_OPT_SIZE + opt->len;
-       *ptr += len;
-
-       *type = opt->type;
-       *olen = opt->len;
-
-       switch (opt->len) {
-       case 1:
-               *val = *((u8 *) opt->val);
-               break;
-
-       case 2:
-               *val = get_unaligned_le16(opt->val);
-               break;
-
-       case 4:
-               *val = get_unaligned_le32(opt->val);
-               break;
-
-       default:
-               *val = (unsigned long) opt->val;
-               break;
-       }
-
-       BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
-       return len;
-}
-
-static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
-{
-       struct l2cap_conf_opt *opt = *ptr;
-
-       BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
-
-       opt->type = type;
-       opt->len  = len;
-
-       switch (len) {
-       case 1:
-               *((u8 *) opt->val)  = val;
-               break;
-
-       case 2:
-               put_unaligned_le16(val, opt->val);
-               break;
-
-       case 4:
-               put_unaligned_le32(val, opt->val);
-               break;
-
-       default:
-               memcpy(opt->val, (void *) val, len);
-               break;
-       }
-
-       *ptr += L2CAP_CONF_OPT_SIZE + len;
-}
-
-static void l2cap_ack_timeout(unsigned long arg)
-{
-       struct sock *sk = (void *) arg;
-
-       bh_lock_sock(sk);
-       l2cap_send_ack(l2cap_pi(sk));
-       bh_unlock_sock(sk);
-}
-
-static inline void l2cap_ertm_init(struct sock *sk)
-{
-       l2cap_pi(sk)->expected_ack_seq = 0;
-       l2cap_pi(sk)->unacked_frames = 0;
-       l2cap_pi(sk)->buffer_seq = 0;
-       l2cap_pi(sk)->num_acked = 0;
-       l2cap_pi(sk)->frames_sent = 0;
-
-       setup_timer(&l2cap_pi(sk)->retrans_timer,
-                       l2cap_retrans_timeout, (unsigned long) sk);
-       setup_timer(&l2cap_pi(sk)->monitor_timer,
-                       l2cap_monitor_timeout, (unsigned long) sk);
-       setup_timer(&l2cap_pi(sk)->ack_timer,
-                       l2cap_ack_timeout, (unsigned long) sk);
-
-       __skb_queue_head_init(SREJ_QUEUE(sk));
-       __skb_queue_head_init(BUSY_QUEUE(sk));
-
-       INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
-
-       sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
-}
-
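-/* Fall back to basic mode unless the remote feature mask advertises
- * support for the requested ERTM or streaming mode.
- */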
-static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
-{
-       switch (mode) {
-       case L2CAP_MODE_STREAMING:
-       case L2CAP_MODE_ERTM:
-               if (l2cap_mode_supported(mode, remote_feat_mask))
-                       return mode;
-               /* fall through */
-       default:
-               return L2CAP_MODE_BASIC;
-       }
-}
-
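-/* Build an outgoing configuration request for the channel: settle on
- * an operating mode first, then add the MTU, RFC and FCS options as
- * required for that mode.
- */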
-static int l2cap_build_conf_req(struct sock *sk, void *data)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       struct l2cap_conf_req *req = data;
-       struct l2cap_conf_rfc rfc = { .mode = pi->mode };
-       void *ptr = req->data;
-
-       BT_DBG("sk %p", sk);
-
-       if (pi->num_conf_req || pi->num_conf_rsp)
-               goto done;
-
-       switch (pi->mode) {
-       case L2CAP_MODE_STREAMING:
-       case L2CAP_MODE_ERTM:
-               if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
-                       break;
-
-               /* fall through */
-       default:
-               pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
-               break;
-       }
-
-done:
-       switch (pi->mode) {
-       case L2CAP_MODE_BASIC:
-               if (pi->imtu != L2CAP_DEFAULT_MTU)
-                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
-
-               if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
-                               !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
-                       break;
-
-               rfc.mode            = L2CAP_MODE_BASIC;
-               rfc.txwin_size      = 0;
-               rfc.max_transmit    = 0;
-               rfc.retrans_timeout = 0;
-               rfc.monitor_timeout = 0;
-               rfc.max_pdu_size    = 0;
-
-               l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
-                                                       (unsigned long) &rfc);
-               break;
-
-       case L2CAP_MODE_ERTM:
-               rfc.mode            = L2CAP_MODE_ERTM;
-               rfc.txwin_size      = pi->tx_win;
-               rfc.max_transmit    = pi->max_tx;
-               rfc.retrans_timeout = 0;
-               rfc.monitor_timeout = 0;
-               rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
-               if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
-                       rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
-
-               l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
-                                                       (unsigned long) &rfc);
-
-               if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
-                       break;
-
-               if (pi->fcs == L2CAP_FCS_NONE ||
-                               pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
-                       pi->fcs = L2CAP_FCS_NONE;
-                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
-               }
-               break;
-
-       case L2CAP_MODE_STREAMING:
-               rfc.mode            = L2CAP_MODE_STREAMING;
-               rfc.txwin_size      = 0;
-               rfc.max_transmit    = 0;
-               rfc.retrans_timeout = 0;
-               rfc.monitor_timeout = 0;
-               rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
-               if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
-                       rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
-
-               l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
-                                                       (unsigned long) &rfc);
-
-               if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
-                       break;
-
-               if (pi->fcs == L2CAP_FCS_NONE ||
-                               pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
-                       pi->fcs = L2CAP_FCS_NONE;
-                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
-               }
-               break;
-       }
-
-       /* FIXME: Need actual value of the flush timeout */
-       //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
-       //   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
-
-       req->dcid  = cpu_to_le16(pi->dcid);
-       req->flags = cpu_to_le16(0);
-
-       return ptr - data;
-}
-
-static int l2cap_parse_conf_req(struct sock *sk, void *data)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       struct l2cap_conf_rsp *rsp = data;
-       void *ptr = rsp->data;
-       void *req = pi->conf_req;
-       int len = pi->conf_len;
-       int type, hint, olen;
-       unsigned long val;
-       struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
-       u16 mtu = L2CAP_DEFAULT_MTU;
-       u16 result = L2CAP_CONF_SUCCESS;
-
-       BT_DBG("sk %p", sk);
-
-       while (len >= L2CAP_CONF_OPT_SIZE) {
-               len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
-
-               hint  = type & L2CAP_CONF_HINT;
-               type &= L2CAP_CONF_MASK;
-
-               switch (type) {
-               case L2CAP_CONF_MTU:
-                       mtu = val;
-                       break;
-
-               case L2CAP_CONF_FLUSH_TO:
-                       pi->flush_to = val;
-                       break;
-
-               case L2CAP_CONF_QOS:
-                       break;
-
-               case L2CAP_CONF_RFC:
-                       if (olen == sizeof(rfc))
-                               memcpy(&rfc, (void *) val, olen);
-                       break;
-
-               case L2CAP_CONF_FCS:
-                       if (val == L2CAP_FCS_NONE)
-                               pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
-
-                       break;
-
-               default:
-                       if (hint)
-                               break;
-
-                       result = L2CAP_CONF_UNKNOWN;
-                       *((u8 *) ptr++) = type;
-                       break;
-               }
-       }
-
-       if (pi->num_conf_rsp || pi->num_conf_req > 1)
-               goto done;
-
-       switch (pi->mode) {
-       case L2CAP_MODE_STREAMING:
-       case L2CAP_MODE_ERTM:
-               if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
-                       pi->mode = l2cap_select_mode(rfc.mode,
-                                       pi->conn->feat_mask);
-                       break;
-               }
-
-               if (pi->mode != rfc.mode)
-                       return -ECONNREFUSED;
-
-               break;
-       }
-
-done:
-       if (pi->mode != rfc.mode) {
-               result = L2CAP_CONF_UNACCEPT;
-               rfc.mode = pi->mode;
-
-               if (pi->num_conf_rsp == 1)
-                       return -ECONNREFUSED;
-
-               l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
-                                       sizeof(rfc), (unsigned long) &rfc);
-       }
-
-       if (result == L2CAP_CONF_SUCCESS) {
-               /* Configure output options and let the other side know
-                * which ones we don't like. */
-
-               if (mtu < L2CAP_DEFAULT_MIN_MTU)
-                       result = L2CAP_CONF_UNACCEPT;
-               else {
-                       pi->omtu = mtu;
-                       pi->conf_state |= L2CAP_CONF_MTU_DONE;
-               }
-               l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
-
-               switch (rfc.mode) {
-               case L2CAP_MODE_BASIC:
-                       pi->fcs = L2CAP_FCS_NONE;
-                       pi->conf_state |= L2CAP_CONF_MODE_DONE;
-                       break;
-
-               case L2CAP_MODE_ERTM:
-                       pi->remote_tx_win = rfc.txwin_size;
-                       pi->remote_max_tx = rfc.max_transmit;
-
-                       if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
-                               rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
-
-                       pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
-
-                       rfc.retrans_timeout =
-                               le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
-                       rfc.monitor_timeout =
-                               le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
-
-                       pi->conf_state |= L2CAP_CONF_MODE_DONE;
-
-                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
-                                       sizeof(rfc), (unsigned long) &rfc);
-
-                       break;
-
-               case L2CAP_MODE_STREAMING:
-                       if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
-                               rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
-
-                       pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
-
-                       pi->conf_state |= L2CAP_CONF_MODE_DONE;
-
-                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
-                                       sizeof(rfc), (unsigned long) &rfc);
-
-                       break;
-
-               default:
-                       result = L2CAP_CONF_UNACCEPT;
-
-                       memset(&rfc, 0, sizeof(rfc));
-                       rfc.mode = pi->mode;
-               }
-
-               if (result == L2CAP_CONF_SUCCESS)
-                       pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
-       }
-       rsp->scid   = cpu_to_le16(pi->dcid);
-       rsp->result = cpu_to_le16(result);
-       rsp->flags  = cpu_to_le16(0x0000);
-
-       return ptr - data;
-}
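
The while loop in l2cap_parse_conf_req() walks the same type/length/value stream in the other direction, peeling one option per iteration until less than a full header remains. A matching unpack sketch under the same assumptions as the pack_conf_opt() example earlier (hypothetical names, two-byte option header):

#include <stdint.h>
#include <stddef.h>

#define CONF_OPT_HDR_SIZE 2    /* one byte of type, one byte of length */

/* Yield one option per call and advance the cursor; return 0 once no
 * complete option is left in the buffer. */
static int next_conf_opt(const uint8_t **pos, size_t *remaining,
                         uint8_t *type, uint8_t *len, const uint8_t **val)
{
        if (*remaining < CONF_OPT_HDR_SIZE)
                return 0;
        *type = (*pos)[0];
        *len  = (*pos)[1];
        if (*remaining < (size_t)CONF_OPT_HDR_SIZE + *len)
                return 0;
        *val = *pos + CONF_OPT_HDR_SIZE;
        *pos += CONF_OPT_HDR_SIZE + *len;
        *remaining -= CONF_OPT_HDR_SIZE + *len;
        return 1;
}

As the calling code above shows, the kernel's l2cap_get_conf_opt() hands short values such as the MTU back directly in val and longer blocks such as the RFC option as a pointer to be memcpy'd; the sketch only returns a pointer.
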
-
-static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       struct l2cap_conf_req *req = data;
-       void *ptr = req->data;
-       int type, olen;
-       unsigned long val;
-       struct l2cap_conf_rfc rfc;
-
-       BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
-
-       while (len >= L2CAP_CONF_OPT_SIZE) {
-               len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
-
-               switch (type) {
-               case L2CAP_CONF_MTU:
-                       if (val < L2CAP_DEFAULT_MIN_MTU) {
-                               *result = L2CAP_CONF_UNACCEPT;
-                               pi->imtu = L2CAP_DEFAULT_MIN_MTU;
-                       } else
-                               pi->imtu = val;
-                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
-                       break;
-
-               case L2CAP_CONF_FLUSH_TO:
-                       pi->flush_to = val;
-                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
-                                                       2, pi->flush_to);
-                       break;
-
-               case L2CAP_CONF_RFC:
-                       if (olen == sizeof(rfc))
-                               memcpy(&rfc, (void *)val, olen);
-
-                       if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
-                                                       rfc.mode != pi->mode)
-                               return -ECONNREFUSED;
-
-                       pi->fcs = 0;
-
-                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
-                                       sizeof(rfc), (unsigned long) &rfc);
-                       break;
-               }
-       }
-
-       if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
-               return -ECONNREFUSED;
-
-       pi->mode = rfc.mode;
-
-       if (*result == L2CAP_CONF_SUCCESS) {
-               switch (rfc.mode) {
-               case L2CAP_MODE_ERTM:
-                       pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
-                       pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
-                       pi->mps    = le16_to_cpu(rfc.max_pdu_size);
-                       break;
-               case L2CAP_MODE_STREAMING:
-                       pi->mps    = le16_to_cpu(rfc.max_pdu_size);
-               }
-       }
-
-       req->dcid   = cpu_to_le16(pi->dcid);
-       req->flags  = cpu_to_le16(0x0000);
-
-       return ptr - data;
-}
-
-static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
-{
-       struct l2cap_conf_rsp *rsp = data;
-       void *ptr = rsp->data;
-
-       BT_DBG("sk %p", sk);
-
-       rsp->scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
-       rsp->result = cpu_to_le16(result);
-       rsp->flags  = cpu_to_le16(flags);
-
-       return ptr - data;
-}
-
-static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       int type, olen;
-       unsigned long val;
-       struct l2cap_conf_rfc rfc;
-
-       BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
-
-       if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
-               return;
-
-       while (len >= L2CAP_CONF_OPT_SIZE) {
-               len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
-
-               switch (type) {
-               case L2CAP_CONF_RFC:
-                       if (olen == sizeof(rfc))
-                               memcpy(&rfc, (void *)val, olen);
-                       goto done;
-               }
-       }
-
-done:
-       switch (rfc.mode) {
-       case L2CAP_MODE_ERTM:
-               pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
-               pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
-               pi->mps    = le16_to_cpu(rfc.max_pdu_size);
-               break;
-       case L2CAP_MODE_STREAMING:
-               pi->mps    = le16_to_cpu(rfc.max_pdu_size);
-       }
-}
-
-static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
-{
-       struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
-
-       if (rej->reason != 0x0000)
-               return 0;
-
-       if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
-                                       cmd->ident == conn->info_ident) {
-               del_timer(&conn->info_timer);
-
-               conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
-               conn->info_ident = 0;
-
-               l2cap_conn_start(conn);
-       }
-
-       return 0;
-}
-
-static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
-{
-       struct l2cap_chan_list *list = &conn->chan_list;
-       struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
-       struct l2cap_conn_rsp rsp;
-       struct sock *parent, *sk = NULL;
-       int result, status = L2CAP_CS_NO_INFO;
-
-       u16 dcid = 0, scid = __le16_to_cpu(req->scid);
-       __le16 psm = req->psm;
-
-       BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
-
-       /* Check if we have a socket listening on this PSM */
-       parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
-       if (!parent) {
-               result = L2CAP_CR_BAD_PSM;
-               goto sendresp;
-       }
-
-       bh_lock_sock(parent);
-
-       /* Check if the ACL is secure enough (if not SDP) */
-       if (psm != cpu_to_le16(0x0001) &&
-                               !hci_conn_check_link_mode(conn->hcon)) {
-               conn->disc_reason = 0x05;
-               result = L2CAP_CR_SEC_BLOCK;
-               goto response;
-       }
-
-       result = L2CAP_CR_NO_MEM;
-
-       /* Check for backlog size */
-       if (sk_acceptq_is_full(parent)) {
-               BT_DBG("backlog full %d", parent->sk_ack_backlog);
-               goto response;
-       }
-
-       sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
-       if (!sk)
-               goto response;
-
-       write_lock_bh(&list->lock);
-
-       /* Check if we already have a channel with that DCID */
-       if (__l2cap_get_chan_by_dcid(list, scid)) {
-               write_unlock_bh(&list->lock);
-               sock_set_flag(sk, SOCK_ZAPPED);
-               l2cap_sock_kill(sk);
-               goto response;
-       }
-
-       hci_conn_hold(conn->hcon);
-
-       l2cap_sock_init(sk, parent);
-       bacpy(&bt_sk(sk)->src, conn->src);
-       bacpy(&bt_sk(sk)->dst, conn->dst);
-       l2cap_pi(sk)->psm  = psm;
-       l2cap_pi(sk)->dcid = scid;
-
-       __l2cap_chan_add(conn, sk, parent);
-       dcid = l2cap_pi(sk)->scid;
-
-       l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
-
-       l2cap_pi(sk)->ident = cmd->ident;
-
-       if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
-               if (l2cap_check_security(sk)) {
-                       if (bt_sk(sk)->defer_setup) {
-                               sk->sk_state = BT_CONNECT2;
-                               result = L2CAP_CR_PEND;
-                               status = L2CAP_CS_AUTHOR_PEND;
-                               parent->sk_data_ready(parent, 0);
-                       } else {
-                               sk->sk_state = BT_CONFIG;
-                               result = L2CAP_CR_SUCCESS;
-                               status = L2CAP_CS_NO_INFO;
-                       }
-               } else {
-                       sk->sk_state = BT_CONNECT2;
-                       result = L2CAP_CR_PEND;
-                       status = L2CAP_CS_AUTHEN_PEND;
-               }
-       } else {
-               sk->sk_state = BT_CONNECT2;
-               result = L2CAP_CR_PEND;
-               status = L2CAP_CS_NO_INFO;
-       }
-
-       write_unlock_bh(&list->lock);
-
-response:
-       bh_unlock_sock(parent);
-
-sendresp:
-       rsp.scid   = cpu_to_le16(scid);
-       rsp.dcid   = cpu_to_le16(dcid);
-       rsp.result = cpu_to_le16(result);
-       rsp.status = cpu_to_le16(status);
-       l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
-
-       if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
-               struct l2cap_info_req info;
-               info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
-
-               conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
-               conn->info_ident = l2cap_get_ident(conn);
-
-               mod_timer(&conn->info_timer, jiffies +
-                                       msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
-
-               l2cap_send_cmd(conn, conn->info_ident,
-                                       L2CAP_INFO_REQ, sizeof(info), &info);
-       }
-
-       if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
-                               result == L2CAP_CR_SUCCESS) {
-               u8 buf[128];
-               l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
-               l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-                                       l2cap_build_conf_req(sk, buf), buf);
-               l2cap_pi(sk)->num_conf_req++;
-       }
-
-       return 0;
-}
-
-static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
-{
-       struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
-       u16 scid, dcid, result, status;
-       struct sock *sk;
-       u8 req[128];
-
-       scid   = __le16_to_cpu(rsp->scid);
-       dcid   = __le16_to_cpu(rsp->dcid);
-       result = __le16_to_cpu(rsp->result);
-       status = __le16_to_cpu(rsp->status);
-
-       BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
-
-       if (scid) {
-               sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
-               if (!sk)
-                       return -EFAULT;
-       } else {
-               sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
-               if (!sk)
-                       return -EFAULT;
-       }
-
-       switch (result) {
-       case L2CAP_CR_SUCCESS:
-               sk->sk_state = BT_CONFIG;
-               l2cap_pi(sk)->ident = 0;
-               l2cap_pi(sk)->dcid = dcid;
-               l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
-
-               if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
-                       break;
-
-               l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
-
-               l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-                                       l2cap_build_conf_req(sk, req), req);
-               l2cap_pi(sk)->num_conf_req++;
-               break;
-
-       case L2CAP_CR_PEND:
-               l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
-               break;
-
-       default:
-               /* don't delete l2cap channel if sk is owned by user */
-               if (sock_owned_by_user(sk)) {
-                       sk->sk_state = BT_DISCONN;
-                       l2cap_sock_clear_timer(sk);
-                       l2cap_sock_set_timer(sk, HZ / 5);
-                       break;
-               }
-
-               l2cap_chan_del(sk, ECONNREFUSED);
-               break;
-       }
-
-       bh_unlock_sock(sk);
-       return 0;
-}
-
-static inline void set_default_fcs(struct l2cap_pinfo *pi)
-{
-       /* FCS is enabled only in ERTM or streaming mode, if one or both
-        * sides request it.
-        */
-       if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
-               pi->fcs = L2CAP_FCS_NONE;
-       else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
-               pi->fcs = L2CAP_FCS_CRC16;
-}
-
-static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
-{
-       struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
-       u16 dcid, flags;
-       u8 rsp[64];
-       struct sock *sk;
-       int len;
-
-       dcid  = __le16_to_cpu(req->dcid);
-       flags = __le16_to_cpu(req->flags);
-
-       BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
-
-       sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
-       if (!sk)
-               return -ENOENT;
-
-       if (sk->sk_state != BT_CONFIG) {
-               struct l2cap_cmd_rej rej;
-
-               rej.reason = cpu_to_le16(0x0002);
-               l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
-                               sizeof(rej), &rej);
-               goto unlock;
-       }
-
-       /* Reject if config buffer is too small. */
-       len = cmd_len - sizeof(*req);
-       if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
-               l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
-                               l2cap_build_conf_rsp(sk, rsp,
-                                       L2CAP_CONF_REJECT, flags), rsp);
-               goto unlock;
-       }
-
-       /* Store config. */
-       memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
-       l2cap_pi(sk)->conf_len += len;
-
-       if (flags & 0x0001) {
-               /* Incomplete config. Send empty response. */
-               l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
-                               l2cap_build_conf_rsp(sk, rsp,
-                                       L2CAP_CONF_SUCCESS, 0x0001), rsp);
-               goto unlock;
-       }
-
-       /* Complete config. */
-       len = l2cap_parse_conf_req(sk, rsp);
-       if (len < 0) {
-               l2cap_send_disconn_req(conn, sk, ECONNRESET);
-               goto unlock;
-       }
-
-       l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
-       l2cap_pi(sk)->num_conf_rsp++;
-
-       /* Reset config buffer. */
-       l2cap_pi(sk)->conf_len = 0;
-
-       if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
-               goto unlock;
-
-       if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
-               set_default_fcs(l2cap_pi(sk));
-
-               sk->sk_state = BT_CONNECTED;
-
-               l2cap_pi(sk)->next_tx_seq = 0;
-               l2cap_pi(sk)->expected_tx_seq = 0;
-               __skb_queue_head_init(TX_QUEUE(sk));
-               if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
-                       l2cap_ertm_init(sk);
-
-               l2cap_chan_ready(sk);
-               goto unlock;
-       }
-
-       if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
-               u8 buf[64];
-               l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
-               l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-                                       l2cap_build_conf_req(sk, buf), buf);
-               l2cap_pi(sk)->num_conf_req++;
-       }
-
-unlock:
-       bh_unlock_sock(sk);
-       return 0;
-}
-
-static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
-{
-       struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
-       u16 scid, flags, result;
-       struct sock *sk;
-       int len = cmd->len - sizeof(*rsp);
-
-       scid   = __le16_to_cpu(rsp->scid);
-       flags  = __le16_to_cpu(rsp->flags);
-       result = __le16_to_cpu(rsp->result);
-
-       BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
-                       scid, flags, result);
-
-       sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
-       if (!sk)
-               return 0;
-
-       switch (result) {
-       case L2CAP_CONF_SUCCESS:
-               l2cap_conf_rfc_get(sk, rsp->data, len);
-               break;
-
-       case L2CAP_CONF_UNACCEPT:
-               if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
-                       char req[64];
-
-                       if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
-                               l2cap_send_disconn_req(conn, sk, ECONNRESET);
-                               goto done;
-                       }
-
-                       /* throw out any old stored conf requests */
-                       result = L2CAP_CONF_SUCCESS;
-                       len = l2cap_parse_conf_rsp(sk, rsp->data,
-                                                       len, req, &result);
-                       if (len < 0) {
-                               l2cap_send_disconn_req(conn, sk, ECONNRESET);
-                               goto done;
-                       }
-
-                       l2cap_send_cmd(conn, l2cap_get_ident(conn),
-                                               L2CAP_CONF_REQ, len, req);
-                       l2cap_pi(sk)->num_conf_req++;
-                       if (result != L2CAP_CONF_SUCCESS)
-                               goto done;
-                       break;
-               }
-
-       default:
-               sk->sk_err = ECONNRESET;
-               l2cap_sock_set_timer(sk, HZ * 5);
-               l2cap_send_disconn_req(conn, sk, ECONNRESET);
-               goto done;
-       }
-
-       if (flags & 0x01)
-               goto done;
-
-       l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
-
-       if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
-               set_default_fcs(l2cap_pi(sk));
-
-               sk->sk_state = BT_CONNECTED;
-               l2cap_pi(sk)->next_tx_seq = 0;
-               l2cap_pi(sk)->expected_tx_seq = 0;
-               __skb_queue_head_init(TX_QUEUE(sk));
-               if (l2cap_pi(sk)->mode ==  L2CAP_MODE_ERTM)
-                       l2cap_ertm_init(sk);
-
-               l2cap_chan_ready(sk);
-       }
-
-done:
-       bh_unlock_sock(sk);
-       return 0;
-}
-
-static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
-{
-       struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
-       struct l2cap_disconn_rsp rsp;
-       u16 dcid, scid;
-       struct sock *sk;
-
-       scid = __le16_to_cpu(req->scid);
-       dcid = __le16_to_cpu(req->dcid);
-
-       BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
-
-       sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
-       if (!sk)
-               return 0;
-
-       rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
-       rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
-       l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
-
-       sk->sk_shutdown = SHUTDOWN_MASK;
-
-       /* don't delete l2cap channel if sk is owned by user */
-       if (sock_owned_by_user(sk)) {
-               sk->sk_state = BT_DISCONN;
-               l2cap_sock_clear_timer(sk);
-               l2cap_sock_set_timer(sk, HZ / 5);
-               bh_unlock_sock(sk);
-               return 0;
-       }
-
-       l2cap_chan_del(sk, ECONNRESET);
-       bh_unlock_sock(sk);
-
-       l2cap_sock_kill(sk);
-       return 0;
-}
-
-static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
-{
-       struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
-       u16 dcid, scid;
-       struct sock *sk;
-
-       scid = __le16_to_cpu(rsp->scid);
-       dcid = __le16_to_cpu(rsp->dcid);
-
-       BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
-
-       sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
-       if (!sk)
-               return 0;
-
-       /* don't delete l2cap channel if sk is owned by user */
-       if (sock_owned_by_user(sk)) {
-               sk->sk_state = BT_DISCONN;
-               l2cap_sock_clear_timer(sk);
-               l2cap_sock_set_timer(sk, HZ / 5);
-               bh_unlock_sock(sk);
-               return 0;
-       }
-
-       l2cap_chan_del(sk, 0);
-       bh_unlock_sock(sk);
-
-       l2cap_sock_kill(sk);
-       return 0;
-}
-
-static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
-{
-       struct l2cap_info_req *req = (struct l2cap_info_req *) data;
-       u16 type;
-
-       type = __le16_to_cpu(req->type);
-
-       BT_DBG("type 0x%4.4x", type);
-
-       if (type == L2CAP_IT_FEAT_MASK) {
-               u8 buf[8];
-               u32 feat_mask = l2cap_feat_mask;
-               struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
-               rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
-               rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
-               if (!disable_ertm)
-                       feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
-                                                        | L2CAP_FEAT_FCS;
-               put_unaligned_le32(feat_mask, rsp->data);
-               l2cap_send_cmd(conn, cmd->ident,
-                                       L2CAP_INFO_RSP, sizeof(buf), buf);
-       } else if (type == L2CAP_IT_FIXED_CHAN) {
-               u8 buf[12];
-               struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
-               rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
-               rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
-               memcpy(buf + 4, l2cap_fixed_chan, 8);
-               l2cap_send_cmd(conn, cmd->ident,
-                                       L2CAP_INFO_RSP, sizeof(buf), buf);
-       } else {
-               struct l2cap_info_rsp rsp;
-               rsp.type   = cpu_to_le16(type);
-               rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
-               l2cap_send_cmd(conn, cmd->ident,
-                                       L2CAP_INFO_RSP, sizeof(rsp), &rsp);
-       }
-
-       return 0;
-}
-
-static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
-{
-       struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
-       u16 type, result;
-
-       type   = __le16_to_cpu(rsp->type);
-       result = __le16_to_cpu(rsp->result);
-
-       BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
-
-       del_timer(&conn->info_timer);
-
-       if (result != L2CAP_IR_SUCCESS) {
-               conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
-               conn->info_ident = 0;
-
-               l2cap_conn_start(conn);
-
-               return 0;
-       }
-
-       if (type == L2CAP_IT_FEAT_MASK) {
-               conn->feat_mask = get_unaligned_le32(rsp->data);
-
-               if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
-                       struct l2cap_info_req req;
-                       req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
-
-                       conn->info_ident = l2cap_get_ident(conn);
-
-                       l2cap_send_cmd(conn, conn->info_ident,
-                                       L2CAP_INFO_REQ, sizeof(req), &req);
-               } else {
-                       conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
-                       conn->info_ident = 0;
-
-                       l2cap_conn_start(conn);
-               }
-       } else if (type == L2CAP_IT_FIXED_CHAN) {
-               conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
-               conn->info_ident = 0;
-
-               l2cap_conn_start(conn);
-       }
-
-       return 0;
-}
-
-static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
-{
-       u8 *data = skb->data;
-       int len = skb->len;
-       struct l2cap_cmd_hdr cmd;
-       int err = 0;
-
-       l2cap_raw_recv(conn, skb);
-
-       while (len >= L2CAP_CMD_HDR_SIZE) {
-               u16 cmd_len;
-               memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
-               data += L2CAP_CMD_HDR_SIZE;
-               len  -= L2CAP_CMD_HDR_SIZE;
-
-               cmd_len = le16_to_cpu(cmd.len);
-
-               BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
-
-               if (cmd_len > len || !cmd.ident) {
-                       BT_DBG("corrupted command");
-                       break;
-               }
-
-               switch (cmd.code) {
-               case L2CAP_COMMAND_REJ:
-                       l2cap_command_rej(conn, &cmd, data);
-                       break;
-
-               case L2CAP_CONN_REQ:
-                       err = l2cap_connect_req(conn, &cmd, data);
-                       break;
-
-               case L2CAP_CONN_RSP:
-                       err = l2cap_connect_rsp(conn, &cmd, data);
-                       break;
-
-               case L2CAP_CONF_REQ:
-                       err = l2cap_config_req(conn, &cmd, cmd_len, data);
-                       break;
-
-               case L2CAP_CONF_RSP:
-                       err = l2cap_config_rsp(conn, &cmd, data);
-                       break;
-
-               case L2CAP_DISCONN_REQ:
-                       err = l2cap_disconnect_req(conn, &cmd, data);
-                       break;
-
-               case L2CAP_DISCONN_RSP:
-                       err = l2cap_disconnect_rsp(conn, &cmd, data);
-                       break;
-
-               case L2CAP_ECHO_REQ:
-                       l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
-                       break;
-
-               case L2CAP_ECHO_RSP:
-                       break;
-
-               case L2CAP_INFO_REQ:
-                       err = l2cap_information_req(conn, &cmd, data);
-                       break;
-
-               case L2CAP_INFO_RSP:
-                       err = l2cap_information_rsp(conn, &cmd, data);
-                       break;
-
-               default:
-                       BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
-                       err = -EINVAL;
-                       break;
-               }
-
-               if (err) {
-                       struct l2cap_cmd_rej rej;
-                       BT_DBG("error %d", err);
-
-                       /* FIXME: Map err to a valid reason */
-                       rej.reason = cpu_to_le16(0);
-                       l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
-               }
-
-               data += cmd_len;
-               len  -= cmd_len;
-       }
-
-       kfree_skb(skb);
-}
-
-static int l2cap_check_fcs(struct l2cap_pinfo *pi,  struct sk_buff *skb)
-{
-       u16 our_fcs, rcv_fcs;
-       int hdr_size = L2CAP_HDR_SIZE + 2;
-
-       if (pi->fcs == L2CAP_FCS_CRC16) {
-               skb_trim(skb, skb->len - 2);
-               rcv_fcs = get_unaligned_le16(skb->data + skb->len);
-               our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
-
-               if (our_fcs != rcv_fcs)
-                       return -EBADMSG;
-       }
-       return 0;
-}
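
l2cap_check_fcs() recomputes a CRC-16 over the L2CAP header, control field and payload and compares it against the two FCS bytes trimmed from the tail of the frame. A standalone sketch of that check, assuming the reflected 0xA001 polynomial of the kernel's generic crc16() helper and a hypothetical flat frame buffer in place of an skb:

#include <stdint.h>
#include <stddef.h>

/* Bitwise CRC-16 with the reversed polynomial 0xA001, seeded with 0,
 * intended to match what the kernel's crc16() computes for the FCS. */
static uint16_t crc16_le(uint16_t crc, const uint8_t *buf, size_t len)
{
        while (len--) {
                crc ^= *buf++;
                for (int i = 0; i < 8; i++)
                        crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
        }
        return crc;
}

/* frame = header + control + payload + 2-byte little-endian FCS */
static int fcs_is_valid(const uint8_t *frame, size_t frame_len)
{
        if (frame_len < 2)
                return 0;
        size_t covered = frame_len - 2;
        uint16_t rcv = frame[covered] | (frame[covered + 1] << 8);
        return crc16_le(0, frame, covered) == rcv;
}

As in the function above, the CRC covers everything that precedes the FCS, and a mismatch is simply reported to the caller, which drops the frame and lets the retransmission machinery recover.
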
-
-static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       u16 control = 0;
-
-       pi->frames_sent = 0;
-
-       control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-
-       if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
-               control |= L2CAP_SUPER_RCV_NOT_READY;
-               l2cap_send_sframe(pi, control);
-               pi->conn_state |= L2CAP_CONN_RNR_SENT;
-       }
-
-       if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
-               l2cap_retransmit_frames(sk);
-
-       l2cap_ertm_send(sk);
-
-       if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
-                       pi->frames_sent == 0) {
-               control |= L2CAP_SUPER_RCV_READY;
-               l2cap_send_sframe(pi, control);
-       }
-}
-
-static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
-{
-       struct sk_buff *next_skb;
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       int tx_seq_offset, next_tx_seq_offset;
-
-       bt_cb(skb)->tx_seq = tx_seq;
-       bt_cb(skb)->sar = sar;
-
-       next_skb = skb_peek(SREJ_QUEUE(sk));
-       if (!next_skb) {
-               __skb_queue_tail(SREJ_QUEUE(sk), skb);
-               return 0;
-       }
-
-       tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
-       if (tx_seq_offset < 0)
-               tx_seq_offset += 64;
-
-       do {
-               if (bt_cb(next_skb)->tx_seq == tx_seq)
-                       return -EINVAL;
-
-               next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
-                                               pi->buffer_seq) % 64;
-               if (next_tx_seq_offset < 0)
-                       next_tx_seq_offset += 64;
-
-               if (next_tx_seq_offset > tx_seq_offset) {
-                       __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
-                       return 0;
-               }
-
-               if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
-                       break;
-
-       } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
-
-       __skb_queue_tail(SREJ_QUEUE(sk), skb);
-
-       return 0;
-}
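
l2cap_add_to_srej_queue() keeps the SREJ queue ordered by each frame's distance from buffer_seq, computed modulo 64 because ERTM sequence numbers are 6-bit values; the same offset arithmetic guards the receive window elsewhere in this code. A small sketch with a hypothetical seq_offset() helper:

#include <stdio.h>

/* Distance from `base` to `seq` in a 6-bit (0..63) sequence space. */
static int seq_offset(int seq, int base)
{
        int off = (seq - base) % 64;
        return off < 0 ? off + 64 : off;
}

int main(void)
{
        /* With buffer_seq == 60, tx_seq 62 is 2 ahead and tx_seq 3 is
         * 7 ahead, so 3 sorts after 62 even though it is numerically
         * smaller. */
        printf("%d %d\n", seq_offset(62, 60), seq_offset(3, 60));
        return 0;
}
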
-
-static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       struct sk_buff *_skb;
-       int err;
-
-       switch (control & L2CAP_CTRL_SAR) {
-       case L2CAP_SDU_UNSEGMENTED:
-               if (pi->conn_state & L2CAP_CONN_SAR_SDU)
-                       goto drop;
-
-               err = sock_queue_rcv_skb(sk, skb);
-               if (!err)
-                       return err;
-
-               break;
-
-       case L2CAP_SDU_START:
-               if (pi->conn_state & L2CAP_CONN_SAR_SDU)
-                       goto drop;
-
-               pi->sdu_len = get_unaligned_le16(skb->data);
-
-               if (pi->sdu_len > pi->imtu)
-                       goto disconnect;
-
-               pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
-               if (!pi->sdu)
-                       return -ENOMEM;
-
-               /* Pull the sdu_len bytes only after the alloc succeeds: under a
-                * Local Busy condition this frame can be reprocessed, so the pull
-                * must happen exactly once, i.e. only when the alloc does not fail. */
-               skb_pull(skb, 2);
-
-               memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
-
-               pi->conn_state |= L2CAP_CONN_SAR_SDU;
-               pi->partial_sdu_len = skb->len;
-               break;
-
-       case L2CAP_SDU_CONTINUE:
-               if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
-                       goto disconnect;
-
-               if (!pi->sdu)
-                       goto disconnect;
-
-               pi->partial_sdu_len += skb->len;
-               if (pi->partial_sdu_len > pi->sdu_len)
-                       goto drop;
-
-               memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
-
-               break;
-
-       case L2CAP_SDU_END:
-               if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
-                       goto disconnect;
-
-               if (!pi->sdu)
-                       goto disconnect;
-
-               if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
-                       pi->partial_sdu_len += skb->len;
-
-                       if (pi->partial_sdu_len > pi->imtu)
-                               goto drop;
-
-                       if (pi->partial_sdu_len != pi->sdu_len)
-                               goto drop;
-
-                       memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
-               }
-
-               _skb = skb_clone(pi->sdu, GFP_ATOMIC);
-               if (!_skb) {
-                       pi->conn_state |= L2CAP_CONN_SAR_RETRY;
-                       return -ENOMEM;
-               }
-
-               err = sock_queue_rcv_skb(sk, _skb);
-               if (err < 0) {
-                       kfree_skb(_skb);
-                       pi->conn_state |= L2CAP_CONN_SAR_RETRY;
-                       return err;
-               }
-
-               pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
-               pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
-
-               kfree_skb(pi->sdu);
-               break;
-       }
-
-       kfree_skb(skb);
-       return 0;
-
-drop:
-       kfree_skb(pi->sdu);
-       pi->sdu = NULL;
-
-disconnect:
-       l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
-       kfree_skb(skb);
-       return 0;
-}
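
l2cap_ertm_reassembly_sdu() rebuilds segmented SDUs from the two-bit SAR field: an unsegmented frame is delivered as-is, a start frame carries the total SDU length and opens a buffer, continuation frames append, and the end frame completes and delivers the SDU, with violations escalated to a disconnect. A compact sketch of that state machine, using hypothetical sar_reassemble()/deliver() names and a fixed buffer instead of socket buffers:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

enum sar { SAR_UNSEGMENTED, SAR_START, SAR_END, SAR_CONTINUE };

struct reasm {
        uint8_t  buf[1024];     /* stands in for the allocated SDU skb   */
        uint16_t expected;      /* total SDU length from the start frame */
        uint16_t have;          /* bytes collected so far                */
        int      in_progress;
};

static void deliver(const uint8_t *sdu, uint16_t len)
{
        printf("delivered %u-byte SDU\n", (unsigned) len);
}

/* Returns 0 on success, -1 on a protocol violation (the caller would
 * tear the channel down, as the function above does). */
static int sar_reassemble(struct reasm *r, enum sar sar,
                          const uint8_t *data, uint16_t len)
{
        switch (sar) {
        case SAR_UNSEGMENTED:
                if (r->in_progress)
                        return -1;
                deliver(data, len);
                return 0;
        case SAR_START:
                if (r->in_progress || len < 2)
                        return -1;
                r->expected = data[0] | (data[1] << 8);  /* SDU length header */
                data += 2;
                len -= 2;
                if (r->expected > sizeof(r->buf) || len > r->expected)
                        return -1;
                memcpy(r->buf, data, len);
                r->have = len;
                r->in_progress = 1;
                return 0;
        case SAR_CONTINUE:
        case SAR_END:
                if (!r->in_progress || r->have + len > r->expected)
                        return -1;
                memcpy(r->buf + r->have, data, len);
                r->have += len;
                if (sar == SAR_END) {
                        if (r->have != r->expected)
                                return -1;
                        deliver(r->buf, r->have);
                        r->in_progress = 0;
                }
                return 0;
        }
        return -1;
}
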
-
-static int l2cap_try_push_rx_skb(struct sock *sk)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       struct sk_buff *skb;
-       u16 control;
-       int err;
-
-       while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
-               control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
-               err = l2cap_ertm_reassembly_sdu(sk, skb, control);
-               if (err < 0) {
-                       skb_queue_head(BUSY_QUEUE(sk), skb);
-                       return -EBUSY;
-               }
-
-               pi->buffer_seq = (pi->buffer_seq + 1) % 64;
-       }
-
-       if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
-               goto done;
-
-       control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-       control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
-       l2cap_send_sframe(pi, control);
-       l2cap_pi(sk)->retry_count = 1;
-
-       del_timer(&pi->retrans_timer);
-       __mod_monitor_timer();
-
-       l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
-
-done:
-       pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
-       pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
-
-       BT_DBG("sk %p, Exit local busy", sk);
-
-       return 0;
-}
-
-static void l2cap_busy_work(struct work_struct *work)
-{
-       DECLARE_WAITQUEUE(wait, current);
-       struct l2cap_pinfo *pi =
-               container_of(work, struct l2cap_pinfo, busy_work);
-       struct sock *sk = (struct sock *)pi;
-       int n_tries = 0, timeo = HZ/5, err;
-       struct sk_buff *skb;
-
-       lock_sock(sk);
-
-       add_wait_queue(sk_sleep(sk), &wait);
-       while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
-               set_current_state(TASK_INTERRUPTIBLE);
-
-               if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
-                       err = -EBUSY;
-                       l2cap_send_disconn_req(pi->conn, sk, EBUSY);
-                       break;
-               }
-
-               if (!timeo)
-                       timeo = HZ/5;
-
-               if (signal_pending(current)) {
-                       err = sock_intr_errno(timeo);
-                       break;
-               }
-
-               release_sock(sk);
-               timeo = schedule_timeout(timeo);
-               lock_sock(sk);
-
-               err = sock_error(sk);
-               if (err)
-                       break;
-
-               if (l2cap_try_push_rx_skb(sk) == 0)
-                       break;
-       }
-
-       set_current_state(TASK_RUNNING);
-       remove_wait_queue(sk_sleep(sk), &wait);
-
-       release_sock(sk);
-}
-
-static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       int sctrl, err;
-
-       if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
-               bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
-               __skb_queue_tail(BUSY_QUEUE(sk), skb);
-               return l2cap_try_push_rx_skb(sk);
-       }
-
-       err = l2cap_ertm_reassembly_sdu(sk, skb, control);
-       if (err >= 0) {
-               pi->buffer_seq = (pi->buffer_seq + 1) % 64;
-               return err;
-       }
-
-       /* Busy Condition */
-       BT_DBG("sk %p, Enter local busy", sk);
-
-       pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
-       bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
-       __skb_queue_tail(BUSY_QUEUE(sk), skb);
-
-       sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-       sctrl |= L2CAP_SUPER_RCV_NOT_READY;
-       l2cap_send_sframe(pi, sctrl);
-
-       pi->conn_state |= L2CAP_CONN_RNR_SENT;
-
-       del_timer(&pi->ack_timer);
-
-       queue_work(_busy_wq, &pi->busy_work);
-
-       return err;
-}
-
-static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       struct sk_buff *_skb;
-       int err = -EINVAL;
-
-       /*
-        * TODO: Notify userspace when data is lost in Streaming Mode.
-        */
-
-       switch (control & L2CAP_CTRL_SAR) {
-       case L2CAP_SDU_UNSEGMENTED:
-               if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
-                       kfree_skb(pi->sdu);
-                       break;
-               }
-
-               err = sock_queue_rcv_skb(sk, skb);
-               if (!err)
-                       return 0;
-
-               break;
-
-       case L2CAP_SDU_START:
-               if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
-                       kfree_skb(pi->sdu);
-                       break;
-               }
-
-               pi->sdu_len = get_unaligned_le16(skb->data);
-               skb_pull(skb, 2);
-
-               if (pi->sdu_len > pi->imtu) {
-                       err = -EMSGSIZE;
-                       break;
-               }
-
-               pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
-               if (!pi->sdu) {
-                       err = -ENOMEM;
-                       break;
-               }
-
-               memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
-
-               pi->conn_state |= L2CAP_CONN_SAR_SDU;
-               pi->partial_sdu_len = skb->len;
-               err = 0;
-               break;
-
-       case L2CAP_SDU_CONTINUE:
-               if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
-                       break;
-
-               memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
-
-               pi->partial_sdu_len += skb->len;
-               if (pi->partial_sdu_len > pi->sdu_len)
-                       kfree_skb(pi->sdu);
-               else
-                       err = 0;
-
-               break;
-
-       case L2CAP_SDU_END:
-               if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
-                       break;
-
-               memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
-
-               pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
-               pi->partial_sdu_len += skb->len;
-
-               if (pi->partial_sdu_len > pi->imtu)
-                       goto drop;
-
-               if (pi->partial_sdu_len == pi->sdu_len) {
-                       _skb = skb_clone(pi->sdu, GFP_ATOMIC);
-                       err = sock_queue_rcv_skb(sk, _skb);
-                       if (err < 0)
-                               kfree_skb(_skb);
-               }
-               err = 0;
-
-drop:
-               kfree_skb(pi->sdu);
-               break;
-       }
-
-       kfree_skb(skb);
-       return err;
-}
-
-static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
-{
-       struct sk_buff *skb;
-       u16 control;
-
-       while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
-               if (bt_cb(skb)->tx_seq != tx_seq)
-                       break;
-
-               skb = skb_dequeue(SREJ_QUEUE(sk));
-               control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
-               l2cap_ertm_reassembly_sdu(sk, skb, control);
-               l2cap_pi(sk)->buffer_seq_srej =
-                       (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
-               tx_seq = (tx_seq + 1) % 64;
-       }
-}
-
-static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       struct srej_list *l, *tmp;
-       u16 control;
-
-       list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
-               if (l->tx_seq == tx_seq) {
-                       list_del(&l->list);
-                       kfree(l);
-                       return;
-               }
-               control = L2CAP_SUPER_SELECT_REJECT;
-               control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-               l2cap_send_sframe(pi, control);
-               list_del(&l->list);
-               list_add_tail(&l->list, SREJ_LIST(sk));
-       }
-}
-
-static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       struct srej_list *new;
-       u16 control;
-
-       while (tx_seq != pi->expected_tx_seq) {
-               control = L2CAP_SUPER_SELECT_REJECT;
-               control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-               l2cap_send_sframe(pi, control);
-
-               new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
-               new->tx_seq = pi->expected_tx_seq;
-               pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
-               list_add_tail(&new->list, SREJ_LIST(sk));
-       }
-       pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
-}
-
-static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       u8 tx_seq = __get_txseq(rx_control);
-       u8 req_seq = __get_reqseq(rx_control);
-       u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
-       int tx_seq_offset, expected_tx_seq_offset;
-       int num_to_ack = (pi->tx_win/6) + 1;
-       int err = 0;
-
-       BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
-                                                               rx_control);
-
-       if (L2CAP_CTRL_FINAL & rx_control &&
-                       l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
-               del_timer(&pi->monitor_timer);
-               if (pi->unacked_frames > 0)
-                       __mod_retrans_timer();
-               pi->conn_state &= ~L2CAP_CONN_WAIT_F;
-       }
-
-       pi->expected_ack_seq = req_seq;
-       l2cap_drop_acked_frames(sk);
-
-       if (tx_seq == pi->expected_tx_seq)
-               goto expected;
-
-       tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
-       if (tx_seq_offset < 0)
-               tx_seq_offset += 64;
-
-       /* invalid tx_seq */
-       if (tx_seq_offset >= pi->tx_win) {
-               l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
-               goto drop;
-       }
-
-       if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
-               goto drop;
-
-       if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
-               struct srej_list *first;
-
-               first = list_first_entry(SREJ_LIST(sk),
-                               struct srej_list, list);
-               if (tx_seq == first->tx_seq) {
-                       l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
-                       l2cap_check_srej_gap(sk, tx_seq);
-
-                       list_del(&first->list);
-                       kfree(first);
-
-                       if (list_empty(SREJ_LIST(sk))) {
-                               pi->buffer_seq = pi->buffer_seq_srej;
-                               pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
-                               l2cap_send_ack(pi);
-                               BT_DBG("sk %p, Exit SREJ_SENT", sk);
-                       }
-               } else {
-                       struct srej_list *l;
-
-                       /* duplicated tx_seq */
-                       if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
-                               goto drop;
-
-                       list_for_each_entry(l, SREJ_LIST(sk), list) {
-                               if (l->tx_seq == tx_seq) {
-                                       l2cap_resend_srejframe(sk, tx_seq);
-                                       return 0;
-                               }
-                       }
-                       l2cap_send_srejframe(sk, tx_seq);
-               }
-       } else {
-               expected_tx_seq_offset =
-                       (pi->expected_tx_seq - pi->buffer_seq) % 64;
-               if (expected_tx_seq_offset < 0)
-                       expected_tx_seq_offset += 64;
-
-               /* duplicated tx_seq */
-               if (tx_seq_offset < expected_tx_seq_offset)
-                       goto drop;
-
-               pi->conn_state |= L2CAP_CONN_SREJ_SENT;
-
-               BT_DBG("sk %p, Enter SREJ", sk);
-
-               INIT_LIST_HEAD(SREJ_LIST(sk));
-               pi->buffer_seq_srej = pi->buffer_seq;
-
-               __skb_queue_head_init(SREJ_QUEUE(sk));
-               __skb_queue_head_init(BUSY_QUEUE(sk));
-               l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
-
-               pi->conn_state |= L2CAP_CONN_SEND_PBIT;
-
-               l2cap_send_srejframe(sk, tx_seq);
-
-               del_timer(&pi->ack_timer);
-       }
-       return 0;
-
-expected:
-       pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
-
-       if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
-               bt_cb(skb)->tx_seq = tx_seq;
-               bt_cb(skb)->sar = sar;
-               __skb_queue_tail(SREJ_QUEUE(sk), skb);
-               return 0;
-       }
-
-       err = l2cap_push_rx_skb(sk, skb, rx_control);
-       if (err < 0)
-               return 0;
-
-       if (rx_control & L2CAP_CTRL_FINAL) {
-               if (pi->conn_state & L2CAP_CONN_REJ_ACT)
-                       pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
-               else
-                       l2cap_retransmit_frames(sk);
-       }
-
-       __mod_ack_timer();
-
-       pi->num_acked = (pi->num_acked + 1) % num_to_ack;
-       if (pi->num_acked == num_to_ack - 1)
-               l2cap_send_ack(pi);
-
-       return 0;
-
-drop:
-       kfree_skb(skb);
-       return 0;
-}
-
-static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-
-       BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
-                                               rx_control);
-
-       pi->expected_ack_seq = __get_reqseq(rx_control);
-       l2cap_drop_acked_frames(sk);
-
-       if (rx_control & L2CAP_CTRL_POLL) {
-               pi->conn_state |= L2CAP_CONN_SEND_FBIT;
-               if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
-                       if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
-                                       (pi->unacked_frames > 0))
-                               __mod_retrans_timer();
-
-                       pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
-                       l2cap_send_srejtail(sk);
-               } else {
-                       l2cap_send_i_or_rr_or_rnr(sk);
-               }
-
-       } else if (rx_control & L2CAP_CTRL_FINAL) {
-               pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
-
-               if (pi->conn_state & L2CAP_CONN_REJ_ACT)
-                       pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
-               else
-                       l2cap_retransmit_frames(sk);
-
-       } else {
-               if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
-                               (pi->unacked_frames > 0))
-                       __mod_retrans_timer();
-
-               pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
-               if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
-                       l2cap_send_ack(pi);
-               else
-                       l2cap_ertm_send(sk);
-       }
-}
-
-static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       u8 tx_seq = __get_reqseq(rx_control);
-
-       BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
-
-       pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
-
-       pi->expected_ack_seq = tx_seq;
-       l2cap_drop_acked_frames(sk);
-
-       if (rx_control & L2CAP_CTRL_FINAL) {
-               if (pi->conn_state & L2CAP_CONN_REJ_ACT)
-                       pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
-               else
-                       l2cap_retransmit_frames(sk);
-       } else {
-               l2cap_retransmit_frames(sk);
-
-               if (pi->conn_state & L2CAP_CONN_WAIT_F)
-                       pi->conn_state |= L2CAP_CONN_REJ_ACT;
-       }
-}
-static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       u8 tx_seq = __get_reqseq(rx_control);
-
-       BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
-
-       pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
-
-       if (rx_control & L2CAP_CTRL_POLL) {
-               pi->expected_ack_seq = tx_seq;
-               l2cap_drop_acked_frames(sk);
-
-               pi->conn_state |= L2CAP_CONN_SEND_FBIT;
-               l2cap_retransmit_one_frame(sk, tx_seq);
-
-               l2cap_ertm_send(sk);
-
-               if (pi->conn_state & L2CAP_CONN_WAIT_F) {
-                       pi->srej_save_reqseq = tx_seq;
-                       pi->conn_state |= L2CAP_CONN_SREJ_ACT;
-               }
-       } else if (rx_control & L2CAP_CTRL_FINAL) {
-               if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
-                               pi->srej_save_reqseq == tx_seq)
-                       pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
-               else
-                       l2cap_retransmit_one_frame(sk, tx_seq);
-       } else {
-               l2cap_retransmit_one_frame(sk, tx_seq);
-               if (pi->conn_state & L2CAP_CONN_WAIT_F) {
-                       pi->srej_save_reqseq = tx_seq;
-                       pi->conn_state |= L2CAP_CONN_SREJ_ACT;
-               }
-       }
-}
-
-static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       u8 tx_seq = __get_reqseq(rx_control);
-
-       BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
-
-       pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
-       pi->expected_ack_seq = tx_seq;
-       l2cap_drop_acked_frames(sk);
-
-       if (rx_control & L2CAP_CTRL_POLL)
-               pi->conn_state |= L2CAP_CONN_SEND_FBIT;
-
-       if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
-               del_timer(&pi->retrans_timer);
-               if (rx_control & L2CAP_CTRL_POLL)
-                       l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
-               return;
-       }
-
-       if (rx_control & L2CAP_CTRL_POLL)
-               l2cap_send_srejtail(sk);
-       else
-               l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
-}
-
-static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
-{
-       BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
-
-       if (L2CAP_CTRL_FINAL & rx_control &&
-                       l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
-               del_timer(&l2cap_pi(sk)->monitor_timer);
-               if (l2cap_pi(sk)->unacked_frames > 0)
-                       __mod_retrans_timer();
-               l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
-       }
-
-       switch (rx_control & L2CAP_CTRL_SUPERVISE) {
-       case L2CAP_SUPER_RCV_READY:
-               l2cap_data_channel_rrframe(sk, rx_control);
-               break;
-
-       case L2CAP_SUPER_REJECT:
-               l2cap_data_channel_rejframe(sk, rx_control);
-               break;
-
-       case L2CAP_SUPER_SELECT_REJECT:
-               l2cap_data_channel_srejframe(sk, rx_control);
-               break;
-
-       case L2CAP_SUPER_RCV_NOT_READY:
-               l2cap_data_channel_rnrframe(sk, rx_control);
-               break;
-       }
-
-       kfree_skb(skb);
-       return 0;
-}
-
-static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
-{
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       u16 control;
-       u8 req_seq;
-       int len, next_tx_seq_offset, req_seq_offset;
-
-       control = get_unaligned_le16(skb->data);
-       skb_pull(skb, 2);
-       len = skb->len;
-
-       /*
-        * We can just drop the corrupted I-frame here.
-        * The receiver will miss it, start the proper recovery
-        * procedure and ask for retransmission.
-        */
-       if (l2cap_check_fcs(pi, skb))
-               goto drop;
-
-       if (__is_sar_start(control) && __is_iframe(control))
-               len -= 2;
-
-       if (pi->fcs == L2CAP_FCS_CRC16)
-               len -= 2;
-
-       if (len > pi->mps) {
-               l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
-               goto drop;
-       }
-
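-       /* ReqSeq may only acknowledge frames that were actually sent:
-        * compute both offsets in the 64-frame sequence space and make
-        * sure ReqSeq does not run past next_tx_seq.
-        */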
-       req_seq = __get_reqseq(control);
-       req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
-       if (req_seq_offset < 0)
-               req_seq_offset += 64;
-
-       next_tx_seq_offset =
-               (pi->next_tx_seq - pi->expected_ack_seq) % 64;
-       if (next_tx_seq_offset < 0)
-               next_tx_seq_offset += 64;
-
-       /* check for invalid req-seq */
-       if (req_seq_offset > next_tx_seq_offset) {
-               l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
-               goto drop;
-       }
-
-       if (__is_iframe(control)) {
-               if (len < 0) {
-                       l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
-                       goto drop;
-               }
-
-               l2cap_data_channel_iframe(sk, control, skb);
-       } else {
-               if (len != 0) {
-                       BT_ERR("%d", len);
-                       l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
-                       goto drop;
-               }
-
-               l2cap_data_channel_sframe(sk, control, skb);
-       }
-
-       return 0;
-
-drop:
-       kfree_skb(skb);
-       return 0;
-}
-
-static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
-{
-       struct sock *sk;
-       struct l2cap_pinfo *pi;
-       u16 control;
-       u8 tx_seq;
-       int len;
-
-       sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
-       if (!sk) {
-               BT_DBG("unknown cid 0x%4.4x", cid);
-               goto drop;
-       }
-
-       pi = l2cap_pi(sk);
-
-       BT_DBG("sk %p, len %d", sk, skb->len);
-
-       if (sk->sk_state != BT_CONNECTED)
-               goto drop;
-
-       switch (pi->mode) {
-       case L2CAP_MODE_BASIC:
-               /* If the socket recv buffer overflows we drop data here,
-                * which is *bad* because L2CAP has to be reliable.
-                * But we don't have any other choice: L2CAP doesn't
-                * provide a flow control mechanism. */
-
-               if (pi->imtu < skb->len)
-                       goto drop;
-
-               if (!sock_queue_rcv_skb(sk, skb))
-                       goto done;
-               break;
-
-       case L2CAP_MODE_ERTM:
-               if (!sock_owned_by_user(sk)) {
-                       l2cap_ertm_data_rcv(sk, skb);
-               } else {
-                       if (sk_add_backlog(sk, skb))
-                               goto drop;
-               }
-
-               goto done;
-
-       case L2CAP_MODE_STREAMING:
-               control = get_unaligned_le16(skb->data);
-               skb_pull(skb, 2);
-               len = skb->len;
-
-               if (l2cap_check_fcs(pi, skb))
-                       goto drop;
-
-               if (__is_sar_start(control))
-                       len -= 2;
-
-               if (pi->fcs == L2CAP_FCS_CRC16)
-                       len -= 2;
-
-               if (len > pi->mps || len < 0 || __is_sframe(control))
-                       goto drop;
-
-               tx_seq = __get_txseq(control);
-
-               if (pi->expected_tx_seq == tx_seq)
-                       pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
-               else
-                       pi->expected_tx_seq = (tx_seq + 1) % 64;
-
-               l2cap_streaming_reassembly_sdu(sk, skb, control);
-
-               goto done;
-
-       default:
-               BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
-               break;
-       }
-
-drop:
-       kfree_skb(skb);
-
-done:
-       if (sk)
-               bh_unlock_sock(sk);
-
-       return 0;
-}
-
-static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
-{
-       struct sock *sk;
-
-       sk = l2cap_get_sock_by_psm(0, psm, conn->src);
-       if (!sk)
-               goto drop;
-
-       bh_lock_sock(sk);
-
-       BT_DBG("sk %p, len %d", sk, skb->len);
-
-       if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
-               goto drop;
-
-       if (l2cap_pi(sk)->imtu < skb->len)
-               goto drop;
-
-       if (!sock_queue_rcv_skb(sk, skb))
-               goto done;
-
-drop:
-       kfree_skb(skb);
-
-done:
-       if (sk)
-               bh_unlock_sock(sk);
-       return 0;
-}
-
-static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
-{
-       struct l2cap_hdr *lh = (void *) skb->data;
-       u16 cid, len;
-       __le16 psm;
-
-       skb_pull(skb, L2CAP_HDR_SIZE);
-       cid = __le16_to_cpu(lh->cid);
-       len = __le16_to_cpu(lh->len);
-
-       if (len != skb->len) {
-               kfree_skb(skb);
-               return;
-       }
-
-       BT_DBG("len %d, cid 0x%4.4x", len, cid);
-
-       switch (cid) {
-       case L2CAP_CID_SIGNALING:
-               l2cap_sig_channel(conn, skb);
-               break;
-
-       case L2CAP_CID_CONN_LESS:
-               psm = get_unaligned_le16(skb->data);
-               skb_pull(skb, 2);
-               l2cap_conless_channel(conn, psm, skb);
-               break;
-
-       default:
-               l2cap_data_channel(conn, cid, skb);
-               break;
-       }
-}
-
-/* ---- L2CAP interface with lower layer (HCI) ---- */
-
-static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
-{
-       int exact = 0, lm1 = 0, lm2 = 0;
-       register struct sock *sk;
-       struct hlist_node *node;
-
-       if (type != ACL_LINK)
-               return -EINVAL;
-
-       BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
-
-       /* Find listening sockets and check their link_mode */
-       read_lock(&l2cap_sk_list.lock);
-       sk_for_each(sk, node, &l2cap_sk_list.head) {
-               if (sk->sk_state != BT_LISTEN)
-                       continue;
-
-               if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
-                       lm1 |= HCI_LM_ACCEPT;
-                       if (l2cap_pi(sk)->role_switch)
-                               lm1 |= HCI_LM_MASTER;
-                       exact++;
-               } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
-                       lm2 |= HCI_LM_ACCEPT;
-                       if (l2cap_pi(sk)->role_switch)
-                               lm2 |= HCI_LM_MASTER;
-               }
-       }
-       read_unlock(&l2cap_sk_list.lock);
-
-       return exact ? lm1 : lm2;
-}
-
-static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
-{
-       struct l2cap_conn *conn;
-
-       BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
-
-       if (hcon->type != ACL_LINK)
-               return -EINVAL;
-
-       if (!status) {
-               conn = l2cap_conn_add(hcon, status);
-               if (conn)
-                       l2cap_conn_ready(conn);
-       } else
-               l2cap_conn_del(hcon, bt_err(status));
-
-       return 0;
-}
-
-static int l2cap_disconn_ind(struct hci_conn *hcon)
-{
-       struct l2cap_conn *conn = hcon->l2cap_data;
-
-       BT_DBG("hcon %p", hcon);
-
-       if (hcon->type != ACL_LINK || !conn)
-               return 0x13;
-
-       return conn->disc_reason;
-}
-
-static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
-{
-       BT_DBG("hcon %p reason %d", hcon, reason);
-
-       if (hcon->type != ACL_LINK)
-               return -EINVAL;
-
-       l2cap_conn_del(hcon, bt_err(reason));
-
-       return 0;
-}
-
-static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
-{
-       if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
-               return;
-
-       if (encrypt == 0x00) {
-               if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
-                       l2cap_sock_clear_timer(sk);
-                       l2cap_sock_set_timer(sk, HZ * 5);
-               } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
-                       __l2cap_sock_close(sk, ECONNREFUSED);
-       } else {
-               if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
-                       l2cap_sock_clear_timer(sk);
-       }
-}
-
-static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
-{
-       struct l2cap_chan_list *l;
-       struct l2cap_conn *conn = hcon->l2cap_data;
-       struct sock *sk;
-
-       if (!conn)
-               return 0;
-
-       l = &conn->chan_list;
-
-       BT_DBG("conn %p", conn);
-
-       read_lock(&l->lock);
-
-       for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
-               bh_lock_sock(sk);
-
-               if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
-                       bh_unlock_sock(sk);
-                       continue;
-               }
-
-               if (!status && (sk->sk_state == BT_CONNECTED ||
-                                               sk->sk_state == BT_CONFIG)) {
-                       l2cap_check_encryption(sk, encrypt);
-                       bh_unlock_sock(sk);
-                       continue;
-               }
-
-               if (sk->sk_state == BT_CONNECT) {
-                       if (!status) {
-                               struct l2cap_conn_req req;
-                               req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
-                               req.psm  = l2cap_pi(sk)->psm;
-
-                               l2cap_pi(sk)->ident = l2cap_get_ident(conn);
-                               l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
-
-                               l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
-                                       L2CAP_CONN_REQ, sizeof(req), &req);
-                       } else {
-                               l2cap_sock_clear_timer(sk);
-                               l2cap_sock_set_timer(sk, HZ / 10);
-                       }
-               } else if (sk->sk_state == BT_CONNECT2) {
-                       struct l2cap_conn_rsp rsp;
-                       __u16 result;
-
-                       if (!status) {
-                               sk->sk_state = BT_CONFIG;
-                               result = L2CAP_CR_SUCCESS;
-                       } else {
-                               sk->sk_state = BT_DISCONN;
-                               l2cap_sock_set_timer(sk, HZ / 10);
-                               result = L2CAP_CR_SEC_BLOCK;
-                       }
-
-                       rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
-                       rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
-                       rsp.result = cpu_to_le16(result);
-                       rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-                       l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
-                                       L2CAP_CONN_RSP, sizeof(rsp), &rsp);
-               }
-
-               bh_unlock_sock(sk);
-       }
-
-       read_unlock(&l->lock);
-
-       return 0;
-}
-
-static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
-{
-       struct l2cap_conn *conn = hcon->l2cap_data;
-
-       if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
-               goto drop;
-
-       BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
-
-       if (flags & ACL_START) {
-               struct l2cap_hdr *hdr;
-               struct sock *sk;
-               u16 cid;
-               int len;
-
-               if (conn->rx_len) {
-                       BT_ERR("Unexpected start frame (len %d)", skb->len);
-                       kfree_skb(conn->rx_skb);
-                       conn->rx_skb = NULL;
-                       conn->rx_len = 0;
-                       l2cap_conn_unreliable(conn, ECOMM);
-               }
-
-               /* A start fragment always begins with the Basic L2CAP header */
-               if (skb->len < L2CAP_HDR_SIZE) {
-                       BT_ERR("Frame is too short (len %d)", skb->len);
-                       l2cap_conn_unreliable(conn, ECOMM);
-                       goto drop;
-               }
-
-               hdr = (struct l2cap_hdr *) skb->data;
-               len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
-               cid = __le16_to_cpu(hdr->cid);
-
-               if (len == skb->len) {
-                       /* Complete frame received */
-                       l2cap_recv_frame(conn, skb);
-                       return 0;
-               }
-
-               BT_DBG("Start: total len %d, frag len %d", len, skb->len);
-
-               if (skb->len > len) {
-                       BT_ERR("Frame is too long (len %d, expected len %d)",
-                               skb->len, len);
-                       l2cap_conn_unreliable(conn, ECOMM);
-                       goto drop;
-               }
-
-               sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
-
-               if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
-                       BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
-                                       len, l2cap_pi(sk)->imtu);
-                       bh_unlock_sock(sk);
-                       l2cap_conn_unreliable(conn, ECOMM);
-                       goto drop;
-               }
-
-               if (sk)
-                       bh_unlock_sock(sk);
-
-               /* Allocate skb for the complete frame (with header) */
-               conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
-               if (!conn->rx_skb)
-                       goto drop;
-
-               skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
-                                                               skb->len);
-               conn->rx_len = len - skb->len;
-       } else {
-               BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
-
-               if (!conn->rx_len) {
-                       BT_ERR("Unexpected continuation frame (len %d)", skb->len);
-                       l2cap_conn_unreliable(conn, ECOMM);
-                       goto drop;
-               }
-
-               if (skb->len > conn->rx_len) {
-                       BT_ERR("Fragment is too long (len %d, expected %d)",
-                                       skb->len, conn->rx_len);
-                       kfree_skb(conn->rx_skb);
-                       conn->rx_skb = NULL;
-                       conn->rx_len = 0;
-                       l2cap_conn_unreliable(conn, ECOMM);
-                       goto drop;
-               }
-
-               skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
-                                                               skb->len);
-               conn->rx_len -= skb->len;
-
-               if (!conn->rx_len) {
-                       /* Complete frame received */
-                       l2cap_recv_frame(conn, conn->rx_skb);
-                       conn->rx_skb = NULL;
-               }
-       }
-
-drop:
-       kfree_skb(skb);
-       return 0;
-}
-
-static int l2cap_debugfs_show(struct seq_file *f, void *p)
-{
-       struct sock *sk;
-       struct hlist_node *node;
-
-       read_lock_bh(&l2cap_sk_list.lock);
-
-       sk_for_each(sk, node, &l2cap_sk_list.head) {
-               struct l2cap_pinfo *pi = l2cap_pi(sk);
-
-               seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
-                                       batostr(&bt_sk(sk)->src),
-                                       batostr(&bt_sk(sk)->dst),
-                                       sk->sk_state, __le16_to_cpu(pi->psm),
-                                       pi->scid, pi->dcid,
-                                       pi->imtu, pi->omtu, pi->sec_level);
-       }
-
-       read_unlock_bh(&l2cap_sk_list.lock);
-
-       return 0;
-}
-
-static int l2cap_debugfs_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, l2cap_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations l2cap_debugfs_fops = {
-       .open           = l2cap_debugfs_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static struct dentry *l2cap_debugfs;
-
-static const struct proto_ops l2cap_sock_ops = {
-       .family         = PF_BLUETOOTH,
-       .owner          = THIS_MODULE,
-       .release        = l2cap_sock_release,
-       .bind           = l2cap_sock_bind,
-       .connect        = l2cap_sock_connect,
-       .listen         = l2cap_sock_listen,
-       .accept         = l2cap_sock_accept,
-       .getname        = l2cap_sock_getname,
-       .sendmsg        = l2cap_sock_sendmsg,
-       .recvmsg        = l2cap_sock_recvmsg,
-       .poll           = bt_sock_poll,
-       .ioctl          = bt_sock_ioctl,
-       .mmap           = sock_no_mmap,
-       .socketpair     = sock_no_socketpair,
-       .shutdown       = l2cap_sock_shutdown,
-       .setsockopt     = l2cap_sock_setsockopt,
-       .getsockopt     = l2cap_sock_getsockopt
-};
-
-static const struct net_proto_family l2cap_sock_family_ops = {
-       .family = PF_BLUETOOTH,
-       .owner  = THIS_MODULE,
-       .create = l2cap_sock_create,
-};
-
-static struct hci_proto l2cap_hci_proto = {
-       .name           = "L2CAP",
-       .id             = HCI_PROTO_L2CAP,
-       .connect_ind    = l2cap_connect_ind,
-       .connect_cfm    = l2cap_connect_cfm,
-       .disconn_ind    = l2cap_disconn_ind,
-       .disconn_cfm    = l2cap_disconn_cfm,
-       .security_cfm   = l2cap_security_cfm,
-       .recv_acldata   = l2cap_recv_acldata
-};
-
-static int __init l2cap_init(void)
-{
-       int err;
-
-       err = proto_register(&l2cap_proto, 0);
-       if (err < 0)
-               return err;
-
-       _busy_wq = create_singlethread_workqueue("l2cap");
-       if (!_busy_wq) {
-               proto_unregister(&l2cap_proto);
-               return -ENOMEM;
-       }
-
-       err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
-       if (err < 0) {
-               BT_ERR("L2CAP socket registration failed");
-               goto error;
-       }
-
-       err = hci_register_proto(&l2cap_hci_proto);
-       if (err < 0) {
-               BT_ERR("L2CAP protocol registration failed");
-               bt_sock_unregister(BTPROTO_L2CAP);
-               goto error;
-       }
-
-       if (bt_debugfs) {
-               l2cap_debugfs = debugfs_create_file("l2cap", 0444,
-                                       bt_debugfs, NULL, &l2cap_debugfs_fops);
-               if (!l2cap_debugfs)
-                       BT_ERR("Failed to create L2CAP debug file");
-       }
-
-       BT_INFO("L2CAP ver %s", VERSION);
-       BT_INFO("L2CAP socket layer initialized");
-
-       return 0;
-
-error:
-       destroy_workqueue(_busy_wq);
-       proto_unregister(&l2cap_proto);
-       return err;
-}
-
-static void __exit l2cap_exit(void)
-{
-       debugfs_remove(l2cap_debugfs);
-
-       flush_workqueue(_busy_wq);
-       destroy_workqueue(_busy_wq);
-
-       if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
-               BT_ERR("L2CAP socket unregistration failed");
-
-       if (hci_unregister_proto(&l2cap_hci_proto) < 0)
-               BT_ERR("L2CAP protocol unregistration failed");
-
-       proto_unregister(&l2cap_proto);
-}
-
-void l2cap_load(void)
-{
-       /* Dummy function to trigger automatic L2CAP module loading by
-        * other modules that use L2CAP sockets but don't use any other
-        * symbols from it. */
-}
-EXPORT_SYMBOL(l2cap_load);
-
-module_init(l2cap_init);
-module_exit(l2cap_exit);
-
-module_param(disable_ertm, bool, 0644);
-MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
-
-MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
-MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
-MODULE_VERSION(VERSION);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("bt-proto-0");
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
new file mode 100644 (file)
index 0000000..efcef0d
--- /dev/null
@@ -0,0 +1,4060 @@
+/*
+   BlueZ - Bluetooth protocol stack for Linux
+   Copyright (C) 2000-2001 Qualcomm Incorporated
+   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
+   Copyright (C) 2010 Google Inc.
+
+   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License version 2 as
+   published by the Free Software Foundation;
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+   SOFTWARE IS DISCLAIMED.
+*/
+
+/* Bluetooth L2CAP core. */
+
+#include <linux/module.h>
+
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/socket.h>
+#include <linux/skbuff.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/crc16.h>
+#include <net/sock.h>
+
+#include <asm/system.h>
+#include <asm/unaligned.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
+
+int disable_ertm;
+
+static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
+static u8 l2cap_fixed_chan[8] = { 0x02, };
+
+static struct workqueue_struct *_busy_wq;
+
+struct bt_sock_list l2cap_sk_list = {
+       .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
+};
+
+static void l2cap_busy_work(struct work_struct *work);
+
+static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
+                               u8 code, u8 ident, u16 dlen, void *data);
+
+static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
+
+/* ---- L2CAP channels ---- */
+static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
+{
+       struct sock *s;
+       for (s = l->head; s; s = l2cap_pi(s)->next_c) {
+               if (l2cap_pi(s)->dcid == cid)
+                       break;
+       }
+       return s;
+}
+
+static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
+{
+       struct sock *s;
+       for (s = l->head; s; s = l2cap_pi(s)->next_c) {
+               if (l2cap_pi(s)->scid == cid)
+                       break;
+       }
+       return s;
+}
+
+/* Find channel with given SCID.
+ * Returns locked socket */
+static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
+{
+       struct sock *s;
+       read_lock(&l->lock);
+       s = __l2cap_get_chan_by_scid(l, cid);
+       if (s)
+               bh_lock_sock(s);
+       read_unlock(&l->lock);
+       return s;
+}
+
+static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
+{
+       struct sock *s;
+       for (s = l->head; s; s = l2cap_pi(s)->next_c) {
+               if (l2cap_pi(s)->ident == ident)
+                       break;
+       }
+       return s;
+}
+
+static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
+{
+       struct sock *s;
+       read_lock(&l->lock);
+       s = __l2cap_get_chan_by_ident(l, ident);
+       if (s)
+               bh_lock_sock(s);
+       read_unlock(&l->lock);
+       return s;
+}
+
+static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
+{
+       u16 cid = L2CAP_CID_DYN_START;
+
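+       /* Linear scan of the dynamic CID range; return the first CID not
+        * already in use as a source CID on this connection, or 0 if the
+        * range is exhausted.
+        */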
+       for (; cid < L2CAP_CID_DYN_END; cid++) {
+               if (!__l2cap_get_chan_by_scid(l, cid))
+                       return cid;
+       }
+
+       return 0;
+}
+
+static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
+{
+       sock_hold(sk);
+
+       if (l->head)
+               l2cap_pi(l->head)->prev_c = sk;
+
+       l2cap_pi(sk)->next_c = l->head;
+       l2cap_pi(sk)->prev_c = NULL;
+       l->head = sk;
+}
+
+static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
+{
+       struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
+
+       write_lock_bh(&l->lock);
+       if (sk == l->head)
+               l->head = next;
+
+       if (next)
+               l2cap_pi(next)->prev_c = prev;
+       if (prev)
+               l2cap_pi(prev)->next_c = next;
+       write_unlock_bh(&l->lock);
+
+       __sock_put(sk);
+}
+
+static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
+{
+       struct l2cap_chan_list *l = &conn->chan_list;
+
+       BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
+                       l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
+
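+       /* Default disconnect reason: 0x13 is the HCI "remote user
+        * terminated connection" error code.
+        */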
+       conn->disc_reason = 0x13;
+
+       l2cap_pi(sk)->conn = conn;
+
+       if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
+               if (conn->hcon->type == LE_LINK) {
+                       /* LE connection */
+                       l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
+                       l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
+                       l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
+               } else {
+                       /* Alloc CID for connection-oriented socket */
+                       l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
+                       l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
+               }
+       } else if (sk->sk_type == SOCK_DGRAM) {
+               /* Connectionless socket */
+               l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
+               l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
+               l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
+       } else {
+               /* Raw socket can send/recv signalling messages only */
+               l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
+               l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
+               l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
+       }
+
+       __l2cap_chan_link(l, sk);
+
+       if (parent)
+               bt_accept_enqueue(parent, sk);
+}
+
+/* Delete channel.
+ * Must be called on the locked socket. */
+void l2cap_chan_del(struct sock *sk, int err)
+{
+       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+       struct sock *parent = bt_sk(sk)->parent;
+
+       l2cap_sock_clear_timer(sk);
+
+       BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
+
+       if (conn) {
+               /* Unlink from channel list */
+               l2cap_chan_unlink(&conn->chan_list, sk);
+               l2cap_pi(sk)->conn = NULL;
+               hci_conn_put(conn->hcon);
+       }
+
+       sk->sk_state = BT_CLOSED;
+       sock_set_flag(sk, SOCK_ZAPPED);
+
+       if (err)
+               sk->sk_err = err;
+
+       if (parent) {
+               bt_accept_unlink(sk);
+               parent->sk_data_ready(parent, 0);
+       } else
+               sk->sk_state_change(sk);
+
+       skb_queue_purge(TX_QUEUE(sk));
+
+       if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
+               struct srej_list *l, *tmp;
+
+               del_timer(&l2cap_pi(sk)->retrans_timer);
+               del_timer(&l2cap_pi(sk)->monitor_timer);
+               del_timer(&l2cap_pi(sk)->ack_timer);
+
+               skb_queue_purge(SREJ_QUEUE(sk));
+               skb_queue_purge(BUSY_QUEUE(sk));
+
+               list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
+                       list_del(&l->list);
+                       kfree(l);
+               }
+       }
+}
+
+static inline u8 l2cap_get_auth_type(struct sock *sk)
+{
+       if (sk->sk_type == SOCK_RAW) {
+               switch (l2cap_pi(sk)->sec_level) {
+               case BT_SECURITY_HIGH:
+                       return HCI_AT_DEDICATED_BONDING_MITM;
+               case BT_SECURITY_MEDIUM:
+                       return HCI_AT_DEDICATED_BONDING;
+               default:
+                       return HCI_AT_NO_BONDING;
+               }
+       } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
+               if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
+                       l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
+
+               if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
+                       return HCI_AT_NO_BONDING_MITM;
+               else
+                       return HCI_AT_NO_BONDING;
+       } else {
+               switch (l2cap_pi(sk)->sec_level) {
+               case BT_SECURITY_HIGH:
+                       return HCI_AT_GENERAL_BONDING_MITM;
+               case BT_SECURITY_MEDIUM:
+                       return HCI_AT_GENERAL_BONDING;
+               default:
+                       return HCI_AT_NO_BONDING;
+               }
+       }
+}
+
+/* Service level security */
+static inline int l2cap_check_security(struct sock *sk)
+{
+       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+       __u8 auth_type;
+
+       auth_type = l2cap_get_auth_type(sk);
+
+       return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
+                                                               auth_type);
+}
+
+u8 l2cap_get_ident(struct l2cap_conn *conn)
+{
+       u8 id;
+
+       /* Get the next available identifier.
+        *    1 - 128 are used by the kernel.
+        *  129 - 199 are reserved.
+        *  200 - 254 are used by utilities like l2ping, etc.
+        */
+
+       spin_lock_bh(&conn->lock);
+
+       if (++conn->tx_ident > 128)
+               conn->tx_ident = 1;
+
+       id = conn->tx_ident;
+
+       spin_unlock_bh(&conn->lock);
+
+       return id;
+}
+
+void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
+{
+       struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
+       u8 flags;
+
+       BT_DBG("code 0x%2.2x", code);
+
+       if (!skb)
+               return;
+
+       if (lmp_no_flush_capable(conn->hcon->hdev))
+               flags = ACL_START_NO_FLUSH;
+       else
+               flags = ACL_START;
+
+       hci_send_acl(conn->hcon, skb, flags);
+}
+
+static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
+{
+       struct sk_buff *skb;
+       struct l2cap_hdr *lh;
+       struct l2cap_conn *conn = pi->conn;
+       struct sock *sk = (struct sock *)pi;
+       int count, hlen = L2CAP_HDR_SIZE + 2;
+       u8 flags;
+
+       if (sk->sk_state != BT_CONNECTED)
+               return;
+
+       if (pi->fcs == L2CAP_FCS_CRC16)
+               hlen += 2;
+
+       BT_DBG("pi %p, control 0x%2.2x", pi, control);
+
+       count = min_t(unsigned int, conn->mtu, hlen);
+       control |= L2CAP_CTRL_FRAME_TYPE;
+
+       if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
+               control |= L2CAP_CTRL_FINAL;
+               pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
+       }
+
+       if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
+               control |= L2CAP_CTRL_POLL;
+               pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
+       }
+
+       skb = bt_skb_alloc(count, GFP_ATOMIC);
+       if (!skb)
+               return;
+
+       lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+       lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
+       lh->cid = cpu_to_le16(pi->dcid);
+       put_unaligned_le16(control, skb_put(skb, 2));
+
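+       /* When FCS is enabled the CRC16 covers the L2CAP header plus the
+        * control field and is appended right after them.
+        */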
+       if (pi->fcs == L2CAP_FCS_CRC16) {
+               u16 fcs = crc16(0, (u8 *)lh, count - 2);
+               put_unaligned_le16(fcs, skb_put(skb, 2));
+       }
+
+       if (lmp_no_flush_capable(conn->hcon->hdev))
+               flags = ACL_START_NO_FLUSH;
+       else
+               flags = ACL_START;
+
+       hci_send_acl(pi->conn->hcon, skb, flags);
+}
+
+static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
+{
+       if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+               control |= L2CAP_SUPER_RCV_NOT_READY;
+               pi->conn_state |= L2CAP_CONN_RNR_SENT;
+       } else
+               control |= L2CAP_SUPER_RCV_READY;
+
+       control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+
+       l2cap_send_sframe(pi, control);
+}
+
+static inline int __l2cap_no_conn_pending(struct sock *sk)
+{
+       return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
+}
+
+static void l2cap_do_start(struct sock *sk)
+{
+       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+
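+       /* A Connection Request may only go out after the feature-mask
+        * exchange has completed; if it has not been started yet, send
+        * an Information Request first and wait for the response.
+        */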
+       if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
+               if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
+                       return;
+
+               if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
+                       struct l2cap_conn_req req;
+                       req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
+                       req.psm  = l2cap_pi(sk)->psm;
+
+                       l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+                       l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
+
+                       l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
+                                       L2CAP_CONN_REQ, sizeof(req), &req);
+               }
+       } else {
+               struct l2cap_info_req req;
+               req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
+
+               conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
+               conn->info_ident = l2cap_get_ident(conn);
+
+               mod_timer(&conn->info_timer, jiffies +
+                                       msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
+
+               l2cap_send_cmd(conn, conn->info_ident,
+                                       L2CAP_INFO_REQ, sizeof(req), &req);
+       }
+}
+
+static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
+{
+       u32 local_feat_mask = l2cap_feat_mask;
+       if (!disable_ertm)
+               local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
+
+       switch (mode) {
+       case L2CAP_MODE_ERTM:
+               return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
+       case L2CAP_MODE_STREAMING:
+               return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
+       default:
+               return 0x00;
+       }
+}
+
+void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
+{
+       struct l2cap_disconn_req req;
+
+       if (!conn)
+               return;
+
+       skb_queue_purge(TX_QUEUE(sk));
+
+       if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
+               del_timer(&l2cap_pi(sk)->retrans_timer);
+               del_timer(&l2cap_pi(sk)->monitor_timer);
+               del_timer(&l2cap_pi(sk)->ack_timer);
+       }
+
+       req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
+       req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
+       l2cap_send_cmd(conn, l2cap_get_ident(conn),
+                       L2CAP_DISCONN_REQ, sizeof(req), &req);
+
+       sk->sk_state = BT_DISCONN;
+       sk->sk_err = err;
+}
+
+/* ---- L2CAP connections ---- */
+static void l2cap_conn_start(struct l2cap_conn *conn)
+{
+       struct l2cap_chan_list *l = &conn->chan_list;
+       struct sock_del_list del, *tmp1, *tmp2;
+       struct sock *sk;
+
+       BT_DBG("conn %p", conn);
+
+       INIT_LIST_HEAD(&del.list);
+
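+       /* Channels that cannot proceed are collected on a local list and
+        * closed only after the channel-list lock has been dropped.
+        */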
+       read_lock(&l->lock);
+
+       for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
+               bh_lock_sock(sk);
+
+               if (sk->sk_type != SOCK_SEQPACKET &&
+                               sk->sk_type != SOCK_STREAM) {
+                       bh_unlock_sock(sk);
+                       continue;
+               }
+
+               if (sk->sk_state == BT_CONNECT) {
+                       struct l2cap_conn_req req;
+
+                       if (!l2cap_check_security(sk) ||
+                                       !__l2cap_no_conn_pending(sk)) {
+                               bh_unlock_sock(sk);
+                               continue;
+                       }
+
+                       if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
+                                       conn->feat_mask)
+                                       && l2cap_pi(sk)->conf_state &
+                                       L2CAP_CONF_STATE2_DEVICE) {
+                               tmp1 = kzalloc(sizeof(struct sock_del_list),
+                                               GFP_ATOMIC);
+                               tmp1->sk = sk;
+                               list_add_tail(&tmp1->list, &del.list);
+                               bh_unlock_sock(sk);
+                               continue;
+                       }
+
+                       req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
+                       req.psm  = l2cap_pi(sk)->psm;
+
+                       l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+                       l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
+
+                       l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
+                               L2CAP_CONN_REQ, sizeof(req), &req);
+
+               } else if (sk->sk_state == BT_CONNECT2) {
+                       struct l2cap_conn_rsp rsp;
+                       char buf[128];
+                       rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+                       rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+
+                       if (l2cap_check_security(sk)) {
+                               if (bt_sk(sk)->defer_setup) {
+                                       struct sock *parent = bt_sk(sk)->parent;
+                                       rsp.result = cpu_to_le16(L2CAP_CR_PEND);
+                                       rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
+                                       parent->sk_data_ready(parent, 0);
+
+                               } else {
+                                       sk->sk_state = BT_CONFIG;
+                                       rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+                                       rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+                               }
+                       } else {
+                               rsp.result = cpu_to_le16(L2CAP_CR_PEND);
+                               rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
+                       }
+
+                       l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
+                                       L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+
+                       if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
+                                       rsp.result != L2CAP_CR_SUCCESS) {
+                               bh_unlock_sock(sk);
+                               continue;
+                       }
+
+                       l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+                       l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+                                               l2cap_build_conf_req(sk, buf), buf);
+                       l2cap_pi(sk)->num_conf_req++;
+               }
+
+               bh_unlock_sock(sk);
+       }
+
+       read_unlock(&l->lock);
+
+       list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
+               bh_lock_sock(tmp1->sk);
+               __l2cap_sock_close(tmp1->sk, ECONNRESET);
+               bh_unlock_sock(tmp1->sk);
+               list_del(&tmp1->list);
+               kfree(tmp1);
+       }
+}
+
+/* Find socket with cid and source bdaddr.
+ * Returns closest match, locked.
+ */
+static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
+{
+       struct sock *s, *sk = NULL, *sk1 = NULL;
+       struct hlist_node *node;
+
+       read_lock(&l2cap_sk_list.lock);
+
+       sk_for_each(sk, node, &l2cap_sk_list.head) {
+               if (state && sk->sk_state != state)
+                       continue;
+
+               if (l2cap_pi(sk)->scid == cid) {
+                       /* Exact match. */
+                       if (!bacmp(&bt_sk(sk)->src, src))
+                               break;
+
+                       /* Closest match */
+                       if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+                               sk1 = sk;
+               }
+       }
+       s = node ? sk : sk1;
+       if (s)
+               bh_lock_sock(s);
+       read_unlock(&l2cap_sk_list.lock);
+
+       return s;
+}
+
+static void l2cap_le_conn_ready(struct l2cap_conn *conn)
+{
+       struct l2cap_chan_list *list = &conn->chan_list;
+       struct sock *parent, *uninitialized_var(sk);
+
+       BT_DBG("");
+
+       /* Check if we have a socket listening on this cid */
+       parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
+                                                       conn->src);
+       if (!parent)
+               return;
+
+       /* Check for backlog size */
+       if (sk_acceptq_is_full(parent)) {
+               BT_DBG("backlog full %d", parent->sk_ack_backlog);
+               goto clean;
+       }
+
+       sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
+       if (!sk)
+               goto clean;
+
+       write_lock_bh(&list->lock);
+
+       hci_conn_hold(conn->hcon);
+
+       l2cap_sock_init(sk, parent);
+       bacpy(&bt_sk(sk)->src, conn->src);
+       bacpy(&bt_sk(sk)->dst, conn->dst);
+
+       __l2cap_chan_add(conn, sk, parent);
+
+       l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+
+       sk->sk_state = BT_CONNECTED;
+       parent->sk_data_ready(parent, 0);
+
+       write_unlock_bh(&list->lock);
+
+clean:
+       bh_unlock_sock(parent);
+}
+
+static void l2cap_conn_ready(struct l2cap_conn *conn)
+{
+       struct l2cap_chan_list *l = &conn->chan_list;
+       struct sock *sk;
+
+       BT_DBG("conn %p", conn);
+
+       if (!conn->hcon->out && conn->hcon->type == LE_LINK)
+               l2cap_le_conn_ready(conn);
+
+       read_lock(&l->lock);
+
+       for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
+               bh_lock_sock(sk);
+
+               if (conn->hcon->type == LE_LINK) {
+                       l2cap_sock_clear_timer(sk);
+                       sk->sk_state = BT_CONNECTED;
+                       sk->sk_state_change(sk);
+               }
+
+               if (sk->sk_type != SOCK_SEQPACKET &&
+                               sk->sk_type != SOCK_STREAM) {
+                       l2cap_sock_clear_timer(sk);
+                       sk->sk_state = BT_CONNECTED;
+                       sk->sk_state_change(sk);
+               } else if (sk->sk_state == BT_CONNECT)
+                       l2cap_do_start(sk);
+
+               bh_unlock_sock(sk);
+       }
+
+       read_unlock(&l->lock);
+}
+
+/* Notify sockets that we cannot guarantee reliability anymore */
+static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
+{
+       struct l2cap_chan_list *l = &conn->chan_list;
+       struct sock *sk;
+
+       BT_DBG("conn %p", conn);
+
+       read_lock(&l->lock);
+
+       for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
+               if (l2cap_pi(sk)->force_reliable)
+                       sk->sk_err = err;
+       }
+
+       read_unlock(&l->lock);
+}
+
+static void l2cap_info_timeout(unsigned long arg)
+{
+       struct l2cap_conn *conn = (void *) arg;
+
+       conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
+       conn->info_ident = 0;
+
+       l2cap_conn_start(conn);
+}
+
+static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
+{
+       struct l2cap_conn *conn = hcon->l2cap_data;
+
+       if (conn || status)
+               return conn;
+
+       conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
+       if (!conn)
+               return NULL;
+
+       hcon->l2cap_data = conn;
+       conn->hcon = hcon;
+
+       BT_DBG("hcon %p conn %p", hcon, conn);
+
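+       /* Prefer the controller's LE-specific buffer size for LE links,
+        * otherwise fall back to the ACL MTU.
+        */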
+       if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
+               conn->mtu = hcon->hdev->le_mtu;
+       else
+               conn->mtu = hcon->hdev->acl_mtu;
+
+       conn->src = &hcon->hdev->bdaddr;
+       conn->dst = &hcon->dst;
+
+       conn->feat_mask = 0;
+
+       spin_lock_init(&conn->lock);
+       rwlock_init(&conn->chan_list.lock);
+
+       if (hcon->type != LE_LINK)
+               setup_timer(&conn->info_timer, l2cap_info_timeout,
+                                               (unsigned long) conn);
+
+       conn->disc_reason = 0x13;
+
+       return conn;
+}
+
+static void l2cap_conn_del(struct hci_conn *hcon, int err)
+{
+       struct l2cap_conn *conn = hcon->l2cap_data;
+       struct sock *sk;
+
+       if (!conn)
+               return;
+
+       BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
+
+       kfree_skb(conn->rx_skb);
+
+       /* Kill channels */
+       while ((sk = conn->chan_list.head)) {
+               bh_lock_sock(sk);
+               l2cap_chan_del(sk, err);
+               bh_unlock_sock(sk);
+               l2cap_sock_kill(sk);
+       }
+
+       if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
+               del_timer_sync(&conn->info_timer);
+
+       hcon->l2cap_data = NULL;
+       kfree(conn);
+}
+
+static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
+{
+       struct l2cap_chan_list *l = &conn->chan_list;
+       write_lock_bh(&l->lock);
+       __l2cap_chan_add(conn, sk, parent);
+       write_unlock_bh(&l->lock);
+}
+
+/* ---- Socket interface ---- */
+
+/* Find socket with psm and source bdaddr.
+ * Returns closest match.
+ */
+static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
+{
+       struct sock *sk = NULL, *sk1 = NULL;
+       struct hlist_node *node;
+
+       read_lock(&l2cap_sk_list.lock);
+
+       sk_for_each(sk, node, &l2cap_sk_list.head) {
+               if (state && sk->sk_state != state)
+                       continue;
+
+               if (l2cap_pi(sk)->psm == psm) {
+                       /* Exact match. */
+                       if (!bacmp(&bt_sk(sk)->src, src))
+                               break;
+
+                       /* Closest match */
+                       if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+                               sk1 = sk;
+               }
+       }
+
+       read_unlock(&l2cap_sk_list.lock);
+
+       return node ? sk : sk1;
+}
+
+int l2cap_do_connect(struct sock *sk)
+{
+       bdaddr_t *src = &bt_sk(sk)->src;
+       bdaddr_t *dst = &bt_sk(sk)->dst;
+       struct l2cap_conn *conn;
+       struct hci_conn *hcon;
+       struct hci_dev *hdev;
+       __u8 auth_type;
+       int err;
+
+       BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
+                                                       l2cap_pi(sk)->psm);
+
+       hdev = hci_get_route(dst, src);
+       if (!hdev)
+               return -EHOSTUNREACH;
+
+       hci_dev_lock_bh(hdev);
+
+       err = -ENOMEM;
+
+       auth_type = l2cap_get_auth_type(sk);
+
+       if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
+               hcon = hci_connect(hdev, LE_LINK, dst,
+                                       l2cap_pi(sk)->sec_level, auth_type);
+       else
+               hcon = hci_connect(hdev, ACL_LINK, dst,
+                                       l2cap_pi(sk)->sec_level, auth_type);
+
+       if (!hcon)
+               goto done;
+
+       conn = l2cap_conn_add(hcon, 0);
+       if (!conn) {
+               hci_conn_put(hcon);
+               goto done;
+       }
+
+       err = 0;
+
+       /* Update source addr of the socket */
+       bacpy(src, conn->src);
+
+       l2cap_chan_add(conn, sk, NULL);
+
+       sk->sk_state = BT_CONNECT;
+       l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+
+       if (hcon->state == BT_CONNECTED) {
+               if (sk->sk_type != SOCK_SEQPACKET &&
+                               sk->sk_type != SOCK_STREAM) {
+                       l2cap_sock_clear_timer(sk);
+                       if (l2cap_check_security(sk))
+                               sk->sk_state = BT_CONNECTED;
+               } else
+                       l2cap_do_start(sk);
+       }
+
+done:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+       return err;
+}
+
+int __l2cap_wait_ack(struct sock *sk)
+{
+       DECLARE_WAITQUEUE(wait, current);
+       int err = 0;
+       int timeo = HZ/5;
+
+       add_wait_queue(sk_sleep(sk), &wait);
+       while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
+               set_current_state(TASK_INTERRUPTIBLE);
+
+               if (!timeo)
+                       timeo = HZ/5;
+
+               if (signal_pending(current)) {
+                       err = sock_intr_errno(timeo);
+                       break;
+               }
+
+               release_sock(sk);
+               timeo = schedule_timeout(timeo);
+               lock_sock(sk);
+
+               err = sock_error(sk);
+               if (err)
+                       break;
+       }
+       set_current_state(TASK_RUNNING);
+       remove_wait_queue(sk_sleep(sk), &wait);
+       return err;
+}
+
+static void l2cap_monitor_timeout(unsigned long arg)
+{
+       struct sock *sk = (void *) arg;
+
+       BT_DBG("sk %p", sk);
+
+       bh_lock_sock(sk);
+       if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
+               l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
+               bh_unlock_sock(sk);
+               return;
+       }
+
+       l2cap_pi(sk)->retry_count++;
+       __mod_monitor_timer();
+
+       l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
+       bh_unlock_sock(sk);
+}
+
+static void l2cap_retrans_timeout(unsigned long arg)
+{
+       struct sock *sk = (void *) arg;
+
+       BT_DBG("sk %p", sk);
+
+       bh_lock_sock(sk);
+       l2cap_pi(sk)->retry_count = 1;
+       __mod_monitor_timer();
+
+       l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
+
+       l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
+       bh_unlock_sock(sk);
+}
+
+static void l2cap_drop_acked_frames(struct sock *sk)
+{
+       struct sk_buff *skb;
+
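+       /* Free every transmitted frame up to (but not including)
+        * expected_ack_seq and stop the retransmission timer once
+        * nothing is left unacknowledged.
+        */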
+       while ((skb = skb_peek(TX_QUEUE(sk))) &&
+                       l2cap_pi(sk)->unacked_frames) {
+               if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
+                       break;
+
+               skb = skb_dequeue(TX_QUEUE(sk));
+               kfree_skb(skb);
+
+               l2cap_pi(sk)->unacked_frames--;
+       }
+
+       if (!l2cap_pi(sk)->unacked_frames)
+               del_timer(&l2cap_pi(sk)->retrans_timer);
+}
+
+void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       struct hci_conn *hcon = pi->conn->hcon;
+       u16 flags;
+
+       BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
+
+       if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
+               flags = ACL_START_NO_FLUSH;
+       else
+               flags = ACL_START;
+
+       hci_send_acl(hcon, skb, flags);
+}
+
+void l2cap_streaming_send(struct sock *sk)
+{
+       struct sk_buff *skb;
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       u16 control, fcs;
+
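+       /* Streaming mode: frames are dequeued and sent immediately with
+        * no retransmission or acknowledgement; only TxSeq (modulo 64)
+        * and an optional FCS are filled in.
+        */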
+       while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
+               control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
+               control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
+               put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
+
+               if (pi->fcs == L2CAP_FCS_CRC16) {
+                       fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
+                       put_unaligned_le16(fcs, skb->data + skb->len - 2);
+               }
+
+               l2cap_do_send(sk, skb);
+
+               pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
+       }
+}
+
+static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       struct sk_buff *skb, *tx_skb;
+       u16 control, fcs;
+
+       skb = skb_peek(TX_QUEUE(sk));
+       if (!skb)
+               return;
+
+       do {
+               if (bt_cb(skb)->tx_seq == tx_seq)
+                       break;
+
+               if (skb_queue_is_last(TX_QUEUE(sk), skb))
+                       return;
+
+       } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
+
+       if (pi->remote_max_tx &&
+                       bt_cb(skb)->retries == pi->remote_max_tx) {
+               l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
+               return;
+       }
+
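+       /* Clone the queued frame so the original stays on the transmit
+        * queue for further retransmissions; refresh ReqSeq and the
+        * F-bit in the clone before resending.
+        */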
+       tx_skb = skb_clone(skb, GFP_ATOMIC);
+       bt_cb(skb)->retries++;
+       control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
+
+       if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
+               control |= L2CAP_CTRL_FINAL;
+               pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
+       }
+
+       control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
+                       | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
+
+       put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+
+       if (pi->fcs == L2CAP_FCS_CRC16) {
+               fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
+               put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
+       }
+
+       l2cap_do_send(sk, tx_skb);
+}
+
+int l2cap_ertm_send(struct sock *sk)
+{
+       struct sk_buff *skb, *tx_skb;
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       u16 control, fcs;
+       int nsent = 0;
+
+       if (sk->sk_state != BT_CONNECTED)
+               return -ENOTCONN;
+
+       while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
+
+               if (pi->remote_max_tx &&
+                               bt_cb(skb)->retries == pi->remote_max_tx) {
+                       l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
+                       break;
+               }
+
+               tx_skb = skb_clone(skb, GFP_ATOMIC);
+
+               bt_cb(skb)->retries++;
+
+               control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
+               control &= L2CAP_CTRL_SAR;
+
+               if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
+                       control |= L2CAP_CTRL_FINAL;
+                       pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
+               }
+               control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
+                               | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
+               put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+
+               if (pi->fcs == L2CAP_FCS_CRC16) {
+                       fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
+                       put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
+               }
+
+               l2cap_do_send(sk, tx_skb);
+
+               __mod_retrans_timer();
+
+               bt_cb(skb)->tx_seq = pi->next_tx_seq;
+               pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
+
+               pi->unacked_frames++;
+               pi->frames_sent++;
+
+               if (skb_queue_is_last(TX_QUEUE(sk), skb))
+                       sk->sk_send_head = NULL;
+               else
+                       sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
+
+               nsent++;
+       }
+
+       return nsent;
+}
+
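+/* Rewind transmission to the last acknowledged sequence number and resend
+ * everything still sitting on the transmit queue.
+ */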
+static int l2cap_retransmit_frames(struct sock *sk)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       int ret;
+
+       if (!skb_queue_empty(TX_QUEUE(sk)))
+               sk->sk_send_head = TX_QUEUE(sk)->next;
+
+       pi->next_tx_seq = pi->expected_ack_seq;
+       ret = l2cap_ertm_send(sk);
+       return ret;
+}
+
+static void l2cap_send_ack(struct l2cap_pinfo *pi)
+{
+       struct sock *sk = (struct sock *)pi;
+       u16 control = 0;
+
+       control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+
+       if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+               control |= L2CAP_SUPER_RCV_NOT_READY;
+               pi->conn_state |= L2CAP_CONN_RNR_SENT;
+               l2cap_send_sframe(pi, control);
+               return;
+       }
+
+       if (l2cap_ertm_send(sk) > 0)
+               return;
+
+       control |= L2CAP_SUPER_RCV_READY;
+       l2cap_send_sframe(pi, control);
+}
+
+static void l2cap_send_srejtail(struct sock *sk)
+{
+       struct srej_list *tail;
+       u16 control;
+
+       control = L2CAP_SUPER_SELECT_REJECT;
+       control |= L2CAP_CTRL_FINAL;
+
+       tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
+       control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+
+       l2cap_send_sframe(l2cap_pi(sk), control);
+}
+
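+/* Copy the user iovec into the skb, chaining extra buffers on frag_list
+ * (each at most conn->mtu bytes) when the payload does not fit into the
+ * first skb. Returns the number of bytes copied or a negative error.
+ */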
+static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
+{
+       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+       struct sk_buff **frag;
+       int err, sent = 0;
+
+       if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
+               return -EFAULT;
+
+       sent += count;
+       len  -= count;
+
+       /* Continuation fragments (no L2CAP header) */
+       frag = &skb_shinfo(skb)->frag_list;
+       while (len) {
+               count = min_t(unsigned int, conn->mtu, len);
+
+               *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
+               if (!*frag)
+                       return err;
+               if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
+                       return -EFAULT;
+
+               sent += count;
+               len  -= count;
+
+               frag = &(*frag)->next;
+       }
+
+       return sent;
+}
+
+struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
+{
+       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+       struct sk_buff *skb;
+       int err, count, hlen = L2CAP_HDR_SIZE + 2;
+       struct l2cap_hdr *lh;
+
+       BT_DBG("sk %p len %d", sk, (int)len);
+
+       count = min_t(unsigned int, (conn->mtu - hlen), len);
+       skb = bt_skb_send_alloc(sk, count + hlen,
+                       msg->msg_flags & MSG_DONTWAIT, &err);
+       if (!skb)
+               return ERR_PTR(err);
+
+       /* Create L2CAP header */
+       lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+       lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+       lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
+       put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
+
+       err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
+       if (unlikely(err < 0)) {
+               kfree_skb(skb);
+               return ERR_PTR(err);
+       }
+       return skb;
+}
+
+struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
+{
+       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+       struct sk_buff *skb;
+       int err, count, hlen = L2CAP_HDR_SIZE;
+       struct l2cap_hdr *lh;
+
+       BT_DBG("sk %p len %d", sk, (int)len);
+
+       count = min_t(unsigned int, (conn->mtu - hlen), len);
+       skb = bt_skb_send_alloc(sk, count + hlen,
+                       msg->msg_flags & MSG_DONTWAIT, &err);
+       if (!skb)
+               return ERR_PTR(err);
+
+       /* Create L2CAP header */
+       lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+       lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+       lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
+
+       err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
+       if (unlikely(err < 0)) {
+               kfree_skb(skb);
+               return ERR_PTR(err);
+       }
+       return skb;
+}
+
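+/* Build a single I-frame: basic L2CAP header, 16-bit control field, an
+ * optional SDU length field (only on the first segment of a segmented SDU)
+ * and, when CRC16 has been negotiated, two bytes reserved for the FCS that
+ * is filled in at transmit time.
+ */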
+struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
+{
+       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+       struct sk_buff *skb;
+       int err, count, hlen = L2CAP_HDR_SIZE + 2;
+       struct l2cap_hdr *lh;
+
+       BT_DBG("sk %p len %d", sk, (int)len);
+
+       if (!conn)
+               return ERR_PTR(-ENOTCONN);
+
+       if (sdulen)
+               hlen += 2;
+
+       if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
+               hlen += 2;
+
+       count = min_t(unsigned int, (conn->mtu - hlen), len);
+       skb = bt_skb_send_alloc(sk, count + hlen,
+                       msg->msg_flags & MSG_DONTWAIT, &err);
+       if (!skb)
+               return ERR_PTR(err);
+
+       /* Create L2CAP header */
+       lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+       lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+       lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
+       put_unaligned_le16(control, skb_put(skb, 2));
+       if (sdulen)
+               put_unaligned_le16(sdulen, skb_put(skb, 2));
+
+       err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
+       if (unlikely(err < 0)) {
+               kfree_skb(skb);
+               return ERR_PTR(err);
+       }
+
+       if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
+               put_unaligned_le16(0, skb_put(skb, 2));
+
+       bt_cb(skb)->retries = 0;
+       return skb;
+}
+
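+/* Segment an SDU larger than the remote MPS into Start/Continuation/End
+ * I-frames and append them to the transmit queue. Returns the total number
+ * of bytes queued.
+ */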
+int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       struct sk_buff *skb;
+       struct sk_buff_head sar_queue;
+       u16 control;
+       size_t size = 0;
+
+       skb_queue_head_init(&sar_queue);
+       control = L2CAP_SDU_START;
+       skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       __skb_queue_tail(&sar_queue, skb);
+       len -= pi->remote_mps;
+       size += pi->remote_mps;
+
+       while (len > 0) {
+               size_t buflen;
+
+               if (len > pi->remote_mps) {
+                       control = L2CAP_SDU_CONTINUE;
+                       buflen = pi->remote_mps;
+               } else {
+                       control = L2CAP_SDU_END;
+                       buflen = len;
+               }
+
+               skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
+               if (IS_ERR(skb)) {
+                       skb_queue_purge(&sar_queue);
+                       return PTR_ERR(skb);
+               }
+
+               __skb_queue_tail(&sar_queue, skb);
+               len -= buflen;
+               size += buflen;
+       }
+       skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
+       if (sk->sk_send_head == NULL)
+               sk->sk_send_head = sar_queue.next;
+
+       return size;
+}
+
+static void l2cap_chan_ready(struct sock *sk)
+{
+       struct sock *parent = bt_sk(sk)->parent;
+
+       BT_DBG("sk %p, parent %p", sk, parent);
+
+       l2cap_pi(sk)->conf_state = 0;
+       l2cap_sock_clear_timer(sk);
+
+       if (!parent) {
+               /* Outgoing channel.
+                * Wake up socket sleeping on connect.
+                */
+               sk->sk_state = BT_CONNECTED;
+               sk->sk_state_change(sk);
+       } else {
+               /* Incoming channel.
+                * Wake up socket sleeping on accept.
+                */
+               parent->sk_data_ready(parent, 0);
+       }
+}
+
+/* Copy frame to all raw sockets on that connection */
+static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+       struct l2cap_chan_list *l = &conn->chan_list;
+       struct sk_buff *nskb;
+       struct sock *sk;
+
+       BT_DBG("conn %p", conn);
+
+       read_lock(&l->lock);
+       for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
+               if (sk->sk_type != SOCK_RAW)
+                       continue;
+
+               /* Don't send frame to the socket it came from */
+               if (skb->sk == sk)
+                       continue;
+               nskb = skb_clone(skb, GFP_ATOMIC);
+               if (!nskb)
+                       continue;
+
+               if (sock_queue_rcv_skb(sk, nskb))
+                       kfree_skb(nskb);
+       }
+       read_unlock(&l->lock);
+}
+
+/* ---- L2CAP signalling commands ---- */
+static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
+                               u8 code, u8 ident, u16 dlen, void *data)
+{
+       struct sk_buff *skb, **frag;
+       struct l2cap_cmd_hdr *cmd;
+       struct l2cap_hdr *lh;
+       int len, count;
+
+       BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
+                       conn, code, ident, dlen);
+
+       len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
+       count = min_t(unsigned int, conn->mtu, len);
+
+       skb = bt_skb_alloc(count, GFP_ATOMIC);
+       if (!skb)
+               return NULL;
+
+       lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+       lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
+
+       if (conn->hcon->type == LE_LINK)
+               lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
+       else
+               lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
+
+       cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
+       cmd->code  = code;
+       cmd->ident = ident;
+       cmd->len   = cpu_to_le16(dlen);
+
+       if (dlen) {
+               count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
+               memcpy(skb_put(skb, count), data, count);
+               data += count;
+       }
+
+       len -= skb->len;
+
+       /* Continuation fragments (no L2CAP header) */
+       frag = &skb_shinfo(skb)->frag_list;
+       while (len) {
+               count = min_t(unsigned int, conn->mtu, len);
+
+               *frag = bt_skb_alloc(count, GFP_ATOMIC);
+               if (!*frag)
+                       goto fail;
+
+               memcpy(skb_put(*frag, count), data, count);
+
+               len  -= count;
+               data += count;
+
+               frag = &(*frag)->next;
+       }
+
+       return skb;
+
+fail:
+       kfree_skb(skb);
+       return NULL;
+}
+
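+/* Configuration options are encoded as type/length/value triplets:
+ * l2cap_get_conf_opt() decodes one option and l2cap_add_conf_opt() appends
+ * one, advancing the caller's buffer pointer in both cases.
+ */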
+static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
+{
+       struct l2cap_conf_opt *opt = *ptr;
+       int len;
+
+       len = L2CAP_CONF_OPT_SIZE + opt->len;
+       *ptr += len;
+
+       *type = opt->type;
+       *olen = opt->len;
+
+       switch (opt->len) {
+       case 1:
+               *val = *((u8 *) opt->val);
+               break;
+
+       case 2:
+               *val = get_unaligned_le16(opt->val);
+               break;
+
+       case 4:
+               *val = get_unaligned_le32(opt->val);
+               break;
+
+       default:
+               *val = (unsigned long) opt->val;
+               break;
+       }
+
+       BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
+       return len;
+}
+
+static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
+{
+       struct l2cap_conf_opt *opt = *ptr;
+
+       BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
+
+       opt->type = type;
+       opt->len  = len;
+
+       switch (len) {
+       case 1:
+               *((u8 *) opt->val)  = val;
+               break;
+
+       case 2:
+               put_unaligned_le16(val, opt->val);
+               break;
+
+       case 4:
+               put_unaligned_le32(val, opt->val);
+               break;
+
+       default:
+               memcpy(opt->val, (void *) val, len);
+               break;
+       }
+
+       *ptr += L2CAP_CONF_OPT_SIZE + len;
+}
+
+static void l2cap_ack_timeout(unsigned long arg)
+{
+       struct sock *sk = (void *) arg;
+
+       bh_lock_sock(sk);
+       l2cap_send_ack(l2cap_pi(sk));
+       bh_unlock_sock(sk);
+}
+
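+/* Reset the ERTM state for a freshly configured channel, set up the
+ * retransmission, monitor and acknowledgement timers and install the ERTM
+ * backlog receive handler.
+ */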
+static inline void l2cap_ertm_init(struct sock *sk)
+{
+       l2cap_pi(sk)->expected_ack_seq = 0;
+       l2cap_pi(sk)->unacked_frames = 0;
+       l2cap_pi(sk)->buffer_seq = 0;
+       l2cap_pi(sk)->num_acked = 0;
+       l2cap_pi(sk)->frames_sent = 0;
+
+       setup_timer(&l2cap_pi(sk)->retrans_timer,
+                       l2cap_retrans_timeout, (unsigned long) sk);
+       setup_timer(&l2cap_pi(sk)->monitor_timer,
+                       l2cap_monitor_timeout, (unsigned long) sk);
+       setup_timer(&l2cap_pi(sk)->ack_timer,
+                       l2cap_ack_timeout, (unsigned long) sk);
+
+       __skb_queue_head_init(SREJ_QUEUE(sk));
+       __skb_queue_head_init(BUSY_QUEUE(sk));
+
+       INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
+
+       sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
+}
+
+static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
+{
+       switch (mode) {
+       case L2CAP_MODE_STREAMING:
+       case L2CAP_MODE_ERTM:
+               if (l2cap_mode_supported(mode, remote_feat_mask))
+                       return mode;
+               /* fall through */
+       default:
+               return L2CAP_MODE_BASIC;
+       }
+}
+
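+/* Build our Configure Request: advertise the local MTU and, for ERTM and
+ * streaming mode, an RFC option with a PDU size capped to the ACL MTU,
+ * optionally asking to drop the FCS when both sides allow it.
+ */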
+int l2cap_build_conf_req(struct sock *sk, void *data)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       struct l2cap_conf_req *req = data;
+       struct l2cap_conf_rfc rfc = { .mode = pi->mode };
+       void *ptr = req->data;
+
+       BT_DBG("sk %p", sk);
+
+       if (pi->num_conf_req || pi->num_conf_rsp)
+               goto done;
+
+       switch (pi->mode) {
+       case L2CAP_MODE_STREAMING:
+       case L2CAP_MODE_ERTM:
+               if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
+                       break;
+
+               /* fall through */
+       default:
+               pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
+               break;
+       }
+
+done:
+       if (pi->imtu != L2CAP_DEFAULT_MTU)
+               l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
+
+       switch (pi->mode) {
+       case L2CAP_MODE_BASIC:
+               if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
+                               !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
+                       break;
+
+               rfc.mode            = L2CAP_MODE_BASIC;
+               rfc.txwin_size      = 0;
+               rfc.max_transmit    = 0;
+               rfc.retrans_timeout = 0;
+               rfc.monitor_timeout = 0;
+               rfc.max_pdu_size    = 0;
+
+               l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+                                                       (unsigned long) &rfc);
+               break;
+
+       case L2CAP_MODE_ERTM:
+               rfc.mode            = L2CAP_MODE_ERTM;
+               rfc.txwin_size      = pi->tx_win;
+               rfc.max_transmit    = pi->max_tx;
+               rfc.retrans_timeout = 0;
+               rfc.monitor_timeout = 0;
+               rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
+               if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
+                       rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
+
+               l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+                                                       (unsigned long) &rfc);
+
+               if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
+                       break;
+
+               if (pi->fcs == L2CAP_FCS_NONE ||
+                               pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
+                       pi->fcs = L2CAP_FCS_NONE;
+                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
+               }
+               break;
+
+       case L2CAP_MODE_STREAMING:
+               rfc.mode            = L2CAP_MODE_STREAMING;
+               rfc.txwin_size      = 0;
+               rfc.max_transmit    = 0;
+               rfc.retrans_timeout = 0;
+               rfc.monitor_timeout = 0;
+               rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
+               if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
+                       rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
+
+               l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+                                                       (unsigned long) &rfc);
+
+               if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
+                       break;
+
+               if (pi->fcs == L2CAP_FCS_NONE ||
+                               pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
+                       pi->fcs = L2CAP_FCS_NONE;
+                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
+               }
+               break;
+       }
+
+       req->dcid  = cpu_to_le16(pi->dcid);
+       req->flags = cpu_to_le16(0);
+
+       return ptr - data;
+}
+
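+/* Parse the peer's Configure Request, remember the options we accept and
+ * build the Configure Response in place, flagging anything unacceptable.
+ */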
+static int l2cap_parse_conf_req(struct sock *sk, void *data)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       struct l2cap_conf_rsp *rsp = data;
+       void *ptr = rsp->data;
+       void *req = pi->conf_req;
+       int len = pi->conf_len;
+       int type, hint, olen;
+       unsigned long val;
+       struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+       u16 mtu = L2CAP_DEFAULT_MTU;
+       u16 result = L2CAP_CONF_SUCCESS;
+
+       BT_DBG("sk %p", sk);
+
+       while (len >= L2CAP_CONF_OPT_SIZE) {
+               len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
+
+               hint  = type & L2CAP_CONF_HINT;
+               type &= L2CAP_CONF_MASK;
+
+               switch (type) {
+               case L2CAP_CONF_MTU:
+                       mtu = val;
+                       break;
+
+               case L2CAP_CONF_FLUSH_TO:
+                       pi->flush_to = val;
+                       break;
+
+               case L2CAP_CONF_QOS:
+                       break;
+
+               case L2CAP_CONF_RFC:
+                       if (olen == sizeof(rfc))
+                               memcpy(&rfc, (void *) val, olen);
+                       break;
+
+               case L2CAP_CONF_FCS:
+                       if (val == L2CAP_FCS_NONE)
+                               pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
+
+                       break;
+
+               default:
+                       if (hint)
+                               break;
+
+                       result = L2CAP_CONF_UNKNOWN;
+                       *((u8 *) ptr++) = type;
+                       break;
+               }
+       }
+
+       if (pi->num_conf_rsp || pi->num_conf_req > 1)
+               goto done;
+
+       switch (pi->mode) {
+       case L2CAP_MODE_STREAMING:
+       case L2CAP_MODE_ERTM:
+               if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
+                       pi->mode = l2cap_select_mode(rfc.mode,
+                                       pi->conn->feat_mask);
+                       break;
+               }
+
+               if (pi->mode != rfc.mode)
+                       return -ECONNREFUSED;
+
+               break;
+       }
+
+done:
+       if (pi->mode != rfc.mode) {
+               result = L2CAP_CONF_UNACCEPT;
+               rfc.mode = pi->mode;
+
+               if (pi->num_conf_rsp == 1)
+                       return -ECONNREFUSED;
+
+               l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+                                       sizeof(rfc), (unsigned long) &rfc);
+       }
+
+       if (result == L2CAP_CONF_SUCCESS) {
+               /* Configure output options and let the other side know
+                * which ones we don't like. */
+
+               if (mtu < L2CAP_DEFAULT_MIN_MTU)
+                       result = L2CAP_CONF_UNACCEPT;
+               else {
+                       pi->omtu = mtu;
+                       pi->conf_state |= L2CAP_CONF_MTU_DONE;
+               }
+               l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
+
+               switch (rfc.mode) {
+               case L2CAP_MODE_BASIC:
+                       pi->fcs = L2CAP_FCS_NONE;
+                       pi->conf_state |= L2CAP_CONF_MODE_DONE;
+                       break;
+
+               case L2CAP_MODE_ERTM:
+                       pi->remote_tx_win = rfc.txwin_size;
+                       pi->remote_max_tx = rfc.max_transmit;
+
+                       if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
+                               rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
+
+                       pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
+
+                       rfc.retrans_timeout =
+                               cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+                       rfc.monitor_timeout =
+                               cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+
+                       pi->conf_state |= L2CAP_CONF_MODE_DONE;
+
+                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+                                       sizeof(rfc), (unsigned long) &rfc);
+
+                       break;
+
+               case L2CAP_MODE_STREAMING:
+                       if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
+                               rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
+
+                       pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
+
+                       pi->conf_state |= L2CAP_CONF_MODE_DONE;
+
+                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+                                       sizeof(rfc), (unsigned long) &rfc);
+
+                       break;
+
+               default:
+                       result = L2CAP_CONF_UNACCEPT;
+
+                       memset(&rfc, 0, sizeof(rfc));
+                       rfc.mode = pi->mode;
+               }
+
+               if (result == L2CAP_CONF_SUCCESS)
+                       pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
+       }
+       rsp->scid   = cpu_to_le16(pi->dcid);
+       rsp->result = cpu_to_le16(result);
+       rsp->flags  = cpu_to_le16(0x0000);
+
+       return ptr - data;
+}
+
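+/* Parse a Configure Response that rejected some of our options and build a
+ * follow-up Configure Request adopting the peer's suggested values where we
+ * can live with them.
+ */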
+static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       struct l2cap_conf_req *req = data;
+       void *ptr = req->data;
+       int type, olen;
+       unsigned long val;
+       struct l2cap_conf_rfc rfc;
+
+       BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
+
+       while (len >= L2CAP_CONF_OPT_SIZE) {
+               len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+
+               switch (type) {
+               case L2CAP_CONF_MTU:
+                       if (val < L2CAP_DEFAULT_MIN_MTU) {
+                               *result = L2CAP_CONF_UNACCEPT;
+                               pi->imtu = L2CAP_DEFAULT_MIN_MTU;
+                       } else
+                               pi->imtu = val;
+                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
+                       break;
+
+               case L2CAP_CONF_FLUSH_TO:
+                       pi->flush_to = val;
+                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
+                                                       2, pi->flush_to);
+                       break;
+
+               case L2CAP_CONF_RFC:
+                       if (olen == sizeof(rfc))
+                               memcpy(&rfc, (void *)val, olen);
+
+                       if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
+                                                       rfc.mode != pi->mode)
+                               return -ECONNREFUSED;
+
+                       pi->fcs = 0;
+
+                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+                                       sizeof(rfc), (unsigned long) &rfc);
+                       break;
+               }
+       }
+
+       if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
+               return -ECONNREFUSED;
+
+       pi->mode = rfc.mode;
+
+       if (*result == L2CAP_CONF_SUCCESS) {
+               switch (rfc.mode) {
+               case L2CAP_MODE_ERTM:
+                       pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
+                       pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
+                       pi->mps    = le16_to_cpu(rfc.max_pdu_size);
+                       break;
+               case L2CAP_MODE_STREAMING:
+                       pi->mps    = le16_to_cpu(rfc.max_pdu_size);
+               }
+       }
+
+       req->dcid   = cpu_to_le16(pi->dcid);
+       req->flags  = cpu_to_le16(0x0000);
+
+       return ptr - data;
+}
+
+static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
+{
+       struct l2cap_conf_rsp *rsp = data;
+       void *ptr = rsp->data;
+
+       BT_DBG("sk %p", sk);
+
+       rsp->scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
+       rsp->result = cpu_to_le16(result);
+       rsp->flags  = cpu_to_le16(flags);
+
+       return ptr - data;
+}
+
+static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       int type, olen;
+       unsigned long val;
+       struct l2cap_conf_rfc rfc;
+
+       BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
+
+       if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
+               return;
+
+       while (len >= L2CAP_CONF_OPT_SIZE) {
+               len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+
+               switch (type) {
+               case L2CAP_CONF_RFC:
+                       if (olen == sizeof(rfc))
+                               memcpy(&rfc, (void *)val, olen);
+                       goto done;
+               }
+       }
+
+done:
+       switch (rfc.mode) {
+       case L2CAP_MODE_ERTM:
+               pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
+               pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
+               pi->mps    = le16_to_cpu(rfc.max_pdu_size);
+               break;
+       case L2CAP_MODE_STREAMING:
+               pi->mps    = le16_to_cpu(rfc.max_pdu_size);
+       }
+}
+
+static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+       struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
+
+       if (rej->reason != 0x0000)
+               return 0;
+
+       if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
+                                       cmd->ident == conn->info_ident) {
+               del_timer(&conn->info_timer);
+
+               conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
+               conn->info_ident = 0;
+
+               l2cap_conn_start(conn);
+       }
+
+       return 0;
+}
+
+static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+       struct l2cap_chan_list *list = &conn->chan_list;
+       struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
+       struct l2cap_conn_rsp rsp;
+       struct sock *parent, *sk = NULL;
+       int result, status = L2CAP_CS_NO_INFO;
+
+       u16 dcid = 0, scid = __le16_to_cpu(req->scid);
+       __le16 psm = req->psm;
+
+       BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
+
+       /* Check if we have a socket listening on this PSM */
+       parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
+       if (!parent) {
+               result = L2CAP_CR_BAD_PSM;
+               goto sendresp;
+       }
+
+       bh_lock_sock(parent);
+
+       /* Check if the ACL is secure enough (if not SDP) */
+       if (psm != cpu_to_le16(0x0001) &&
+                               !hci_conn_check_link_mode(conn->hcon)) {
+               conn->disc_reason = 0x05;
+               result = L2CAP_CR_SEC_BLOCK;
+               goto response;
+       }
+
+       result = L2CAP_CR_NO_MEM;
+
+       /* Check for backlog size */
+       if (sk_acceptq_is_full(parent)) {
+               BT_DBG("backlog full %d", parent->sk_ack_backlog);
+               goto response;
+       }
+
+       sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
+       if (!sk)
+               goto response;
+
+       write_lock_bh(&list->lock);
+
+       /* Check if we already have a channel with that DCID */
+       if (__l2cap_get_chan_by_dcid(list, scid)) {
+               write_unlock_bh(&list->lock);
+               sock_set_flag(sk, SOCK_ZAPPED);
+               l2cap_sock_kill(sk);
+               goto response;
+       }
+
+       hci_conn_hold(conn->hcon);
+
+       l2cap_sock_init(sk, parent);
+       bacpy(&bt_sk(sk)->src, conn->src);
+       bacpy(&bt_sk(sk)->dst, conn->dst);
+       l2cap_pi(sk)->psm  = psm;
+       l2cap_pi(sk)->dcid = scid;
+
+       __l2cap_chan_add(conn, sk, parent);
+       dcid = l2cap_pi(sk)->scid;
+
+       l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+
+       l2cap_pi(sk)->ident = cmd->ident;
+
+       if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
+               if (l2cap_check_security(sk)) {
+                       if (bt_sk(sk)->defer_setup) {
+                               sk->sk_state = BT_CONNECT2;
+                               result = L2CAP_CR_PEND;
+                               status = L2CAP_CS_AUTHOR_PEND;
+                               parent->sk_data_ready(parent, 0);
+                       } else {
+                               sk->sk_state = BT_CONFIG;
+                               result = L2CAP_CR_SUCCESS;
+                               status = L2CAP_CS_NO_INFO;
+                       }
+               } else {
+                       sk->sk_state = BT_CONNECT2;
+                       result = L2CAP_CR_PEND;
+                       status = L2CAP_CS_AUTHEN_PEND;
+               }
+       } else {
+               sk->sk_state = BT_CONNECT2;
+               result = L2CAP_CR_PEND;
+               status = L2CAP_CS_NO_INFO;
+       }
+
+       write_unlock_bh(&list->lock);
+
+response:
+       bh_unlock_sock(parent);
+
+sendresp:
+       rsp.scid   = cpu_to_le16(scid);
+       rsp.dcid   = cpu_to_le16(dcid);
+       rsp.result = cpu_to_le16(result);
+       rsp.status = cpu_to_le16(status);
+       l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+
+       if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
+               struct l2cap_info_req info;
+               info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
+
+               conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
+               conn->info_ident = l2cap_get_ident(conn);
+
+               mod_timer(&conn->info_timer, jiffies +
+                                       msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
+
+               l2cap_send_cmd(conn, conn->info_ident,
+                                       L2CAP_INFO_REQ, sizeof(info), &info);
+       }
+
+       if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
+                               result == L2CAP_CR_SUCCESS) {
+               u8 buf[128];
+               l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+               l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+                                       l2cap_build_conf_req(sk, buf), buf);
+               l2cap_pi(sk)->num_conf_req++;
+       }
+
+       return 0;
+}
+
+static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+       struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
+       u16 scid, dcid, result, status;
+       struct sock *sk;
+       u8 req[128];
+
+       scid   = __le16_to_cpu(rsp->scid);
+       dcid   = __le16_to_cpu(rsp->dcid);
+       result = __le16_to_cpu(rsp->result);
+       status = __le16_to_cpu(rsp->status);
+
+       BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
+
+       if (scid) {
+               sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
+               if (!sk)
+                       return -EFAULT;
+       } else {
+               sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
+               if (!sk)
+                       return -EFAULT;
+       }
+
+       switch (result) {
+       case L2CAP_CR_SUCCESS:
+               sk->sk_state = BT_CONFIG;
+               l2cap_pi(sk)->ident = 0;
+               l2cap_pi(sk)->dcid = dcid;
+               l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
+
+               if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
+                       break;
+
+               l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+
+               l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+                                       l2cap_build_conf_req(sk, req), req);
+               l2cap_pi(sk)->num_conf_req++;
+               break;
+
+       case L2CAP_CR_PEND:
+               l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
+               break;
+
+       default:
+               /* don't delete l2cap channel if sk is owned by user */
+               if (sock_owned_by_user(sk)) {
+                       sk->sk_state = BT_DISCONN;
+                       l2cap_sock_clear_timer(sk);
+                       l2cap_sock_set_timer(sk, HZ / 5);
+                       break;
+               }
+
+               l2cap_chan_del(sk, ECONNREFUSED);
+               break;
+       }
+
+       bh_unlock_sock(sk);
+       return 0;
+}
+
+static inline void set_default_fcs(struct l2cap_pinfo *pi)
+{
+       /* FCS is enabled only in ERTM or streaming mode, if one or both
+        * sides request it.
+        */
+       if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
+               pi->fcs = L2CAP_FCS_NONE;
+       else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
+               pi->fcs = L2CAP_FCS_CRC16;
+}
+
+static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
+{
+       struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
+       u16 dcid, flags;
+       u8 rsp[64];
+       struct sock *sk;
+       int len;
+
+       dcid  = __le16_to_cpu(req->dcid);
+       flags = __le16_to_cpu(req->flags);
+
+       BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
+
+       sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
+       if (!sk)
+               return -ENOENT;
+
+       if (sk->sk_state != BT_CONFIG) {
+               struct l2cap_cmd_rej rej;
+
+               rej.reason = cpu_to_le16(0x0002);
+               l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
+                               sizeof(rej), &rej);
+               goto unlock;
+       }
+
+       /* Reject if config buffer is too small. */
+       len = cmd_len - sizeof(*req);
+       if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
+               l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
+                               l2cap_build_conf_rsp(sk, rsp,
+                                       L2CAP_CONF_REJECT, flags), rsp);
+               goto unlock;
+       }
+
+       /* Store config. */
+       memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
+       l2cap_pi(sk)->conf_len += len;
+
+       if (flags & 0x0001) {
+               /* Incomplete config. Send empty response. */
+               l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
+                               l2cap_build_conf_rsp(sk, rsp,
+                                       L2CAP_CONF_SUCCESS, 0x0001), rsp);
+               goto unlock;
+       }
+
+       /* Complete config. */
+       len = l2cap_parse_conf_req(sk, rsp);
+       if (len < 0) {
+               l2cap_send_disconn_req(conn, sk, ECONNRESET);
+               goto unlock;
+       }
+
+       l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
+       l2cap_pi(sk)->num_conf_rsp++;
+
+       /* Reset config buffer. */
+       l2cap_pi(sk)->conf_len = 0;
+
+       if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
+               goto unlock;
+
+       if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
+               set_default_fcs(l2cap_pi(sk));
+
+               sk->sk_state = BT_CONNECTED;
+
+               l2cap_pi(sk)->next_tx_seq = 0;
+               l2cap_pi(sk)->expected_tx_seq = 0;
+               __skb_queue_head_init(TX_QUEUE(sk));
+               if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
+                       l2cap_ertm_init(sk);
+
+               l2cap_chan_ready(sk);
+               goto unlock;
+       }
+
+       if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
+               u8 buf[64];
+               l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+               l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+                                       l2cap_build_conf_req(sk, buf), buf);
+               l2cap_pi(sk)->num_conf_req++;
+       }
+
+unlock:
+       bh_unlock_sock(sk);
+       return 0;
+}
+
+static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+       struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
+       u16 scid, flags, result;
+       struct sock *sk;
+       int len = cmd->len - sizeof(*rsp);
+
+       scid   = __le16_to_cpu(rsp->scid);
+       flags  = __le16_to_cpu(rsp->flags);
+       result = __le16_to_cpu(rsp->result);
+
+       BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
+                       scid, flags, result);
+
+       sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
+       if (!sk)
+               return 0;
+
+       switch (result) {
+       case L2CAP_CONF_SUCCESS:
+               l2cap_conf_rfc_get(sk, rsp->data, len);
+               break;
+
+       case L2CAP_CONF_UNACCEPT:
+               if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
+                       char req[64];
+
+                       if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
+                               l2cap_send_disconn_req(conn, sk, ECONNRESET);
+                               goto done;
+                       }
+
+                       /* throw out any old stored conf requests */
+                       result = L2CAP_CONF_SUCCESS;
+                       len = l2cap_parse_conf_rsp(sk, rsp->data,
+                                                       len, req, &result);
+                       if (len < 0) {
+                               l2cap_send_disconn_req(conn, sk, ECONNRESET);
+                               goto done;
+                       }
+
+                       l2cap_send_cmd(conn, l2cap_get_ident(conn),
+                                               L2CAP_CONF_REQ, len, req);
+                       l2cap_pi(sk)->num_conf_req++;
+                       if (result != L2CAP_CONF_SUCCESS)
+                               goto done;
+                       break;
+               }
+
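+               /* fall through */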
+       default:
+               sk->sk_err = ECONNRESET;
+               l2cap_sock_set_timer(sk, HZ * 5);
+               l2cap_send_disconn_req(conn, sk, ECONNRESET);
+               goto done;
+       }
+
+       if (flags & 0x01)
+               goto done;
+
+       l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
+
+       if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
+               set_default_fcs(l2cap_pi(sk));
+
+               sk->sk_state = BT_CONNECTED;
+               l2cap_pi(sk)->next_tx_seq = 0;
+               l2cap_pi(sk)->expected_tx_seq = 0;
+               __skb_queue_head_init(TX_QUEUE(sk));
+               if (l2cap_pi(sk)->mode ==  L2CAP_MODE_ERTM)
+                       l2cap_ertm_init(sk);
+
+               l2cap_chan_ready(sk);
+       }
+
+done:
+       bh_unlock_sock(sk);
+       return 0;
+}
+
+static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+       struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
+       struct l2cap_disconn_rsp rsp;
+       u16 dcid, scid;
+       struct sock *sk;
+
+       scid = __le16_to_cpu(req->scid);
+       dcid = __le16_to_cpu(req->dcid);
+
+       BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
+
+       sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
+       if (!sk)
+               return 0;
+
+       rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+       rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+       l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
+
+       sk->sk_shutdown = SHUTDOWN_MASK;
+
+       /* don't delete l2cap channel if sk is owned by user */
+       if (sock_owned_by_user(sk)) {
+               sk->sk_state = BT_DISCONN;
+               l2cap_sock_clear_timer(sk);
+               l2cap_sock_set_timer(sk, HZ / 5);
+               bh_unlock_sock(sk);
+               return 0;
+       }
+
+       l2cap_chan_del(sk, ECONNRESET);
+       bh_unlock_sock(sk);
+
+       l2cap_sock_kill(sk);
+       return 0;
+}
+
+static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+       struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
+       u16 dcid, scid;
+       struct sock *sk;
+
+       scid = __le16_to_cpu(rsp->scid);
+       dcid = __le16_to_cpu(rsp->dcid);
+
+       BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
+
+       sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
+       if (!sk)
+               return 0;
+
+       /* don't delete l2cap channel if sk is owned by user */
+       if (sock_owned_by_user(sk)) {
+               sk->sk_state = BT_DISCONN;
+               l2cap_sock_clear_timer(sk);
+               l2cap_sock_set_timer(sk, HZ / 5);
+               bh_unlock_sock(sk);
+               return 0;
+       }
+
+       l2cap_chan_del(sk, 0);
+       bh_unlock_sock(sk);
+
+       l2cap_sock_kill(sk);
+       return 0;
+}
+
+static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+       struct l2cap_info_req *req = (struct l2cap_info_req *) data;
+       u16 type;
+
+       type = __le16_to_cpu(req->type);
+
+       BT_DBG("type 0x%4.4x", type);
+
+       if (type == L2CAP_IT_FEAT_MASK) {
+               u8 buf[8];
+               u32 feat_mask = l2cap_feat_mask;
+               struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
+               rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
+               rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
+               if (!disable_ertm)
+                       feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
+                                                        | L2CAP_FEAT_FCS;
+               put_unaligned_le32(feat_mask, rsp->data);
+               l2cap_send_cmd(conn, cmd->ident,
+                                       L2CAP_INFO_RSP, sizeof(buf), buf);
+       } else if (type == L2CAP_IT_FIXED_CHAN) {
+               u8 buf[12];
+               struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
+               rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
+               rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
+               memcpy(buf + 4, l2cap_fixed_chan, 8);
+               l2cap_send_cmd(conn, cmd->ident,
+                                       L2CAP_INFO_RSP, sizeof(buf), buf);
+       } else {
+               struct l2cap_info_rsp rsp;
+               rsp.type   = cpu_to_le16(type);
+               rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
+               l2cap_send_cmd(conn, cmd->ident,
+                                       L2CAP_INFO_RSP, sizeof(rsp), &rsp);
+       }
+
+       return 0;
+}
+
+static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+       struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
+       u16 type, result;
+
+       type   = __le16_to_cpu(rsp->type);
+       result = __le16_to_cpu(rsp->result);
+
+       BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
+
+       del_timer(&conn->info_timer);
+
+       if (result != L2CAP_IR_SUCCESS) {
+               conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
+               conn->info_ident = 0;
+
+               l2cap_conn_start(conn);
+
+               return 0;
+       }
+
+       if (type == L2CAP_IT_FEAT_MASK) {
+               conn->feat_mask = get_unaligned_le32(rsp->data);
+
+               if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
+                       struct l2cap_info_req req;
+                       req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
+
+                       conn->info_ident = l2cap_get_ident(conn);
+
+                       l2cap_send_cmd(conn, conn->info_ident,
+                                       L2CAP_INFO_REQ, sizeof(req), &req);
+               } else {
+                       conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
+                       conn->info_ident = 0;
+
+                       l2cap_conn_start(conn);
+               }
+       } else if (type == L2CAP_IT_FIXED_CHAN) {
+               conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
+               conn->info_ident = 0;
+
+               l2cap_conn_start(conn);
+       }
+
+       return 0;
+}
+
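+/* Validate LE connection parameters: connection interval between 7.5 ms and
+ * 4 s, supervision timeout between 100 ms and 32 s, and a slave latency
+ * small enough that the supervision timeout still spans latency + 1
+ * connection intervals.
+ */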
+static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
+                                                       u16 to_multiplier)
+{
+       u16 max_latency;
+
+       if (min > max || min < 6 || max > 3200)
+               return -EINVAL;
+
+       if (to_multiplier < 10 || to_multiplier > 3200)
+               return -EINVAL;
+
+       if (max >= to_multiplier * 8)
+               return -EINVAL;
+
+       max_latency = (to_multiplier * 8 / max) - 1;
+       if (latency > 499 || latency > max_latency)
+               return -EINVAL;
+
+       return 0;
+}
+
+static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
+                                       struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+       struct hci_conn *hcon = conn->hcon;
+       struct l2cap_conn_param_update_req *req;
+       struct l2cap_conn_param_update_rsp rsp;
+       u16 min, max, latency, to_multiplier, cmd_len;
+       int err;
+
+       if (!(hcon->link_mode & HCI_LM_MASTER))
+               return -EINVAL;
+
+       cmd_len = __le16_to_cpu(cmd->len);
+       if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
+               return -EPROTO;
+
+       req = (struct l2cap_conn_param_update_req *) data;
+       min             = __le16_to_cpu(req->min);
+       max             = __le16_to_cpu(req->max);
+       latency         = __le16_to_cpu(req->latency);
+       to_multiplier   = __le16_to_cpu(req->to_multiplier);
+
+       BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
+                                               min, max, latency, to_multiplier);
+
+       memset(&rsp, 0, sizeof(rsp));
+
+       err = l2cap_check_conn_param(min, max, latency, to_multiplier);
+       if (err)
+               rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
+       else
+               rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
+
+       l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
+                                                       sizeof(rsp), &rsp);
+
+       if (!err)
+               hci_le_conn_update(hcon, min, max, latency, to_multiplier);
+
+       return 0;
+}
+
+static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
+                       struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
+{
+       int err = 0;
+
+       switch (cmd->code) {
+       case L2CAP_COMMAND_REJ:
+               l2cap_command_rej(conn, cmd, data);
+               break;
+
+       case L2CAP_CONN_REQ:
+               err = l2cap_connect_req(conn, cmd, data);
+               break;
+
+       case L2CAP_CONN_RSP:
+               err = l2cap_connect_rsp(conn, cmd, data);
+               break;
+
+       case L2CAP_CONF_REQ:
+               err = l2cap_config_req(conn, cmd, cmd_len, data);
+               break;
+
+       case L2CAP_CONF_RSP:
+               err = l2cap_config_rsp(conn, cmd, data);
+               break;
+
+       case L2CAP_DISCONN_REQ:
+               err = l2cap_disconnect_req(conn, cmd, data);
+               break;
+
+       case L2CAP_DISCONN_RSP:
+               err = l2cap_disconnect_rsp(conn, cmd, data);
+               break;
+
+       case L2CAP_ECHO_REQ:
+               l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
+               break;
+
+       case L2CAP_ECHO_RSP:
+               break;
+
+       case L2CAP_INFO_REQ:
+               err = l2cap_information_req(conn, cmd, data);
+               break;
+
+       case L2CAP_INFO_RSP:
+               err = l2cap_information_rsp(conn, cmd, data);
+               break;
+
+       default:
+               BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
+               err = -EINVAL;
+               break;
+       }
+
+       return err;
+}
+
+static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
+                                       struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+       switch (cmd->code) {
+       case L2CAP_COMMAND_REJ:
+               return 0;
+
+       case L2CAP_CONN_PARAM_UPDATE_REQ:
+               return l2cap_conn_param_update_req(conn, cmd, data);
+
+       case L2CAP_CONN_PARAM_UPDATE_RSP:
+               return 0;
+
+       default:
+               BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
+               return -EINVAL;
+       }
+}
+
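+/* The signalling PDU is first mirrored to any raw sockets, then split into
+ * individual commands and dispatched to the BR/EDR or LE handlers; failures
+ * are answered with a Command Reject.
+ */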
+static inline void l2cap_sig_channel(struct l2cap_conn *conn,
+                                                       struct sk_buff *skb)
+{
+       u8 *data = skb->data;
+       int len = skb->len;
+       struct l2cap_cmd_hdr cmd;
+       int err;
+
+       l2cap_raw_recv(conn, skb);
+
+       while (len >= L2CAP_CMD_HDR_SIZE) {
+               u16 cmd_len;
+               memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
+               data += L2CAP_CMD_HDR_SIZE;
+               len  -= L2CAP_CMD_HDR_SIZE;
+
+               cmd_len = le16_to_cpu(cmd.len);
+
+               BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
+
+               if (cmd_len > len || !cmd.ident) {
+                       BT_DBG("corrupted command");
+                       break;
+               }
+
+               if (conn->hcon->type == LE_LINK)
+                       err = l2cap_le_sig_cmd(conn, &cmd, data);
+               else
+                       err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
+
+               if (err) {
+                       struct l2cap_cmd_rej rej;
+                       BT_DBG("error %d", err);
+
+                       /* FIXME: Map err to a valid reason */
+                       rej.reason = cpu_to_le16(0);
+                       l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
+               }
+
+               data += cmd_len;
+               len  -= cmd_len;
+       }
+
+       kfree_skb(skb);
+}
+
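+/* When CRC16 is in use, strip the trailing FCS from the frame and verify it
+ * against a CRC computed over the L2CAP header, control field and payload.
+ */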
+static int l2cap_check_fcs(struct l2cap_pinfo *pi,  struct sk_buff *skb)
+{
+       u16 our_fcs, rcv_fcs;
+       int hdr_size = L2CAP_HDR_SIZE + 2;
+
+       if (pi->fcs == L2CAP_FCS_CRC16) {
+               skb_trim(skb, skb->len - 2);
+               rcv_fcs = get_unaligned_le16(skb->data + skb->len);
+               our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
+
+               if (our_fcs != rcv_fcs)
+                       return -EBADMSG;
+       }
+       return 0;
+}
+
+static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       u16 control = 0;
+
+       pi->frames_sent = 0;
+
+       control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+
+       if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+               control |= L2CAP_SUPER_RCV_NOT_READY;
+               l2cap_send_sframe(pi, control);
+               pi->conn_state |= L2CAP_CONN_RNR_SENT;
+       }
+
+       if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
+               l2cap_retransmit_frames(sk);
+
+       l2cap_ertm_send(sk);
+
+       if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
+                       pi->frames_sent == 0) {
+               control |= L2CAP_SUPER_RCV_READY;
+               l2cap_send_sframe(pi, control);
+       }
+}
+
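+/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the queue
+ * ordered by TxSeq distance from the current buffer_seq (modulo 64) and
+ * rejecting duplicates.
+ */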
+static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
+{
+       struct sk_buff *next_skb;
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       int tx_seq_offset, next_tx_seq_offset;
+
+       bt_cb(skb)->tx_seq = tx_seq;
+       bt_cb(skb)->sar = sar;
+
+       next_skb = skb_peek(SREJ_QUEUE(sk));
+       if (!next_skb) {
+               __skb_queue_tail(SREJ_QUEUE(sk), skb);
+               return 0;
+       }
+
+       tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
+       if (tx_seq_offset < 0)
+               tx_seq_offset += 64;
+
+       do {
+               if (bt_cb(next_skb)->tx_seq == tx_seq)
+                       return -EINVAL;
+
+               next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
+                                               pi->buffer_seq) % 64;
+               if (next_tx_seq_offset < 0)
+                       next_tx_seq_offset += 64;
+
+               if (next_tx_seq_offset > tx_seq_offset) {
+                       __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
+                       return 0;
+               }
+
+               if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
+                       break;
+
+       } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
+
+       __skb_queue_tail(SREJ_QUEUE(sk), skb);
+
+       return 0;
+}
+
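+/* Reassemble segmented SDUs according to the SAR bits of the control field
+ * and deliver complete SDUs to the socket receive queue; protocol violations
+ * tear the channel down.
+ */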
+static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       struct sk_buff *_skb;
+       int err;
+
+       switch (control & L2CAP_CTRL_SAR) {
+       case L2CAP_SDU_UNSEGMENTED:
+               if (pi->conn_state & L2CAP_CONN_SAR_SDU)
+                       goto drop;
+
+               err = sock_queue_rcv_skb(sk, skb);
+               if (!err)
+                       return err;
+
+               break;
+
+       case L2CAP_SDU_START:
+               if (pi->conn_state & L2CAP_CONN_SAR_SDU)
+                       goto drop;
+
+               pi->sdu_len = get_unaligned_le16(skb->data);
+
+               if (pi->sdu_len > pi->imtu)
+                       goto disconnect;
+
+               pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
+               if (!pi->sdu)
+                       return -ENOMEM;
+
+               /* Pull the 2-byte SDU length field only after the allocation
+                * succeeds: under a local busy condition this frame can be
+                * reprocessed and the pull must happen exactly once. */
+               skb_pull(skb, 2);
+
+               memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+
+               pi->conn_state |= L2CAP_CONN_SAR_SDU;
+               pi->partial_sdu_len = skb->len;
+               break;
+
+       case L2CAP_SDU_CONTINUE:
+               if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
+                       goto disconnect;
+
+               if (!pi->sdu)
+                       goto disconnect;
+
+               pi->partial_sdu_len += skb->len;
+               if (pi->partial_sdu_len > pi->sdu_len)
+                       goto drop;
+
+               memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+
+               break;
+
+       case L2CAP_SDU_END:
+               if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
+                       goto disconnect;
+
+               if (!pi->sdu)
+                       goto disconnect;
+
+               if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
+                       pi->partial_sdu_len += skb->len;
+
+                       if (pi->partial_sdu_len > pi->imtu)
+                               goto drop;
+
+                       if (pi->partial_sdu_len != pi->sdu_len)
+                               goto drop;
+
+                       memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+               }
+
+               _skb = skb_clone(pi->sdu, GFP_ATOMIC);
+               if (!_skb) {
+                       pi->conn_state |= L2CAP_CONN_SAR_RETRY;
+                       return -ENOMEM;
+               }
+
+               err = sock_queue_rcv_skb(sk, _skb);
+               if (err < 0) {
+                       kfree_skb(_skb);
+                       pi->conn_state |= L2CAP_CONN_SAR_RETRY;
+                       return err;
+               }
+
+               pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
+               pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
+
+               kfree_skb(pi->sdu);
+               break;
+       }
+
+       kfree_skb(skb);
+       return 0;
+
+drop:
+       kfree_skb(pi->sdu);
+       pi->sdu = NULL;
+
+disconnect:
+       l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+       kfree_skb(skb);
+       return 0;
+}
+
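+/* Try to leave the local busy state: flush frames parked on BUSY_QUEUE
+ * back into the reassembly code and, if an RNR was sent earlier, poll
+ * the remote side with an RR frame before clearing the busy flags.
+ */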
+static int l2cap_try_push_rx_skb(struct sock *sk)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       struct sk_buff *skb;
+       u16 control;
+       int err;
+
+       while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
+               control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
+               err = l2cap_ertm_reassembly_sdu(sk, skb, control);
+               if (err < 0) {
+                       skb_queue_head(BUSY_QUEUE(sk), skb);
+                       return -EBUSY;
+               }
+
+               pi->buffer_seq = (pi->buffer_seq + 1) % 64;
+       }
+
+       if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
+               goto done;
+
+       control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+       control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
+       l2cap_send_sframe(pi, control);
+       l2cap_pi(sk)->retry_count = 1;
+
+       del_timer(&pi->retrans_timer);
+       __mod_monitor_timer();
+
+       l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
+
+done:
+       pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
+       pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
+
+       BT_DBG("sk %p, Exit local busy", sk);
+
+       return 0;
+}
+
+static void l2cap_busy_work(struct work_struct *work)
+{
+       DECLARE_WAITQUEUE(wait, current);
+       struct l2cap_pinfo *pi =
+               container_of(work, struct l2cap_pinfo, busy_work);
+       struct sock *sk = (struct sock *)pi;
+       int n_tries = 0, timeo = HZ/5, err;
+       struct sk_buff *skb;
+
+       lock_sock(sk);
+
+       add_wait_queue(sk_sleep(sk), &wait);
+       while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
+               set_current_state(TASK_INTERRUPTIBLE);
+
+               if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
+                       err = -EBUSY;
+                       l2cap_send_disconn_req(pi->conn, sk, EBUSY);
+                       break;
+               }
+
+               if (!timeo)
+                       timeo = HZ/5;
+
+               if (signal_pending(current)) {
+                       err = sock_intr_errno(timeo);
+                       break;
+               }
+
+               release_sock(sk);
+               timeo = schedule_timeout(timeo);
+               lock_sock(sk);
+
+               err = sock_error(sk);
+               if (err)
+                       break;
+
+               if (l2cap_try_push_rx_skb(sk) == 0)
+                       break;
+       }
+
+       set_current_state(TASK_RUNNING);
+       remove_wait_queue(sk_sleep(sk), &wait);
+
+       release_sock(sk);
+}
+
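+/* Deliver an in-sequence I-frame to the reassembly code. If delivery
+ * fails (e.g. because of an allocation failure) the frame is parked on
+ * BUSY_QUEUE, an RNR is sent to signal the local busy condition and the
+ * busy work is scheduled to retry later.
+ */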
+static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       int sctrl, err;
+
+       if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+               bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
+               __skb_queue_tail(BUSY_QUEUE(sk), skb);
+               return l2cap_try_push_rx_skb(sk);
+       }
+
+       err = l2cap_ertm_reassembly_sdu(sk, skb, control);
+       if (err >= 0) {
+               pi->buffer_seq = (pi->buffer_seq + 1) % 64;
+               return err;
+       }
+
+       /* Busy Condition */
+       BT_DBG("sk %p, Enter local busy", sk);
+
+       pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
+       bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
+       __skb_queue_tail(BUSY_QUEUE(sk), skb);
+
+       sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+       sctrl |= L2CAP_SUPER_RCV_NOT_READY;
+       l2cap_send_sframe(pi, sctrl);
+
+       pi->conn_state |= L2CAP_CONN_RNR_SENT;
+
+       del_timer(&pi->ack_timer);
+
+       queue_work(_busy_wq, &pi->busy_work);
+
+       return err;
+}
+
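+/* Streaming mode counterpart of the ERTM reassembly: segments are glued
+ * together in the same way, but errors never disconnect the channel;
+ * oversized or inconsistent SDUs are simply discarded (data loss is
+ * possible in Streaming Mode, see the TODO below).
+ */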
+static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       struct sk_buff *_skb;
+       int err = -EINVAL;
+
+       /*
+        * TODO: Notify userspace if data is lost while in Streaming Mode.
+        */
+
+       switch (control & L2CAP_CTRL_SAR) {
+       case L2CAP_SDU_UNSEGMENTED:
+               if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
+                       kfree_skb(pi->sdu);
+                       break;
+               }
+
+               err = sock_queue_rcv_skb(sk, skb);
+               if (!err)
+                       return 0;
+
+               break;
+
+       case L2CAP_SDU_START:
+               if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
+                       kfree_skb(pi->sdu);
+                       break;
+               }
+
+               pi->sdu_len = get_unaligned_le16(skb->data);
+               skb_pull(skb, 2);
+
+               if (pi->sdu_len > pi->imtu) {
+                       err = -EMSGSIZE;
+                       break;
+               }
+
+               pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
+               if (!pi->sdu) {
+                       err = -ENOMEM;
+                       break;
+               }
+
+               memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+
+               pi->conn_state |= L2CAP_CONN_SAR_SDU;
+               pi->partial_sdu_len = skb->len;
+               err = 0;
+               break;
+
+       case L2CAP_SDU_CONTINUE:
+               if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
+                       break;
+
+               memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+
+               pi->partial_sdu_len += skb->len;
+               if (pi->partial_sdu_len > pi->sdu_len)
+                       kfree_skb(pi->sdu);
+               else
+                       err = 0;
+
+               break;
+
+       case L2CAP_SDU_END:
+               if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
+                       break;
+
+               memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+
+               pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
+               pi->partial_sdu_len += skb->len;
+
+               if (pi->partial_sdu_len > pi->imtu)
+                       goto drop;
+
+               if (pi->partial_sdu_len == pi->sdu_len) {
+                       _skb = skb_clone(pi->sdu, GFP_ATOMIC);
+                       if (_skb) {
+                               err = sock_queue_rcv_skb(sk, _skb);
+                               if (err < 0)
+                                       kfree_skb(_skb);
+                       }
+               }
+               err = 0;
+
+drop:
+               kfree_skb(pi->sdu);
+               break;
+       }
+
+       kfree_skb(skb);
+       return err;
+}
+
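+/* Once a missing frame has arrived, drain consecutive frames waiting on
+ * the SREJ queue back into the reassembly code until the next gap.
+ */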
+static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
+{
+       struct sk_buff *skb;
+       u16 control;
+
+       while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
+               if (bt_cb(skb)->tx_seq != tx_seq)
+                       break;
+
+               skb = skb_dequeue(SREJ_QUEUE(sk));
+               control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
+               l2cap_ertm_reassembly_sdu(sk, skb, control);
+               l2cap_pi(sk)->buffer_seq_srej =
+                       (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
+               tx_seq = (tx_seq + 1) % 64;
+       }
+}
+
+static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       struct srej_list *l, *tmp;
+       u16 control;
+
+       list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
+               if (l->tx_seq == tx_seq) {
+                       list_del(&l->list);
+                       kfree(l);
+                       return;
+               }
+               control = L2CAP_SUPER_SELECT_REJECT;
+               control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+               l2cap_send_sframe(pi, control);
+               list_del(&l->list);
+               list_add_tail(&l->list, SREJ_LIST(sk));
+       }
+}
+
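+/* Send an SREJ for every tx_seq missing between the expected sequence
+ * number and the one just received, remembering each requested frame on
+ * SREJ_LIST so the gap can be tracked until it is filled.
+ */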
+static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       struct srej_list *new;
+       u16 control;
+
+       while (tx_seq != pi->expected_tx_seq) {
+               control = L2CAP_SUPER_SELECT_REJECT;
+               control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+               l2cap_send_sframe(pi, control);
+
+               new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
+               new->tx_seq = pi->expected_tx_seq;
+               pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
+               list_add_tail(&new->list, SREJ_LIST(sk));
+       }
+       pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
+}
+
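+/* ERTM I-frame receive path: drop frames acknowledged by req_seq,
+ * deliver in-sequence frames and buffer out-of-sequence ones on the
+ * SREJ queue while selective reject recovery is running. Frames whose
+ * sequence number falls outside the window disconnect the channel,
+ * duplicates are dropped.
+ */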
+static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       u8 tx_seq = __get_txseq(rx_control);
+       u8 req_seq = __get_reqseq(rx_control);
+       u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
+       int tx_seq_offset, expected_tx_seq_offset;
+       int num_to_ack = (pi->tx_win/6) + 1;
+       int err = 0;
+
+       BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
+                                                               rx_control);
+
+       if (L2CAP_CTRL_FINAL & rx_control &&
+                       l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
+               del_timer(&pi->monitor_timer);
+               if (pi->unacked_frames > 0)
+                       __mod_retrans_timer();
+               pi->conn_state &= ~L2CAP_CONN_WAIT_F;
+       }
+
+       pi->expected_ack_seq = req_seq;
+       l2cap_drop_acked_frames(sk);
+
+       if (tx_seq == pi->expected_tx_seq)
+               goto expected;
+
+       tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
+       if (tx_seq_offset < 0)
+               tx_seq_offset += 64;
+
+       /* invalid tx_seq */
+       if (tx_seq_offset >= pi->tx_win) {
+               l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+               goto drop;
+       }
+
+       if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
+               goto drop;
+
+       if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
+               struct srej_list *first;
+
+               first = list_first_entry(SREJ_LIST(sk),
+                               struct srej_list, list);
+               if (tx_seq == first->tx_seq) {
+                       l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
+                       l2cap_check_srej_gap(sk, tx_seq);
+
+                       list_del(&first->list);
+                       kfree(first);
+
+                       if (list_empty(SREJ_LIST(sk))) {
+                               pi->buffer_seq = pi->buffer_seq_srej;
+                               pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
+                               l2cap_send_ack(pi);
+                               BT_DBG("sk %p, Exit SREJ_SENT", sk);
+                       }
+               } else {
+                       struct srej_list *l;
+
+                       /* duplicated tx_seq */
+                       if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
+                               goto drop;
+
+                       list_for_each_entry(l, SREJ_LIST(sk), list) {
+                               if (l->tx_seq == tx_seq) {
+                                       l2cap_resend_srejframe(sk, tx_seq);
+                                       return 0;
+                               }
+                       }
+                       l2cap_send_srejframe(sk, tx_seq);
+               }
+       } else {
+               expected_tx_seq_offset =
+                       (pi->expected_tx_seq - pi->buffer_seq) % 64;
+               if (expected_tx_seq_offset < 0)
+                       expected_tx_seq_offset += 64;
+
+               /* duplicated tx_seq */
+               if (tx_seq_offset < expected_tx_seq_offset)
+                       goto drop;
+
+               pi->conn_state |= L2CAP_CONN_SREJ_SENT;
+
+               BT_DBG("sk %p, Enter SREJ", sk);
+
+               INIT_LIST_HEAD(SREJ_LIST(sk));
+               pi->buffer_seq_srej = pi->buffer_seq;
+
+               __skb_queue_head_init(SREJ_QUEUE(sk));
+               __skb_queue_head_init(BUSY_QUEUE(sk));
+               l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
+
+               pi->conn_state |= L2CAP_CONN_SEND_PBIT;
+
+               l2cap_send_srejframe(sk, tx_seq);
+
+               del_timer(&pi->ack_timer);
+       }
+       return 0;
+
+expected:
+       pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
+
+       if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
+               bt_cb(skb)->tx_seq = tx_seq;
+               bt_cb(skb)->sar = sar;
+               __skb_queue_tail(SREJ_QUEUE(sk), skb);
+               return 0;
+       }
+
+       err = l2cap_push_rx_skb(sk, skb, rx_control);
+       if (err < 0)
+               return 0;
+
+       if (rx_control & L2CAP_CTRL_FINAL) {
+               if (pi->conn_state & L2CAP_CONN_REJ_ACT)
+                       pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
+               else
+                       l2cap_retransmit_frames(sk);
+       }
+
+       __mod_ack_timer();
+
+       pi->num_acked = (pi->num_acked + 1) % num_to_ack;
+       if (pi->num_acked == num_to_ack - 1)
+               l2cap_send_ack(pi);
+
+       return 0;
+
+drop:
+       kfree_skb(skb);
+       return 0;
+}
+
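+/* Handle a Receiver Ready S-frame: drop acknowledged frames and then,
+ * depending on the P/F bits and the SREJ state, answer a poll, finish a
+ * retransmission exchange or resume sending.
+ */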
+static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+
+       BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
+                                               rx_control);
+
+       pi->expected_ack_seq = __get_reqseq(rx_control);
+       l2cap_drop_acked_frames(sk);
+
+       if (rx_control & L2CAP_CTRL_POLL) {
+               pi->conn_state |= L2CAP_CONN_SEND_FBIT;
+               if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
+                       if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+                                       (pi->unacked_frames > 0))
+                               __mod_retrans_timer();
+
+                       pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+                       l2cap_send_srejtail(sk);
+               } else {
+                       l2cap_send_i_or_rr_or_rnr(sk);
+               }
+
+       } else if (rx_control & L2CAP_CTRL_FINAL) {
+               pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+
+               if (pi->conn_state & L2CAP_CONN_REJ_ACT)
+                       pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
+               else
+                       l2cap_retransmit_frames(sk);
+
+       } else {
+               if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+                               (pi->unacked_frames > 0))
+                       __mod_retrans_timer();
+
+               pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+               if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
+                       l2cap_send_ack(pi);
+               else
+                       l2cap_ertm_send(sk);
+       }
+}
+
+static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       u8 tx_seq = __get_reqseq(rx_control);
+
+       BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
+
+       pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+
+       pi->expected_ack_seq = tx_seq;
+       l2cap_drop_acked_frames(sk);
+
+       if (rx_control & L2CAP_CTRL_FINAL) {
+               if (pi->conn_state & L2CAP_CONN_REJ_ACT)
+                       pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
+               else
+                       l2cap_retransmit_frames(sk);
+       } else {
+               l2cap_retransmit_frames(sk);
+
+               if (pi->conn_state & L2CAP_CONN_WAIT_F)
+                       pi->conn_state |= L2CAP_CONN_REJ_ACT;
+       }
+}
+
+static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       u8 tx_seq = __get_reqseq(rx_control);
+
+       BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
+
+       pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+
+       if (rx_control & L2CAP_CTRL_POLL) {
+               pi->expected_ack_seq = tx_seq;
+               l2cap_drop_acked_frames(sk);
+
+               pi->conn_state |= L2CAP_CONN_SEND_FBIT;
+               l2cap_retransmit_one_frame(sk, tx_seq);
+
+               l2cap_ertm_send(sk);
+
+               if (pi->conn_state & L2CAP_CONN_WAIT_F) {
+                       pi->srej_save_reqseq = tx_seq;
+                       pi->conn_state |= L2CAP_CONN_SREJ_ACT;
+               }
+       } else if (rx_control & L2CAP_CTRL_FINAL) {
+               if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
+                               pi->srej_save_reqseq == tx_seq)
+                       pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
+               else
+                       l2cap_retransmit_one_frame(sk, tx_seq);
+       } else {
+               l2cap_retransmit_one_frame(sk, tx_seq);
+               if (pi->conn_state & L2CAP_CONN_WAIT_F) {
+                       pi->srej_save_reqseq = tx_seq;
+                       pi->conn_state |= L2CAP_CONN_SREJ_ACT;
+               }
+       }
+}
+
+static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       u8 tx_seq = __get_reqseq(rx_control);
+
+       BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
+
+       pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
+       pi->expected_ack_seq = tx_seq;
+       l2cap_drop_acked_frames(sk);
+
+       if (rx_control & L2CAP_CTRL_POLL)
+               pi->conn_state |= L2CAP_CONN_SEND_FBIT;
+
+       if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
+               del_timer(&pi->retrans_timer);
+               if (rx_control & L2CAP_CTRL_POLL)
+                       l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
+               return;
+       }
+
+       if (rx_control & L2CAP_CTRL_POLL)
+               l2cap_send_srejtail(sk);
+       else
+               l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
+}
+
+static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
+{
+       BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
+
+       if (L2CAP_CTRL_FINAL & rx_control &&
+                       l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
+               del_timer(&l2cap_pi(sk)->monitor_timer);
+               if (l2cap_pi(sk)->unacked_frames > 0)
+                       __mod_retrans_timer();
+               l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
+       }
+
+       switch (rx_control & L2CAP_CTRL_SUPERVISE) {
+       case L2CAP_SUPER_RCV_READY:
+               l2cap_data_channel_rrframe(sk, rx_control);
+               break;
+
+       case L2CAP_SUPER_REJECT:
+               l2cap_data_channel_rejframe(sk, rx_control);
+               break;
+
+       case L2CAP_SUPER_SELECT_REJECT:
+               l2cap_data_channel_srejframe(sk, rx_control);
+               break;
+
+       case L2CAP_SUPER_RCV_NOT_READY:
+               l2cap_data_channel_rnrframe(sk, rx_control);
+               break;
+       }
+
+       kfree_skb(skb);
+       return 0;
+}
+
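+/* Entry point for ERTM PDUs: strip and validate the control field, FCS
+ * and req_seq, then dispatch the PDU to the I-frame or S-frame handler.
+ * FCS failures are silently dropped, while invalid lengths or req_seq
+ * values tear the channel down.
+ */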
+static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       u16 control;
+       u8 req_seq;
+       int len, next_tx_seq_offset, req_seq_offset;
+
+       control = get_unaligned_le16(skb->data);
+       skb_pull(skb, 2);
+       len = skb->len;
+
+       /*
+        * We can just drop the corrupted I-frame here.
+        * The receive side will treat it as missing and start the proper
+        * recovery procedure, asking the remote side for retransmission.
+        */
+       if (l2cap_check_fcs(pi, skb))
+               goto drop;
+
+       if (__is_sar_start(control) && __is_iframe(control))
+               len -= 2;
+
+       if (pi->fcs == L2CAP_FCS_CRC16)
+               len -= 2;
+
+       if (len > pi->mps) {
+               l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+               goto drop;
+       }
+
+       req_seq = __get_reqseq(control);
+       req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
+       if (req_seq_offset < 0)
+               req_seq_offset += 64;
+
+       next_tx_seq_offset =
+               (pi->next_tx_seq - pi->expected_ack_seq) % 64;
+       if (next_tx_seq_offset < 0)
+               next_tx_seq_offset += 64;
+
+       /* check for invalid req-seq */
+       if (req_seq_offset > next_tx_seq_offset) {
+               l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+               goto drop;
+       }
+
+       if (__is_iframe(control)) {
+               if (len < 0) {
+                       l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+                       goto drop;
+               }
+
+               l2cap_data_channel_iframe(sk, control, skb);
+       } else {
+               if (len != 0) {
+                       BT_ERR("S-frame with non-zero payload length %d", len);
+                       l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+                       goto drop;
+               }
+
+               l2cap_data_channel_sframe(sk, control, skb);
+       }
+
+       return 0;
+
+drop:
+       kfree_skb(skb);
+       return 0;
+}
+
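+/* Dispatch an incoming PDU on a connection-oriented channel according
+ * to the channel mode: Basic mode frames go straight to the socket,
+ * ERTM frames run through l2cap_ertm_data_rcv() (or the socket backlog
+ * if the socket is owned by user context) and Streaming mode frames are
+ * reassembled with best-effort sequencing.
+ */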
+static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
+{
+       struct sock *sk;
+       struct l2cap_pinfo *pi;
+       u16 control;
+       u8 tx_seq;
+       int len;
+
+       sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
+       if (!sk) {
+               BT_DBG("unknown cid 0x%4.4x", cid);
+               goto drop;
+       }
+
+       pi = l2cap_pi(sk);
+
+       BT_DBG("sk %p, len %d", sk, skb->len);
+
+       if (sk->sk_state != BT_CONNECTED)
+               goto drop;
+
+       switch (pi->mode) {
+       case L2CAP_MODE_BASIC:
+               /* If the socket recv buffer overflows we drop data here,
+                * which is *bad* because L2CAP has to be reliable.
+                * But we don't have any other choice: L2CAP doesn't
+                * provide a flow control mechanism. */
+
+               if (pi->imtu < skb->len)
+                       goto drop;
+
+               if (!sock_queue_rcv_skb(sk, skb))
+                       goto done;
+               break;
+
+       case L2CAP_MODE_ERTM:
+               if (!sock_owned_by_user(sk)) {
+                       l2cap_ertm_data_rcv(sk, skb);
+               } else {
+                       if (sk_add_backlog(sk, skb))
+                               goto drop;
+               }
+
+               goto done;
+
+       case L2CAP_MODE_STREAMING:
+               control = get_unaligned_le16(skb->data);
+               skb_pull(skb, 2);
+               len = skb->len;
+
+               if (l2cap_check_fcs(pi, skb))
+                       goto drop;
+
+               if (__is_sar_start(control))
+                       len -= 2;
+
+               if (pi->fcs == L2CAP_FCS_CRC16)
+                       len -= 2;
+
+               if (len > pi->mps || len < 0 || __is_sframe(control))
+                       goto drop;
+
+               tx_seq = __get_txseq(control);
+
+               if (pi->expected_tx_seq == tx_seq)
+                       pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
+               else
+                       pi->expected_tx_seq = (tx_seq + 1) % 64;
+
+               l2cap_streaming_reassembly_sdu(sk, skb, control);
+
+               goto done;
+
+       default:
+               BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
+               break;
+       }
+
+drop:
+       kfree_skb(skb);
+
+done:
+       if (sk)
+               bh_unlock_sock(sk);
+
+       return 0;
+}
+
+static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
+{
+       struct sock *sk;
+
+       sk = l2cap_get_sock_by_psm(0, psm, conn->src);
+       if (!sk)
+               goto drop;
+
+       bh_lock_sock(sk);
+
+       BT_DBG("sk %p, len %d", sk, skb->len);
+
+       if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
+               goto drop;
+
+       if (l2cap_pi(sk)->imtu < skb->len)
+               goto drop;
+
+       if (!sock_queue_rcv_skb(sk, skb))
+               goto done;
+
+drop:
+       kfree_skb(skb);
+
+done:
+       if (sk)
+               bh_unlock_sock(sk);
+       return 0;
+}
+
+static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+       struct l2cap_hdr *lh = (void *) skb->data;
+       u16 cid, len;
+       __le16 psm;
+
+       skb_pull(skb, L2CAP_HDR_SIZE);
+       cid = __le16_to_cpu(lh->cid);
+       len = __le16_to_cpu(lh->len);
+
+       if (len != skb->len) {
+               kfree_skb(skb);
+               return;
+       }
+
+       BT_DBG("len %d, cid 0x%4.4x", len, cid);
+
+       switch (cid) {
+       case L2CAP_CID_LE_SIGNALING:
+       case L2CAP_CID_SIGNALING:
+               l2cap_sig_channel(conn, skb);
+               break;
+
+       case L2CAP_CID_CONN_LESS:
+               psm = get_unaligned_le16(skb->data);
+               skb_pull(skb, 2);
+               l2cap_conless_channel(conn, psm, skb);
+               break;
+
+       default:
+               l2cap_data_channel(conn, cid, skb);
+               break;
+       }
+}
+
+/* ---- L2CAP interface with lower layer (HCI) ---- */
+
+static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+{
+       int exact = 0, lm1 = 0, lm2 = 0;
+       register struct sock *sk;
+       struct hlist_node *node;
+
+       if (type != ACL_LINK)
+               return -EINVAL;
+
+       BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
+
+       /* Find listening sockets and check their link_mode */
+       read_lock(&l2cap_sk_list.lock);
+       sk_for_each(sk, node, &l2cap_sk_list.head) {
+               if (sk->sk_state != BT_LISTEN)
+                       continue;
+
+               if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
+                       lm1 |= HCI_LM_ACCEPT;
+                       if (l2cap_pi(sk)->role_switch)
+                               lm1 |= HCI_LM_MASTER;
+                       exact++;
+               } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
+                       lm2 |= HCI_LM_ACCEPT;
+                       if (l2cap_pi(sk)->role_switch)
+                               lm2 |= HCI_LM_MASTER;
+               }
+       }
+       read_unlock(&l2cap_sk_list.lock);
+
+       return exact ? lm1 : lm2;
+}
+
+static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
+{
+       struct l2cap_conn *conn;
+
+       BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
+
+       if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
+               return -EINVAL;
+
+       if (!status) {
+               conn = l2cap_conn_add(hcon, status);
+               if (conn)
+                       l2cap_conn_ready(conn);
+       } else
+               l2cap_conn_del(hcon, bt_err(status));
+
+       return 0;
+}
+
+static int l2cap_disconn_ind(struct hci_conn *hcon)
+{
+       struct l2cap_conn *conn = hcon->l2cap_data;
+
+       BT_DBG("hcon %p", hcon);
+
+       if (hcon->type != ACL_LINK || !conn)
+               return 0x13;
+
+       return conn->disc_reason;
+}
+
+static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
+{
+       BT_DBG("hcon %p reason %d", hcon, reason);
+
+       if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
+               return -EINVAL;
+
+       l2cap_conn_del(hcon, bt_err(reason));
+
+       return 0;
+}
+
+static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
+{
+       if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
+               return;
+
+       if (encrypt == 0x00) {
+               if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
+                       l2cap_sock_clear_timer(sk);
+                       l2cap_sock_set_timer(sk, HZ * 5);
+               } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
+                       __l2cap_sock_close(sk, ECONNREFUSED);
+       } else {
+               if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
+                       l2cap_sock_clear_timer(sk);
+       }
+}
+
+static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+{
+       struct l2cap_chan_list *l;
+       struct l2cap_conn *conn = hcon->l2cap_data;
+       struct sock *sk;
+
+       if (!conn)
+               return 0;
+
+       l = &conn->chan_list;
+
+       BT_DBG("conn %p", conn);
+
+       read_lock(&l->lock);
+
+       for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
+               bh_lock_sock(sk);
+
+               if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
+                       bh_unlock_sock(sk);
+                       continue;
+               }
+
+               if (!status && (sk->sk_state == BT_CONNECTED ||
+                                               sk->sk_state == BT_CONFIG)) {
+                       l2cap_check_encryption(sk, encrypt);
+                       bh_unlock_sock(sk);
+                       continue;
+               }
+
+               if (sk->sk_state == BT_CONNECT) {
+                       if (!status) {
+                               struct l2cap_conn_req req;
+                               req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
+                               req.psm  = l2cap_pi(sk)->psm;
+
+                               l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+                               l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
+
+                               l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
+                                       L2CAP_CONN_REQ, sizeof(req), &req);
+                       } else {
+                               l2cap_sock_clear_timer(sk);
+                               l2cap_sock_set_timer(sk, HZ / 10);
+                       }
+               } else if (sk->sk_state == BT_CONNECT2) {
+                       struct l2cap_conn_rsp rsp;
+                       __u16 result;
+
+                       if (!status) {
+                               sk->sk_state = BT_CONFIG;
+                               result = L2CAP_CR_SUCCESS;
+                       } else {
+                               sk->sk_state = BT_DISCONN;
+                               l2cap_sock_set_timer(sk, HZ / 10);
+                               result = L2CAP_CR_SEC_BLOCK;
+                       }
+
+                       rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
+                       rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
+                       rsp.result = cpu_to_le16(result);
+                       rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+                       l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
+                                       L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+               }
+
+               bh_unlock_sock(sk);
+       }
+
+       read_unlock(&l->lock);
+
+       return 0;
+}
+
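+/* Receive ACL data from HCI and rebuild complete L2CAP frames: a start
+ * fragment carries the Basic L2CAP header announcing the total length,
+ * continuation fragments are appended to conn->rx_skb until that length
+ * is reached and the assembled frame is then handed to
+ * l2cap_recv_frame().
+ */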
+static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+{
+       struct l2cap_conn *conn = hcon->l2cap_data;
+
+       if (!conn)
+               conn = l2cap_conn_add(hcon, 0);
+
+       if (!conn)
+               goto drop;
+
+       BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
+
+       if (!(flags & ACL_CONT)) {
+               struct l2cap_hdr *hdr;
+               struct sock *sk;
+               u16 cid;
+               int len;
+
+               if (conn->rx_len) {
+                       BT_ERR("Unexpected start frame (len %d)", skb->len);
+                       kfree_skb(conn->rx_skb);
+                       conn->rx_skb = NULL;
+                       conn->rx_len = 0;
+                       l2cap_conn_unreliable(conn, ECOMM);
+               }
+
+               /* A start fragment always begins with the Basic L2CAP header */
+               if (skb->len < L2CAP_HDR_SIZE) {
+                       BT_ERR("Frame is too short (len %d)", skb->len);
+                       l2cap_conn_unreliable(conn, ECOMM);
+                       goto drop;
+               }
+
+               hdr = (struct l2cap_hdr *) skb->data;
+               len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
+               cid = __le16_to_cpu(hdr->cid);
+
+               if (len == skb->len) {
+                       /* Complete frame received */
+                       l2cap_recv_frame(conn, skb);
+                       return 0;
+               }
+
+               BT_DBG("Start: total len %d, frag len %d", len, skb->len);
+
+               if (skb->len > len) {
+                       BT_ERR("Frame is too long (len %d, expected len %d)",
+                               skb->len, len);
+                       l2cap_conn_unreliable(conn, ECOMM);
+                       goto drop;
+               }
+
+               sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
+
+               if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
+                       BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
+                                       len, l2cap_pi(sk)->imtu);
+                       bh_unlock_sock(sk);
+                       l2cap_conn_unreliable(conn, ECOMM);
+                       goto drop;
+               }
+
+               if (sk)
+                       bh_unlock_sock(sk);
+
+               /* Allocate skb for the complete frame (with header) */
+               conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
+               if (!conn->rx_skb)
+                       goto drop;
+
+               skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
+                                                               skb->len);
+               conn->rx_len = len - skb->len;
+       } else {
+               BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
+
+               if (!conn->rx_len) {
+                       BT_ERR("Unexpected continuation frame (len %d)", skb->len);
+                       l2cap_conn_unreliable(conn, ECOMM);
+                       goto drop;
+               }
+
+               if (skb->len > conn->rx_len) {
+                       BT_ERR("Fragment is too long (len %d, expected %d)",
+                                       skb->len, conn->rx_len);
+                       kfree_skb(conn->rx_skb);
+                       conn->rx_skb = NULL;
+                       conn->rx_len = 0;
+                       l2cap_conn_unreliable(conn, ECOMM);
+                       goto drop;
+               }
+
+               skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
+                                                               skb->len);
+               conn->rx_len -= skb->len;
+
+               if (!conn->rx_len) {
+                       /* Complete frame received */
+                       l2cap_recv_frame(conn, conn->rx_skb);
+                       conn->rx_skb = NULL;
+               }
+       }
+
+drop:
+       kfree_skb(skb);
+       return 0;
+}
+
+static int l2cap_debugfs_show(struct seq_file *f, void *p)
+{
+       struct sock *sk;
+       struct hlist_node *node;
+
+       read_lock_bh(&l2cap_sk_list.lock);
+
+       sk_for_each(sk, node, &l2cap_sk_list.head) {
+               struct l2cap_pinfo *pi = l2cap_pi(sk);
+
+               seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
+                                       batostr(&bt_sk(sk)->src),
+                                       batostr(&bt_sk(sk)->dst),
+                                       sk->sk_state, __le16_to_cpu(pi->psm),
+                                       pi->scid, pi->dcid,
+                                       pi->imtu, pi->omtu, pi->sec_level,
+                                       pi->mode);
+       }
+
+       read_unlock_bh(&l2cap_sk_list.lock);
+
+       return 0;
+}
+
+static int l2cap_debugfs_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, l2cap_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations l2cap_debugfs_fops = {
+       .open           = l2cap_debugfs_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static struct dentry *l2cap_debugfs;
+
+static struct hci_proto l2cap_hci_proto = {
+       .name           = "L2CAP",
+       .id             = HCI_PROTO_L2CAP,
+       .connect_ind    = l2cap_connect_ind,
+       .connect_cfm    = l2cap_connect_cfm,
+       .disconn_ind    = l2cap_disconn_ind,
+       .disconn_cfm    = l2cap_disconn_cfm,
+       .security_cfm   = l2cap_security_cfm,
+       .recv_acldata   = l2cap_recv_acldata
+};
+
+int __init l2cap_init(void)
+{
+       int err;
+
+       err = l2cap_init_sockets();
+       if (err < 0)
+               return err;
+
+       _busy_wq = create_singlethread_workqueue("l2cap");
+       if (!_busy_wq) {
+               err = -ENOMEM;
+               goto error;
+       }
+
+       err = hci_register_proto(&l2cap_hci_proto);
+       if (err < 0) {
+               BT_ERR("L2CAP protocol registration failed");
+               bt_sock_unregister(BTPROTO_L2CAP);
+               goto error;
+       }
+
+       if (bt_debugfs) {
+               l2cap_debugfs = debugfs_create_file("l2cap", 0444,
+                                       bt_debugfs, NULL, &l2cap_debugfs_fops);
+               if (!l2cap_debugfs)
+                       BT_ERR("Failed to create L2CAP debug file");
+       }
+
+       BT_INFO("L2CAP socket layer initialized");
+
+       return 0;
+
+error:
+       destroy_workqueue(_busy_wq);
+       l2cap_cleanup_sockets();
+       return err;
+}
+
+void l2cap_exit(void)
+{
+       debugfs_remove(l2cap_debugfs);
+
+       flush_workqueue(_busy_wq);
+       destroy_workqueue(_busy_wq);
+
+       if (hci_unregister_proto(&l2cap_hci_proto) < 0)
+               BT_ERR("L2CAP protocol unregistration failed");
+
+       l2cap_cleanup_sockets();
+}
+
+module_param(disable_ertm, bool, 0644);
+MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
new file mode 100644 (file)
index 0000000..fc85e7a
--- /dev/null
@@ -0,0 +1,1156 @@
+/*
+   BlueZ - Bluetooth protocol stack for Linux
+   Copyright (C) 2000-2001 Qualcomm Incorporated
+   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
+   Copyright (C) 2010 Google Inc.
+
+   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License version 2 as
+   published by the Free Software Foundation;
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+   SOFTWARE IS DISCLAIMED.
+*/
+
+/* Bluetooth L2CAP sockets. */
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
+
+/* ---- L2CAP timers ---- */
+static void l2cap_sock_timeout(unsigned long arg)
+{
+       struct sock *sk = (struct sock *) arg;
+       int reason;
+
+       BT_DBG("sock %p state %d", sk, sk->sk_state);
+
+       bh_lock_sock(sk);
+
+       if (sock_owned_by_user(sk)) {
+               /* sk is owned by user. Try again later */
+               l2cap_sock_set_timer(sk, HZ / 5);
+               bh_unlock_sock(sk);
+               sock_put(sk);
+               return;
+       }
+
+       if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
+               reason = ECONNREFUSED;
+       else if (sk->sk_state == BT_CONNECT &&
+                               l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
+               reason = ECONNREFUSED;
+       else
+               reason = ETIMEDOUT;
+
+       __l2cap_sock_close(sk, reason);
+
+       bh_unlock_sock(sk);
+
+       l2cap_sock_kill(sk);
+       sock_put(sk);
+}
+
+void l2cap_sock_set_timer(struct sock *sk, long timeout)
+{
+       BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
+       sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
+}
+
+void l2cap_sock_clear_timer(struct sock *sk)
+{
+       BT_DBG("sock %p state %d", sk, sk->sk_state);
+       sk_stop_timer(sk, &sk->sk_timer);
+}
+
+static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
+{
+       struct sock *sk;
+       struct hlist_node *node;
+       sk_for_each(sk, node, &l2cap_sk_list.head)
+               if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
+                       goto found;
+       sk = NULL;
+found:
+       return sk;
+}
+
+static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+{
+       struct sock *sk = sock->sk;
+       struct sockaddr_l2 la;
+       int len, err = 0;
+
+       BT_DBG("sk %p", sk);
+
+       if (!addr || addr->sa_family != AF_BLUETOOTH)
+               return -EINVAL;
+
+       memset(&la, 0, sizeof(la));
+       len = min_t(unsigned int, sizeof(la), alen);
+       memcpy(&la, addr, len);
+
+       if (la.l2_cid && la.l2_psm)
+               return -EINVAL;
+
+       lock_sock(sk);
+
+       if (sk->sk_state != BT_OPEN) {
+               err = -EBADFD;
+               goto done;
+       }
+
+       if (la.l2_psm) {
+               __u16 psm = __le16_to_cpu(la.l2_psm);
+
+               /* PSM must be odd and lsb of upper byte must be 0 */
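+               /* e.g. 0x0001, 0x0003 and 0x1001 satisfy
+                * (psm & 0x0101) == 0x0001, while 0x0002 or 0x0101 do not */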
+               if ((psm & 0x0101) != 0x0001) {
+                       err = -EINVAL;
+                       goto done;
+               }
+
+               /* Restrict usage of well-known PSMs */
+               if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
+                       err = -EACCES;
+                       goto done;
+               }
+       }
+
+       write_lock_bh(&l2cap_sk_list.lock);
+
+       if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
+               err = -EADDRINUSE;
+       } else {
+               /* Save source address */
+               bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
+               l2cap_pi(sk)->psm   = la.l2_psm;
+               l2cap_pi(sk)->sport = la.l2_psm;
+               sk->sk_state = BT_BOUND;
+
+               if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
+                                       __le16_to_cpu(la.l2_psm) == 0x0003)
+                       l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
+       }
+
+       if (la.l2_cid)
+               l2cap_pi(sk)->scid = la.l2_cid;
+
+       write_unlock_bh(&l2cap_sk_list.lock);
+
+done:
+       release_sock(sk);
+       return err;
+}
+
+static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
+{
+       struct sock *sk = sock->sk;
+       struct sockaddr_l2 la;
+       int len, err = 0;
+
+       BT_DBG("sk %p", sk);
+
+       if (!addr || alen < sizeof(addr->sa_family) ||
+           addr->sa_family != AF_BLUETOOTH)
+               return -EINVAL;
+
+       memset(&la, 0, sizeof(la));
+       len = min_t(unsigned int, sizeof(la), alen);
+       memcpy(&la, addr, len);
+
+       if (la.l2_cid && la.l2_psm)
+               return -EINVAL;
+
+       lock_sock(sk);
+
+       if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
+                       && !(la.l2_psm || la.l2_cid)) {
+               err = -EINVAL;
+               goto done;
+       }
+
+       switch (l2cap_pi(sk)->mode) {
+       case L2CAP_MODE_BASIC:
+               break;
+       case L2CAP_MODE_ERTM:
+       case L2CAP_MODE_STREAMING:
+               if (!disable_ertm)
+                       break;
+               /* fall through */
+       default:
+               err = -ENOTSUPP;
+               goto done;
+       }
+
+       switch (sk->sk_state) {
+       case BT_CONNECT:
+       case BT_CONNECT2:
+       case BT_CONFIG:
+               /* Already connecting */
+               goto wait;
+
+       case BT_CONNECTED:
+               /* Already connected */
+               err = -EISCONN;
+               goto done;
+
+       case BT_OPEN:
+       case BT_BOUND:
+               /* Can connect */
+               break;
+
+       default:
+               err = -EBADFD;
+               goto done;
+       }
+
+       /* PSM must be odd and lsb of upper byte must be 0 */
+       if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
+                               sk->sk_type != SOCK_RAW && !la.l2_cid) {
+               err = -EINVAL;
+               goto done;
+       }
+
+       /* Set destination address and psm */
+       bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
+       l2cap_pi(sk)->psm = la.l2_psm;
+       l2cap_pi(sk)->dcid = la.l2_cid;
+
+       err = l2cap_do_connect(sk);
+       if (err)
+               goto done;
+
+wait:
+       err = bt_sock_wait_state(sk, BT_CONNECTED,
+                       sock_sndtimeo(sk, flags & O_NONBLOCK));
+done:
+       release_sock(sk);
+       return err;
+}
+
+static int l2cap_sock_listen(struct socket *sock, int backlog)
+{
+       struct sock *sk = sock->sk;
+       int err = 0;
+
+       BT_DBG("sk %p backlog %d", sk, backlog);
+
+       lock_sock(sk);
+
+       if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
+                       || sk->sk_state != BT_BOUND) {
+               err = -EBADFD;
+               goto done;
+       }
+
+       switch (l2cap_pi(sk)->mode) {
+       case L2CAP_MODE_BASIC:
+               break;
+       case L2CAP_MODE_ERTM:
+       case L2CAP_MODE_STREAMING:
+               if (!disable_ertm)
+                       break;
+               /* fall through */
+       default:
+               err = -ENOTSUPP;
+               goto done;
+       }
+
+       if (!l2cap_pi(sk)->psm && !l2cap_pi(sk)->dcid) {
+               bdaddr_t *src = &bt_sk(sk)->src;
+               u16 psm;
+
+               err = -EINVAL;
+
+               write_lock_bh(&l2cap_sk_list.lock);
+
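+               /* scan the dynamic PSM range (odd values 0x1001..0x10ff)
+                * for the first PSM not yet bound to this source address */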
+               for (psm = 0x1001; psm < 0x1100; psm += 2)
+                       if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
+                               l2cap_pi(sk)->psm   = cpu_to_le16(psm);
+                               l2cap_pi(sk)->sport = cpu_to_le16(psm);
+                               err = 0;
+                               break;
+                       }
+
+               write_unlock_bh(&l2cap_sk_list.lock);
+
+               if (err < 0)
+                       goto done;
+       }
+
+       sk->sk_max_ack_backlog = backlog;
+       sk->sk_ack_backlog = 0;
+       sk->sk_state = BT_LISTEN;
+
+done:
+       release_sock(sk);
+       return err;
+}
+
+static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+       DECLARE_WAITQUEUE(wait, current);
+       struct sock *sk = sock->sk, *nsk;
+       long timeo;
+       int err = 0;
+
+       lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
+       if (sk->sk_state != BT_LISTEN) {
+               err = -EBADFD;
+               goto done;
+       }
+
+       timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+
+       BT_DBG("sk %p timeo %ld", sk, timeo);
+
+       /* Wait for an incoming connection. (wake-one). */
+       add_wait_queue_exclusive(sk_sleep(sk), &wait);
+       while (!(nsk = bt_accept_dequeue(sk, newsock))) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               if (!timeo) {
+                       err = -EAGAIN;
+                       break;
+               }
+
+               release_sock(sk);
+               timeo = schedule_timeout(timeo);
+               lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
+               if (sk->sk_state != BT_LISTEN) {
+                       err = -EBADFD;
+                       break;
+               }
+
+               if (signal_pending(current)) {
+                       err = sock_intr_errno(timeo);
+                       break;
+               }
+       }
+       set_current_state(TASK_RUNNING);
+       remove_wait_queue(sk_sleep(sk), &wait);
+
+       if (err)
+               goto done;
+
+       newsock->state = SS_CONNECTED;
+
+       BT_DBG("new socket %p", nsk);
+
+done:
+       release_sock(sk);
+       return err;
+}
+
+static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
+{
+       struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
+       struct sock *sk = sock->sk;
+
+       BT_DBG("sock %p, sk %p", sock, sk);
+
+       addr->sa_family = AF_BLUETOOTH;
+       *len = sizeof(struct sockaddr_l2);
+
+       if (peer) {
+               la->l2_psm = l2cap_pi(sk)->psm;
+               bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
+               la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+       } else {
+               la->l2_psm = l2cap_pi(sk)->sport;
+               bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
+               la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
+       }
+
+       return 0;
+}
+
+static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
+{
+       struct sock *sk = sock->sk;
+       struct l2cap_options opts;
+       struct l2cap_conninfo cinfo;
+       int len, err = 0;
+       u32 opt;
+
+       BT_DBG("sk %p", sk);
+
+       if (get_user(len, optlen))
+               return -EFAULT;
+
+       lock_sock(sk);
+
+       switch (optname) {
+       case L2CAP_OPTIONS:
+               memset(&opts, 0, sizeof(opts));
+               opts.imtu     = l2cap_pi(sk)->imtu;
+               opts.omtu     = l2cap_pi(sk)->omtu;
+               opts.flush_to = l2cap_pi(sk)->flush_to;
+               opts.mode     = l2cap_pi(sk)->mode;
+               opts.fcs      = l2cap_pi(sk)->fcs;
+               opts.max_tx   = l2cap_pi(sk)->max_tx;
+               opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
+
+               len = min_t(unsigned int, len, sizeof(opts));
+               if (copy_to_user(optval, (char *) &opts, len))
+                       err = -EFAULT;
+
+               break;
+
+       case L2CAP_LM:
+               switch (l2cap_pi(sk)->sec_level) {
+               case BT_SECURITY_LOW:
+                       opt = L2CAP_LM_AUTH;
+                       break;
+               case BT_SECURITY_MEDIUM:
+                       opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
+                       break;
+               case BT_SECURITY_HIGH:
+                       opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
+                                                       L2CAP_LM_SECURE;
+                       break;
+               default:
+                       opt = 0;
+                       break;
+               }
+
+               if (l2cap_pi(sk)->role_switch)
+                       opt |= L2CAP_LM_MASTER;
+
+               if (l2cap_pi(sk)->force_reliable)
+                       opt |= L2CAP_LM_RELIABLE;
+
+               if (put_user(opt, (u32 __user *) optval))
+                       err = -EFAULT;
+               break;
+
+       case L2CAP_CONNINFO:
+               if (sk->sk_state != BT_CONNECTED &&
+                                       !(sk->sk_state == BT_CONNECT2 &&
+                                               bt_sk(sk)->defer_setup)) {
+                       err = -ENOTCONN;
+                       break;
+               }
+
+               /* clear padding bytes so they are not copied to userspace */
+               memset(&cinfo, 0, sizeof(cinfo));
+               cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
+               memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
+
+               len = min_t(unsigned int, len, sizeof(cinfo));
+               if (copy_to_user(optval, (char *) &cinfo, len))
+                       err = -EFAULT;
+
+               break;
+
+       default:
+               err = -ENOPROTOOPT;
+               break;
+       }
+
+       release_sock(sk);
+       return err;
+}
+
+static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
+{
+       struct sock *sk = sock->sk;
+       struct bt_security sec;
+       int len, err = 0;
+
+       BT_DBG("sk %p", sk);
+
+       if (level == SOL_L2CAP)
+               return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
+
+       if (level != SOL_BLUETOOTH)
+               return -ENOPROTOOPT;
+
+       if (get_user(len, optlen))
+               return -EFAULT;
+
+       lock_sock(sk);
+
+       switch (optname) {
+       case BT_SECURITY:
+               if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+                               && sk->sk_type != SOCK_RAW) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               sec.level = l2cap_pi(sk)->sec_level;
+
+               len = min_t(unsigned int, len, sizeof(sec));
+               if (copy_to_user(optval, (char *) &sec, len))
+                       err = -EFAULT;
+
+               break;
+
+       case BT_DEFER_SETUP:
+               if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
+                       err = -EFAULT;
+
+               break;
+
+       case BT_FLUSHABLE:
+               if (put_user(l2cap_pi(sk)->flushable, (u32 __user *) optval))
+                       err = -EFAULT;
+
+               break;
+
+       default:
+               err = -ENOPROTOOPT;
+               break;
+       }
+
+       release_sock(sk);
+       return err;
+}
+
+static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
+{
+       struct sock *sk = sock->sk;
+       struct l2cap_options opts;
+       int len, err = 0;
+       u32 opt;
+
+       BT_DBG("sk %p", sk);
+
+       lock_sock(sk);
+
+       switch (optname) {
+       case L2CAP_OPTIONS:
+               if (sk->sk_state == BT_CONNECTED) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               opts.imtu     = l2cap_pi(sk)->imtu;
+               opts.omtu     = l2cap_pi(sk)->omtu;
+               opts.flush_to = l2cap_pi(sk)->flush_to;
+               opts.mode     = l2cap_pi(sk)->mode;
+               opts.fcs      = l2cap_pi(sk)->fcs;
+               opts.max_tx   = l2cap_pi(sk)->max_tx;
+               opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
+
+               len = min_t(unsigned int, sizeof(opts), optlen);
+               if (copy_from_user((char *) &opts, optval, len)) {
+                       err = -EFAULT;
+                       break;
+               }
+
+               if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               l2cap_pi(sk)->mode = opts.mode;
+               switch (l2cap_pi(sk)->mode) {
+               case L2CAP_MODE_BASIC:
+                       l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
+                       break;
+               case L2CAP_MODE_ERTM:
+               case L2CAP_MODE_STREAMING:
+                       if (!disable_ertm)
+                               break;
+                       /* fall through */
+               default:
+                       err = -EINVAL;
+                       break;
+               }
+
+               l2cap_pi(sk)->imtu = opts.imtu;
+               l2cap_pi(sk)->omtu = opts.omtu;
+               l2cap_pi(sk)->fcs  = opts.fcs;
+               l2cap_pi(sk)->max_tx = opts.max_tx;
+               l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
+               break;
+
+       case L2CAP_LM:
+               if (get_user(opt, (u32 __user *) optval)) {
+                       err = -EFAULT;
+                       break;
+               }
+
+               if (opt & L2CAP_LM_AUTH)
+                       l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
+               if (opt & L2CAP_LM_ENCRYPT)
+                       l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
+               if (opt & L2CAP_LM_SECURE)
+                       l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
+
+               l2cap_pi(sk)->role_switch    = (opt & L2CAP_LM_MASTER);
+               l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
+               break;
+
+       default:
+               err = -ENOPROTOOPT;
+               break;
+       }
+
+       release_sock(sk);
+       return err;
+}
+
+static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
+{
+       struct sock *sk = sock->sk;
+       struct bt_security sec;
+       int len, err = 0;
+       u32 opt;
+
+       BT_DBG("sk %p", sk);
+
+       if (level == SOL_L2CAP)
+               return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
+
+       if (level != SOL_BLUETOOTH)
+               return -ENOPROTOOPT;
+
+       lock_sock(sk);
+
+       switch (optname) {
+       case BT_SECURITY:
+               if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+                               && sk->sk_type != SOCK_RAW) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               sec.level = BT_SECURITY_LOW;
+
+               len = min_t(unsigned int, sizeof(sec), optlen);
+               if (copy_from_user((char *) &sec, optval, len)) {
+                       err = -EFAULT;
+                       break;
+               }
+
+               if (sec.level < BT_SECURITY_LOW ||
+                                       sec.level > BT_SECURITY_HIGH) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               l2cap_pi(sk)->sec_level = sec.level;
+               break;
+
+       case BT_DEFER_SETUP:
+               if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               if (get_user(opt, (u32 __user *) optval)) {
+                       err = -EFAULT;
+                       break;
+               }
+
+               bt_sk(sk)->defer_setup = opt;
+               break;
+
+       case BT_FLUSHABLE:
+               if (get_user(opt, (u32 __user *) optval)) {
+                       err = -EFAULT;
+                       break;
+               }
+
+               if (opt > BT_FLUSHABLE_ON) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               if (opt == BT_FLUSHABLE_OFF) {
+                       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+                       /* proceed further only when we have l2cap_conn and
+                          No Flush support in the LM */
+                       if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
+                               err = -EINVAL;
+                               break;
+                       }
+               }
+
+               l2cap_pi(sk)->flushable = opt;
+               break;
+
+       default:
+               err = -ENOPROTOOPT;
+               break;
+       }
+
+       release_sock(sk);
+       return err;
+}
+
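+/* Transmit data; PDU construction depends on the channel mode */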
+static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
+{
+       struct sock *sk = sock->sk;
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       struct sk_buff *skb;
+       u16 control;
+       int err;
+
+       BT_DBG("sock %p, sk %p", sock, sk);
+
+       err = sock_error(sk);
+       if (err)
+               return err;
+
+       if (msg->msg_flags & MSG_OOB)
+               return -EOPNOTSUPP;
+
+       lock_sock(sk);
+
+       if (sk->sk_state != BT_CONNECTED) {
+               err = -ENOTCONN;
+               goto done;
+       }
+
+       /* Connectionless channel */
+       if (sk->sk_type == SOCK_DGRAM) {
+               skb = l2cap_create_connless_pdu(sk, msg, len);
+               if (IS_ERR(skb)) {
+                       err = PTR_ERR(skb);
+               } else {
+                       l2cap_do_send(sk, skb);
+                       err = len;
+               }
+               goto done;
+       }
+
+       switch (pi->mode) {
+       case L2CAP_MODE_BASIC:
+               /* Check outgoing MTU */
+               if (len > pi->omtu) {
+                       err = -EMSGSIZE;
+                       goto done;
+               }
+
+               /* Create a basic PDU */
+               skb = l2cap_create_basic_pdu(sk, msg, len);
+               if (IS_ERR(skb)) {
+                       err = PTR_ERR(skb);
+                       goto done;
+               }
+
+               l2cap_do_send(sk, skb);
+               err = len;
+               break;
+
+       case L2CAP_MODE_ERTM:
+       case L2CAP_MODE_STREAMING:
+               /* Entire SDU fits into one PDU */
+               if (len <= pi->remote_mps) {
+                       control = L2CAP_SDU_UNSEGMENTED;
+                       skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
+                       if (IS_ERR(skb)) {
+                               err = PTR_ERR(skb);
+                               goto done;
+                       }
+                       __skb_queue_tail(TX_QUEUE(sk), skb);
+
+                       if (sk->sk_send_head == NULL)
+                               sk->sk_send_head = skb;
+
+               } else {
+                       /* Segment SDU into multiple PDUs */
+                       err = l2cap_sar_segment_sdu(sk, msg, len);
+                       if (err < 0)
+                               goto done;
+               }
+
+               if (pi->mode == L2CAP_MODE_STREAMING) {
+                       l2cap_streaming_send(sk);
+               } else {
+                       if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+                                       (pi->conn_state & L2CAP_CONN_WAIT_F)) {
+                               err = len;
+                               break;
+                       }
+                       err = l2cap_ertm_send(sk);
+               }
+
+               if (err >= 0)
+                       err = len;
+               break;
+
+       default:
+               BT_DBG("bad mode %1.1x", pi->mode);
+               err = -EBADFD;
+       }
+
+done:
+       release_sock(sk);
+       return err;
+}
+
+static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
+{
+       struct sock *sk = sock->sk;
+
+       lock_sock(sk);
+
+       if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
+               struct l2cap_conn_rsp rsp;
+               struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+               u8 buf[128];
+
+               sk->sk_state = BT_CONFIG;
+
+               rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
+               rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
+               rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+               rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+               l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
+                                       L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+
+               if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
+                       release_sock(sk);
+                       return 0;
+               }
+
+               l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+               l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+                               l2cap_build_conf_req(sk, buf), buf);
+               l2cap_pi(sk)->num_conf_req++;
+
+               release_sock(sk);
+               return 0;
+       }
+
+       release_sock(sk);
+
+       if (sock->type == SOCK_STREAM)
+               return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
+
+       return bt_sock_recvmsg(iocb, sock, msg, len, flags);
+}
+
+/* Kill socket (only if zapped and orphan)
+ * Must be called on unlocked socket.
+ */
+void l2cap_sock_kill(struct sock *sk)
+{
+       if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
+               return;
+
+       BT_DBG("sk %p state %d", sk, sk->sk_state);
+
+       /* Kill poor orphan */
+       bt_sock_unlink(&l2cap_sk_list, sk);
+       sock_set_flag(sk, SOCK_DEAD);
+       sock_put(sk);
+}
+
+/* Must be called on unlocked socket. */
+static void l2cap_sock_close(struct sock *sk)
+{
+       l2cap_sock_clear_timer(sk);
+       lock_sock(sk);
+       __l2cap_sock_close(sk, ECONNRESET);
+       release_sock(sk);
+       l2cap_sock_kill(sk);
+}
+
+static void l2cap_sock_cleanup_listen(struct sock *parent)
+{
+       struct sock *sk;
+
+       BT_DBG("parent %p", parent);
+
+       /* Close not yet accepted channels */
+       while ((sk = bt_accept_dequeue(parent, NULL)))
+               l2cap_sock_close(sk);
+
+       parent->sk_state = BT_CLOSED;
+       sock_set_flag(parent, SOCK_ZAPPED);
+}
+
+void __l2cap_sock_close(struct sock *sk, int reason)
+{
+       struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+
+       BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
+
+       switch (sk->sk_state) {
+       case BT_LISTEN:
+               l2cap_sock_cleanup_listen(sk);
+               break;
+
+       case BT_CONNECTED:
+       case BT_CONFIG:
+               if ((sk->sk_type == SOCK_SEQPACKET ||
+                                       sk->sk_type == SOCK_STREAM) &&
+                                       conn->hcon->type == ACL_LINK) {
+                       l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+                       l2cap_send_disconn_req(conn, sk, reason);
+               } else
+                       l2cap_chan_del(sk, reason);
+               break;
+
+       case BT_CONNECT2:
+               if ((sk->sk_type == SOCK_SEQPACKET ||
+                                       sk->sk_type == SOCK_STREAM) &&
+                                       conn->hcon->type == ACL_LINK) {
+                       struct l2cap_conn_rsp rsp;
+                       __u16 result;
+
+                       if (bt_sk(sk)->defer_setup)
+                               result = L2CAP_CR_SEC_BLOCK;
+                       else
+                               result = L2CAP_CR_BAD_PSM;
+
+                       rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
+                       rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
+                       rsp.result = cpu_to_le16(result);
+                       rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+                       l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
+                                       L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+               } else
+                       l2cap_chan_del(sk, reason);
+               break;
+
+       case BT_CONNECT:
+       case BT_DISCONN:
+               l2cap_chan_del(sk, reason);
+               break;
+
+       default:
+               sock_set_flag(sk, SOCK_ZAPPED);
+               break;
+       }
+}
+
+static int l2cap_sock_shutdown(struct socket *sock, int how)
+{
+       struct sock *sk = sock->sk;
+       int err = 0;
+
+       BT_DBG("sock %p, sk %p", sock, sk);
+
+       if (!sk)
+               return 0;
+
+       lock_sock(sk);
+       if (!sk->sk_shutdown) {
+               if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
+                       err = __l2cap_wait_ack(sk);
+
+               sk->sk_shutdown = SHUTDOWN_MASK;
+               l2cap_sock_clear_timer(sk);
+               __l2cap_sock_close(sk, 0);
+
+               if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
+                       err = bt_sock_wait_state(sk, BT_CLOSED,
+                                                       sk->sk_lingertime);
+       }
+
+       if (!err && sk->sk_err)
+               err = -sk->sk_err;
+
+       release_sock(sk);
+       return err;
+}
+
+static int l2cap_sock_release(struct socket *sock)
+{
+       struct sock *sk = sock->sk;
+       int err;
+
+       BT_DBG("sock %p, sk %p", sock, sk);
+
+       if (!sk)
+               return 0;
+
+       err = l2cap_sock_shutdown(sock, 2);
+
+       sock_orphan(sk);
+       l2cap_sock_kill(sk);
+       return err;
+}
+
+static void l2cap_sock_destruct(struct sock *sk)
+{
+       BT_DBG("sk %p", sk);
+
+       skb_queue_purge(&sk->sk_receive_queue);
+       skb_queue_purge(&sk->sk_write_queue);
+}
+
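+/* Set channel defaults, inheriting from the parent socket if present */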
+void l2cap_sock_init(struct sock *sk, struct sock *parent)
+{
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+
+       BT_DBG("sk %p", sk);
+
+       if (parent) {
+               sk->sk_type = parent->sk_type;
+               bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
+
+               pi->imtu = l2cap_pi(parent)->imtu;
+               pi->omtu = l2cap_pi(parent)->omtu;
+               pi->conf_state = l2cap_pi(parent)->conf_state;
+               pi->mode = l2cap_pi(parent)->mode;
+               pi->fcs  = l2cap_pi(parent)->fcs;
+               pi->max_tx = l2cap_pi(parent)->max_tx;
+               pi->tx_win = l2cap_pi(parent)->tx_win;
+               pi->sec_level = l2cap_pi(parent)->sec_level;
+               pi->role_switch = l2cap_pi(parent)->role_switch;
+               pi->force_reliable = l2cap_pi(parent)->force_reliable;
+               pi->flushable = l2cap_pi(parent)->flushable;
+       } else {
+               pi->imtu = L2CAP_DEFAULT_MTU;
+               pi->omtu = 0;
+               if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
+                       pi->mode = L2CAP_MODE_ERTM;
+                       pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
+               } else {
+                       pi->mode = L2CAP_MODE_BASIC;
+               }
+               pi->max_tx = L2CAP_DEFAULT_MAX_TX;
+               pi->fcs  = L2CAP_FCS_CRC16;
+               pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
+               pi->sec_level = BT_SECURITY_LOW;
+               pi->role_switch = 0;
+               pi->force_reliable = 0;
+               pi->flushable = BT_FLUSHABLE_OFF;
+       }
+
+       /* Default config options */
+       pi->conf_len = 0;
+       pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+       skb_queue_head_init(TX_QUEUE(sk));
+       skb_queue_head_init(SREJ_QUEUE(sk));
+       skb_queue_head_init(BUSY_QUEUE(sk));
+       INIT_LIST_HEAD(SREJ_LIST(sk));
+}
+
+static struct proto l2cap_proto = {
+       .name           = "L2CAP",
+       .owner          = THIS_MODULE,
+       .obj_size       = sizeof(struct l2cap_pinfo)
+};
+
+struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
+{
+       struct sock *sk;
+
+       sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
+       if (!sk)
+               return NULL;
+
+       sock_init_data(sock, sk);
+       INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
+
+       sk->sk_destruct = l2cap_sock_destruct;
+       sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
+
+       sock_reset_flag(sk, SOCK_ZAPPED);
+
+       sk->sk_protocol = proto;
+       sk->sk_state = BT_OPEN;
+
+       setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
+
+       bt_sock_link(&l2cap_sk_list, sk);
+       return sk;
+}
+
+static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
+                            int kern)
+{
+       struct sock *sk;
+
+       BT_DBG("sock %p", sock);
+
+       sock->state = SS_UNCONNECTED;
+
+       if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
+                       sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
+               return -ESOCKTNOSUPPORT;
+
+       if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
+               return -EPERM;
+
+       sock->ops = &l2cap_sock_ops;
+
+       sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
+       if (!sk)
+               return -ENOMEM;
+
+       l2cap_sock_init(sk, NULL);
+       return 0;
+}
+
+const struct proto_ops l2cap_sock_ops = {
+       .family         = PF_BLUETOOTH,
+       .owner          = THIS_MODULE,
+       .release        = l2cap_sock_release,
+       .bind           = l2cap_sock_bind,
+       .connect        = l2cap_sock_connect,
+       .listen         = l2cap_sock_listen,
+       .accept         = l2cap_sock_accept,
+       .getname        = l2cap_sock_getname,
+       .sendmsg        = l2cap_sock_sendmsg,
+       .recvmsg        = l2cap_sock_recvmsg,
+       .poll           = bt_sock_poll,
+       .ioctl          = bt_sock_ioctl,
+       .mmap           = sock_no_mmap,
+       .socketpair     = sock_no_socketpair,
+       .shutdown       = l2cap_sock_shutdown,
+       .setsockopt     = l2cap_sock_setsockopt,
+       .getsockopt     = l2cap_sock_getsockopt
+};
+
+static const struct net_proto_family l2cap_sock_family_ops = {
+       .family = PF_BLUETOOTH,
+       .owner  = THIS_MODULE,
+       .create = l2cap_sock_create,
+};
+
+int __init l2cap_init_sockets(void)
+{
+       int err;
+
+       err = proto_register(&l2cap_proto, 0);
+       if (err < 0)
+               return err;
+
+       err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
+       if (err < 0)
+               goto error;
+
+       BT_INFO("L2CAP socket layer initialized");
+
+       return 0;
+
+error:
+       BT_ERR("L2CAP socket registration failed");
+       proto_unregister(&l2cap_proto);
+       return err;
+}
+
+void l2cap_cleanup_sockets(void)
+{
+       if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
+               BT_ERR("L2CAP socket unregistration failed");
+
+       proto_unregister(&l2cap_proto);
+}
index f827fd9083808f9126148cd8541fd29ad552b81b..f5ef7a3374c78491705bc784126a55689a0ae4be 100644 (file)
@@ -22,7 +22,7 @@
 
 /* Bluetooth HCI Management interface */
 
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
 #define MGMT_VERSION   0
 #define MGMT_REVISION  1
 
+struct pending_cmd {
+       struct list_head list;
+       __u16 opcode;
+       int index;
+       void *cmd;
+       struct sock *sk;
+};
+
+LIST_HEAD(cmd_list);
+
 static int cmd_status(struct sock *sk, u16 cmd, u8 status)
 {
        struct sk_buff *skb;
@@ -59,29 +69,26 @@ static int cmd_status(struct sock *sk, u16 cmd, u8 status)
        return 0;
 }
 
-static int read_version(struct sock *sk)
+static int cmd_complete(struct sock *sk, u16 cmd, void *rp, size_t rp_len)
 {
        struct sk_buff *skb;
        struct mgmt_hdr *hdr;
        struct mgmt_ev_cmd_complete *ev;
-       struct mgmt_rp_read_version *rp;
 
        BT_DBG("sock %p", sk);
 
-       skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + sizeof(*rp), GFP_ATOMIC);
+       skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;
 
        hdr = (void *) skb_put(skb, sizeof(*hdr));
-       hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
-       hdr->len = cpu_to_le16(sizeof(*ev) + sizeof(*rp));
 
-       ev = (void *) skb_put(skb, sizeof(*ev));
-       put_unaligned_le16(MGMT_OP_READ_VERSION, &ev->opcode);
+       hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
+       hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
 
-       rp = (void *) skb_put(skb, sizeof(*rp));
-       rp->version = MGMT_VERSION;
-       put_unaligned_le16(MGMT_REVISION, &rp->revision);
+       ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
+       put_unaligned_le16(cmd, &ev->opcode);
+       memcpy(ev->data, rp, rp_len);
 
        if (sock_queue_rcv_skb(sk, skb) < 0)
                kfree_skb(skb);
@@ -89,16 +96,25 @@ static int read_version(struct sock *sk)
        return 0;
 }
 
+static int read_version(struct sock *sk)
+{
+       struct mgmt_rp_read_version rp;
+
+       BT_DBG("sock %p", sk);
+
+       rp.version = MGMT_VERSION;
+       put_unaligned_le16(MGMT_REVISION, &rp.revision);
+
+       return cmd_complete(sk, MGMT_OP_READ_VERSION, &rp, sizeof(rp));
+}
+
 static int read_index_list(struct sock *sk)
 {
-       struct sk_buff *skb;
-       struct mgmt_hdr *hdr;
-       struct mgmt_ev_cmd_complete *ev;
        struct mgmt_rp_read_index_list *rp;
        struct list_head *p;
-       size_t body_len;
+       size_t rp_len;
        u16 count;
-       int i;
+       int i, err;
 
        BT_DBG("sock %p", sk);
 
@@ -109,43 +125,43 @@ static int read_index_list(struct sock *sk)
                count++;
        }
 
-       body_len = sizeof(*ev) + sizeof(*rp) + (2 * count);
-       skb = alloc_skb(sizeof(*hdr) + body_len, GFP_ATOMIC);
-       if (!skb)
+       rp_len = sizeof(*rp) + (2 * count);
+       rp = kmalloc(rp_len, GFP_ATOMIC);
+       if (!rp) {
+               read_unlock(&hci_dev_list_lock);
                return -ENOMEM;
+       }
 
-       hdr = (void *) skb_put(skb, sizeof(*hdr));
-       hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
-       hdr->len = cpu_to_le16(body_len);
-
-       ev = (void *) skb_put(skb, sizeof(*ev));
-       put_unaligned_le16(MGMT_OP_READ_INDEX_LIST, &ev->opcode);
-
-       rp = (void *) skb_put(skb, sizeof(*rp) + (2 * count));
        put_unaligned_le16(count, &rp->num_controllers);
 
        i = 0;
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
+
+               hci_del_off_timer(d);
+
+               set_bit(HCI_MGMT, &d->flags);
+
+               if (test_bit(HCI_SETUP, &d->flags))
+                       continue;
+
                put_unaligned_le16(d->id, &rp->index[i++]);
                BT_DBG("Added hci%u", d->id);
        }
 
        read_unlock(&hci_dev_list_lock);
 
-       if (sock_queue_rcv_skb(sk, skb) < 0)
-               kfree_skb(skb);
+       err = cmd_complete(sk, MGMT_OP_READ_INDEX_LIST, rp, rp_len);
 
-       return 0;
+       kfree(rp);
+
+       return err;
 }
 
 static int read_controller_info(struct sock *sk, unsigned char *data, u16 len)
 {
-       struct sk_buff *skb;
-       struct mgmt_hdr *hdr;
-       struct mgmt_ev_cmd_complete *ev;
-       struct mgmt_rp_read_info *rp;
-       struct mgmt_cp_read_info *cp;
+       struct mgmt_rp_read_info rp;
+       struct mgmt_cp_read_info *cp = (void *) data;
        struct hci_dev *hdev;
        u16 dev_id;
 
@@ -154,18 +170,143 @@ static int read_controller_info(struct sock *sk, unsigned char *data, u16 len)
        if (len != 2)
                return cmd_status(sk, MGMT_OP_READ_INFO, EINVAL);
 
-       skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + sizeof(*rp), GFP_ATOMIC);
-       if (!skb)
+       dev_id = get_unaligned_le16(&cp->index);
+
+       BT_DBG("request for hci%u", dev_id);
+
+       hdev = hci_dev_get(dev_id);
+       if (!hdev)
+               return cmd_status(sk, MGMT_OP_READ_INFO, ENODEV);
+
+       hci_del_off_timer(hdev);
+
+       hci_dev_lock_bh(hdev);
+
+       set_bit(HCI_MGMT, &hdev->flags);
+
+       put_unaligned_le16(hdev->id, &rp.index);
+       rp.type = hdev->dev_type;
+
+       rp.powered = test_bit(HCI_UP, &hdev->flags);
+       rp.connectable = test_bit(HCI_PSCAN, &hdev->flags);
+       rp.discoverable = test_bit(HCI_ISCAN, &hdev->flags);
+       rp.pairable = test_bit(HCI_PAIRABLE, &hdev->flags);
+
+       if (test_bit(HCI_AUTH, &hdev->flags))
+               rp.sec_mode = 3;
+       else if (hdev->ssp_mode > 0)
+               rp.sec_mode = 4;
+       else
+               rp.sec_mode = 2;
+
+       bacpy(&rp.bdaddr, &hdev->bdaddr);
+       memcpy(rp.features, hdev->features, 8);
+       memcpy(rp.dev_class, hdev->dev_class, 3);
+       put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
+       rp.hci_ver = hdev->hci_ver;
+       put_unaligned_le16(hdev->hci_rev, &rp.hci_rev);
+
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return cmd_complete(sk, MGMT_OP_READ_INFO, &rp, sizeof(rp));
+}
+
+static void mgmt_pending_free(struct pending_cmd *cmd)
+{
+       sock_put(cmd->sk);
+       kfree(cmd->cmd);
+       kfree(cmd);
+}
+
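+/* Track a command so its reply can be sent when the HCI request completes */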
+static int mgmt_pending_add(struct sock *sk, u16 opcode, int index,
+                                                       void *data, u16 len)
+{
+       struct pending_cmd *cmd;
+
+       cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
+       if (!cmd)
                return -ENOMEM;
 
-       hdr = (void *) skb_put(skb, sizeof(*hdr));
-       hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
-       hdr->len = cpu_to_le16(sizeof(*ev) + sizeof(*rp));
+       cmd->opcode = opcode;
+       cmd->index = index;
 
-       ev = (void *) skb_put(skb, sizeof(*ev));
-       put_unaligned_le16(MGMT_OP_READ_INFO, &ev->opcode);
+       cmd->cmd = kmalloc(len, GFP_ATOMIC);
+       if (!cmd->cmd) {
+               kfree(cmd);
+               return -ENOMEM;
+       }
+
+       memcpy(cmd->cmd, data, len);
+
+       cmd->sk = sk;
+       sock_hold(sk);
+
+       list_add(&cmd->list, &cmd_list);
+
+       return 0;
+}
+
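+/* Call cb for each pending command matching opcode (and index if >= 0) */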
+static void mgmt_pending_foreach(u16 opcode, int index,
+                               void (*cb)(struct pending_cmd *cmd, void *data),
+                               void *data)
+{
+       struct list_head *p, *n;
+
+       list_for_each_safe(p, n, &cmd_list) {
+               struct pending_cmd *cmd;
+
+               cmd = list_entry(p, struct pending_cmd, list);
+
+               if (cmd->opcode != opcode)
+                       continue;
+
+               if (index >= 0 && cmd->index != index)
+                       continue;
+
+               cb(cmd, data);
+       }
+}
+
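+/* Return the first pending command matching opcode (and index if >= 0) */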
+static struct pending_cmd *mgmt_pending_find(u16 opcode, int index)
+{
+       struct list_head *p;
+
+       list_for_each(p, &cmd_list) {
+               struct pending_cmd *cmd;
+
+               cmd = list_entry(p, struct pending_cmd, list);
+
+               if (cmd->opcode != opcode)
+                       continue;
+
+               if (index >= 0 && cmd->index != index)
+                       continue;
+
+               return cmd;
+       }
 
-       rp = (void *) skb_put(skb, sizeof(*rp));
+       return NULL;
+}
+
+static void mgmt_pending_remove(u16 opcode, int index)
+{
+       struct pending_cmd *cmd;
+
+       cmd = mgmt_pending_find(opcode, index);
+       if (cmd == NULL)
+               return;
+
+       list_del(&cmd->list);
+       mgmt_pending_free(cmd);
+}
+
+static int set_powered(struct sock *sk, unsigned char *data, u16 len)
+{
+       struct mgmt_mode *cp;
+       struct hci_dev *hdev;
+       u16 dev_id;
+       int ret, up;
 
        cp = (void *) data;
        dev_id = get_unaligned_le16(&cp->index);
@@ -173,100 +314,151 @@ static int read_controller_info(struct sock *sk, unsigned char *data, u16 len)
        BT_DBG("request for hci%u", dev_id);
 
        hdev = hci_dev_get(dev_id);
-       if (!hdev) {
-               kfree_skb(skb);
-               return cmd_status(sk, MGMT_OP_READ_INFO, ENODEV);
-       }
+       if (!hdev)
+               return cmd_status(sk, MGMT_OP_SET_POWERED, ENODEV);
 
        hci_dev_lock_bh(hdev);
 
-       put_unaligned_le16(hdev->id, &rp->index);
-       rp->type = hdev->dev_type;
+       up = test_bit(HCI_UP, &hdev->flags);
+       if ((cp->val && up) || (!cp->val && !up)) {
+               ret = cmd_status(sk, MGMT_OP_SET_POWERED, EALREADY);
+               goto failed;
+       }
 
-       rp->powered = test_bit(HCI_UP, &hdev->flags);
-       rp->discoverable = test_bit(HCI_ISCAN, &hdev->flags);
-       rp->pairable = test_bit(HCI_PSCAN, &hdev->flags);
+       if (mgmt_pending_find(MGMT_OP_SET_POWERED, dev_id)) {
+               ret = cmd_status(sk, MGMT_OP_SET_POWERED, EBUSY);
+               goto failed;
+       }
 
-       if (test_bit(HCI_AUTH, &hdev->flags))
-               rp->sec_mode = 3;
-       else if (hdev->ssp_mode > 0)
-               rp->sec_mode = 4;
+       ret = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, dev_id, data, len);
+       if (ret < 0)
+               goto failed;
+
+       if (cp->val)
+               queue_work(hdev->workqueue, &hdev->power_on);
        else
-               rp->sec_mode = 2;
+               queue_work(hdev->workqueue, &hdev->power_off);
 
-       bacpy(&rp->bdaddr, &hdev->bdaddr);
-       memcpy(rp->features, hdev->features, 8);
-       memcpy(rp->dev_class, hdev->dev_class, 3);
-       put_unaligned_le16(hdev->manufacturer, &rp->manufacturer);
-       rp->hci_ver = hdev->hci_ver;
-       put_unaligned_le16(hdev->hci_rev, &rp->hci_rev);
+       ret = 0;
 
+failed:
        hci_dev_unlock_bh(hdev);
        hci_dev_put(hdev);
+       return ret;
+}
 
-       if (sock_queue_rcv_skb(sk, skb) < 0)
-               kfree_skb(skb);
+static int set_discoverable(struct sock *sk, unsigned char *data, u16 len)
+{
+       struct mgmt_mode *cp;
+       struct hci_dev *hdev;
+       u16 dev_id;
+       u8 scan;
+       int err;
 
-       return 0;
+       cp = (void *) data;
+       dev_id = get_unaligned_le16(&cp->index);
+
+       BT_DBG("request for hci%u", dev_id);
+
+       hdev = hci_dev_get(dev_id);
+       if (!hdev)
+               return cmd_status(sk, MGMT_OP_SET_DISCOVERABLE, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       if (!test_bit(HCI_UP, &hdev->flags)) {
+               err = cmd_status(sk, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
+               goto failed;
+       }
+
+       if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, dev_id) ||
+                       mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, dev_id)) {
+               err = cmd_status(sk, MGMT_OP_SET_DISCOVERABLE, EBUSY);
+               goto failed;
+       }
+
+       if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) &&
+                                       test_bit(HCI_PSCAN, &hdev->flags)) {
+               err = cmd_status(sk, MGMT_OP_SET_DISCOVERABLE, EALREADY);
+               goto failed;
+       }
+
+       err = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, dev_id, data, len);
+       if (err < 0)
+               goto failed;
+
+       scan = SCAN_PAGE;
+
+       if (cp->val)
+               scan |= SCAN_INQUIRY;
+
+       err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+       if (err < 0)
+               mgmt_pending_remove(MGMT_OP_SET_DISCOVERABLE, dev_id);
+
+failed:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
 }
 
-int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
+static int set_connectable(struct sock *sk, unsigned char *data, u16 len)
 {
-       unsigned char *buf;
-       struct mgmt_hdr *hdr;
-       u16 opcode, len;
+       struct mgmt_mode *cp;
+       struct hci_dev *hdev;
+       u16 dev_id;
+       u8 scan;
        int err;
 
-       BT_DBG("got %zu bytes", msglen);
+       cp = (void *) data;
+       dev_id = get_unaligned_le16(&cp->index);
 
-       if (msglen < sizeof(*hdr))
-               return -EINVAL;
+       BT_DBG("request for hci%u", dev_id);
 
-       buf = kmalloc(msglen, GFP_ATOMIC);
-       if (!buf)
-               return -ENOMEM;
+       hdev = hci_dev_get(dev_id);
+       if (!hdev)
+               return cmd_status(sk, MGMT_OP_SET_CONNECTABLE, ENODEV);
 
-       if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
-               err = -EFAULT;
-               goto done;
-       }
+       hci_dev_lock_bh(hdev);
 
-       hdr = (struct mgmt_hdr *) buf;
-       opcode = get_unaligned_le16(&hdr->opcode);
-       len = get_unaligned_le16(&hdr->len);
+       if (!test_bit(HCI_UP, &hdev->flags)) {
+               err = cmd_status(sk, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
+               goto failed;
+       }
 
-       if (len != msglen - sizeof(*hdr)) {
-               err = -EINVAL;
-               goto done;
+       if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, dev_id) ||
+                       mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, dev_id)) {
+               err = cmd_status(sk, MGMT_OP_SET_CONNECTABLE, EBUSY);
+               goto failed;
        }
 
-       switch (opcode) {
-       case MGMT_OP_READ_VERSION:
-               err = read_version(sk);
-               break;
-       case MGMT_OP_READ_INDEX_LIST:
-               err = read_index_list(sk);
-               break;
-       case MGMT_OP_READ_INFO:
-               err = read_controller_info(sk, buf + sizeof(*hdr), len);
-               break;
-       default:
-               BT_DBG("Unknown op %u", opcode);
-               err = cmd_status(sk, opcode, 0x01);
-               break;
+       if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
+               err = cmd_status(sk, MGMT_OP_SET_CONNECTABLE, EALREADY);
+               goto failed;
        }
 
+       err = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, dev_id, data, len);
        if (err < 0)
-               goto done;
+               goto failed;
 
-       err = msglen;
+       if (cp->val)
+               scan = SCAN_PAGE;
+       else
+               scan = 0;
+
+       err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+       if (err < 0)
+               mgmt_pending_remove(MGMT_OP_SET_CONNECTABLE, dev_id);
+
+failed:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
 
-done:
-       kfree(buf);
        return err;
 }
 
-static int mgmt_event(u16 event, void *data, u16 data_len)
+static int mgmt_event(u16 event, void *data, u16 data_len, struct sock *skip_sk)
 {
        struct sk_buff *skb;
        struct mgmt_hdr *hdr;
@@ -283,26 +475,918 @@ static int mgmt_event(u16 event, void *data, u16 data_len)
 
        memcpy(skb_put(skb, data_len), data, data_len);
 
-       hci_send_to_sock(NULL, skb);
+       hci_send_to_sock(NULL, skb, skip_sk);
        kfree_skb(skb);
 
        return 0;
 }
 
-int mgmt_index_added(u16 index)
+static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val)
 {
-       struct mgmt_ev_index_added ev;
+       struct mgmt_mode rp;
 
-       put_unaligned_le16(index, &ev.index);
+       put_unaligned_le16(index, &rp.index);
+       rp.val = val;
 
-       return mgmt_event(MGMT_EV_INDEX_ADDED, &ev, sizeof(ev));
+       return cmd_complete(sk, opcode, &rp, sizeof(rp));
 }
 
-int mgmt_index_removed(u16 index)
+static int set_pairable(struct sock *sk, unsigned char *data, u16 len)
 {
-       struct mgmt_ev_index_added ev;
+       struct mgmt_mode *cp, ev;
+       struct hci_dev *hdev;
+       u16 dev_id;
+       int err;
 
-       put_unaligned_le16(index, &ev.index);
+       cp = (void *) data;
+       dev_id = get_unaligned_le16(&cp->index);
+
+       BT_DBG("request for hci%u", dev_id);
+
+       hdev = hci_dev_get(dev_id);
+       if (!hdev)
+               return cmd_status(sk, MGMT_OP_SET_PAIRABLE, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       if (cp->val)
+               set_bit(HCI_PAIRABLE, &hdev->flags);
+       else
+               clear_bit(HCI_PAIRABLE, &hdev->flags);
+
+       err = send_mode_rsp(sk, MGMT_OP_SET_PAIRABLE, dev_id, cp->val);
+       if (err < 0)
+               goto failed;
+
+       put_unaligned_le16(dev_id, &ev.index);
+       ev.val = cp->val;
+
+       err = mgmt_event(MGMT_EV_PAIRABLE, &ev, sizeof(ev), sk);
+
+failed:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
+}
+
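+/* OR together the service class hints of all registered UUIDs */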
+static u8 get_service_classes(struct hci_dev *hdev)
+{
+       struct list_head *p;
+       u8 val = 0;
+
+       list_for_each(p, &hdev->uuids) {
+               struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
+
+               val |= uuid->svc_hint;
+       }
+
+       return val;
+}
+
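+/* Recompute the Class of Device and write it out if it changed */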
+static int update_class(struct hci_dev *hdev)
+{
+       u8 cod[3];
+
+       BT_DBG("%s", hdev->name);
+
+       if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
+               return 0;
+
+       cod[0] = hdev->minor_class;
+       cod[1] = hdev->major_class;
+       cod[2] = get_service_classes(hdev);
+
+       if (memcmp(cod, hdev->dev_class, 3) == 0)
+               return 0;
+
+       return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
+}
+
+static int add_uuid(struct sock *sk, unsigned char *data, u16 len)
+{
+       struct mgmt_cp_add_uuid *cp;
+       struct hci_dev *hdev;
+       struct bt_uuid *uuid;
+       u16 dev_id;
+       int err;
+
+       cp = (void *) data;
+       dev_id = get_unaligned_le16(&cp->index);
+
+       BT_DBG("request for hci%u", dev_id);
+
+       hdev = hci_dev_get(dev_id);
+       if (!hdev)
+               return cmd_status(sk, MGMT_OP_ADD_UUID, ENODEV);
 
-       return mgmt_event(MGMT_EV_INDEX_REMOVED, &ev, sizeof(ev));
+       hci_dev_lock_bh(hdev);
+
+       uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
+       if (!uuid) {
+               err = -ENOMEM;
+               goto failed;
+       }
+
+       memcpy(uuid->uuid, cp->uuid, 16);
+       uuid->svc_hint = cp->svc_hint;
+
+       list_add(&uuid->list, &hdev->uuids);
+
+       err = update_class(hdev);
+       if (err < 0)
+               goto failed;
+
+       err = cmd_complete(sk, MGMT_OP_ADD_UUID, &dev_id, sizeof(dev_id));
+
+failed:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
+}
+
+static int remove_uuid(struct sock *sk, unsigned char *data, u16 len)
+{
+       struct list_head *p, *n;
+       struct mgmt_cp_add_uuid *cp;
+       struct hci_dev *hdev;
+       u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+       u16 dev_id;
+       int err, found;
+
+       cp = (void *) data;
+       dev_id = get_unaligned_le16(&cp->index);
+
+       BT_DBG("request for hci%u", dev_id);
+
+       hdev = hci_dev_get(dev_id);
+       if (!hdev)
+               return cmd_status(sk, MGMT_OP_REMOVE_UUID, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
+               err = hci_uuids_clear(hdev);
+               goto unlock;
+       }
+
+       found = 0;
+
+       list_for_each_safe(p, n, &hdev->uuids) {
+               struct bt_uuid *match = list_entry(p, struct bt_uuid, list);
+
+               if (memcmp(match->uuid, cp->uuid, 16) != 0)
+                       continue;
+
+               list_del(&match->list);
+               found++;
+       }
+
+       if (found == 0) {
+               err = cmd_status(sk, MGMT_OP_REMOVE_UUID, ENOENT);
+               goto unlock;
+       }
+
+       err = update_class(hdev);
+       if (err < 0)
+               goto unlock;
+
+       err = cmd_complete(sk, MGMT_OP_REMOVE_UUID, &dev_id, sizeof(dev_id));
+
+unlock:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
+}
+
+static int set_dev_class(struct sock *sk, unsigned char *data, u16 len)
+{
+       struct hci_dev *hdev;
+       struct mgmt_cp_set_dev_class *cp;
+       u16 dev_id;
+       int err;
+
+       cp = (void *) data;
+       dev_id = get_unaligned_le16(&cp->index);
+
+       BT_DBG("request for hci%u", dev_id);
+
+       hdev = hci_dev_get(dev_id);
+       if (!hdev)
+               return cmd_status(sk, MGMT_OP_SET_DEV_CLASS, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       hdev->major_class = cp->major;
+       hdev->minor_class = cp->minor;
+
+       err = update_class(hdev);
+
+       if (err == 0)
+               err = cmd_complete(sk, MGMT_OP_SET_DEV_CLASS, &dev_id,
+                                                       sizeof(dev_id));
+
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
+}
+
+static int set_service_cache(struct sock *sk, unsigned char *data, u16 len)
+{
+       struct hci_dev *hdev;
+       struct mgmt_cp_set_service_cache *cp;
+       u16 dev_id;
+       int err;
+
+       cp = (void *) data;
+       dev_id = get_unaligned_le16(&cp->index);
+
+       hdev = hci_dev_get(dev_id);
+       if (!hdev)
+               return cmd_status(sk, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       BT_DBG("hci%u enable %d", dev_id, cp->enable);
+
+       if (cp->enable) {
+               set_bit(HCI_SERVICE_CACHE, &hdev->flags);
+               err = 0;
+       } else {
+               clear_bit(HCI_SERVICE_CACHE, &hdev->flags);
+               err = update_class(hdev);
+       }
+
+       if (err == 0)
+               err = cmd_complete(sk, MGMT_OP_SET_SERVICE_CACHE, &dev_id,
+                                                       sizeof(dev_id));
+
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
+}
+
+static int load_keys(struct sock *sk, unsigned char *data, u16 len)
+{
+       struct hci_dev *hdev;
+       struct mgmt_cp_load_keys *cp;
+       u16 dev_id, key_count, expected_len;
+       int i;
+
+       cp = (void *) data;
+       dev_id = get_unaligned_le16(&cp->index);
+       key_count = get_unaligned_le16(&cp->key_count);
+
+       expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
+       if (expected_len != len) {
+               BT_ERR("load_keys: expected %u bytes, got %u bytes",
+                                                       expected_len, len);
+               return -EINVAL;
+       }
+
+       hdev = hci_dev_get(dev_id);
+       if (!hdev)
+               return cmd_status(sk, MGMT_OP_LOAD_KEYS, ENODEV);
+
+       BT_DBG("hci%u debug_keys %u key_count %u", dev_id, cp->debug_keys,
+                                                               key_count);
+
+       hci_dev_lock_bh(hdev);
+
+       hci_link_keys_clear(hdev);
+
+       set_bit(HCI_LINK_KEYS, &hdev->flags);
+
+       if (cp->debug_keys)
+               set_bit(HCI_DEBUG_KEYS, &hdev->flags);
+       else
+               clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
+
+       for (i = 0; i < key_count; i++) {
+               struct mgmt_key_info *key = &cp->keys[i];
+
+               hci_add_link_key(hdev, 0, &key->bdaddr, key->val, key->type,
+                                                               key->pin_len);
+       }
+
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return 0;
+}
+
+static int remove_key(struct sock *sk, unsigned char *data, u16 len)
+{
+       struct hci_dev *hdev;
+       struct mgmt_cp_remove_key *cp;
+       struct hci_conn *conn;
+       u16 dev_id;
+       int err;
+
+       cp = (void *) data;
+       dev_id = get_unaligned_le16(&cp->index);
+
+       hdev = hci_dev_get(dev_id);
+       if (!hdev)
+               return cmd_status(sk, MGMT_OP_REMOVE_KEY, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       err = hci_remove_link_key(hdev, &cp->bdaddr);
+       if (err < 0) {
+               err = cmd_status(sk, MGMT_OP_REMOVE_KEY, -err);
+               goto unlock;
+       }
+
+       err = 0;
+
+       if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect)
+               goto unlock;
+
+       conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+       if (conn) {
+               struct hci_cp_disconnect dc;
+
+               put_unaligned_le16(conn->handle, &dc.handle);
+               dc.reason = 0x13; /* Remote User Terminated Connection */
+               err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+       }
+
+unlock:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
+}
+
+static int disconnect(struct sock *sk, unsigned char *data, u16 len)
+{
+       struct hci_dev *hdev;
+       struct mgmt_cp_disconnect *cp;
+       struct hci_cp_disconnect dc;
+       struct hci_conn *conn;
+       u16 dev_id;
+       int err;
+
+       BT_DBG("");
+
+       cp = (void *) data;
+       dev_id = get_unaligned_le16(&cp->index);
+
+       hdev = hci_dev_get(dev_id);
+       if (!hdev)
+               return cmd_status(sk, MGMT_OP_DISCONNECT, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       if (!test_bit(HCI_UP, &hdev->flags)) {
+               err = cmd_status(sk, MGMT_OP_DISCONNECT, ENETDOWN);
+               goto failed;
+       }
+
+       if (mgmt_pending_find(MGMT_OP_DISCONNECT, dev_id)) {
+               err = cmd_status(sk, MGMT_OP_DISCONNECT, EBUSY);
+               goto failed;
+       }
+
+       conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+       if (!conn) {
+               err = cmd_status(sk, MGMT_OP_DISCONNECT, ENOTCONN);
+               goto failed;
+       }
+
+       err = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, dev_id, data, len);
+       if (err < 0)
+               goto failed;
+
+       put_unaligned_le16(conn->handle, &dc.handle);
+       dc.reason = 0x13; /* Remote User Terminated Connection */
+
+       err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+       if (err < 0)
+               mgmt_pending_remove(MGMT_OP_DISCONNECT, dev_id);
+
+failed:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
+}
+
+static int get_connections(struct sock *sk, unsigned char *data, u16 len)
+{
+       struct mgmt_cp_get_connections *cp;
+       struct mgmt_rp_get_connections *rp;
+       struct hci_dev *hdev;
+       struct list_head *p;
+       size_t rp_len;
+       u16 dev_id, count;
+       int i, err;
+
+       BT_DBG("");
+
+       cp = (void *) data;
+       dev_id = get_unaligned_le16(&cp->index);
+
+       hdev = hci_dev_get(dev_id);
+       if (!hdev)
+               return cmd_status(sk, MGMT_OP_GET_CONNECTIONS, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       count = 0;
+       list_for_each(p, &hdev->conn_hash.list) {
+               count++;
+       }
+
+       rp_len = sizeof(*rp) + (count * sizeof(bdaddr_t));
+       rp = kmalloc(rp_len, GFP_ATOMIC);
+       if (!rp) {
+               err = -ENOMEM;
+               goto unlock;
+       }
+
+       put_unaligned_le16(dev_id, &rp->index);
+       put_unaligned_le16(count, &rp->conn_count);
+
+       read_lock(&hci_dev_list_lock);
+
+       i = 0;
+       list_for_each(p, &hdev->conn_hash.list) {
+               struct hci_conn *c = list_entry(p, struct hci_conn, list);
+
+               bacpy(&rp->conn[i++], &c->dst);
+       }
+
+       read_unlock(&hci_dev_list_lock);
+
+       err = cmd_complete(sk, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
+
+unlock:
+       kfree(rp);
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+       return err;
+}
+
+static int pin_code_reply(struct sock *sk, unsigned char *data, u16 len)
+{
+       struct hci_dev *hdev;
+       struct mgmt_cp_pin_code_reply *cp;
+       struct hci_cp_pin_code_reply reply;
+       u16 dev_id;
+       int err;
+
+       BT_DBG("");
+
+       cp = (void *) data;
+       dev_id = get_unaligned_le16(&cp->index);
+
+       hdev = hci_dev_get(dev_id);
+       if (!hdev)
+               return cmd_status(sk, MGMT_OP_DISCONNECT, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       if (!test_bit(HCI_UP, &hdev->flags)) {
+               err = cmd_status(sk, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
+               goto failed;
+       }
+
+       err = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, dev_id, data, len);
+       if (err < 0)
+               goto failed;
+
+       bacpy(&reply.bdaddr, &cp->bdaddr);
+       reply.pin_len = cp->pin_len;
+       memcpy(reply.pin_code, cp->pin_code, 16);
+
+       err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
+       if (err < 0)
+               mgmt_pending_remove(MGMT_OP_PIN_CODE_REPLY, dev_id);
+
+failed:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
+}
+
+static int pin_code_neg_reply(struct sock *sk, unsigned char *data, u16 len)
+{
+       struct hci_dev *hdev;
+       struct mgmt_cp_pin_code_neg_reply *cp;
+       u16 dev_id;
+       int err;
+
+       BT_DBG("");
+
+       cp = (void *) data;
+       dev_id = get_unaligned_le16(&cp->index);
+
+       hdev = hci_dev_get(dev_id);
+       if (!hdev)
+               return cmd_status(sk, MGMT_OP_PIN_CODE_NEG_REPLY, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       if (!test_bit(HCI_UP, &hdev->flags)) {
+               err = cmd_status(sk, MGMT_OP_PIN_CODE_NEG_REPLY, ENETDOWN);
+               goto failed;
+       }
+
+       err = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, dev_id,
+                                                               data, len);
+       if (err < 0)
+               goto failed;
+
+       err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(bdaddr_t),
+                                                               &cp->bdaddr);
+       if (err < 0)
+               mgmt_pending_remove(MGMT_OP_PIN_CODE_NEG_REPLY, dev_id);
+
+failed:
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return err;
+}
+
+static int set_io_capability(struct sock *sk, unsigned char *data, u16 len)
+{
+       struct hci_dev *hdev;
+       struct mgmt_cp_set_io_capability *cp;
+       u16 dev_id;
+
+       BT_DBG("");
+
+       cp = (void *) data;
+       dev_id = get_unaligned_le16(&cp->index);
+
+       hdev = hci_dev_get(dev_id);
+       if (!hdev)
+               return cmd_status(sk, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
+
+       hci_dev_lock_bh(hdev);
+
+       hdev->io_capability = cp->io_capability;
+
+       BT_DBG("%s IO capability set to 0x%02x", hdev->name,
+                                               hdev->io_capability);
+
+       hci_dev_unlock_bh(hdev);
+       hci_dev_put(hdev);
+
+       return cmd_complete(sk, MGMT_OP_SET_IO_CAPABILITY,
+                                               &dev_id, sizeof(dev_id));
+}
+
+int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
+{
+       unsigned char *buf;
+       struct mgmt_hdr *hdr;
+       u16 opcode, len;
+       int err;
+
+       BT_DBG("got %zu bytes", msglen);
+
+       if (msglen < sizeof(*hdr))
+               return -EINVAL;
+
+       buf = kmalloc(msglen, GFP_ATOMIC);
+       if (!buf)
+               return -ENOMEM;
+
+       if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
+               err = -EFAULT;
+               goto done;
+       }
+
+       hdr = (struct mgmt_hdr *) buf;
+       opcode = get_unaligned_le16(&hdr->opcode);
+       len = get_unaligned_le16(&hdr->len);
+
+       if (len != msglen - sizeof(*hdr)) {
+               err = -EINVAL;
+               goto done;
+       }
+
+       switch (opcode) {
+       case MGMT_OP_READ_VERSION:
+               err = read_version(sk);
+               break;
+       case MGMT_OP_READ_INDEX_LIST:
+               err = read_index_list(sk);
+               break;
+       case MGMT_OP_READ_INFO:
+               err = read_controller_info(sk, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_SET_POWERED:
+               err = set_powered(sk, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_SET_DISCOVERABLE:
+               err = set_discoverable(sk, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_SET_CONNECTABLE:
+               err = set_connectable(sk, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_SET_PAIRABLE:
+               err = set_pairable(sk, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_ADD_UUID:
+               err = add_uuid(sk, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_REMOVE_UUID:
+               err = remove_uuid(sk, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_SET_DEV_CLASS:
+               err = set_dev_class(sk, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_SET_SERVICE_CACHE:
+               err = set_service_cache(sk, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_LOAD_KEYS:
+               err = load_keys(sk, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_REMOVE_KEY:
+               err = remove_key(sk, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_DISCONNECT:
+               err = disconnect(sk, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_GET_CONNECTIONS:
+               err = get_connections(sk, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_PIN_CODE_REPLY:
+               err = pin_code_reply(sk, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_PIN_CODE_NEG_REPLY:
+               err = pin_code_neg_reply(sk, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_SET_IO_CAPABILITY:
+               err = set_io_capability(sk, buf + sizeof(*hdr), len);
+               break;
+       default:
+               BT_DBG("Unknown op %u", opcode);
+               err = cmd_status(sk, opcode, 0x01);
+               break;
+       }
+
+       if (err < 0)
+               goto done;
+
+       err = msglen;
+
+done:
+       kfree(buf);
+       return err;
+}
+
+int mgmt_index_added(u16 index)
+{
+       struct mgmt_ev_index_added ev;
+
+       put_unaligned_le16(index, &ev.index);
+
+       return mgmt_event(MGMT_EV_INDEX_ADDED, &ev, sizeof(ev), NULL);
+}
+
+int mgmt_index_removed(u16 index)
+{
+       struct mgmt_ev_index_added ev;
+
+       put_unaligned_le16(index, &ev.index);
+
+       return mgmt_event(MGMT_EV_INDEX_REMOVED, &ev, sizeof(ev), NULL);
+}
+
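+/* Mode value to confirm and the requester's socket (skipped on broadcast) */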
+struct cmd_lookup {
+       u8 val;
+       struct sock *sk;
+};
+
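+/* Complete a pending mode command whose requested value matches the new state */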
+static void mode_rsp(struct pending_cmd *cmd, void *data)
+{
+       struct mgmt_mode *cp = cmd->cmd;
+       struct cmd_lookup *match = data;
+
+       if (cp->val != match->val)
+               return;
+
+       send_mode_rsp(cmd->sk, cmd->opcode, cmd->index, cp->val);
+
+       list_del(&cmd->list);
+
+       if (match->sk == NULL) {
+               match->sk = cmd->sk;
+               sock_hold(match->sk);
+       }
+
+       mgmt_pending_free(cmd);
+}
+
+int mgmt_powered(u16 index, u8 powered)
+{
+       struct mgmt_mode ev;
+       struct cmd_lookup match = { powered, NULL };
+       int ret;
+
+       mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match);
+
+       put_unaligned_le16(index, &ev.index);
+       ev.val = powered;
+
+       ret = mgmt_event(MGMT_EV_POWERED, &ev, sizeof(ev), match.sk);
+
+       if (match.sk)
+               sock_put(match.sk);
+
+       return ret;
+}
+
+int mgmt_discoverable(u16 index, u8 discoverable)
+{
+       struct mgmt_mode ev;
+       struct cmd_lookup match = { discoverable, NULL };
+       int ret;
+
+       mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index,
+                                                       mode_rsp, &match);
+
+       put_unaligned_le16(index, &ev.index);
+       ev.val = discoverable;
+
+       ret = mgmt_event(MGMT_EV_DISCOVERABLE, &ev, sizeof(ev), match.sk);
+
+       if (match.sk)
+               sock_put(match.sk);
+
+       return ret;
+}
+
+int mgmt_connectable(u16 index, u8 connectable)
+{
+       struct mgmt_mode ev;
+       struct cmd_lookup match = { connectable, NULL };
+       int ret;
+
+       mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match);
+
+       put_unaligned_le16(index, &ev.index);
+       ev.val = connectable;
+
+       ret = mgmt_event(MGMT_EV_CONNECTABLE, &ev, sizeof(ev), match.sk);
+
+       if (match.sk)
+               sock_put(match.sk);
+
+       return ret;
+}
+
+int mgmt_new_key(u16 index, struct link_key *key, u8 old_key_type)
+{
+       struct mgmt_ev_new_key ev;
+
+       memset(&ev, 0, sizeof(ev));
+
+       put_unaligned_le16(index, &ev.index);
+
+       bacpy(&ev.key.bdaddr, &key->bdaddr);
+       ev.key.type = key->type;
+       memcpy(ev.key.val, key->val, 16);
+       ev.key.pin_len = key->pin_len;
+       ev.old_key_type = old_key_type;
+
+       return mgmt_event(MGMT_EV_NEW_KEY, &ev, sizeof(ev), NULL);
+}
+
+int mgmt_connected(u16 index, bdaddr_t *bdaddr)
+{
+       struct mgmt_ev_connected ev;
+
+       put_unaligned_le16(index, &ev.index);
+       bacpy(&ev.bdaddr, bdaddr);
+
+       return mgmt_event(MGMT_EV_CONNECTED, &ev, sizeof(ev), NULL);
+}
+
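+/* Complete a pending disconnect and return its socket so the event skips it */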
+static void disconnect_rsp(struct pending_cmd *cmd, void *data)
+{
+       struct mgmt_cp_disconnect *cp = cmd->cmd;
+       struct sock **sk = data;
+       struct mgmt_rp_disconnect rp;
+
+       put_unaligned_le16(cmd->index, &rp.index);
+       bacpy(&rp.bdaddr, &cp->bdaddr);
+
+       cmd_complete(cmd->sk, MGMT_OP_DISCONNECT, &rp, sizeof(rp));
+
+       *sk = cmd->sk;
+       sock_hold(*sk);
+
+       list_del(&cmd->list);
+       mgmt_pending_free(cmd);
+}
+
+int mgmt_disconnected(u16 index, bdaddr_t *bdaddr)
+{
+       struct mgmt_ev_disconnected ev;
+       struct sock *sk = NULL;
+       int err;
+
+       mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk);
+
+       put_unaligned_le16(index, &ev.index);
+       bacpy(&ev.bdaddr, bdaddr);
+
+       err = mgmt_event(MGMT_EV_DISCONNECTED, &ev, sizeof(ev), sk);
+
+       if (sk)
+               sock_put(sk);
+
+       return err;
+}
+
+int mgmt_disconnect_failed(u16 index)
+{
+       struct pending_cmd *cmd;
+       int err;
+
+       cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, index);
+       if (!cmd)
+               return -ENOENT;
+
+       err = cmd_status(cmd->sk, MGMT_OP_DISCONNECT, EIO);
+
+       list_del(&cmd->list);
+       mgmt_pending_free(cmd);
+
+       return err;
+}
+
+int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status)
+{
+       struct mgmt_ev_connect_failed ev;
+
+       put_unaligned_le16(index, &ev.index);
+       bacpy(&ev.bdaddr, bdaddr);
+       ev.status = status;
+
+       return mgmt_event(MGMT_EV_CONNECT_FAILED, &ev, sizeof(ev), NULL);
+}
+
+int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr)
+{
+       struct mgmt_ev_pin_code_request ev;
+
+       put_unaligned_le16(index, &ev.index);
+       bacpy(&ev.bdaddr, bdaddr);
+
+       return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, &ev, sizeof(ev), NULL);
+}
+
+int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+{
+       struct pending_cmd *cmd;
+       int err;
+
+       cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index);
+       if (!cmd)
+               return -ENOENT;
+
+       if (status != 0)
+               err = cmd_status(cmd->sk, MGMT_OP_PIN_CODE_REPLY, status);
+       else
+               err = cmd_complete(cmd->sk, MGMT_OP_PIN_CODE_REPLY,
+                                               bdaddr, sizeof(*bdaddr));
+
+       list_del(&cmd->list);
+       mgmt_pending_free(cmd);
+
+       return err;
+}
+
+int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+{
+       struct pending_cmd *cmd;
+       int err;
+
+       cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index);
+       if (!cmd)
+               return -ENOENT;
+
+       if (status != 0)
+               err = cmd_status(cmd->sk, MGMT_OP_PIN_CODE_NEG_REPLY, status);
+       else
+               err = cmd_complete(cmd->sk, MGMT_OP_PIN_CODE_NEG_REPLY,
+                                               bdaddr, sizeof(*bdaddr));
+
+       list_del(&cmd->list);
+       mgmt_pending_free(cmd);
+
+       return err;
 }
index 6b83776534fb45e29946f9666cb7a23654d4a431..c9973932456f74e68e4dfd8b7f20199a65c9bd4e 100644 (file)
@@ -2154,8 +2154,6 @@ static int __init rfcomm_init(void)
 {
        int err;
 
-       l2cap_load();
-
        hci_register_cb(&rfcomm_cb);
 
        rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd");
index 960c6d1637da0d3ee23cc998e5890b84123f76c9..c9348ddda877cdfb9ba182cbfd9889572969ff91 100644 (file)
@@ -50,8 +50,6 @@
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/sco.h>
 
-#define VERSION "0.6"
-
 static int disable_esco;
 
 static const struct proto_ops sco_sock_ops;
@@ -703,6 +701,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user
                        break;
                }
 
+               memset(&cinfo, 0, sizeof(cinfo));
                cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
                memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
 
@@ -1023,7 +1022,7 @@ static struct hci_proto sco_hci_proto = {
        .recv_scodata   = sco_recv_scodata
 };
 
-static int __init sco_init(void)
+int __init sco_init(void)
 {
        int err;
 
@@ -1051,7 +1050,6 @@ static int __init sco_init(void)
                        BT_ERR("Failed to create SCO debug file");
        }
 
-       BT_INFO("SCO (Voice Link) ver %s", VERSION);
        BT_INFO("SCO socket layer initialized");
 
        return 0;
@@ -1061,7 +1059,7 @@ error:
        return err;
 }
 
-static void __exit sco_exit(void)
+void __exit sco_exit(void)
 {
        debugfs_remove(sco_debugfs);
 
@@ -1074,14 +1072,5 @@ static void __exit sco_exit(void)
        proto_unregister(&sco_proto);
 }
 
-module_init(sco_init);
-module_exit(sco_exit);
-
 module_param(disable_esco, bool, 0644);
 MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation");
-
-MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
-MODULE_DESCRIPTION("Bluetooth SCO ver " VERSION);
-MODULE_VERSION(VERSION);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("bt-proto-2");
index 556443566e9c417bbb07da73d0cafd750d11de73..1461b19efd38255292f695c496ad5258222be028 100644 (file)
@@ -297,6 +297,21 @@ void br_netpoll_disable(struct net_bridge_port *p)
 
 #endif
 
+static int br_add_slave(struct net_device *dev, struct net_device *slave_dev)
+{
+       struct net_bridge *br = netdev_priv(dev);
+
+       return br_add_if(br, slave_dev);
+}
+
+static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
+{
+       struct net_bridge *br = netdev_priv(dev);
+
+       return br_del_if(br, slave_dev);
+}
+
 static const struct ethtool_ops br_ethtool_ops = {
        .get_drvinfo    = br_getinfo,
        .get_link       = ethtool_op_get_link,
@@ -326,6 +341,8 @@ static const struct net_device_ops br_netdev_ops = {
        .ndo_netpoll_cleanup     = br_netpoll_cleanup,
        .ndo_poll_controller     = br_poll_controller,
 #endif
+       .ndo_add_slave           = br_add_slave,
+       .ndo_del_slave           = br_del_slave,
 };
 
 static void br_dev_free(struct net_device *dev)
index d9d1e2bac1d6efc826ed3ef826a91cd06bba78ca..dce8f0009a12dc75d3e51327216186c430b3c27d 100644 (file)
@@ -148,6 +148,8 @@ static void del_nbp(struct net_bridge_port *p)
 
        netdev_rx_handler_unregister(dev);
 
+       netdev_set_master(dev, NULL);
+
        br_multicast_del_port(p);
 
        kobject_uevent(&p->kobj, KOBJ_REMOVE);
@@ -365,7 +367,7 @@ int br_min_mtu(const struct net_bridge *br)
 void br_features_recompute(struct net_bridge *br)
 {
        struct net_bridge_port *p;
-       unsigned long features, mask;
+       u32 features, mask;
 
        features = mask = br->feature_mask;
        if (list_empty(&br->port_list))
@@ -379,7 +381,7 @@ void br_features_recompute(struct net_bridge *br)
        }
 
 done:
-       br->dev->features = netdev_fix_features(features, NULL);
+       br->dev->features = netdev_fix_features(br->dev, features);
 }
 
 /* called with RTNL */
@@ -429,10 +431,14 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
        if (br_netpoll_info(br) && ((err = br_netpoll_enable(p))))
                goto err3;
 
-       err = netdev_rx_handler_register(dev, br_handle_frame, p);
+       err = netdev_set_master(dev, br->dev);
        if (err)
                goto err3;
 
+       err = netdev_rx_handler_register(dev, br_handle_frame, p);
+       if (err)
+               goto err4;
+
        dev->priv_flags |= IFF_BRIDGE_PORT;
 
        dev_disable_lro(dev);
@@ -455,6 +461,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
        kobject_uevent(&p->kobj, KOBJ_ADD);
 
        return 0;
+
+err4:
+       netdev_set_master(dev, NULL);
 err3:
        sysfs_remove_link(br->ifobj, p->dev->name);
 err2:
index 4b5b66d07bba7d43a521d2f450386ceea1bf2ca9..45b57b173f70ff2ca1c6b37cabc4e0ef6681e13f 100644 (file)
@@ -428,14 +428,15 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
                        if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
                                goto free_skb;
 
-                       if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
+                       rt = ip_route_output_key(dev_net(dev), &fl);
+                       if (!IS_ERR(rt)) {
                                /* - Bridged-and-DNAT'ed traffic doesn't
                                 *   require ip_forwarding. */
-                               if (((struct dst_entry *)rt)->dev == dev) {
-                                       skb_dst_set(skb, (struct dst_entry *)rt);
+                               if (rt->dst.dev == dev) {
+                                       skb_dst_set(skb, &rt->dst);
                                        goto bridged_dnat;
                                }
-                               dst_release((struct dst_entry *)rt);
+                               ip_rt_put(rt);
                        }
 free_skb:
                        kfree_skb(skb);
index 4e1b620b6be64be2b063003c08f9df43820cb2a1..f7afc364d77708de77d650ca1be700f898a52425 100644 (file)
@@ -182,7 +182,7 @@ struct net_bridge
        struct br_cpu_netstats __percpu *stats;
        spinlock_t                      hash_lock;
        struct hlist_head               hash[BR_HASH_SIZE];
-       unsigned long                   feature_mask;
+       u32                             feature_mask;
 #ifdef CONFIG_BRIDGE_NETFILTER
        struct rtable                   fake_rtable;
        bool                            nf_call_iptables;
index 50a46afc2bcc28c2714c8b0f9cdde95432c569fd..2ed0056a39a88afb27beac4fc0e57c3c00bae347 100644 (file)
 #include <linux/netfilter_bridge/ebtables.h>
 #include <linux/netfilter_bridge/ebt_ip6.h>
 
-struct tcpudphdr {
-       __be16 src;
-       __be16 dst;
+union pkthdr {
+       struct {
+               __be16 src;
+               __be16 dst;
+       } tcpudphdr;
+       struct {
+               u8 type;
+               u8 code;
+       } icmphdr;
 };
 
 static bool
@@ -33,8 +39,8 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
        const struct ebt_ip6_info *info = par->matchinfo;
        const struct ipv6hdr *ih6;
        struct ipv6hdr _ip6h;
-       const struct tcpudphdr *pptr;
-       struct tcpudphdr _ports;
+       const union pkthdr *pptr;
+       union pkthdr _pkthdr;
 
        ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
        if (ih6 == NULL)
@@ -56,26 +62,34 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
                        return false;
                if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO))
                        return false;
-               if (!(info->bitmask & EBT_IP6_DPORT) &&
-                   !(info->bitmask & EBT_IP6_SPORT))
+               if (!(info->bitmask & ( EBT_IP6_DPORT |
+                                       EBT_IP6_SPORT | EBT_IP6_ICMP6)))
                        return true;
-               pptr = skb_header_pointer(skb, offset_ph, sizeof(_ports),
-                                         &_ports);
+
+               /* min icmpv6 header size is 4, so sizeof(_pkthdr) is ok. */
+               pptr = skb_header_pointer(skb, offset_ph, sizeof(_pkthdr),
+                                         &_pkthdr);
                if (pptr == NULL)
                        return false;
                if (info->bitmask & EBT_IP6_DPORT) {
-                       u32 dst = ntohs(pptr->dst);
+                       u16 dst = ntohs(pptr->tcpudphdr.dst);
                        if (FWINV(dst < info->dport[0] ||
                                  dst > info->dport[1], EBT_IP6_DPORT))
                                return false;
                }
                if (info->bitmask & EBT_IP6_SPORT) {
-                       u32 src = ntohs(pptr->src);
+                       u16 src = ntohs(pptr->tcpudphdr.src);
                        if (FWINV(src < info->sport[0] ||
                                  src > info->sport[1], EBT_IP6_SPORT))
                        return false;
                }
-               return true;
+               if ((info->bitmask & EBT_IP6_ICMP6) &&
+                    FWINV(pptr->icmphdr.type < info->icmpv6_type[0] ||
+                          pptr->icmphdr.type > info->icmpv6_type[1] ||
+                          pptr->icmphdr.code < info->icmpv6_code[0] ||
+                          pptr->icmphdr.code > info->icmpv6_code[1],
+                                                       EBT_IP6_ICMP6))
+                       return false;
        }
        return true;
 }
@@ -103,6 +117,14 @@ static int ebt_ip6_mt_check(const struct xt_mtchk_param *par)
                return -EINVAL;
        if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1])
                return -EINVAL;
+       if (info->bitmask & EBT_IP6_ICMP6) {
+               if ((info->invflags & EBT_IP6_PROTO) ||
+                    info->protocol != IPPROTO_ICMPV6)
+                       return -EINVAL;
+               if (info->icmpv6_type[0] > info->icmpv6_type[1] ||
+                   info->icmpv6_code[0] > info->icmpv6_code[1])
+                       return -EINVAL;
+       }
        return 0;
 }
 
index 16df0532d4b9a7d5ff41c3f1e1df889d204128a0..893669caa8de6c56960f804da8d2125f07823fb3 100644 (file)
@@ -1107,6 +1107,8 @@ static int do_replace(struct net *net, const void __user *user,
        if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
                return -ENOMEM;
 
+       tmp.name[sizeof(tmp.name) - 1] = 0;
+
        countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
        newinfo = vmalloc(sizeof(*newinfo) + countersize);
        if (!newinfo)
@@ -1764,6 +1766,7 @@ static int compat_table_info(const struct ebt_table_info *info,
 
        newinfo->entries_size = size;
 
+       xt_compat_init_offsets(AF_INET, info->nentries);
        return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
                                                        entries, newinfo);
 }
index c665de778b6026bdbcbea8696ca246f2647c9882..f1f98d967d8a1051ceb703f3a0aaa6bd74d1ee83 100644 (file)
 #include <asm/atomic.h>
 
 #define MAX_PHY_LAYERS 7
-#define PHY_NAME_LEN 20
 
 #define container_obj(layr) container_of(layr, struct cfcnfg, layer)
-#define RFM_FRAGMENT_SIZE 4030
 
 /* Information about CAIF physical interfaces held by Config Module in order
  * to manage physical interfaces
index d3ed264ad6c42b932d84a721b270f88d0d6b574b..27dab26ad3b84ff213497bcd9029b7af75688560 100644 (file)
@@ -18,7 +18,6 @@
 #define DGM_CMD_BIT  0x80
 #define DGM_FLOW_OFF 0x81
 #define DGM_FLOW_ON  0x80
-#define DGM_CTRL_PKT_SIZE 1
 #define DGM_MTU 1500
 
 static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt);
index 9297f7dea9d8c164d4d5652d7930866154556350..8303fe3ebf891e820439c8359518022fe8c29a54 100644 (file)
@@ -25,7 +25,6 @@ struct cfserl {
        spinlock_t sync;
        bool usestx;
 };
-#define STXLEN(layr) (layr->usestx ? 1 : 0)
 
 static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
 static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
index efad410e4c829d098fd7b02f2aaa106d4ea8f782..315c0d60136887ed6e1001a73618839f91931afd 100644 (file)
@@ -20,7 +20,7 @@
 #define UTIL_REMOTE_SHUTDOWN 0x82
 #define UTIL_FLOW_OFF 0x81
 #define UTIL_FLOW_ON  0x80
-#define UTIL_CTRL_PKT_SIZE 1
+
 static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt);
 static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt);
 
index 3b425b189a99f7dc55885ccf61fa2d951ec12ee1..c3b1dec4acf648777409bbac36ea9b659e84f03a 100644 (file)
@@ -17,7 +17,7 @@
 #define VEI_FLOW_OFF 0x81
 #define VEI_FLOW_ON  0x80
 #define VEI_SET_PIN  0x82
-#define VEI_CTRL_PKT_SIZE 1
+
 #define container_obj(layr) container_of(layr, struct cfsrvl, layer)
 
 static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt);
index 6561021d22d1fef9b58ec54ec2b394400fb7ba25..0d39032e9621c92e6397d954c11e3ea20c6ef7ea 100644 (file)
 #include <trace/events/skb.h>
 #include <linux/pci.h>
 #include <linux/inetdevice.h>
+#include <linux/cpu_rmap.h>
 
 #include "net-sysfs.h"
 
@@ -1297,7 +1298,7 @@ static int __dev_close(struct net_device *dev)
        return retval;
 }
 
-int dev_close_many(struct list_head *head)
+static int dev_close_many(struct list_head *head)
 {
        struct net_device *dev, *tmp;
        LIST_HEAD(tmp_list);
@@ -1605,6 +1606,48 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
        rcu_read_unlock();
 }
 
+/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
+ * @dev: Network device
+ * @txq: number of queues available
+ *
+ * If real_num_tx_queues is changed the tc mappings may no longer be
+ * valid. To resolve this, verify that each tc mapping is still valid
+ * and, if it is not, reset the mapping to zero. Once no priorities map
+ * to an offset/count pair, that pair is no longer used. In the worst
+ * case, TC0 itself is invalid and nothing can be done, so priority
+ * mappings are disabled altogether. It is expected that drivers will
+ * fix this mapping, if they can, before calling
+ * netif_set_real_num_tx_queues.
+ */
+static void netif_setup_tc(struct net_device *dev, unsigned int txq)
+{
+       int i;
+       struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
+
+       /* If TC0 is invalidated disable TC mapping */
+       if (tc->offset + tc->count > txq) {
+               pr_warning("Number of in use tx queues changed "
+                          "invalidating tc mappings. Priority "
+                          "traffic classification disabled!\n");
+               dev->num_tc = 0;
+               return;
+       }
+
+       /* Invalidated prio to tc mappings set to TC0 */
+       for (i = 1; i < TC_BITMASK + 1; i++) {
+               int q = netdev_get_prio_tc_map(dev, i);
+
+               tc = &dev->tc_to_txq[q];
+               if (tc->offset + tc->count > txq) {
+                       pr_warning("Number of in use tx queues "
+                                  "changed. Priority %i to tc "
+                                  "mapping %i is no longer valid "
+                                  "setting map to 0\n",
+                                  i, q);
+                       netdev_set_prio_tc_map(dev, i, 0);
+               }
+       }
+}
+
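
A minimal sketch of the driver side of this mapping, assuming the netdev_set_num_tc()/netdev_set_tc_queue() helpers that accompany netdev_set_prio_tc_map() used above; all my_* names are hypothetical:

#include <linux/netdevice.h>

/* Hypothetical driver setup: two traffic classes over eight tx queues.
 * netdev_set_num_tc() and netdev_set_tc_queue() are assumed helpers;
 * netdev_set_prio_tc_map() is the mapping netif_setup_tc() checks.
 */
static void my_setup_tc(struct net_device *dev)
{
	u8 prio;

	netdev_set_num_tc(dev, 2);
	netdev_set_tc_queue(dev, 0, 4, 0);	/* TC0: queues 0-3 */
	netdev_set_tc_queue(dev, 1, 4, 4);	/* TC1: queues 4-7 */

	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, prio < 4 ? 0 : 1);
}
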
 /*
  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
  * greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
@@ -1616,7 +1659,8 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
        if (txq < 1 || txq > dev->num_tx_queues)
                return -EINVAL;
 
-       if (dev->reg_state == NETREG_REGISTERED) {
+       if (dev->reg_state == NETREG_REGISTERED ||
+           dev->reg_state == NETREG_UNREGISTERING) {
                ASSERT_RTNL();
 
                rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
@@ -1624,6 +1668,9 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
                if (rc)
                        return rc;
 
+               if (dev->num_tc)
+                       netif_setup_tc(dev, txq);
+
                if (txq < dev->real_num_tx_queues)
                        qdisc_reset_all_tx_gt(dev, txq);
        }
@@ -1823,7 +1870,7 @@ EXPORT_SYMBOL(skb_checksum_help);
  *     It may return NULL if the skb requires no segmentation.  This is
  *     only possible when GSO is used for verifying header integrity.
  */
-struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
+struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
 {
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
        struct packet_type *ptype;
@@ -2011,7 +2058,7 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
                 protocol == htons(ETH_P_FCOE)));
 }
 
-static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
+static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
 {
        if (!can_checksum_protocol(features, protocol)) {
                features &= ~NETIF_F_ALL_CSUM;
@@ -2023,10 +2070,10 @@ static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features
        return features;
 }
 
-int netif_skb_features(struct sk_buff *skb)
+u32 netif_skb_features(struct sk_buff *skb)
 {
        __be16 protocol = skb->protocol;
-       int features = skb->dev->features;
+       u32 features = skb->dev->features;
 
        if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
@@ -2071,7 +2118,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
        int rc = NETDEV_TX_OK;
 
        if (likely(!skb->next)) {
-               int features;
+               u32 features;
 
                /*
                 * If device doesn't need skb->dst, release it right now while
@@ -2173,6 +2220,8 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
                  unsigned int num_tx_queues)
 {
        u32 hash;
+       u16 qoffset = 0;
+       u16 qcount = num_tx_queues;
 
        if (skb_rx_queue_recorded(skb)) {
                hash = skb_get_rx_queue(skb);
@@ -2181,13 +2230,19 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
                return hash;
        }
 
+       if (dev->num_tc) {
+               u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+               qoffset = dev->tc_to_txq[tc].offset;
+               qcount = dev->tc_to_txq[tc].count;
+       }
+
        if (skb->sk && skb->sk->sk_hash)
                hash = skb->sk->sk_hash;
        else
                hash = (__force u16) skb->protocol ^ skb->rxhash;
        hash = jhash_1word(hash, hashrnd);
 
-       return (u16) (((u64) hash * num_tx_queues) >> 32);
+       return (u16) (((u64) hash * qcount) >> 32) + qoffset;
 }
 EXPORT_SYMBOL(__skb_tx_hash);
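
The return expression above maps a 32-bit hash into [qoffset, qoffset + qcount) with a fixed-point multiply instead of a modulo. A standalone illustration of the same arithmetic (plain user-space C, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Same scaling as __skb_tx_hash(): hash/2^32 is treated as a fraction
 * and multiplied by the number of queues in the selected traffic class.
 */
static uint16_t pick_queue(uint32_t hash, uint16_t qoffset, uint16_t qcount)
{
	return (uint16_t)(((uint64_t)hash * qcount) >> 32) + qoffset;
}

int main(void)
{
	/* e.g. a traffic class of 4 queues starting at queue 8 */
	printf("%u\n", pick_queue(0x80000000u, 8, 4));	/* prints 10 */
	return 0;
}
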
 
@@ -2284,15 +2339,18 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                                 struct netdev_queue *txq)
 {
        spinlock_t *root_lock = qdisc_lock(q);
-       bool contended = qdisc_is_running(q);
+       bool contended;
        int rc;
 
+       qdisc_skb_cb(skb)->pkt_len = skb->len;
+       qdisc_calculate_pkt_len(skb, q);
        /*
         * Heuristic to force contended enqueues to serialize on a
         * separate lock before trying to get qdisc main lock.
         * This permits __QDISC_STATE_RUNNING owner to get the lock more often
         * and dequeue packets faster.
         */
+       contended = qdisc_is_running(q);
        if (unlikely(contended))
                spin_lock(&q->busylock);
 
@@ -2310,7 +2368,6 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
                        skb_dst_force(skb);
 
-               qdisc_skb_cb(skb)->pkt_len = skb->len;
                qdisc_bstats_update(q, skb);
 
                if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
@@ -2325,7 +2382,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                rc = NET_XMIT_SUCCESS;
        } else {
                skb_dst_force(skb);
-               rc = qdisc_enqueue_root(skb, q);
+               rc = q->enqueue(skb, q) & NET_XMIT_MASK;
                if (qdisc_run_begin(q)) {
                        if (unlikely(contended)) {
                                spin_unlock(&q->busylock);
@@ -2544,6 +2601,54 @@ EXPORT_SYMBOL(__skb_get_rxhash);
 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
 
+static struct rps_dev_flow *
+set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+           struct rps_dev_flow *rflow, u16 next_cpu)
+{
+       u16 tcpu;
+
+       tcpu = rflow->cpu = next_cpu;
+       if (tcpu != RPS_NO_CPU) {
+#ifdef CONFIG_RFS_ACCEL
+               struct netdev_rx_queue *rxqueue;
+               struct rps_dev_flow_table *flow_table;
+               struct rps_dev_flow *old_rflow;
+               u32 flow_id;
+               u16 rxq_index;
+               int rc;
+
+               /* Should we steer this flow to a different hardware queue? */
+               if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
+                   !(dev->features & NETIF_F_NTUPLE))
+                       goto out;
+               rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
+               if (rxq_index == skb_get_rx_queue(skb))
+                       goto out;
+
+               rxqueue = dev->_rx + rxq_index;
+               flow_table = rcu_dereference(rxqueue->rps_flow_table);
+               if (!flow_table)
+                       goto out;
+               flow_id = skb->rxhash & flow_table->mask;
+               rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
+                                                       rxq_index, flow_id);
+               if (rc < 0)
+                       goto out;
+               old_rflow = rflow;
+               rflow = &flow_table->flows[flow_id];
+               rflow->cpu = next_cpu;
+               rflow->filter = rc;
+               if (old_rflow->filter == rflow->filter)
+                       old_rflow->filter = RPS_NO_FILTER;
+       out:
+#endif
+               rflow->last_qtail =
+                       per_cpu(softnet_data, tcpu).input_queue_head;
+       }
+
+       return rflow;
+}
+
 /*
  * get_rps_cpu is called from netif_receive_skb and returns the target
  * CPU from the RPS map of the receiving queue for a given skb.
@@ -2615,12 +2720,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
                if (unlikely(tcpu != next_cpu) &&
                    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
                     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
-                     rflow->last_qtail)) >= 0)) {
-                       tcpu = rflow->cpu = next_cpu;
-                       if (tcpu != RPS_NO_CPU)
-                               rflow->last_qtail = per_cpu(softnet_data,
-                                   tcpu).input_queue_head;
-               }
+                     rflow->last_qtail)) >= 0))
+                       rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
+
                if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
                        *rflowp = rflow;
                        cpu = tcpu;
@@ -2641,6 +2743,46 @@ done:
        return cpu;
 }
 
+#ifdef CONFIG_RFS_ACCEL
+
+/**
+ * rps_may_expire_flow - check whether an RFS hardware filter may be removed
+ * @dev: Device on which the filter was set
+ * @rxq_index: RX queue index
+ * @flow_id: Flow ID passed to ndo_rx_flow_steer()
+ * @filter_id: Filter ID returned by ndo_rx_flow_steer()
+ *
+ * Drivers that implement ndo_rx_flow_steer() should periodically call
+ * this function for each installed filter and remove the filters for
+ * which it returns %true.
+ */
+bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
+                        u32 flow_id, u16 filter_id)
+{
+       struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
+       struct rps_dev_flow_table *flow_table;
+       struct rps_dev_flow *rflow;
+       bool expire = true;
+       int cpu;
+
+       rcu_read_lock();
+       flow_table = rcu_dereference(rxqueue->rps_flow_table);
+       if (flow_table && flow_id <= flow_table->mask) {
+               rflow = &flow_table->flows[flow_id];
+               cpu = ACCESS_ONCE(rflow->cpu);
+               if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
+                   ((int)(per_cpu(softnet_data, cpu).input_queue_head -
+                          rflow->last_qtail) <
+                    (int)(10 * flow_table->mask)))
+                       expire = false;
+       }
+       rcu_read_unlock();
+       return expire;
+}
+EXPORT_SYMBOL(rps_may_expire_flow);
+
+#endif /* CONFIG_RFS_ACCEL */
+
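
A minimal sketch of the expected caller: a driver that installed accelerated-RFS filters via ndo_rx_flow_steer() periodically scans them and drops those rps_may_expire_flow() reports as expired. The my_* structures are hypothetical; only the rps_may_expire_flow() call itself comes from the code above.

#include <linux/netdevice.h>

/* Hypothetical per-driver bookkeeping for installed hardware filters. */
struct my_filter {
	bool	installed;
	u16	rxq_index;	/* queue the filter steers to */
	u32	flow_id;	/* flow_id passed to ndo_rx_flow_steer() */
	u16	filter_id;	/* value returned by ndo_rx_flow_steer() */
};

struct my_priv {
	struct net_device *netdev;
	struct my_filter  *filters;
	unsigned int	   n_filters;
};

static void my_rfs_expire_scan(struct my_priv *priv)
{
	unsigned int i;

	for (i = 0; i < priv->n_filters; i++) {
		struct my_filter *f = &priv->filters[i];

		if (!f->installed)
			continue;
		/* true means the RPS core no longer needs this filter */
		if (rps_may_expire_flow(priv->netdev, f->rxq_index,
					f->flow_id, f->filter_id))
			f->installed = false;	/* hardware teardown omitted */
	}
}
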
 /* Called from hardirq (IPI) context */
 static void rps_trigger_softirq(void *data)
 {
@@ -2962,64 +3104,31 @@ void netdev_rx_handler_unregister(struct net_device *dev)
 }
 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
 
-static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
-                                             struct net_device *master)
-{
-       if (skb->pkt_type == PACKET_HOST) {
-               u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
-
-               memcpy(dest, master->dev_addr, ETH_ALEN);
-       }
-}
-
-/* On bonding slaves other than the currently active slave, suppress
- * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
- * ARP on active-backup slaves with arp_validate enabled.
- */
-int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
+static void vlan_on_bond_hook(struct sk_buff *skb)
 {
-       struct net_device *dev = skb->dev;
-
-       if (master->priv_flags & IFF_MASTER_ARPMON)
-               dev->last_rx = jiffies;
-
-       if ((master->priv_flags & IFF_MASTER_ALB) &&
-           (master->priv_flags & IFF_BRIDGE_PORT)) {
-               /* Do address unmangle. The local destination address
-                * will be always the one master has. Provides the right
-                * functionality in a bridge.
-                */
-               skb_bond_set_mac_by_master(skb, master);
-       }
-
-       if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
-               if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
-                   skb->protocol == __cpu_to_be16(ETH_P_ARP))
-                       return 0;
-
-               if (master->priv_flags & IFF_MASTER_ALB) {
-                       if (skb->pkt_type != PACKET_BROADCAST &&
-                           skb->pkt_type != PACKET_MULTICAST)
-                               return 0;
-               }
-               if (master->priv_flags & IFF_MASTER_8023AD &&
-                   skb->protocol == __cpu_to_be16(ETH_P_SLOW))
-                       return 0;
+       /*
+        * Make sure ARP frames received on VLAN interfaces stacked on
+        * bonding interfaces still make their way to any base bonding
+        * device that may have registered for a specific ptype.
+        */
+       if (skb->dev->priv_flags & IFF_802_1Q_VLAN &&
+           vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING &&
+           skb->protocol == htons(ETH_P_ARP)) {
+               struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 
-               return 1;
+               if (!skb2)
+                       return;
+               skb2->dev = vlan_dev_real_dev(skb->dev);
+               netif_rx(skb2);
        }
-       return 0;
 }
-EXPORT_SYMBOL(__skb_bond_should_drop);
 
 static int __netif_receive_skb(struct sk_buff *skb)
 {
        struct packet_type *ptype, *pt_prev;
        rx_handler_func_t *rx_handler;
        struct net_device *orig_dev;
-       struct net_device *master;
-       struct net_device *null_or_orig;
-       struct net_device *orig_or_bond;
+       struct net_device *null_or_dev;
        int ret = NET_RX_DROP;
        __be16 type;
 
@@ -3034,28 +3143,8 @@ static int __netif_receive_skb(struct sk_buff *skb)
 
        if (!skb->skb_iif)
                skb->skb_iif = skb->dev->ifindex;
-
-       /*
-        * bonding note: skbs received on inactive slaves should only
-        * be delivered to pkt handlers that are exact matches.  Also
-        * the deliver_no_wcard flag will be set.  If packet handlers
-        * are sensitive to duplicate packets these skbs will need to
-        * be dropped at the handler.
-        */
-       null_or_orig = NULL;
        orig_dev = skb->dev;
-       master = ACCESS_ONCE(orig_dev->master);
-       if (skb->deliver_no_wcard)
-               null_or_orig = orig_dev;
-       else if (master) {
-               if (skb_bond_should_drop(skb, master)) {
-                       skb->deliver_no_wcard = 1;
-                       null_or_orig = orig_dev; /* deliver only exact match */
-               } else
-                       skb->dev = master;
-       }
 
-       __this_cpu_inc(softnet_data.processed);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        skb->mac_len = skb->network_header - skb->mac_header;
@@ -3064,6 +3153,10 @@ static int __netif_receive_skb(struct sk_buff *skb)
 
        rcu_read_lock();
 
+another_round:
+
+       __this_cpu_inc(softnet_data.processed);
+
 #ifdef CONFIG_NET_CLS_ACT
        if (skb->tc_verd & TC_NCLS) {
                skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
@@ -3072,8 +3165,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
 #endif
 
        list_for_each_entry_rcu(ptype, &ptype_all, list) {
-               if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
-                   ptype->dev == orig_dev) {
+               if (!ptype->dev || ptype->dev == skb->dev) {
                        if (pt_prev)
                                ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = ptype;
@@ -3087,16 +3179,20 @@ static int __netif_receive_skb(struct sk_buff *skb)
 ncls:
 #endif
 
-       /* Handle special case of bridge or macvlan */
        rx_handler = rcu_dereference(skb->dev->rx_handler);
        if (rx_handler) {
+               struct net_device *prev_dev;
+
                if (pt_prev) {
                        ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = NULL;
                }
+               prev_dev = skb->dev;
                skb = rx_handler(skb);
                if (!skb)
                        goto out;
+               if (skb->dev != prev_dev)
+                       goto another_round;
        }
 
        if (vlan_tx_tag_present(skb)) {
@@ -3111,24 +3207,17 @@ ncls:
                        goto out;
        }
 
-       /*
-        * Make sure frames received on VLAN interfaces stacked on
-        * bonding interfaces still make their way to any base bonding
-        * device that may have registered for a specific ptype.  The
-        * handler may have to adjust skb->dev and orig_dev.
-        */
-       orig_or_bond = orig_dev;
-       if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
-           (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
-               orig_or_bond = vlan_dev_real_dev(skb->dev);
-       }
+       vlan_on_bond_hook(skb);
+
+       /* deliver only exact match when indicated */
+       null_or_dev = skb->deliver_no_wcard ? skb->dev : NULL;
 
        type = skb->protocol;
        list_for_each_entry_rcu(ptype,
                        &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
-               if (ptype->type == type && (ptype->dev == null_or_orig ||
-                    ptype->dev == skb->dev || ptype->dev == orig_dev ||
-                    ptype->dev == orig_or_bond)) {
+               if (ptype->type == type &&
+                   (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
+                    ptype->dev == orig_dev)) {
                        if (pt_prev)
                                ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = ptype;
@@ -3925,12 +4014,15 @@ void *dev_seq_start(struct seq_file *seq, loff_t *pos)
 
 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-       struct net_device *dev = (v == SEQ_START_TOKEN) ?
-                                 first_net_device(seq_file_net(seq)) :
-                                 next_net_device((struct net_device *)v);
+       struct net_device *dev = v;
+
+       if (v == SEQ_START_TOKEN)
+               dev = first_net_device_rcu(seq_file_net(seq));
+       else
+               dev = next_net_device_rcu(dev);
 
        ++*pos;
-       return rcu_dereference(dev);
+       return dev;
 }
 
 void dev_seq_stop(struct seq_file *seq, void *v)
@@ -4214,15 +4306,14 @@ static int __init dev_proc_init(void)
 
 
 /**
- *     netdev_set_master       -       set up master/slave pair
+ *     netdev_set_master       -       set up master pointer
  *     @slave: slave device
  *     @master: new master device
  *
  *     Changes the master device of the slave. Pass %NULL to break the
  *     bonding. The caller must hold the RTNL semaphore. On a failure
  *     a negative errno code is returned. On success the reference counts
- *     are adjusted, %RTM_NEWLINK is sent to the routing socket and the
- *     function returns zero.
+ *     are adjusted and the function returns zero.
  */
 int netdev_set_master(struct net_device *slave, struct net_device *master)
 {
@@ -4242,6 +4333,29 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
                synchronize_net();
                dev_put(old);
        }
+       return 0;
+}
+EXPORT_SYMBOL(netdev_set_master);
+
+/**
+ *     netdev_set_bond_master  -       set up bonding master/slave pair
+ *     @slave: slave device
+ *     @master: new master device
+ *
+ *     Changes the master device of the slave. Pass %NULL to break the
+ *     bonding. The caller must hold the RTNL semaphore. On a failure
+ *     a negative errno code is returned. On success %RTM_NEWLINK is sent
+ *     to the routing socket and the function returns zero.
+ */
+int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
+{
+       int err;
+
+       ASSERT_RTNL();
+
+       err = netdev_set_master(slave, master);
+       if (err)
+               return err;
        if (master)
                slave->flags |= IFF_SLAVE;
        else
@@ -4250,7 +4364,7 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
        rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
        return 0;
 }
-EXPORT_SYMBOL(netdev_set_master);
+EXPORT_SYMBOL(netdev_set_bond_master);
 
 static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
@@ -4586,6 +4700,17 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
 }
 EXPORT_SYMBOL(dev_set_mtu);
 
+/**
+ *     dev_set_group - Change group this device belongs to
+ *     @dev: device
+ *     @new_group: group this device should belong to
+ */
+void dev_set_group(struct net_device *dev, int new_group)
+{
+       dev->group = new_group;
+}
+EXPORT_SYMBOL(dev_set_group);
+
 /**
  *     dev_set_mac_address - Change Media Access Control Address
  *     @dev: device
@@ -5077,41 +5202,55 @@ static void rollback_registered(struct net_device *dev)
        list_del(&single);
 }
 
-unsigned long netdev_fix_features(unsigned long features, const char *name)
+u32 netdev_fix_features(struct net_device *dev, u32 features)
 {
+       /* Fix illegal checksum combinations */
+       if ((features & NETIF_F_HW_CSUM) &&
+           (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
+               netdev_info(dev, "mixed HW and IP checksum settings.\n");
+               features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
+       }
+
+       if ((features & NETIF_F_NO_CSUM) &&
+           (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
+               netdev_info(dev, "mixed no checksumming and other settings.\n");
+               features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
+       }
+
        /* Fix illegal SG+CSUM combinations. */
        if ((features & NETIF_F_SG) &&
            !(features & NETIF_F_ALL_CSUM)) {
-               if (name)
-                       printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
-                              "checksum feature.\n", name);
+               netdev_info(dev,
+                           "Dropping NETIF_F_SG since no checksum feature.\n");
                features &= ~NETIF_F_SG;
        }
 
        /* TSO requires that SG is present as well. */
        if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
-               if (name)
-                       printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
-                              "SG feature.\n", name);
+               netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n");
                features &= ~NETIF_F_TSO;
        }
 
+       /* Software GSO depends on SG. */
+       if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
+               netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
+               features &= ~NETIF_F_GSO;
+       }
+
+       /* UFO needs SG and checksumming */
        if (features & NETIF_F_UFO) {
                /* maybe split UFO into V4 and V6? */
                if (!((features & NETIF_F_GEN_CSUM) ||
                    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
                            == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
-                       if (name)
-                               printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
-                                      "since no checksum offload features.\n",
-                                      name);
+                       netdev_info(dev,
+                               "Dropping NETIF_F_UFO since no checksum offload features.\n");
                        features &= ~NETIF_F_UFO;
                }
 
                if (!(features & NETIF_F_SG)) {
-                       if (name)
-                               printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
-                                      "since no NETIF_F_SG feature.\n", name);
+                       netdev_info(dev,
+                               "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
                        features &= ~NETIF_F_UFO;
                }
        }
@@ -5120,6 +5259,37 @@ unsigned long netdev_fix_features(unsigned long features, const char *name)
 }
 EXPORT_SYMBOL(netdev_fix_features);
 
+void netdev_update_features(struct net_device *dev)
+{
+       u32 features;
+       int err = 0;
+
+       features = netdev_get_wanted_features(dev);
+
+       if (dev->netdev_ops->ndo_fix_features)
+               features = dev->netdev_ops->ndo_fix_features(dev, features);
+
+       /* driver might be less strict about feature dependencies */
+       features = netdev_fix_features(dev, features);
+
+       if (dev->features == features)
+               return;
+
+       netdev_info(dev, "Features changed: 0x%08x -> 0x%08x\n",
+               dev->features, features);
+
+       if (dev->netdev_ops->ndo_set_features)
+               err = dev->netdev_ops->ndo_set_features(dev, features);
+
+       if (!err)
+               dev->features = features;
+       else if (err < 0)
+               netdev_err(dev,
+                       "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
+                       err, features, dev->features);
+}
+EXPORT_SYMBOL(netdev_update_features);
+
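
A minimal sketch of the driver side this helper expects, assuming a device whose TSO depends on scatter/gather and whose RX checksum offload is toggled in hardware; the my_* names are hypothetical and the constraint is only illustrative:

#include <linux/netdevice.h>

static u32 my_fix_features(struct net_device *dev, u32 features)
{
	/* express a hardware dependency: TSO is unusable without SG */
	if (!(features & NETIF_F_SG))
		features &= ~NETIF_F_TSO;

	return features;
}

static int my_set_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM) {
		/* reprogram the hardware RX checksum setting here */
	}

	return 0;
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_fix_features	= my_fix_features,
	.ndo_set_features	= my_set_features,
};
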
 /**
  *     netif_stacked_transfer_operstate -      transfer operstate
  *     @rootdev: the root or lower level device to transfer state from
@@ -5254,27 +5424,19 @@ int register_netdevice(struct net_device *dev)
        if (dev->iflink == -1)
                dev->iflink = dev->ifindex;
 
-       /* Fix illegal checksum combinations */
-       if ((dev->features & NETIF_F_HW_CSUM) &&
-           (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
-               printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
-                      dev->name);
-               dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
-       }
+       /* Transfer changeable features to wanted_features and enable
+        * software offloads (GSO and GRO).
+        */
+       dev->hw_features |= NETIF_F_SOFT_FEATURES;
+       dev->features |= NETIF_F_SOFT_FEATURES;
+       dev->wanted_features = dev->features & dev->hw_features;
 
-       if ((dev->features & NETIF_F_NO_CSUM) &&
-           (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
-               printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
-                      dev->name);
-               dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
+       /* Avoid warning from netdev_fix_features() for GSO without SG */
+       if (!(dev->wanted_features & NETIF_F_SG)) {
+               dev->wanted_features &= ~NETIF_F_GSO;
+               dev->features &= ~NETIF_F_GSO;
        }
 
-       dev->features = netdev_fix_features(dev->features, dev->name);
-
-       /* Enable software GSO if SG is supported. */
-       if (dev->features & NETIF_F_SG)
-               dev->features |= NETIF_F_GSO;
-
        /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
         * vlan_dev_init() will do the dev->features check, so these features
         * are enabled only if supported by underlying device.
@@ -5291,6 +5453,8 @@ int register_netdevice(struct net_device *dev)
                goto err_uninit;
        dev->reg_state = NETREG_REGISTERED;
 
+       netdev_update_features(dev);
+
        /*
         *      Default initial state at registry is that the
         *      device is present.
@@ -5695,6 +5859,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 #endif
 
        strcpy(dev->name, name);
+       dev->group = INIT_NETDEV_GROUP;
        return dev;
 
 free_all:
@@ -6009,8 +6174,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
  *     @one to the master device with current feature set @all.  Will not
  *     enable anything that is off in @mask. Returns the new feature set.
  */
-unsigned long netdev_increment_features(unsigned long all, unsigned long one,
-                                       unsigned long mask)
+u32 netdev_increment_features(u32 all, u32 one, u32 mask)
 {
        /* If device needs checksumming, downgrade to it. */
        if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
index b99c7c7ffce2b72b0089ba8c53d4de6e5b131059..91104d35de7d9d6f7d276e8ae02b833e5ddc4338 100644 (file)
@@ -164,7 +164,9 @@ int dst_discard(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dst_discard);
 
-void *dst_alloc(struct dst_ops *ops)
+const u32 dst_default_metrics[RTAX_MAX];
+
+void *dst_alloc(struct dst_ops *ops, int initial_ref)
 {
        struct dst_entry *dst;
 
@@ -175,11 +177,12 @@ void *dst_alloc(struct dst_ops *ops)
        dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
        if (!dst)
                return NULL;
-       atomic_set(&dst->__refcnt, 0);
+       atomic_set(&dst->__refcnt, initial_ref);
        dst->ops = ops;
        dst->lastuse = jiffies;
        dst->path = dst;
        dst->input = dst->output = dst_discard;
+       dst_init_metrics(dst, dst_default_metrics, true);
 #if RT_CACHE_DEBUG >= 2
        atomic_inc(&dst_total);
 #endif
@@ -282,6 +285,42 @@ void dst_release(struct dst_entry *dst)
 }
 EXPORT_SYMBOL(dst_release);
 
+u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
+{
+       u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
+
+       if (p) {
+               u32 *old_p = __DST_METRICS_PTR(old);
+               unsigned long prev, new;
+
+               memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+
+               new = (unsigned long) p;
+               prev = cmpxchg(&dst->_metrics, old, new);
+
+               if (prev != old) {
+                       kfree(p);
+                       p = __DST_METRICS_PTR(prev);
+                       if (prev & DST_METRICS_READ_ONLY)
+                               p = NULL;
+               }
+       }
+       return p;
+}
+EXPORT_SYMBOL(dst_cow_metrics_generic);
+
+/* Caller asserts that dst_metrics_read_only(dst) is false.  */
+void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
+{
+       unsigned long prev, new;
+
+       new = (unsigned long) dst_default_metrics;
+       prev = cmpxchg(&dst->_metrics, old, new);
+       if (prev == old)
+               kfree(__DST_METRICS_PTR(old));
+}
+EXPORT_SYMBOL(__dst_destroy_metrics_generic);
+
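
The cmpxchg()-based copy-on-write above can be pictured with a standalone user-space analogue (C11 atomics instead of the kernel's cmpxchg(); the METRICS_* names are local to this sketch): a metrics pointer whose low bit marks it read-only is swapped for a private writable copy.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define METRICS_READ_ONLY 0x1UL
#define METRICS_PTR(v)    ((uint32_t *)((v) & ~METRICS_READ_ONLY))
#define N_METRICS         4

static uint32_t default_metrics[N_METRICS];	/* shared, read-only template */

static uint32_t *cow_metrics(atomic_ulong *slot)
{
	unsigned long old = atomic_load(slot);
	uint32_t *copy;

	if (!(old & METRICS_READ_ONLY))
		return METRICS_PTR(old);	/* already private */

	copy = malloc(sizeof(uint32_t) * N_METRICS);
	if (!copy)
		return NULL;
	memcpy(copy, METRICS_PTR(old), sizeof(uint32_t) * N_METRICS);

	/* Install the copy; if another writer won the race, discard ours. */
	if (!atomic_compare_exchange_strong(slot, &old, (unsigned long)copy)) {
		free(copy);
		return (old & METRICS_READ_ONLY) ? NULL : METRICS_PTR(old);
	}
	return copy;
}

int main(void)
{
	atomic_ulong slot = (unsigned long)default_metrics | METRICS_READ_ONLY;
	uint32_t *m = cow_metrics(&slot);

	if (m)
		m[0] = 1500;	/* safe to modify the private copy */
	printf("metrics now at %p\n", (void *)m);
	return 0;
}
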
 /**
  * skb_dst_set_noref - sets skb dst, without a reference
  * @skb: buffer
index ff2302910b5eb936c7618c24477c018cfdcd159f..c1a71bb738da4db6c8cd62b75d36b5d091316fd8 100644 (file)
@@ -34,12 +34,6 @@ u32 ethtool_op_get_link(struct net_device *dev)
 }
 EXPORT_SYMBOL(ethtool_op_get_link);
 
-u32 ethtool_op_get_rx_csum(struct net_device *dev)
-{
-       return (dev->features & NETIF_F_ALL_CSUM) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_rx_csum);
-
 u32 ethtool_op_get_tx_csum(struct net_device *dev)
 {
        return (dev->features & NETIF_F_ALL_CSUM) != 0;
@@ -55,6 +49,7 @@ int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
 
        return 0;
 }
+EXPORT_SYMBOL(ethtool_op_set_tx_csum);
 
 int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data)
 {
@@ -171,6 +166,381 @@ EXPORT_SYMBOL(ethtool_ntuple_flush);
 
 /* Handlers for each ethtool command */
 
+#define ETHTOOL_DEV_FEATURE_WORDS      1
+
+static void ethtool_get_features_compat(struct net_device *dev,
+       struct ethtool_get_features_block *features)
+{
+       if (!dev->ethtool_ops)
+               return;
+
+       /* getting RX checksum */
+       if (dev->ethtool_ops->get_rx_csum)
+               if (dev->ethtool_ops->get_rx_csum(dev))
+                       features[0].active |= NETIF_F_RXCSUM;
+
+       /* mark legacy-changeable features */
+       if (dev->ethtool_ops->set_sg)
+               features[0].available |= NETIF_F_SG;
+       if (dev->ethtool_ops->set_tx_csum)
+               features[0].available |= NETIF_F_ALL_CSUM;
+       if (dev->ethtool_ops->set_tso)
+               features[0].available |= NETIF_F_ALL_TSO;
+       if (dev->ethtool_ops->set_rx_csum)
+               features[0].available |= NETIF_F_RXCSUM;
+       if (dev->ethtool_ops->set_flags)
+               features[0].available |= flags_dup_features;
+}
+
+static int ethtool_set_feature_compat(struct net_device *dev,
+       int (*legacy_set)(struct net_device *, u32),
+       struct ethtool_set_features_block *features, u32 mask)
+{
+       u32 do_set;
+
+       if (!legacy_set)
+               return 0;
+
+       if (!(features[0].valid & mask))
+               return 0;
+
+       features[0].valid &= ~mask;
+
+       do_set = !!(features[0].requested & mask);
+
+       if (legacy_set(dev, do_set) < 0)
+               netdev_info(dev,
+                       "Legacy feature change (%s) failed for 0x%08x\n",
+                       do_set ? "set" : "clear", mask);
+
+       return 1;
+}
+
+static int ethtool_set_features_compat(struct net_device *dev,
+       struct ethtool_set_features_block *features)
+{
+       int compat;
+
+       if (!dev->ethtool_ops)
+               return 0;
+
+       compat  = ethtool_set_feature_compat(dev, dev->ethtool_ops->set_sg,
+               features, NETIF_F_SG);
+       compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tx_csum,
+               features, NETIF_F_ALL_CSUM);
+       compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tso,
+               features, NETIF_F_ALL_TSO);
+       compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum,
+               features, NETIF_F_RXCSUM);
+       compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_flags,
+               features, flags_dup_features);
+
+       return compat;
+}
+
+static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
+{
+       struct ethtool_gfeatures cmd = {
+               .cmd = ETHTOOL_GFEATURES,
+               .size = ETHTOOL_DEV_FEATURE_WORDS,
+       };
+       struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS] = {
+               {
+                       .available = dev->hw_features,
+                       .requested = dev->wanted_features,
+                       .active = dev->features,
+                       .never_changed = NETIF_F_NEVER_CHANGE,
+               },
+       };
+       u32 __user *sizeaddr;
+       u32 copy_size;
+
+       ethtool_get_features_compat(dev, features);
+
+       sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size);
+       if (get_user(copy_size, sizeaddr))
+               return -EFAULT;
+
+       if (copy_size > ETHTOOL_DEV_FEATURE_WORDS)
+               copy_size = ETHTOOL_DEV_FEATURE_WORDS;
+
+       if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
+               return -EFAULT;
+       useraddr += sizeof(cmd);
+       if (copy_to_user(useraddr, features, copy_size * sizeof(*features)))
+               return -EFAULT;
+
+       return 0;
+}
+
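
A user-space sketch of issuing this request, assuming the struct ethtool_gfeatures / ethtool_get_features_block definitions exported through linux/ethtool.h match the kernel structures above (one feature word):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	const char *ifname = argc > 1 ? argv[1] : "eth0";
	struct ethtool_gfeatures *gf;
	struct ifreq ifr;
	int fd, ret = 1;

	/* header plus one ethtool_get_features_block, as above */
	gf = calloc(1, sizeof(*gf) + sizeof(struct ethtool_get_features_block));
	if (!gf)
		return 1;
	gf->cmd = ETHTOOL_GFEATURES;
	gf->size = 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)gf;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0) {
		printf("%s: active features 0x%08x\n",
		       ifname, gf->features[0].active);
		ret = 0;
	}

	if (fd >= 0)
		close(fd);
	free(gf);
	return ret;
}
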
+static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
+{
+       struct ethtool_sfeatures cmd;
+       struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
+       int ret = 0;
+
+       if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+               return -EFAULT;
+       useraddr += sizeof(cmd);
+
+       if (cmd.size != ETHTOOL_DEV_FEATURE_WORDS)
+               return -EINVAL;
+
+       if (copy_from_user(features, useraddr, sizeof(features)))
+               return -EFAULT;
+
+       if (features[0].valid & ~NETIF_F_ETHTOOL_BITS)
+               return -EINVAL;
+
+       if (ethtool_set_features_compat(dev, features))
+               ret |= ETHTOOL_F_COMPAT;
+
+       if (features[0].valid & ~dev->hw_features) {
+               features[0].valid &= dev->hw_features;
+               ret |= ETHTOOL_F_UNSUPPORTED;
+       }
+
+       dev->wanted_features &= ~features[0].valid;
+       dev->wanted_features |= features[0].valid & features[0].requested;
+       netdev_update_features(dev);
+
+       if ((dev->wanted_features ^ dev->features) & features[0].valid)
+               ret |= ETHTOOL_F_WISH;
+
+       return ret;
+}
+
+static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GSTRING_LEN] = {
+       /* NETIF_F_SG */              "tx-scatter-gather",
+       /* NETIF_F_IP_CSUM */         "tx-checksum-ipv4",
+       /* NETIF_F_NO_CSUM */         "tx-checksum-unneeded",
+       /* NETIF_F_HW_CSUM */         "tx-checksum-ip-generic",
+       /* NETIF_F_IPV6_CSUM */       "tx-checksum-ipv6",
+       /* NETIF_F_HIGHDMA */         "highdma",
+       /* NETIF_F_FRAGLIST */        "tx-scatter-gather-fraglist",
+       /* NETIF_F_HW_VLAN_TX */      "tx-vlan-hw-insert",
+
+       /* NETIF_F_HW_VLAN_RX */      "rx-vlan-hw-parse",
+       /* NETIF_F_HW_VLAN_FILTER */  "rx-vlan-filter",
+       /* NETIF_F_VLAN_CHALLENGED */ "vlan-challenged",
+       /* NETIF_F_GSO */             "tx-generic-segmentation",
+       /* NETIF_F_LLTX */            "tx-lockless",
+       /* NETIF_F_NETNS_LOCAL */     "netns-local",
+       /* NETIF_F_GRO */             "rx-gro",
+       /* NETIF_F_LRO */             "rx-lro",
+
+       /* NETIF_F_TSO */             "tx-tcp-segmentation",
+       /* NETIF_F_UFO */             "tx-udp-fragmentation",
+       /* NETIF_F_GSO_ROBUST */      "tx-gso-robust",
+       /* NETIF_F_TSO_ECN */         "tx-tcp-ecn-segmentation",
+       /* NETIF_F_TSO6 */            "tx-tcp6-segmentation",
+       /* NETIF_F_FSO */             "tx-fcoe-segmentation",
+       "",
+       "",
+
+       /* NETIF_F_FCOE_CRC */        "tx-checksum-fcoe-crc",
+       /* NETIF_F_SCTP_CSUM */       "tx-checksum-sctp",
+       /* NETIF_F_FCOE_MTU */        "fcoe-mtu",
+       /* NETIF_F_NTUPLE */          "rx-ntuple-filter",
+       /* NETIF_F_RXHASH */          "rx-hashing",
+       /* NETIF_F_RXCSUM */          "rx-checksum",
+       "",
+       "",
+};
+
+static int __ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+       const struct ethtool_ops *ops = dev->ethtool_ops;
+
+       if (sset == ETH_SS_FEATURES)
+               return ARRAY_SIZE(netdev_features_strings);
+
+       if (ops && ops->get_sset_count && ops->get_strings)
+               return ops->get_sset_count(dev, sset);
+       else
+               return -EOPNOTSUPP;
+}
+
+static void __ethtool_get_strings(struct net_device *dev,
+       u32 stringset, u8 *data)
+{
+       const struct ethtool_ops *ops = dev->ethtool_ops;
+
+       if (stringset == ETH_SS_FEATURES)
+               memcpy(data, netdev_features_strings,
+                       sizeof(netdev_features_strings));
+       else
+               /* ops->get_strings is valid because checked earlier */
+               ops->get_strings(dev, stringset, data);
+}
+
+static u32 ethtool_get_feature_mask(u32 eth_cmd)
+{
+       /* feature masks of legacy discrete ethtool ops */
+
+       switch (eth_cmd) {
+       case ETHTOOL_GTXCSUM:
+       case ETHTOOL_STXCSUM:
+               return NETIF_F_ALL_CSUM | NETIF_F_SCTP_CSUM;
+       case ETHTOOL_GRXCSUM:
+       case ETHTOOL_SRXCSUM:
+               return NETIF_F_RXCSUM;
+       case ETHTOOL_GSG:
+       case ETHTOOL_SSG:
+               return NETIF_F_SG;
+       case ETHTOOL_GTSO:
+       case ETHTOOL_STSO:
+               return NETIF_F_ALL_TSO;
+       case ETHTOOL_GUFO:
+       case ETHTOOL_SUFO:
+               return NETIF_F_UFO;
+       case ETHTOOL_GGSO:
+       case ETHTOOL_SGSO:
+               return NETIF_F_GSO;
+       case ETHTOOL_GGRO:
+       case ETHTOOL_SGRO:
+               return NETIF_F_GRO;
+       default:
+               BUG();
+       }
+}
+
+static void *__ethtool_get_one_feature_actor(struct net_device *dev, u32 ethcmd)
+{
+       const struct ethtool_ops *ops = dev->ethtool_ops;
+
+       if (!ops)
+               return NULL;
+
+       switch (ethcmd) {
+       case ETHTOOL_GTXCSUM:
+               return ops->get_tx_csum;
+       case ETHTOOL_GRXCSUM:
+               return ops->get_rx_csum;
+       case ETHTOOL_GSG:
+               return ops->get_sg;
+       case ETHTOOL_GTSO:
+               return ops->get_tso;
+       case ETHTOOL_GUFO:
+               return ops->get_ufo;
+       default:
+               return NULL;
+       }
+}
+
+static u32 __ethtool_get_rx_csum_oldbug(struct net_device *dev)
+{
+       return !!(dev->features & NETIF_F_ALL_CSUM);
+}
+
+static int ethtool_get_one_feature(struct net_device *dev,
+       char __user *useraddr, u32 ethcmd)
+{
+       u32 mask = ethtool_get_feature_mask(ethcmd);
+       struct ethtool_value edata = {
+               .cmd = ethcmd,
+               .data = !!(dev->features & mask),
+       };
+
+       /* compatibility with discrete get_ ops */
+       if (!(dev->hw_features & mask)) {
+               u32 (*actor)(struct net_device *);
+
+               actor = __ethtool_get_one_feature_actor(dev, ethcmd);
+
+               /* bug compatibility with old get_rx_csum */
+               if (ethcmd == ETHTOOL_GRXCSUM && !actor)
+                       actor = __ethtool_get_rx_csum_oldbug;
+
+               if (actor)
+                       edata.data = actor(dev);
+       }
+
+       if (copy_to_user(useraddr, &edata, sizeof(edata)))
+               return -EFAULT;
+       return 0;
+}
+
+static int __ethtool_set_tx_csum(struct net_device *dev, u32 data);
+static int __ethtool_set_rx_csum(struct net_device *dev, u32 data);
+static int __ethtool_set_sg(struct net_device *dev, u32 data);
+static int __ethtool_set_tso(struct net_device *dev, u32 data);
+static int __ethtool_set_ufo(struct net_device *dev, u32 data);
+
+static int ethtool_set_one_feature(struct net_device *dev,
+       void __user *useraddr, u32 ethcmd)
+{
+       struct ethtool_value edata;
+       u32 mask;
+
+       if (copy_from_user(&edata, useraddr, sizeof(edata)))
+               return -EFAULT;
+
+       mask = ethtool_get_feature_mask(ethcmd);
+       mask &= dev->hw_features;
+       if (mask) {
+               if (edata.data)
+                       dev->wanted_features |= mask;
+               else
+                       dev->wanted_features &= ~mask;
+
+               netdev_update_features(dev);
+               return 0;
+       }
+
+       /* Driver is not converted to ndo_fix_features or does not
+        * support changing this offload. In the latter case it won't
+        * have corresponding ethtool_ops field set.
+        *
+        * Following part is to be removed after all drivers advertise
+        * their changeable features in netdev->hw_features and stop
+        * using discrete offload setting ops.
+        */
+
+       switch (ethcmd) {
+       case ETHTOOL_STXCSUM:
+               return __ethtool_set_tx_csum(dev, edata.data);
+       case ETHTOOL_SRXCSUM:
+               return __ethtool_set_rx_csum(dev, edata.data);
+       case ETHTOOL_SSG:
+               return __ethtool_set_sg(dev, edata.data);
+       case ETHTOOL_STSO:
+               return __ethtool_set_tso(dev, edata.data);
+       case ETHTOOL_SUFO:
+               return __ethtool_set_ufo(dev, edata.data);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int __ethtool_set_flags(struct net_device *dev, u32 data)
+{
+       u32 changed;
+
+       if (data & ~flags_dup_features)
+               return -EINVAL;
+
+       /* legacy set_flags() op */
+       if (dev->ethtool_ops->set_flags) {
+               if (unlikely(dev->hw_features & flags_dup_features))
+                       netdev_warn(dev,
+                               "driver BUG: mixed hw_features and set_flags()\n");
+               return dev->ethtool_ops->set_flags(dev, data);
+       }
+
+       /* allow changing only bits set in hw_features */
+       changed = (data ^ dev->wanted_features) & flags_dup_features;
+       if (changed & ~dev->hw_features)
+               return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP;
+
+       dev->wanted_features =
+               (dev->wanted_features & ~changed) | data;
+
+       netdev_update_features(dev);
+
+       return 0;
+}
+
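A hedged illustration of what the comment in ethtool_set_one_feature() asks drivers to do; none of this code is in the patch, and the foo_* names are invented. A converted driver drops its discrete get/set offload ops, lists the toggleable bits in hw_features, and expresses ordering constraints (if any) through ndo_fix_features so that netdev_update_features() can resolve them:

        #include <linux/netdevice.h>

        /* hypothetical driver conversion sketch -- names are made up */
        static u32 foo_fix_features(struct net_device *dev, u32 features)
        {
                /* this imaginary hardware cannot segment without scatter-gather */
                if (!(features & NETIF_F_SG))
                        features &= ~NETIF_F_ALL_TSO;
                return features;
        }

        static void foo_init_features(struct net_device *dev)
        {
                /* everything listed here becomes changeable via ETHTOOL_SFEATURES
                 * and the legacy ETHTOOL_S* commands handled above */
                dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_ALL_TSO;
                dev->features   |= dev->hw_features;    /* enabled by default */
        }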
 static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
 {
        struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
@@ -251,14 +621,10 @@ static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
                                                    void __user *useraddr)
 {
        struct ethtool_sset_info info;
-       const struct ethtool_ops *ops = dev->ethtool_ops;
        u64 sset_mask;
        int i, idx = 0, n_bits = 0, ret, rc;
        u32 *info_buf = NULL;
 
-       if (!ops->get_sset_count)
-               return -EOPNOTSUPP;
-
        if (copy_from_user(&info, useraddr, sizeof(info)))
                return -EFAULT;
 
@@ -285,7 +651,7 @@ static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
                if (!(sset_mask & (1ULL << i)))
                        continue;
 
-               rc = ops->get_sset_count(dev, i);
+               rc = __ethtool_get_sset_count(dev, i);
                if (rc >= 0) {
                        info.sset_mask |= (1ULL << i);
                        info_buf[idx++] = rc;
@@ -1091,6 +1457,9 @@ static int __ethtool_set_sg(struct net_device *dev, u32 data)
 {
        int err;
 
+       if (data && !(dev->features & NETIF_F_ALL_CSUM))
+               return -EINVAL;
+
        if (!data && dev->ethtool_ops->set_tso) {
                err = dev->ethtool_ops->set_tso(dev, 0);
                if (err)
@@ -1105,145 +1474,55 @@ static int __ethtool_set_sg(struct net_device *dev, u32 data)
        return dev->ethtool_ops->set_sg(dev, data);
 }
 
-static int ethtool_set_tx_csum(struct net_device *dev, char __user *useraddr)
+static int __ethtool_set_tx_csum(struct net_device *dev, u32 data)
 {
-       struct ethtool_value edata;
        int err;
 
        if (!dev->ethtool_ops->set_tx_csum)
                return -EOPNOTSUPP;
 
-       if (copy_from_user(&edata, useraddr, sizeof(edata)))
-               return -EFAULT;
-
-       if (!edata.data && dev->ethtool_ops->set_sg) {
+       if (!data && dev->ethtool_ops->set_sg) {
                err = __ethtool_set_sg(dev, 0);
                if (err)
                        return err;
        }
 
-       return dev->ethtool_ops->set_tx_csum(dev, edata.data);
+       return dev->ethtool_ops->set_tx_csum(dev, data);
 }
-EXPORT_SYMBOL(ethtool_op_set_tx_csum);
 
-static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr)
+static int __ethtool_set_rx_csum(struct net_device *dev, u32 data)
 {
-       struct ethtool_value edata;
-
        if (!dev->ethtool_ops->set_rx_csum)
                return -EOPNOTSUPP;
 
-       if (copy_from_user(&edata, useraddr, sizeof(edata)))
-               return -EFAULT;
-
-       if (!edata.data && dev->ethtool_ops->set_sg)
+       if (!data)
                dev->features &= ~NETIF_F_GRO;
 
-       return dev->ethtool_ops->set_rx_csum(dev, edata.data);
+       return dev->ethtool_ops->set_rx_csum(dev, data);
 }
 
-static int ethtool_set_sg(struct net_device *dev, char __user *useraddr)
+static int __ethtool_set_tso(struct net_device *dev, u32 data)
 {
-       struct ethtool_value edata;
-
-       if (!dev->ethtool_ops->set_sg)
-               return -EOPNOTSUPP;
-
-       if (copy_from_user(&edata, useraddr, sizeof(edata)))
-               return -EFAULT;
-
-       if (edata.data &&
-           !(dev->features & NETIF_F_ALL_CSUM))
-               return -EINVAL;
-
-       return __ethtool_set_sg(dev, edata.data);
-}
-
-static int ethtool_set_tso(struct net_device *dev, char __user *useraddr)
-{
-       struct ethtool_value edata;
-
        if (!dev->ethtool_ops->set_tso)
                return -EOPNOTSUPP;
 
-       if (copy_from_user(&edata, useraddr, sizeof(edata)))
-               return -EFAULT;
-
-       if (edata.data && !(dev->features & NETIF_F_SG))
+       if (data && !(dev->features & NETIF_F_SG))
                return -EINVAL;
 
-       return dev->ethtool_ops->set_tso(dev, edata.data);
+       return dev->ethtool_ops->set_tso(dev, data);
 }
 
-static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr)
+static int __ethtool_set_ufo(struct net_device *dev, u32 data)
 {
-       struct ethtool_value edata;
-
        if (!dev->ethtool_ops->set_ufo)
                return -EOPNOTSUPP;
-       if (copy_from_user(&edata, useraddr, sizeof(edata)))
-               return -EFAULT;
-       if (edata.data && !(dev->features & NETIF_F_SG))
+       if (data && !(dev->features & NETIF_F_SG))
                return -EINVAL;
-       if (edata.data && !((dev->features & NETIF_F_GEN_CSUM) ||
+       if (data && !((dev->features & NETIF_F_GEN_CSUM) ||
                (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
                        == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)))
                return -EINVAL;
-       return dev->ethtool_ops->set_ufo(dev, edata.data);
-}
-
-static int ethtool_get_gso(struct net_device *dev, char __user *useraddr)
-{
-       struct ethtool_value edata = { ETHTOOL_GGSO };
-
-       edata.data = dev->features & NETIF_F_GSO;
-       if (copy_to_user(useraddr, &edata, sizeof(edata)))
-               return -EFAULT;
-       return 0;
-}
-
-static int ethtool_set_gso(struct net_device *dev, char __user *useraddr)
-{
-       struct ethtool_value edata;
-
-       if (copy_from_user(&edata, useraddr, sizeof(edata)))
-               return -EFAULT;
-       if (edata.data)
-               dev->features |= NETIF_F_GSO;
-       else
-               dev->features &= ~NETIF_F_GSO;
-       return 0;
-}
-
-static int ethtool_get_gro(struct net_device *dev, char __user *useraddr)
-{
-       struct ethtool_value edata = { ETHTOOL_GGRO };
-
-       edata.data = dev->features & NETIF_F_GRO;
-       if (copy_to_user(useraddr, &edata, sizeof(edata)))
-               return -EFAULT;
-       return 0;
-}
-
-static int ethtool_set_gro(struct net_device *dev, char __user *useraddr)
-{
-       struct ethtool_value edata;
-
-       if (copy_from_user(&edata, useraddr, sizeof(edata)))
-               return -EFAULT;
-
-       if (edata.data) {
-               u32 rxcsum = dev->ethtool_ops->get_rx_csum ?
-                               dev->ethtool_ops->get_rx_csum(dev) :
-                               ethtool_op_get_rx_csum(dev);
-
-               if (!rxcsum)
-                       return -EINVAL;
-               dev->features |= NETIF_F_GRO;
-       } else
-               dev->features &= ~NETIF_F_GRO;
-
-       return 0;
+       return dev->ethtool_ops->set_ufo(dev, data);
 }
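The refactored setters above keep the long-standing dependency checks: TX checksumming is a prerequisite for scatter-gather, scatter-gather for TSO, and UFO needs both scatter-gather and a full checksum offload. A condensed, illustrative helper (not in the patch) stating the same rules in one place:

        /* sketch only: mirrors the checks in __ethtool_set_sg/_tso/_ufo above */
        static bool offload_prereqs_ok(u32 features, u32 want)
        {
                if ((want & NETIF_F_SG) && !(features & NETIF_F_ALL_CSUM))
                        return false;           /* SG needs some TX checksum offload */
                if ((want & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG))
                        return false;           /* TSO needs scatter-gather */
                if (want & NETIF_F_UFO) {
                        if (!(features & NETIF_F_SG))
                                return false;   /* UFO needs scatter-gather */
                        if (!(features & NETIF_F_GEN_CSUM) &&
                            (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) !=
                                        (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
                                return false;   /* ... and IPv4+IPv6 or generic csum */
                }
                return true;
        }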
 
 static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
@@ -1287,17 +1566,13 @@ static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
 static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
 {
        struct ethtool_gstrings gstrings;
-       const struct ethtool_ops *ops = dev->ethtool_ops;
        u8 *data;
        int ret;
 
-       if (!ops->get_strings || !ops->get_sset_count)
-               return -EOPNOTSUPP;
-
        if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
                return -EFAULT;
 
-       ret = ops->get_sset_count(dev, gstrings.string_set);
+       ret = __ethtool_get_sset_count(dev, gstrings.string_set);
        if (ret < 0)
                return ret;
 
@@ -1307,7 +1582,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
        if (!data)
                return -ENOMEM;
 
-       ops->get_strings(dev, gstrings.string_set, data);
+       __ethtool_get_strings(dev, gstrings.string_set, data);
 
        ret = -EFAULT;
        if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
@@ -1317,7 +1592,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
                goto out;
        ret = 0;
 
- out:
+out:
        kfree(data);
        return ret;
 }
@@ -1458,7 +1733,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        void __user *useraddr = ifr->ifr_data;
        u32 ethcmd;
        int rc;
-       unsigned long old_features;
+       u32 old_features;
 
        if (!dev || !netif_device_present(dev))
                return -ENODEV;
@@ -1500,6 +1775,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        case ETHTOOL_GRXCLSRLCNT:
        case ETHTOOL_GRXCLSRULE:
        case ETHTOOL_GRXCLSRLALL:
+       case ETHTOOL_GFEATURES:
                break;
        default:
                if (!capable(CAP_NET_ADMIN))
@@ -1570,42 +1846,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        case ETHTOOL_SPAUSEPARAM:
                rc = ethtool_set_pauseparam(dev, useraddr);
                break;
-       case ETHTOOL_GRXCSUM:
-               rc = ethtool_get_value(dev, useraddr, ethcmd,
-                                      (dev->ethtool_ops->get_rx_csum ?
-                                       dev->ethtool_ops->get_rx_csum :
-                                       ethtool_op_get_rx_csum));
-               break;
-       case ETHTOOL_SRXCSUM:
-               rc = ethtool_set_rx_csum(dev, useraddr);
-               break;
-       case ETHTOOL_GTXCSUM:
-               rc = ethtool_get_value(dev, useraddr, ethcmd,
-                                      (dev->ethtool_ops->get_tx_csum ?
-                                       dev->ethtool_ops->get_tx_csum :
-                                       ethtool_op_get_tx_csum));
-               break;
-       case ETHTOOL_STXCSUM:
-               rc = ethtool_set_tx_csum(dev, useraddr);
-               break;
-       case ETHTOOL_GSG:
-               rc = ethtool_get_value(dev, useraddr, ethcmd,
-                                      (dev->ethtool_ops->get_sg ?
-                                       dev->ethtool_ops->get_sg :
-                                       ethtool_op_get_sg));
-               break;
-       case ETHTOOL_SSG:
-               rc = ethtool_set_sg(dev, useraddr);
-               break;
-       case ETHTOOL_GTSO:
-               rc = ethtool_get_value(dev, useraddr, ethcmd,
-                                      (dev->ethtool_ops->get_tso ?
-                                       dev->ethtool_ops->get_tso :
-                                       ethtool_op_get_tso));
-               break;
-       case ETHTOOL_STSO:
-               rc = ethtool_set_tso(dev, useraddr);
-               break;
        case ETHTOOL_TEST:
                rc = ethtool_self_test(dev, useraddr);
                break;
@@ -1621,21 +1861,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        case ETHTOOL_GPERMADDR:
                rc = ethtool_get_perm_addr(dev, useraddr);
                break;
-       case ETHTOOL_GUFO:
-               rc = ethtool_get_value(dev, useraddr, ethcmd,
-                                      (dev->ethtool_ops->get_ufo ?
-                                       dev->ethtool_ops->get_ufo :
-                                       ethtool_op_get_ufo));
-               break;
-       case ETHTOOL_SUFO:
-               rc = ethtool_set_ufo(dev, useraddr);
-               break;
-       case ETHTOOL_GGSO:
-               rc = ethtool_get_gso(dev, useraddr);
-               break;
-       case ETHTOOL_SGSO:
-               rc = ethtool_set_gso(dev, useraddr);
-               break;
        case ETHTOOL_GFLAGS:
                rc = ethtool_get_value(dev, useraddr, ethcmd,
                                       (dev->ethtool_ops->get_flags ?
@@ -1643,8 +1868,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
                                        ethtool_op_get_flags));
                break;
        case ETHTOOL_SFLAGS:
-               rc = ethtool_set_value(dev, useraddr,
-                                      dev->ethtool_ops->set_flags);
+               rc = ethtool_set_value(dev, useraddr, __ethtool_set_flags);
                break;
        case ETHTOOL_GPFLAGS:
                rc = ethtool_get_value(dev, useraddr, ethcmd,
@@ -1666,12 +1890,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        case ETHTOOL_SRXCLSRLINS:
                rc = ethtool_set_rxnfc(dev, ethcmd, useraddr);
                break;
-       case ETHTOOL_GGRO:
-               rc = ethtool_get_gro(dev, useraddr);
-               break;
-       case ETHTOOL_SGRO:
-               rc = ethtool_set_gro(dev, useraddr);
-               break;
        case ETHTOOL_FLASHDEV:
                rc = ethtool_flash_device(dev, useraddr);
                break;
@@ -1693,6 +1911,30 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        case ETHTOOL_SRXFHINDIR:
                rc = ethtool_set_rxfh_indir(dev, useraddr);
                break;
+       case ETHTOOL_GFEATURES:
+               rc = ethtool_get_features(dev, useraddr);
+               break;
+       case ETHTOOL_SFEATURES:
+               rc = ethtool_set_features(dev, useraddr);
+               break;
+       case ETHTOOL_GTXCSUM:
+       case ETHTOOL_GRXCSUM:
+       case ETHTOOL_GSG:
+       case ETHTOOL_GTSO:
+       case ETHTOOL_GUFO:
+       case ETHTOOL_GGSO:
+       case ETHTOOL_GGRO:
+               rc = ethtool_get_one_feature(dev, useraddr, ethcmd);
+               break;
+       case ETHTOOL_STXCSUM:
+       case ETHTOOL_SRXCSUM:
+       case ETHTOOL_SSG:
+       case ETHTOOL_STSO:
+       case ETHTOOL_SUFO:
+       case ETHTOOL_SGSO:
+       case ETHTOOL_SGRO:
+               rc = ethtool_set_one_feature(dev, useraddr, ethcmd);
+               break;
        default:
                rc = -EOPNOTSUPP;
        }
index afc58374ca961a3b5496727d8836824a3bd83b7d..232b1873bb28988069ef1b1642ad8cd7ef689b8f 100644 (file)
@@ -142,14 +142,14 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
        if (err)
                return err;
 
-       rcu_read_lock_bh();
-       filter = rcu_dereference_bh(sk->sk_filter);
+       rcu_read_lock();
+       filter = rcu_dereference(sk->sk_filter);
        if (filter) {
                unsigned int pkt_len = sk_run_filter(skb, filter->insns);
 
                err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
        }
-       rcu_read_unlock_bh();
+       rcu_read_unlock();
 
        return err;
 }
index 127c8a7ffd61fbc350fafe503fe692622e16374e..990703b8863b4d0bdb29619350e7d4aec01bde17 100644 (file)
@@ -172,9 +172,9 @@ static void flow_new_hash_rnd(struct flow_cache *fc,
 
 static u32 flow_hash_code(struct flow_cache *fc,
                          struct flow_cache_percpu *fcp,
-                         struct flowi *key)
+                         const struct flowi *key)
 {
-       u32 *k = (u32 *) key;
+       const u32 *k = (const u32 *) key;
 
        return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
                & (flow_cache_hash_size(fc) - 1);
@@ -186,17 +186,17 @@ typedef unsigned long flow_compare_t;
  * important assumptions that we can here, such as alignment and
  * constant size.
  */
-static int flow_key_compare(struct flowi *key1, struct flowi *key2)
+static int flow_key_compare(const struct flowi *key1, const struct flowi *key2)
 {
-       flow_compare_t *k1, *k1_lim, *k2;
+       const flow_compare_t *k1, *k1_lim, *k2;
        const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);
 
        BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));
 
-       k1 = (flow_compare_t *) key1;
+       k1 = (const flow_compare_t *) key1;
        k1_lim = k1 + n_elem;
 
-       k2 = (flow_compare_t *) key2;
+       k2 = (const flow_compare_t *) key2;
 
        do {
                if (*k1++ != *k2++)
@@ -207,7 +207,7 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2)
 }
 
 struct flow_cache_object *
-flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
+flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
                  flow_resolve_t resolver, void *ctx)
 {
        struct flow_cache *fc = &flow_cache_global;
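The constified comparison still depends on the layout guarantees spelled out in the comment above; a standalone sketch of the same word-wise trick, valid only when the struct size is a multiple of the word size (which the kernel enforces with BUILD_BUG_ON) and the objects are naturally aligned:

        #include <stdbool.h>
        #include <stddef.h>

        struct key { unsigned long a, b, c, d; };       /* stand-in for struct flowi */

        static bool key_equal(const struct key *k1, const struct key *k2)
        {
                const unsigned long *p1 = (const unsigned long *)k1;
                const unsigned long *p2 = (const unsigned long *)k2;
                size_t n = sizeof(struct key) / sizeof(unsigned long);

                while (n--)
                        if (*p1++ != *p2++)
                                return false;
                return true;
        }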
index 60a902913429c60ce56a77ba779fb70e65247414..799f06e03a220d4f611cef6f18551f4f58994450 100644 (file)
@@ -316,7 +316,7 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries)
 {
        size_t size = entries * sizeof(struct neighbour *);
        struct neigh_hash_table *ret;
-       struct neighbour **buckets;
+       struct neighbour __rcu **buckets;
 
        ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
        if (!ret)
@@ -324,14 +324,14 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries)
        if (size <= PAGE_SIZE)
                buckets = kzalloc(size, GFP_ATOMIC);
        else
-               buckets = (struct neighbour **)
+               buckets = (struct neighbour __rcu **)
                          __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
                                           get_order(size));
        if (!buckets) {
                kfree(ret);
                return NULL;
        }
-       rcu_assign_pointer(ret->hash_buckets, buckets);
+       ret->hash_buckets = buckets;
        ret->hash_mask = entries - 1;
        get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
        return ret;
@@ -343,7 +343,7 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
                                                    struct neigh_hash_table,
                                                    rcu);
        size_t size = (nht->hash_mask + 1) * sizeof(struct neighbour *);
-       struct neighbour **buckets = nht->hash_buckets;
+       struct neighbour __rcu **buckets = nht->hash_buckets;
 
        if (size <= PAGE_SIZE)
                kfree(buckets);
@@ -1540,7 +1540,7 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
                panic("cannot create neighbour proc dir entry");
 #endif
 
-       tbl->nht = neigh_hash_alloc(8);
+       RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(8));
 
        phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
        tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
@@ -1602,7 +1602,8 @@ int neigh_table_clear(struct neigh_table *tbl)
        }
        write_unlock(&neigh_tbl_lock);
 
-       call_rcu(&tbl->nht->rcu, neigh_hash_free_rcu);
+       call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
+                neigh_hash_free_rcu);
        tbl->nht = NULL;
 
        kfree(tbl->phash_buckets);
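The switch away from rcu_assign_pointer() in neigh_hash_alloc(), and to RCU_INIT_POINTER() in neigh_table_init_no_netlink(), follows the usual RCU publication rule; a comment-only sketch of where each primitive belongs (illustrative, not from the patch):

        /*
         *   nht = neigh_hash_alloc(n);          object is still private, so
         *   nht->hash_buckets = buckets;        plain stores need no barrier
         *
         *   RCU_INIT_POINTER(tbl->nht, nht);    init path, no readers exist
         *                                       yet, so no barrier either
         *
         *   rcu_assign_pointer(tbl->nht, new);  republication to live readers
         *                                       (e.g. after a resize) is where
         *                                       the write barrier is required
         */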
index e23c01be5a5bdf27012ab839f05abd5a9366532f..5ceb257e860c18cbabe352f67cb8760d95bcba84 100644 (file)
@@ -99,7 +99,7 @@ NETDEVICE_SHOW(addr_assign_type, fmt_dec);
 NETDEVICE_SHOW(addr_len, fmt_dec);
 NETDEVICE_SHOW(iflink, fmt_dec);
 NETDEVICE_SHOW(ifindex, fmt_dec);
-NETDEVICE_SHOW(features, fmt_long_hex);
+NETDEVICE_SHOW(features, fmt_hex);
 NETDEVICE_SHOW(type, fmt_dec);
 NETDEVICE_SHOW(link_mode, fmt_dec);
 
@@ -295,6 +295,20 @@ static ssize_t show_ifalias(struct device *dev,
        return ret;
 }
 
+NETDEVICE_SHOW(group, fmt_dec);
+
+static int change_group(struct net_device *net, unsigned long new_group)
+{
+       dev_set_group(net, (int) new_group);
+       return 0;
+}
+
+static ssize_t store_group(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t len)
+{
+       return netdev_store(dev, attr, buf, len, change_group);
+}
+
 static struct device_attribute net_class_attributes[] = {
        __ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
        __ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
@@ -316,6 +330,7 @@ static struct device_attribute net_class_attributes[] = {
        __ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags),
        __ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
               store_tx_queue_len),
+       __ATTR(netdev_group, S_IRUGO | S_IWUSR, show_group, store_group),
        {}
 };
 
index 02dc2cbcbe8652a0dd9e3b514cc96583da27842d..06be2431753e19a2383db73c2f2c3e63a33c6a13 100644 (file)
@@ -193,6 +193,17 @@ void netpoll_poll_dev(struct net_device *dev)
 
        poll_napi(dev);
 
+       if (dev->priv_flags & IFF_SLAVE) {
+               if (dev->npinfo) {
+                       struct net_device *bond_dev = dev->master;
+                       struct sk_buff *skb;
+                       while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
+                               skb->dev = bond_dev;
+                               skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
+                       }
+               }
+       }
+
        service_arp_queue(dev->npinfo);
 
        zap_completion_queue();
@@ -313,9 +324,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                     tries > 0; --tries) {
                        if (__netif_tx_trylock(txq)) {
                                if (!netif_tx_queue_stopped(txq)) {
-                                       dev->priv_flags |= IFF_IN_NETPOLL;
                                        status = ops->ndo_start_xmit(skb, dev);
-                                       dev->priv_flags &= ~IFF_IN_NETPOLL;
                                        if (status == NETDEV_TX_OK)
                                                txq_trans_update(txq);
                                }
index b5bada92f63704cc87651cdc384c085bc1cbfe66..f0aec6c39ecdc0bf6a3794aa4b5aeb7e058ad05e 100644 (file)
@@ -251,6 +251,7 @@ struct pktgen_dev {
        int max_pkt_size;       /* = ETH_ZLEN; */
        int pkt_overhead;       /* overhead for MPLS, VLANs, IPSEC etc */
        int nfrags;
+       struct page *page;
        u64 delay;              /* nano-seconds */
 
        __u64 count;            /* Default No packets to send */
@@ -1134,6 +1135,10 @@ static ssize_t pktgen_if_write(struct file *file,
                if (node_possible(value)) {
                        pkt_dev->node = value;
                        sprintf(pg_result, "OK: node=%d", pkt_dev->node);
+                       if (pkt_dev->page) {
+                               put_page(pkt_dev->page);
+                               pkt_dev->page = NULL;
+                       }
                }
                else
                        sprintf(pg_result, "ERROR: node not possible");
@@ -2605,6 +2610,90 @@ static inline __be16 build_tci(unsigned int id, unsigned int cfi,
        return htons(id | (cfi << 12) | (prio << 13));
 }
 
+static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
+                               int datalen)
+{
+       struct timeval timestamp;
+       struct pktgen_hdr *pgh;
+
+       pgh = (struct pktgen_hdr *)skb_put(skb, sizeof(*pgh));
+       datalen -= sizeof(*pgh);
+
+       if (pkt_dev->nfrags <= 0) {
+               pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
+               memset(pgh + 1, 0, datalen);
+       } else {
+               int frags = pkt_dev->nfrags;
+               int i, len;
+
+
+               if (frags > MAX_SKB_FRAGS)
+                       frags = MAX_SKB_FRAGS;
+               len = datalen - frags * PAGE_SIZE;
+               if (len > 0) {
+                       memset(skb_put(skb, len), 0, len);
+                       datalen = frags * PAGE_SIZE;
+               }
+
+               i = 0;
+               while (datalen > 0) {
+                       if (unlikely(!pkt_dev->page)) {
+                               int node = numa_node_id();
+
+                               if (pkt_dev->node >= 0 && (pkt_dev->flags & F_NODE))
+                                       node = pkt_dev->node;
+                               pkt_dev->page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
+                               if (!pkt_dev->page)
+                                       break;
+                       }
+                       skb_shinfo(skb)->frags[i].page = pkt_dev->page;
+                       get_page(pkt_dev->page);
+                       skb_shinfo(skb)->frags[i].page_offset = 0;
+                       skb_shinfo(skb)->frags[i].size =
+                           (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
+                       datalen -= skb_shinfo(skb)->frags[i].size;
+                       skb->len += skb_shinfo(skb)->frags[i].size;
+                       skb->data_len += skb_shinfo(skb)->frags[i].size;
+                       i++;
+                       skb_shinfo(skb)->nr_frags = i;
+               }
+
+               while (i < frags) {
+                       int rem;
+
+                       if (i == 0)
+                               break;
+
+                       rem = skb_shinfo(skb)->frags[i - 1].size / 2;
+                       if (rem == 0)
+                               break;
+
+                       skb_shinfo(skb)->frags[i - 1].size -= rem;
+
+                       skb_shinfo(skb)->frags[i] =
+                           skb_shinfo(skb)->frags[i - 1];
+                       get_page(skb_shinfo(skb)->frags[i].page);
+                       skb_shinfo(skb)->frags[i].page =
+                           skb_shinfo(skb)->frags[i - 1].page;
+                       skb_shinfo(skb)->frags[i].page_offset +=
+                           skb_shinfo(skb)->frags[i - 1].size;
+                       skb_shinfo(skb)->frags[i].size = rem;
+                       i++;
+                       skb_shinfo(skb)->nr_frags = i;
+               }
+       }
+
+       /* Stamp the time, and sequence number,
+        * convert them to network byte order
+        */
+       pgh->pgh_magic = htonl(PKTGEN_MAGIC);
+       pgh->seq_num = htonl(pkt_dev->seq_num);
+
+       do_gettimeofday(&timestamp);
+       pgh->tv_sec = htonl(timestamp.tv_sec);
+       pgh->tv_usec = htonl(timestamp.tv_usec);
+}
+
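pktgen_finalize_skb() now reuses a single per-device page for all fragments: it is allocated lazily (on the NUMA node selected when F_NODE is set), pinned once per fragment with get_page(), and released when the node setting changes or the device is removed, as the surrounding hunks show. The reference pattern in isolation (sketch only):

        if (unlikely(!pkt_dev->page))
                pkt_dev->page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);

        get_page(pkt_dev->page);                        /* one reference per fragment */
        skb_shinfo(skb)->frags[i].page = pkt_dev->page;
        ...
        put_page(pkt_dev->page);                        /* on node change or device removal */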
 static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
                                        struct pktgen_dev *pkt_dev)
 {
@@ -2613,7 +2702,6 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
        struct udphdr *udph;
        int datalen, iplen;
        struct iphdr *iph;
-       struct pktgen_hdr *pgh = NULL;
        __be16 protocol = htons(ETH_P_IP);
        __be32 *mpls;
        __be16 *vlan_tci = NULL;                 /* Encapsulates priority and VLAN ID */
@@ -2729,76 +2817,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
                           pkt_dev->pkt_overhead);
        skb->dev = odev;
        skb->pkt_type = PACKET_HOST;
-
-       if (pkt_dev->nfrags <= 0) {
-               pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
-               memset(pgh + 1, 0, datalen - sizeof(struct pktgen_hdr));
-       } else {
-               int frags = pkt_dev->nfrags;
-               int i, len;
-
-               pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8);
-
-               if (frags > MAX_SKB_FRAGS)
-                       frags = MAX_SKB_FRAGS;
-               if (datalen > frags * PAGE_SIZE) {
-                       len = datalen - frags * PAGE_SIZE;
-                       memset(skb_put(skb, len), 0, len);
-                       datalen = frags * PAGE_SIZE;
-               }
-
-               i = 0;
-               while (datalen > 0) {
-                       struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
-                       skb_shinfo(skb)->frags[i].page = page;
-                       skb_shinfo(skb)->frags[i].page_offset = 0;
-                       skb_shinfo(skb)->frags[i].size =
-                           (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
-                       datalen -= skb_shinfo(skb)->frags[i].size;
-                       skb->len += skb_shinfo(skb)->frags[i].size;
-                       skb->data_len += skb_shinfo(skb)->frags[i].size;
-                       i++;
-                       skb_shinfo(skb)->nr_frags = i;
-               }
-
-               while (i < frags) {
-                       int rem;
-
-                       if (i == 0)
-                               break;
-
-                       rem = skb_shinfo(skb)->frags[i - 1].size / 2;
-                       if (rem == 0)
-                               break;
-
-                       skb_shinfo(skb)->frags[i - 1].size -= rem;
-
-                       skb_shinfo(skb)->frags[i] =
-                           skb_shinfo(skb)->frags[i - 1];
-                       get_page(skb_shinfo(skb)->frags[i].page);
-                       skb_shinfo(skb)->frags[i].page =
-                           skb_shinfo(skb)->frags[i - 1].page;
-                       skb_shinfo(skb)->frags[i].page_offset +=
-                           skb_shinfo(skb)->frags[i - 1].size;
-                       skb_shinfo(skb)->frags[i].size = rem;
-                       i++;
-                       skb_shinfo(skb)->nr_frags = i;
-               }
-       }
-
-       /* Stamp the time, and sequence number,
-        * convert them to network byte order
-        */
-       if (pgh) {
-               struct timeval timestamp;
-
-               pgh->pgh_magic = htonl(PKTGEN_MAGIC);
-               pgh->seq_num = htonl(pkt_dev->seq_num);
-
-               do_gettimeofday(&timestamp);
-               pgh->tv_sec = htonl(timestamp.tv_sec);
-               pgh->tv_usec = htonl(timestamp.tv_usec);
-       }
+       pktgen_finalize_skb(pkt_dev, skb, datalen);
 
 #ifdef CONFIG_XFRM
        if (!process_ipsec(pkt_dev, skb, protocol))
@@ -2980,7 +2999,6 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
        struct udphdr *udph;
        int datalen;
        struct ipv6hdr *iph;
-       struct pktgen_hdr *pgh = NULL;
        __be16 protocol = htons(ETH_P_IPV6);
        __be32 *mpls;
        __be16 *vlan_tci = NULL;                 /* Encapsulates priority and VLAN ID */
@@ -3083,75 +3101,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
        skb->dev = odev;
        skb->pkt_type = PACKET_HOST;
 
-       if (pkt_dev->nfrags <= 0)
-               pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
-       else {
-               int frags = pkt_dev->nfrags;
-               int i;
-
-               pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8);
-
-               if (frags > MAX_SKB_FRAGS)
-                       frags = MAX_SKB_FRAGS;
-               if (datalen > frags * PAGE_SIZE) {
-                       skb_put(skb, datalen - frags * PAGE_SIZE);
-                       datalen = frags * PAGE_SIZE;
-               }
-
-               i = 0;
-               while (datalen > 0) {
-                       struct page *page = alloc_pages(GFP_KERNEL, 0);
-                       skb_shinfo(skb)->frags[i].page = page;
-                       skb_shinfo(skb)->frags[i].page_offset = 0;
-                       skb_shinfo(skb)->frags[i].size =
-                           (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
-                       datalen -= skb_shinfo(skb)->frags[i].size;
-                       skb->len += skb_shinfo(skb)->frags[i].size;
-                       skb->data_len += skb_shinfo(skb)->frags[i].size;
-                       i++;
-                       skb_shinfo(skb)->nr_frags = i;
-               }
-
-               while (i < frags) {
-                       int rem;
-
-                       if (i == 0)
-                               break;
-
-                       rem = skb_shinfo(skb)->frags[i - 1].size / 2;
-                       if (rem == 0)
-                               break;
-
-                       skb_shinfo(skb)->frags[i - 1].size -= rem;
-
-                       skb_shinfo(skb)->frags[i] =
-                           skb_shinfo(skb)->frags[i - 1];
-                       get_page(skb_shinfo(skb)->frags[i].page);
-                       skb_shinfo(skb)->frags[i].page =
-                           skb_shinfo(skb)->frags[i - 1].page;
-                       skb_shinfo(skb)->frags[i].page_offset +=
-                           skb_shinfo(skb)->frags[i - 1].size;
-                       skb_shinfo(skb)->frags[i].size = rem;
-                       i++;
-                       skb_shinfo(skb)->nr_frags = i;
-               }
-       }
-
-       /* Stamp the time, and sequence number,
-        * convert them to network byte order
-        * should we update cloned packets too ?
-        */
-       if (pgh) {
-               struct timeval timestamp;
-
-               pgh->pgh_magic = htonl(PKTGEN_MAGIC);
-               pgh->seq_num = htonl(pkt_dev->seq_num);
-
-               do_gettimeofday(&timestamp);
-               pgh->tv_sec = htonl(timestamp.tv_sec);
-               pgh->tv_usec = htonl(timestamp.tv_usec);
-       }
-       /* pkt_dev->seq_num++; FF: you really mean this? */
+       pktgen_finalize_skb(pkt_dev, skb, datalen);
 
        return skb;
 }
@@ -3884,6 +3834,8 @@ static int pktgen_remove_device(struct pktgen_thread *t,
        free_SAs(pkt_dev);
 #endif
        vfree(pkt_dev->flows);
+       if (pkt_dev->page)
+               put_page(pkt_dev->page);
        kfree(pkt_dev);
        return 0;
 }
index 2d65c6bb24c10e8734d7e0ee93644c2cd3298b45..49f7ea5b4c7510ca9caeb9d494486fea71c04c9d 100644 (file)
@@ -868,6 +868,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                   netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
        NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode);
        NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
+       NLA_PUT_U32(skb, IFLA_GROUP, dev->group);
 
        if (dev->ifindex != dev->iflink)
                NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
@@ -1035,6 +1036,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
        [IFLA_MAP]              = { .len = sizeof(struct rtnl_link_ifmap) },
        [IFLA_MTU]              = { .type = NLA_U32 },
        [IFLA_LINK]             = { .type = NLA_U32 },
+       [IFLA_MASTER]           = { .type = NLA_U32 },
        [IFLA_TXQLEN]           = { .type = NLA_U32 },
        [IFLA_WEIGHT]           = { .type = NLA_U32 },
        [IFLA_OPERSTATE]        = { .type = NLA_U8 },
@@ -1177,6 +1179,41 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
        return err;
 }
 
+static int do_set_master(struct net_device *dev, int ifindex)
+{
+       struct net_device *master_dev;
+       const struct net_device_ops *ops;
+       int err;
+
+       if (dev->master) {
+               if (dev->master->ifindex == ifindex)
+                       return 0;
+               ops = dev->master->netdev_ops;
+               if (ops->ndo_del_slave) {
+                       err = ops->ndo_del_slave(dev->master, dev);
+                       if (err)
+                               return err;
+               } else {
+                       return -EOPNOTSUPP;
+               }
+       }
+
+       if (ifindex) {
+               master_dev = __dev_get_by_index(dev_net(dev), ifindex);
+               if (!master_dev)
+                       return -EINVAL;
+               ops = master_dev->netdev_ops;
+               if (ops->ndo_add_slave) {
+                       err = ops->ndo_add_slave(master_dev, dev);
+                       if (err)
+                               return err;
+               } else {
+                       return -EOPNOTSUPP;
+               }
+       }
+       return 0;
+}
+
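do_set_master() makes IFLA_MASTER generic: userspace can enslave a device to, or release it from, any master whose driver implements the two ndo hooks called above. A hypothetical master driver (names invented) only needs to provide:

        #include <linux/netdevice.h>

        static int foo_add_slave(struct net_device *master, struct net_device *slave)
        {
                /* validate the candidate, take a reference, start using it ... */
                return 0;
        }

        static int foo_del_slave(struct net_device *master, struct net_device *slave)
        {
                /* stop using the slave and drop the reference ... */
                return 0;
        }

        static const struct net_device_ops foo_netdev_ops = {
                .ndo_add_slave  = foo_add_slave,
                .ndo_del_slave  = foo_del_slave,
        };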
 static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
                      struct nlattr **tb, char *ifname, int modified)
 {
@@ -1264,6 +1301,11 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
                modified = 1;
        }
 
+       if (tb[IFLA_GROUP]) {
+               dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
+               modified = 1;
+       }
+
        /*
         * Interface selected by interface index but interface
         * name provided implies that a name change has been
@@ -1295,6 +1337,13 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
                        goto errout;
        }
 
+       if (tb[IFLA_MASTER]) {
+               err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
+               if (err)
+                       goto errout;
+               modified = 1;
+       }
+
        if (tb[IFLA_TXQLEN])
                dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
 
@@ -1541,6 +1590,8 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
                set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
        if (tb[IFLA_LINKMODE])
                dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
+       if (tb[IFLA_GROUP])
+               dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
 
        return dev;
 
@@ -1551,6 +1602,24 @@ err:
 }
 EXPORT_SYMBOL(rtnl_create_link);
 
+static int rtnl_group_changelink(struct net *net, int group,
+               struct ifinfomsg *ifm,
+               struct nlattr **tb)
+{
+       struct net_device *dev;
+       int err;
+
+       for_each_netdev(net, dev) {
+               if (dev->group == group) {
+                       err = do_setlink(dev, ifm, tb, NULL, 0);
+                       if (err < 0)
+                               return err;
+               }
+       }
+
+       return 0;
+}
+
 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
        struct net *net = sock_net(skb->sk);
@@ -1578,10 +1647,12 @@ replay:
        ifm = nlmsg_data(nlh);
        if (ifm->ifi_index > 0)
                dev = __dev_get_by_index(net, ifm->ifi_index);
-       else if (ifname[0])
-               dev = __dev_get_by_name(net, ifname);
-       else
-               dev = NULL;
+       else {
+               if (ifname[0])
+                       dev = __dev_get_by_name(net, ifname);
+               else
+                       dev = NULL;
+       }
 
        err = validate_linkmsg(dev, tb);
        if (err < 0)
@@ -1645,8 +1716,13 @@ replay:
                        return do_setlink(dev, ifm, tb, ifname, modified);
                }
 
-               if (!(nlh->nlmsg_flags & NLM_F_CREATE))
+               if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
+                       if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
+                               return rtnl_group_changelink(net,
+                                               nla_get_u32(tb[IFLA_GROUP]),
+                                               ifm, tb);
                        return -ENODEV;
+               }
 
                if (ifm->ifi_index)
                        return -EOPNOTSUPP;
index d883dcc78b6b6f10bfa3554fbba654c98526616d..1eb526a848ff572f01d061c84e8475e203e61436 100644 (file)
@@ -2434,8 +2434,6 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
                        return -ENOMEM;
 
                /* initialize the next frag */
-               sk->sk_sndmsg_page = page;
-               sk->sk_sndmsg_off = 0;
                skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
                skb->truesize += PAGE_SIZE;
                atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
@@ -2455,7 +2453,6 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
                        return -EFAULT;
 
                /* copy was successful so update the size parameters */
-               sk->sk_sndmsg_off += copy;
                frag->size += copy;
                skb->len += copy;
                skb->data_len += copy;
@@ -2498,7 +2495,7 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
  *     a pointer to the first in a list of new skbs for the segments.
  *     In case of error it returns ERR_PTR(err).
  */
-struct sk_buff *skb_segment(struct sk_buff *skb, int features)
+struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
 {
        struct sk_buff *segs = NULL;
        struct sk_buff *tail = NULL;
@@ -2508,7 +2505,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
        unsigned int offset = doffset;
        unsigned int headroom;
        unsigned int len;
-       int sg = features & NETIF_F_SG;
+       int sg = !!(features & NETIF_F_SG);
        int nfrags = skb_shinfo(skb)->nr_frags;
        int err = -ENOMEM;
        int i = 0;
index c44348adba3bd22047d6a9cc95afa35ac802e291..118392f3872e669d0c131ddd9bd3394df414848a 100644 (file)
@@ -1224,6 +1224,59 @@ err:
        return err;
 }
 
+static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
+                               int app_nested_type, int app_info_type,
+                               int app_entry_type)
+{
+       struct dcb_peer_app_info info;
+       struct dcb_app *table = NULL;
+       const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
+       u16 app_count;
+       int err;
+
+
+       /**
+        * retrieve the peer app configuration from the driver. If the driver
+        * handlers fail, exit without doing anything
+        */
+       err = ops->peer_getappinfo(netdev, &info, &app_count);
+       if (!err && app_count) {
+               table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL);
+               if (!table)
+                       return -ENOMEM;
+
+               err = ops->peer_getapptable(netdev, table);
+       }
+
+       if (!err) {
+               u16 i;
+               struct nlattr *app;
+
+               /**
+                * build the message, from here on the only possible failure
+                * is due to the skb size
+                */
+               err = -EMSGSIZE;
+
+               app = nla_nest_start(skb, app_nested_type);
+               if (!app)
+                       goto nla_put_failure;
+
+               if (app_info_type)
+                       NLA_PUT(skb, app_info_type, sizeof(info), &info);
+
+               for (i = 0; i < app_count; i++)
+                       NLA_PUT(skb, app_entry_type, sizeof(struct dcb_app),
+                               &table[i]);
+
+               nla_nest_end(skb, app);
+       }
+       err = 0;
+
+nla_put_failure:
+       kfree(table);
+       return err;
+}
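dcbnl_build_peer_app() leans on two driver callbacks, peer_getappinfo() and peer_getapptable(); their expected shape can be read off the call sites above. A hypothetical driver-side pair, with invented foo_* names and private storage, might look like:

        static int foo_peer_getappinfo(struct net_device *dev,
                                       struct dcb_peer_app_info *info,
                                       u16 *app_count)
        {
                struct foo_priv *priv = netdev_priv(dev);  /* invented private data */

                *info = priv->peer_app_info;    /* filled in during the DCBX exchange */
                *app_count = priv->peer_app_count;
                return 0;
        }

        static int foo_peer_getapptable(struct net_device *dev, struct dcb_app *table)
        {
                struct foo_priv *priv = netdev_priv(dev);

                memcpy(table, priv->peer_app,
                       priv->peer_app_count * sizeof(struct dcb_app));
                return 0;
        }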
 
 /* Handle IEEE 802.1Qaz GET commands. */
 static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
@@ -1288,6 +1341,30 @@ static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
        spin_unlock(&dcb_lock);
        nla_nest_end(skb, app);
 
+       /* get peer info if available */
+       if (ops->ieee_peer_getets) {
+               struct ieee_ets ets;
+               err = ops->ieee_peer_getets(netdev, &ets);
+               if (!err)
+                       NLA_PUT(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets);
+       }
+
+       if (ops->ieee_peer_getpfc) {
+               struct ieee_pfc pfc;
+               err = ops->ieee_peer_getpfc(netdev, &pfc);
+               if (!err)
+                       NLA_PUT(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc);
+       }
+
+       if (ops->peer_getappinfo && ops->peer_getapptable) {
+               err = dcbnl_build_peer_app(netdev, skb,
+                                          DCB_ATTR_IEEE_PEER_APP,
+                                          DCB_ATTR_IEEE_APP_UNSPEC,
+                                          DCB_ATTR_IEEE_APP);
+               if (err)
+                       goto nla_put_failure;
+       }
+
        nla_nest_end(skb, ieee);
        nlmsg_end(skb, nlh);
 
@@ -1441,6 +1518,71 @@ err:
        return ret;
 }
 
+/* Handle CEE DCBX GET commands. */
+static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
+                        u32 pid, u32 seq, u16 flags)
+{
+       struct sk_buff *skb;
+       struct nlmsghdr *nlh;
+       struct dcbmsg *dcb;
+       struct nlattr *cee;
+       const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
+       int err;
+
+       if (!ops)
+               return -EOPNOTSUPP;
+
+       skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!skb)
+               return -ENOBUFS;
+
+       nlh = NLMSG_NEW(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
+
+       dcb = NLMSG_DATA(nlh);
+       dcb->dcb_family = AF_UNSPEC;
+       dcb->cmd = DCB_CMD_CEE_GET;
+
+       NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
+
+       cee = nla_nest_start(skb, DCB_ATTR_CEE);
+       if (!cee)
+               goto nla_put_failure;
+
+       /* get peer info if available */
+       if (ops->cee_peer_getpg) {
+               struct cee_pg pg;
+               err = ops->cee_peer_getpg(netdev, &pg);
+               if (!err)
+                       NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
+       }
+
+       if (ops->cee_peer_getpfc) {
+               struct cee_pfc pfc;
+               err = ops->cee_peer_getpfc(netdev, &pfc);
+               if (!err)
+                       NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
+       }
+
+       if (ops->peer_getappinfo && ops->peer_getapptable) {
+               err = dcbnl_build_peer_app(netdev, skb,
+                                          DCB_ATTR_CEE_PEER_APP_TABLE,
+                                          DCB_ATTR_CEE_PEER_APP_INFO,
+                                          DCB_ATTR_CEE_PEER_APP);
+               if (err)
+                       goto nla_put_failure;
+       }
+
+       nla_nest_end(skb, cee);
+       nlmsg_end(skb, nlh);
+
+       return rtnl_unicast(skb, &init_net, pid);
+nla_put_failure:
+       nlmsg_cancel(skb, nlh);
+nlmsg_failure:
+       kfree_skb(skb);
+       return -1;
+}
+
 static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
        struct net *net = sock_net(skb->sk);
@@ -1570,6 +1712,10 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
                ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
                                       nlh->nlmsg_flags);
                goto out;
+       case DCB_CMD_CEE_GET:
+               ret = dcbnl_cee_get(netdev, tb, pid, nlh->nlmsg_seq,
+                                   nlh->nlmsg_flags);
+               goto out;
        default:
                goto errout;
        }
index e96d5e810039a5bcdf3a0503534e7d5ebf596047..fadecd20d75bbfc4849b36c87d7f4b675ce283fd 100644 (file)
@@ -583,6 +583,15 @@ done:
        dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
 }
 
+/*
+ * Convert RFC 3390 larger initial window into an equivalent number of packets.
+ * This is based on the numbers specified in RFC 5681, 3.1.
+ */
+static inline u32 rfc3390_bytes_to_packets(const u32 smss)
+{
+       return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
+}
+
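The thresholds come from RFC 5681, section 3.1: an initial window of 4 segments for SMSS <= 1095 bytes, 3 segments for 1096-2190 bytes, and 2 segments above that. A standalone user-space check of the expression (same logic, outside the kernel):

        #include <assert.h>

        static unsigned int rfc3390_bytes_to_packets(unsigned int smss)
        {
                return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
        }

        int main(void)
        {
                assert(rfc3390_bytes_to_packets(536)  == 4);
                assert(rfc3390_bytes_to_packets(1095) == 4);
                assert(rfc3390_bytes_to_packets(1460) == 3);
                assert(rfc3390_bytes_to_packets(2190) == 3);
                assert(rfc3390_bytes_to_packets(9000) == 2);
                return 0;
        }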
 static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
 {
        struct ccid2_hc_tx_sock *hc = ccid_priv(ccid);
index 45a434f94169f13daf1addc73cdb809d77fae213..7882377bc62e94bab0099c90c37f1ad56d9865d4 100644 (file)
@@ -43,9 +43,9 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        struct inet_sock *inet = inet_sk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
        const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
+       __be16 orig_sport, orig_dport;
        struct rtable *rt;
        __be32 daddr, nexthop;
-       int tmp;
        int err;
 
        dp->dccps_role = DCCP_ROLE_CLIENT;
@@ -63,12 +63,14 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                nexthop = inet->opt->faddr;
        }
 
-       tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr,
-                              RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
-                              IPPROTO_DCCP,
-                              inet->inet_sport, usin->sin_port, sk, 1);
-       if (tmp < 0)
-               return tmp;
+       orig_sport = inet->inet_sport;
+       orig_dport = usin->sin_port;
+       rt = ip_route_connect(nexthop, inet->inet_saddr,
+                             RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
+                             IPPROTO_DCCP,
+                             orig_sport, orig_dport, sk, true);
+       if (IS_ERR(rt))
+               return PTR_ERR(rt);
 
        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
                ip_rt_put(rt);
@@ -99,11 +101,13 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        if (err != 0)
                goto failure;
 
-       err = ip_route_newports(&rt, IPPROTO_DCCP, inet->inet_sport,
-                               inet->inet_dport, sk);
-       if (err != 0)
+       rt = ip_route_newports(rt, IPPROTO_DCCP,
+                              orig_sport, orig_dport,
+                              inet->inet_sport, inet->inet_dport, sk);
+       if (IS_ERR(rt)) {
+               rt = NULL;
                goto failure;
-
+       }
        /* OK, now commit destination to socket.  */
        sk_setup_caps(sk, &rt->dst);
 
@@ -471,7 +475,8 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
                          };
 
        security_skb_classify_flow(skb, &fl);
-       if (ip_route_output_flow(net, &rt, &fl, sk, 0)) {
+       rt = ip_route_output_flow(net, &fl, sk);
+       if (IS_ERR(rt)) {
                IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
                return NULL;
        }
index dca711df9b60cea9e8ecf259cfdfc3909c60d3bb..5efc57f5e60565086bc3e09b08f2a352633333ab 100644 (file)
@@ -162,15 +162,9 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                        fl.fl_ip_sport = inet->inet_sport;
                        security_sk_classify_flow(sk, &fl);
 
-                       err = ip6_dst_lookup(sk, &dst, &fl);
-                       if (err) {
-                               sk->sk_err_soft = -err;
-                               goto out;
-                       }
-
-                       err = xfrm_lookup(net, &dst, &fl, sk, 0);
-                       if (err < 0) {
-                               sk->sk_err_soft = -err;
+                       dst = ip6_dst_lookup_flow(sk, &fl, NULL, false);
+                       if (IS_ERR(dst)) {
+                               sk->sk_err_soft = -PTR_ERR(dst);
                                goto out;
                        }
                } else
@@ -267,16 +261,12 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
 
        final_p = fl6_update_dst(&fl, opt, &final);
 
-       err = ip6_dst_lookup(sk, &dst, &fl);
-       if (err)
-               goto done;
-
-       if (final_p)
-               ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-       err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0);
-       if (err < 0)
+       dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
+               dst = NULL;
                goto done;
+       }
 
        skb = dccp_make_response(sk, dst, req);
        if (skb != NULL) {
@@ -338,14 +328,13 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
        security_skb_classify_flow(rxskb, &fl);
 
        /* sk = NULL, but it is safe for now. RST socket required. */
-       if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
-               if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
-                       skb_dst_set(skb, dst);
-                       ip6_xmit(ctl_sk, skb, &fl, NULL);
-                       DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
-                       DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
-                       return;
-               }
+       dst = ip6_dst_lookup_flow(ctl_sk, &fl, NULL, false);
+       if (!IS_ERR(dst)) {
+               skb_dst_set(skb, dst);
+               ip6_xmit(ctl_sk, skb, &fl, NULL);
+               DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
+               DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
+               return;
        }
 
        kfree_skb(skb);
@@ -484,7 +473,6 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
        struct inet6_request_sock *ireq6 = inet6_rsk(req);
        struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
        struct inet_sock *newinet;
-       struct dccp_sock *newdp;
        struct dccp6_sock *newdp6;
        struct sock *newsk;
        struct ipv6_txoptions *opt;
@@ -498,7 +486,6 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
                        return NULL;
 
                newdp6 = (struct dccp6_sock *)newsk;
-               newdp = dccp_sk(newsk);
                newinet = inet_sk(newsk);
                newinet->pinet6 = &newdp6->inet6;
                newnp = inet6_sk(newsk);
@@ -552,13 +539,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
                fl.fl_ip_sport = inet_rsk(req)->loc_port;
                security_sk_classify_flow(sk, &fl);
 
-               if (ip6_dst_lookup(sk, &dst, &fl))
-                       goto out;
-
-               if (final_p)
-                       ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-               if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
+               dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
+               if (IS_ERR(dst))
                        goto out;
        }
 
@@ -578,7 +560,6 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
        newdp6 = (struct dccp6_sock *)newsk;
        newinet = inet_sk(newsk);
        newinet->pinet6 = &newdp6->inet6;
-       newdp = dccp_sk(newsk);
        newnp = inet6_sk(newsk);
 
        memcpy(newnp, np, sizeof(struct ipv6_pinfo));
@@ -982,19 +963,10 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
        final_p = fl6_update_dst(&fl, np->opt, &final);
 
-       err = ip6_dst_lookup(sk, &dst, &fl);
-       if (err)
+       dst = ip6_dst_lookup_flow(sk, &fl, final_p, true);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
                goto failure;
-
-       if (final_p)
-               ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-       err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
-       if (err < 0) {
-               if (err == -EREMOTE)
-                       err = ip6_dst_blackhole(sk, &dst, &fl);
-               if (err < 0)
-                       goto failure;
        }
 
        if (saddr == NULL) {
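On the IPv6 side, the old three-step sequence (ip6_dst_lookup(), copying final_p into the flow, then xfrm_lookup()) is folded into ip6_dst_lookup_flow(), which returns either the finished dst or an ERR_PTR. The pattern repeated throughout these hunks, with the final boolean argument true only in the connect path (sketch only):

        dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
        if (IS_ERR(dst)) {
                err = PTR_ERR(dst);
                dst = NULL;
                goto failure;
        }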
index 5e636365d33cb2e99b1077b59ce9e14c4b5c7c02..484fdbf92bd840203f1aa11e7c976a56299d8bb3 100644 (file)
@@ -112,6 +112,7 @@ static int dn_dst_gc(struct dst_ops *ops);
 static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
 static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
 static unsigned int dn_dst_default_mtu(const struct dst_entry *dst);
+static void dn_dst_destroy(struct dst_entry *);
 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
 static void dn_dst_link_failure(struct sk_buff *);
 static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
@@ -133,11 +134,18 @@ static struct dst_ops dn_dst_ops = {
        .check =                dn_dst_check,
        .default_advmss =       dn_dst_default_advmss,
        .default_mtu =          dn_dst_default_mtu,
+       .cow_metrics =          dst_cow_metrics_generic,
+       .destroy =              dn_dst_destroy,
        .negative_advice =      dn_dst_negative_advice,
        .link_failure =         dn_dst_link_failure,
        .update_pmtu =          dn_dst_update_pmtu,
 };
 
+static void dn_dst_destroy(struct dst_entry *dst)
+{
+       dst_destroy_metrics_generic(dst);
+}
+
 static __inline__ unsigned dn_hash(__le16 src, __le16 dst)
 {
        __u16 tmp = (__u16 __force)(src ^ dst);
@@ -814,14 +822,14 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
 {
        struct dn_fib_info *fi = res->fi;
        struct net_device *dev = rt->dst.dev;
+       unsigned int mss_metric;
        struct neighbour *n;
-       unsigned int metric;
 
        if (fi) {
                if (DN_FIB_RES_GW(*res) &&
                    DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
                        rt->rt_gateway = DN_FIB_RES_GW(*res);
-               dst_import_metrics(&rt->dst, fi->fib_metrics);
+               dst_init_metrics(&rt->dst, fi->fib_metrics, true);
        }
        rt->rt_type = res->type;
 
@@ -834,10 +842,10 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
 
        if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
                dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu);
-       metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
-       if (metric) {
+       mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
+       if (mss_metric) {
                unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
-               if (metric > mss)
+               if (mss_metric > mss)
                        dst_metric_set(&rt->dst, RTAX_ADVMSS, mss);
        }
        return 0;
@@ -1114,7 +1122,7 @@ make_route:
        if (dev_out->flags & IFF_LOOPBACK)
                flags |= RTCF_LOCAL;
 
-       rt = dst_alloc(&dn_dst_ops);
+       rt = dst_alloc(&dn_dst_ops, 0);
        if (rt == NULL)
                goto e_nobufs;
 
@@ -1214,7 +1222,11 @@ static int dn_route_output_key(struct dst_entry **pprt, struct flowi *flp, int f
 
        err = __dn_route_output_key(pprt, flp, flags);
        if (err == 0 && flp->proto) {
-               err = xfrm_lookup(&init_net, pprt, flp, NULL, 0);
+               *pprt = xfrm_lookup(&init_net, *pprt, flp, NULL, 0);
+               if (IS_ERR(*pprt)) {
+                       err = PTR_ERR(*pprt);
+                       *pprt = NULL;
+               }
        }
        return err;
 }
@@ -1225,8 +1237,13 @@ int dn_route_output_sock(struct dst_entry **pprt, struct flowi *fl, struct sock
 
        err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
        if (err == 0 && fl->proto) {
-               err = xfrm_lookup(&init_net, pprt, fl, sk,
-                                (flags & MSG_DONTWAIT) ? 0 : XFRM_LOOKUP_WAIT);
+               if (!(flags & MSG_DONTWAIT))
+                       fl->flags |= FLOWI_FLAG_CAN_SLEEP;
+               *pprt = xfrm_lookup(&init_net, *pprt, fl, sk, 0);
+               if (IS_ERR(*pprt)) {
+                       err = PTR_ERR(*pprt);
+                       *pprt = NULL;
+               }
        }
        return err;
 }
@@ -1375,7 +1392,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
        }
 
 make_route:
-       rt = dst_alloc(&dn_dst_ops);
+       rt = dst_alloc(&dn_dst_ops, 0);
        if (rt == NULL)
                goto e_nobufs;
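
The DECnet hunks keep the legacy int-plus-out-parameter interface of dn_route_output_key()/dn_route_output_sock() while adopting the pointer-returning xfrm_lookup(): the result is unwrapped with PTR_ERR() and the out-parameter is cleared on failure, and the old XFRM_LOOKUP_WAIT argument is replaced by setting FLOWI_FLAG_CAN_SLEEP on the flow itself. Below is a small sketch of that unwrapping pattern under assumed mini_*/legacy_* names, reusing the same stand-in ERR_PTR macros as the previous example.

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct mini_dst { int id; };

/* Hypothetical pointer-returning transform lookup. */
static struct mini_dst *mini_xfrm_lookup(struct mini_dst *in, int fail)
{
	return fail ? ERR_PTR(-EPERM) : in;
}

/* Legacy-style wrapper: keeps the int + out-parameter interface, but
 * unwraps the pointer-or-error result and clears *pprt on failure. */
static int legacy_output(struct mini_dst **pprt, int fail)
{
	static struct mini_dst route = { .id = 7 };
	int err = 0;

	*pprt = &route;
	*pprt = mini_xfrm_lookup(*pprt, fail);
	if (IS_ERR(*pprt)) {
		err = (int)PTR_ERR(*pprt);
		*pprt = NULL;	/* never hand an error pointer to the caller */
	}
	return err;
}

int main(void)
{
	struct mini_dst *rt;
	int err = legacy_output(&rt, 0);

	printf("err=%d rt=%p\n", err, (void *)rt);
	err = legacy_output(&rt, 1);
	printf("err=%d rt=%p\n", err, (void *)rt);
	return 0;
}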
 
index f2abd37556905923097b0f203358a785b6f722ba..b66600b3f4b55de0e38bb0f86d1c72b40d00b562 100644 (file)
@@ -59,7 +59,6 @@ struct dn_hash
 };
 
 #define dz_key_0(key)          ((key).datum = 0)
-#define dz_prefix(key,dz)      ((key).datum)
 
 #define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\
        for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
index 83277f463af76c0a611ff312d3f39a1020584830..8f4ff5a2c8133b9961b78b969aeed05318fc1d4d 100644 (file)
@@ -18,7 +18,7 @@
 
 static int reg_read(struct dsa_switch *ds, int addr, int reg)
 {
-       return mdiobus_read(ds->master_mii_bus, addr, reg);
+       return mdiobus_read(ds->master_mii_bus, ds->pd->sw_addr + addr, reg);
 }
 
 #define REG_READ(addr, reg)                                    \
@@ -34,7 +34,8 @@ static int reg_read(struct dsa_switch *ds, int addr, int reg)
 
 static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
 {
-       return mdiobus_write(ds->master_mii_bus, addr, reg, val);
+       return mdiobus_write(ds->master_mii_bus, ds->pd->sw_addr + addr,
+                            reg, val);
 }
 
 #define REG_WRITE(addr, reg, val)                              \
@@ -50,7 +51,7 @@ static char *mv88e6060_probe(struct mii_bus *bus, int sw_addr)
 {
        int ret;
 
-       ret = mdiobus_read(bus, REG_PORT(0), 0x03);
+       ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03);
        if (ret >= 0) {
                ret &= 0xfff0;
                if (ret == 0x0600)
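
The mv88e6060 hunks fix multi-chip addressing: the driver previously issued MDIO reads and writes at the raw port offsets, ignoring the base address (sw_addr) the switch is strapped to, so every access now adds ds->pd->sw_addr. A toy illustration of why the offset matters is sketched below; the toy_* helpers and the REG_PORT() value are assumptions for the demo, not the driver's real MDIO plumbing.

#include <stdio.h>
#include <stdint.h>

/* Toy MDIO bus: 32 PHY addresses x 32 registers, modelled as an array. */
static uint16_t toy_bus[32][32];

static int toy_mdio_read(int phy_addr, int reg)
{
	if (phy_addr < 0 || phy_addr > 31 || reg < 0 || reg > 31)
		return -1;
	return toy_bus[phy_addr][reg];
}

#define REG_PORT(p)	(8 + (p))	/* assumed port register base for the demo */

static int switch_reg_read(int sw_addr, int port, int reg)
{
	/* The bug fixed above: without the sw_addr offset this would read
	 * whatever device happens to sit at the raw port address. */
	return toy_mdio_read(sw_addr + REG_PORT(port), reg);
}

int main(void)
{
	int sw_addr = 16;	/* switch strapped to MDIO base address 16 */

	toy_bus[sw_addr + REG_PORT(0)][0x03] = 0x0600;	/* identifier register */
	printf("port0 ID reg: 0x%04x\n",
	       (unsigned int)switch_reg_read(sw_addr, 0, 0x03));
	return 0;
}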
index a5a1050595d1866c6cc4ce3df0ade8c94cc183b3..cbb505ba9324fa05caeb9f93e545d9cfaf42e7af 100644 (file)
@@ -55,45 +55,9 @@ config IP_ADVANCED_ROUTER
 
          If unsure, say N here.
 
-choice
-       prompt "Choose IP: FIB lookup algorithm (choose FIB_HASH if unsure)"
-       depends on IP_ADVANCED_ROUTER
-       default ASK_IP_FIB_HASH
-
-config ASK_IP_FIB_HASH
-       bool "FIB_HASH"
-       ---help---
-         Current FIB is very proven and good enough for most users.
-
-config IP_FIB_TRIE
-       bool "FIB_TRIE"
-       ---help---
-         Use new experimental LC-trie as FIB lookup algorithm.
-         This improves lookup performance if you have a large
-         number of routes.
-
-         LC-trie is a longest matching prefix lookup algorithm which
-         performs better than FIB_HASH for large routing tables.
-         But, it consumes more memory and is more complex.
-
-         LC-trie is described in:
-
-         IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
-         IEEE Journal on Selected Areas in Communications, 17(6):1083-1092,
-         June 1999
-
-         An experimental study of compression methods for dynamic tries
-         Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
-         <http://www.csc.kth.se/~snilsson/software/dyntrie2/>
-
-endchoice
-
-config IP_FIB_HASH
-       def_bool ASK_IP_FIB_HASH || !IP_ADVANCED_ROUTER
-
 config IP_FIB_TRIE_STATS
        bool "FIB TRIE statistics"
-       depends on IP_FIB_TRIE
+       depends on IP_ADVANCED_ROUTER
        ---help---
          Keep track of statistics on structure of FIB TRIE table.
          Useful for testing and measuring TRIE performance.
@@ -140,6 +104,9 @@ config IP_ROUTE_VERBOSE
          handled by the klogd daemon which is responsible for kernel messages
          ("man klogd").
 
+config IP_ROUTE_CLASSID
+       bool
+
 config IP_PNP
        bool "IP: kernel level autoconfiguration"
        help
@@ -657,4 +624,3 @@ config TCP_MD5SIG
          on the Internet.
 
          If unsure, say N.
-
index 4978d22f9a75eafe3fe10d8cf93f6e8a2a7f647a..0dc772d0d1259e7228481d53108981a23b9d61ea 100644 (file)
@@ -10,12 +10,10 @@ obj-y     := route.o inetpeer.o protocol.o \
             tcp_minisocks.o tcp_cong.o \
             datagram.o raw.o udp.o udplite.o \
             arp.o icmp.o devinet.o af_inet.o  igmp.o \
-            fib_frontend.o fib_semantics.o \
+            fib_frontend.o fib_semantics.o fib_trie.o \
             inet_fragment.o
 
 obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
-obj-$(CONFIG_IP_FIB_HASH) += fib_hash.o
-obj-$(CONFIG_IP_FIB_TRIE) += fib_trie.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
 obj-$(CONFIG_IP_MROUTE) += ipmr.o
index 45b89d7bda5a8afd615b90913957e57f73790124..35a50205501887775b6edb52aa2ed0e43a9a67e8 100644 (file)
@@ -1101,23 +1101,20 @@ int sysctl_ip_dynaddr __read_mostly;
 static int inet_sk_reselect_saddr(struct sock *sk)
 {
        struct inet_sock *inet = inet_sk(sk);
-       int err;
-       struct rtable *rt;
        __be32 old_saddr = inet->inet_saddr;
-       __be32 new_saddr;
        __be32 daddr = inet->inet_daddr;
+       struct rtable *rt;
+       __be32 new_saddr;
 
        if (inet->opt && inet->opt->srr)
                daddr = inet->opt->faddr;
 
        /* Query new route. */
-       err = ip_route_connect(&rt, daddr, 0,
-                              RT_CONN_FLAGS(sk),
-                              sk->sk_bound_dev_if,
-                              sk->sk_protocol,
-                              inet->inet_sport, inet->inet_dport, sk, 0);
-       if (err)
-               return err;
+       rt = ip_route_connect(daddr, 0, RT_CONN_FLAGS(sk),
+                             sk->sk_bound_dev_if, sk->sk_protocol,
+                             inet->inet_sport, inet->inet_dport, sk, false);
+       if (IS_ERR(rt))
+               return PTR_ERR(rt);
 
        sk_setup_caps(sk, &rt->dst);
 
@@ -1160,7 +1157,7 @@ int inet_sk_rebuild_header(struct sock *sk)
        daddr = inet->inet_daddr;
        if (inet->opt && inet->opt->srr)
                daddr = inet->opt->faddr;
-{
+       {
        struct flowi fl = {
                .oif = sk->sk_bound_dev_if,
                .mark = sk->sk_mark,
@@ -1174,11 +1171,14 @@ int inet_sk_rebuild_header(struct sock *sk)
        };
 
        security_sk_classify_flow(sk, &fl);
-       err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0);
-}
-       if (!err)
+       rt = ip_route_output_flow(sock_net(sk), &fl, sk);
+       }
+       if (!IS_ERR(rt)) {
+               err = 0;
                sk_setup_caps(sk, &rt->dst);
-       else {
+       } else {
+               err = PTR_ERR(rt);
+
                /* Routing failed... */
                sk->sk_route_caps = 0;
                /*
@@ -1231,7 +1231,7 @@ out:
        return err;
 }
 
-static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
+static struct sk_buff *inet_gso_segment(struct sk_buff *skb, u32 features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct iphdr *iph;
index 86961bec70abbdda253b68da69ba6780ece39726..325053df6e705f6a413f73f4abb9db4651ee9bb4 100644 (file)
@@ -201,7 +201,10 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
        top_iph->ttl = 0;
        top_iph->check = 0;
 
-       ah->hdrlen  = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
+       if (x->props.flags & XFRM_STATE_ALIGN4)
+               ah->hdrlen  = (XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
+       else
+               ah->hdrlen  = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
 
        ah->reserved = 0;
        ah->spi = x->id.spi;
@@ -299,9 +302,15 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
        nexthdr = ah->nexthdr;
        ah_hlen = (ah->hdrlen + 2) << 2;
 
-       if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
-           ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
-               goto out;
+       if (x->props.flags & XFRM_STATE_ALIGN4) {
+               if (ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_full_len) &&
+                   ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len))
+                       goto out;
+       } else {
+               if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
+                   ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
+                       goto out;
+       }
 
        if (!pskb_may_pull(skb, ah_hlen))
                goto out;
@@ -450,8 +459,12 @@ static int ah_init_state(struct xfrm_state *x)
 
        BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);
 
-       x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
-                                         ahp->icv_trunc_len);
+       if (x->props.flags & XFRM_STATE_ALIGN4)
+               x->props.header_len = XFRM_ALIGN4(sizeof(struct ip_auth_hdr) +
+                                                 ahp->icv_trunc_len);
+       else
+               x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
+                                                 ahp->icv_trunc_len);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                x->props.header_len += sizeof(struct iphdr);
        x->data = ahp;
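
The ah4.c hunks make the AH header alignment selectable: with XFRM_STATE_ALIGN4 set, the ICV area is padded to 4 bytes instead of the default 8, and the same choice is applied when sizing the header, validating inbound headers, and reserving header room. Since AH's hdrlen field is the header length in 32-bit words minus 2 (RFC 4302), the arithmetic is easy to check in isolation; the sketch below assumes the 12-byte fixed AH header as AH_FIXED_LEN and a generic ALIGN_TO macro shaped like XFRM_ALIGN4/XFRM_ALIGN8.

#include <stdio.h>
#include <stddef.h>

/* Generic power-of-two alignment, the same shape as XFRM_ALIGN4/XFRM_ALIGN8. */
#define ALIGN_TO(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

/* Fixed part of the AH header: next header, length, reserved, SPI, sequence
 * number = 12 bytes. */
#define AH_FIXED_LEN	12

/* hdrlen is the AH length in 32-bit words minus 2, computed after padding
 * the truncated ICV out to the chosen alignment. */
static unsigned int ah_hdrlen(size_t icv_trunc_len, unsigned int align)
{
	return (unsigned int)(ALIGN_TO(AH_FIXED_LEN + icv_trunc_len, align) >> 2) - 2;
}

int main(void)
{
	size_t icv = 12;	/* e.g. a 96-bit truncated ICV */

	printf("8-byte aligned hdrlen: %u\n", ah_hdrlen(icv, 8));	/* 4 */
	printf("4-byte aligned hdrlen: %u\n", ah_hdrlen(icv, 4));	/* 4 */
	icv = 16;		/* a 128-bit ICV, where the two alignments differ */
	printf("8-byte aligned hdrlen: %u\n", ah_hdrlen(icv, 8));	/* 6 */
	printf("4-byte aligned hdrlen: %u\n", ah_hdrlen(icv, 4));	/* 5 */
	return 0;
}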
index 7927589813b54b5baa50ae29904c5b0e21f6a513..fa9988da1da45e364d9c820552da40d6908ba5a3 100644 (file)
@@ -440,7 +440,8 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
        /*unsigned long now; */
        struct net *net = dev_net(dev);
 
-       if (ip_route_output_key(net, &rt, &fl) < 0)
+       rt = ip_route_output_key(net, &fl);
+       if (IS_ERR(rt))
                return 1;
        if (rt->dst.dev != dev) {
                NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER);
@@ -1063,10 +1064,10 @@ static int arp_req_set(struct net *net, struct arpreq *r,
        if (dev == NULL) {
                struct flowi fl = { .fl4_dst = ip,
                                    .fl4_tos = RTO_ONLINK };
-               struct rtable *rt;
-               err = ip_route_output_key(net, &rt, &fl);
-               if (err != 0)
-                       return err;
+               struct rtable *rt = ip_route_output_key(net, &fl);
+
+               if (IS_ERR(rt))
+                       return PTR_ERR(rt);
                dev = rt->dst.dev;
                ip_rt_put(rt);
                if (!dev)
@@ -1177,7 +1178,6 @@ static int arp_req_delete_public(struct net *net, struct arpreq *r,
 static int arp_req_delete(struct net *net, struct arpreq *r,
                          struct net_device *dev)
 {
-       int err;
        __be32 ip;
 
        if (r->arp_flags & ATF_PUBL)
@@ -1187,10 +1187,9 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
        if (dev == NULL) {
                struct flowi fl = { .fl4_dst = ip,
                                    .fl4_tos = RTO_ONLINK };
-               struct rtable *rt;
-               err = ip_route_output_key(net, &rt, &fl);
-               if (err != 0)
-                       return err;
+               struct rtable *rt = ip_route_output_key(net, &fl);
+               if (IS_ERR(rt))
+                       return PTR_ERR(rt);
                dev = rt->dst.dev;
                ip_rt_put(rt);
                if (!dev)
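
In the arp.c hunks, ip_route_output_key() now hands back the rtable directly (or an ERR_PTR(), checked as in the earlier sketches), but the surrounding pattern is unchanged: the caller borrows rt->dst.dev and immediately drops its route reference with ip_rt_put(). The toy refcount sketch below illustrates that borrow-then-put lifecycle; the toy_* types are stand-ins, and in the kernel the device remains valid afterwards only because of the locking the caller already holds, not because of anything the route does.

#include <stdio.h>

/* Toy refcounted route, standing in for struct rtable. */
struct toy_rt {
	int refcnt;
	const char *dev_name;
};

static struct toy_rt cached_route = { .refcnt = 0, .dev_name = "eth0" };

/* Stand-in for the lookup: hands back a held reference. */
static struct toy_rt *toy_route_output(void)
{
	cached_route.refcnt++;
	return &cached_route;
}

/* Stand-in for ip_rt_put(): drops the reference taken by the lookup. */
static void toy_rt_put(struct toy_rt *rt)
{
	rt->refcnt--;
}

int main(void)
{
	struct toy_rt *rt = toy_route_output();
	const char *dev = rt->dev_name;	/* borrow the device pointer... */

	toy_rt_put(rt);			/* ...then drop the route right away */
	printf("resolved via %s, refcnt back to %d\n", dev, cached_route.refcnt);
	return 0;
}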
index 174be6caa5c8245b105b3d4c504ed0c977a00f2c..85bd24ca4f6dda83e51170dabdfb7584e932a52c 100644 (file)
@@ -46,11 +46,12 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                if (!saddr)
                        saddr = inet->mc_addr;
        }
-       err = ip_route_connect(&rt, usin->sin_addr.s_addr, saddr,
-                              RT_CONN_FLAGS(sk), oif,
-                              sk->sk_protocol,
-                              inet->inet_sport, usin->sin_port, sk, 1);
-       if (err) {
+       rt = ip_route_connect(usin->sin_addr.s_addr, saddr,
+                             RT_CONN_FLAGS(sk), oif,
+                             sk->sk_protocol,
+                             inet->inet_sport, usin->sin_port, sk, true);
+       if (IS_ERR(rt)) {
+               err = PTR_ERR(rt);
                if (err == -ENETUNREACH)
                        IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
                return err;
index 036652c8166d7cd541d5c3bb3b2af46e84779c48..6d85800daeb7aaad75dda0f7cc681d1ac2c462ac 100644 (file)
@@ -51,6 +51,7 @@
 #include <linux/inetdevice.h>
 #include <linux/igmp.h>
 #include <linux/slab.h>
+#include <linux/hash.h>
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
 #endif
@@ -92,6 +93,71 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
        [IFA_LABEL]             = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
 };
 
+/* inet_addr_hash's shifting is dependent upon this IN4_ADDR_HSIZE
+ * value.  So if you change this define, make appropriate changes to
+ * inet_addr_hash as well.
+ */
+#define IN4_ADDR_HSIZE 256
+static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
+static DEFINE_SPINLOCK(inet_addr_hash_lock);
+
+static inline unsigned int inet_addr_hash(struct net *net, __be32 addr)
+{
+       u32 val = (__force u32) addr ^ hash_ptr(net, 8);
+
+       return ((val ^ (val >> 8) ^ (val >> 16) ^ (val >> 24)) &
+               (IN4_ADDR_HSIZE - 1));
+}
+
+static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
+{
+       unsigned int hash = inet_addr_hash(net, ifa->ifa_local);
+
+       spin_lock(&inet_addr_hash_lock);
+       hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
+       spin_unlock(&inet_addr_hash_lock);
+}
+
+static void inet_hash_remove(struct in_ifaddr *ifa)
+{
+       spin_lock(&inet_addr_hash_lock);
+       hlist_del_init_rcu(&ifa->hash);
+       spin_unlock(&inet_addr_hash_lock);
+}
+
+/**
+ * __ip_dev_find - find the first device with a given source address.
+ * @net: the net namespace
+ * @addr: the source address
+ * @devref: if true, take a reference on the found device
+ *
+ * If a caller uses devref=false, it should be protected by RCU, or RTNL
+ */
+struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
+{
+       unsigned int hash = inet_addr_hash(net, addr);
+       struct net_device *result = NULL;
+       struct in_ifaddr *ifa;
+       struct hlist_node *node;
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(ifa, node, &inet_addr_lst[hash], hash) {
+               struct net_device *dev = ifa->ifa_dev->dev;
+
+               if (!net_eq(dev_net(dev), net))
+                       continue;
+               if (ifa->ifa_local == addr) {
+                       result = dev;
+                       break;
+               }
+       }
+       if (result && devref)
+               dev_hold(result);
+       rcu_read_unlock();
+       return result;
+}
+EXPORT_SYMBOL(__ip_dev_find);
+
 static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
 
 static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
@@ -265,6 +331,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
                        }
 
                        if (!do_promote) {
+                               inet_hash_remove(ifa);
                                *ifap1 = ifa->ifa_next;
 
                                rtmsg_ifa(RTM_DELADDR, ifa, nlh, pid);
@@ -281,6 +348,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
        /* 2. Unlink it */
 
        *ifap = ifa1->ifa_next;
+       inet_hash_remove(ifa1);
 
        /* 3. Announce address deletion */
 
@@ -368,6 +436,8 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
        ifa->ifa_next = *ifap;
        *ifap = ifa;
 
+       inet_hash_insert(dev_net(in_dev->dev), ifa);
+
        /* Send message first, then call notifier.
           Notifier will trigger FIB update, so that
           listeners of netlink will know about new ifaddr */
@@ -521,6 +591,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh)
        if (tb[IFA_ADDRESS] == NULL)
                tb[IFA_ADDRESS] = tb[IFA_LOCAL];
 
+       INIT_HLIST_NODE(&ifa->hash);
        ifa->ifa_prefixlen = ifm->ifa_prefixlen;
        ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
        ifa->ifa_flags = ifm->ifa_flags;
@@ -728,6 +799,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
                if (!ifa) {
                        ret = -ENOBUFS;
                        ifa = inet_alloc_ifa();
+                       INIT_HLIST_NODE(&ifa->hash);
                        if (!ifa)
                                break;
                        if (colon)
@@ -1084,6 +1156,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
                        struct in_ifaddr *ifa = inet_alloc_ifa();
 
                        if (ifa) {
+                               INIT_HLIST_NODE(&ifa->hash);
                                ifa->ifa_local =
                                  ifa->ifa_address = htonl(INADDR_LOOPBACK);
                                ifa->ifa_prefixlen = 8;
@@ -1720,6 +1793,11 @@ static struct rtnl_af_ops inet_af_ops = {
 
 void __init devinet_init(void)
 {
+       int i;
+
+       for (i = 0; i < IN4_ADDR_HSIZE; i++)
+               INIT_HLIST_HEAD(&inet_addr_lst[i]);
+
        register_pernet_subsys(&devinet_ops);
 
        register_gifconf(PF_INET, inet_gifconf);
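
The devinet.c hunks move __ip_dev_find() off the FIB and onto a dedicated 256-bucket hash of configured local addresses, kept in sync on every address add/delete. The bucket index is an XOR-fold of the address mixed with a per-namespace value from hash_ptr(net, 8). The sketch below reproduces only the fold; the toy_* name, the fixed salt standing in for hash_ptr(), and the sample 32-bit values are assumptions for the demo.

#include <stdio.h>
#include <stdint.h>

#define IN4_ADDR_HSIZE	256

/* XOR-fold of an IPv4 address into one of 256 hash buckets, mirroring the
 * shape of the inet_addr_hash() added above; a fixed salt replaces the
 * per-namespace hash_ptr(net, 8) term. */
static unsigned int toy_inet_addr_hash(uint32_t addr, uint32_t net_salt)
{
	uint32_t val = addr ^ net_salt;

	return (val ^ (val >> 8) ^ (val >> 16) ^ (val >> 24)) &
	       (IN4_ADDR_HSIZE - 1);
}

int main(void)
{
	uint32_t addrs[] = { 0xC0000201u, 0xC0000202u, 0x0A000001u };
	unsigned int i;

	for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
		printf("0x%08x -> bucket %u\n", (unsigned int)addrs[i],
		       toy_inet_addr_hash(addrs[i], 0x12345678u));
	return 0;
}

Because every byte of the address participates in the fold, addresses differing only in their host part still spread across buckets, which keeps the common "find the device owning this source address" lookup close to O(1) regardless of routing-table size.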
index 1d2cdd43a878b0e9d9f3b53303f00ef8b00d11b6..fe10bcd0f307fc94f3e67d3f05054359a07aa4a9 100644 (file)
@@ -51,11 +51,11 @@ static int __net_init fib4_rules_init(struct net *net)
 {
        struct fib_table *local_table, *main_table;
 
-       local_table = fib_hash_table(RT_TABLE_LOCAL);
+       local_table = fib_trie_table(RT_TABLE_LOCAL);
        if (local_table == NULL)
                return -ENOMEM;
 
-       main_table  = fib_hash_table(RT_TABLE_MAIN);
+       main_table  = fib_trie_table(RT_TABLE_MAIN);
        if (main_table == NULL)
                goto fail;
 
@@ -82,7 +82,7 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
        if (tb)
                return tb;
 
-       tb = fib_hash_table(id);
+       tb = fib_trie_table(id);
        if (!tb)
                return NULL;
        h = id & (FIB_TABLE_HASHSZ - 1);
@@ -114,21 +114,6 @@ struct fib_table *fib_get_table(struct net *net, u32 id)
 }
 #endif /* CONFIG_IP_MULTIPLE_TABLES */
 
-void fib_select_default(struct net *net,
-                       const struct flowi *flp, struct fib_result *res)
-{
-       struct fib_table *tb;
-       int table = RT_TABLE_MAIN;
-#ifdef CONFIG_IP_MULTIPLE_TABLES
-       if (res->r == NULL || res->r->action != FR_ACT_TO_TBL)
-               return;
-       table = res->r->table;
-#endif
-       tb = fib_get_table(net, table);
-       if (FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
-               fib_table_select_default(tb, flp, res);
-}
-
 static void fib_flush(struct net *net)
 {
        int flushed = 0;
@@ -147,46 +132,6 @@ static void fib_flush(struct net *net)
                rt_cache_flush(net, -1);
 }
 
-/**
- * __ip_dev_find - find the first device with a given source address.
- * @net: the net namespace
- * @addr: the source address
- * @devref: if true, take a reference on the found device
- *
- * If a caller uses devref=false, it should be protected by RCU, or RTNL
- */
-struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
-{
-       struct flowi fl = {
-               .fl4_dst = addr,
-       };
-       struct fib_result res = { 0 };
-       struct net_device *dev = NULL;
-       struct fib_table *local_table;
-
-#ifdef CONFIG_IP_MULTIPLE_TABLES
-       res.r = NULL;
-#endif
-
-       rcu_read_lock();
-       local_table = fib_get_table(net, RT_TABLE_LOCAL);
-       if (!local_table ||
-           fib_table_lookup(local_table, &fl, &res, FIB_LOOKUP_NOREF)) {
-               rcu_read_unlock();
-               return NULL;
-       }
-       if (res.type != RTN_LOCAL)
-               goto out;
-       dev = FIB_RES_DEV(res);
-
-       if (dev && devref)
-               dev_hold(dev);
-out:
-       rcu_read_unlock();
-       return dev;
-}
-EXPORT_SYMBOL(__ip_dev_find);
-
 /*
  * Find address type as if only "dev" was present in the system. If
  * on_dev is NULL then all interfaces are taken into consideration.
@@ -248,19 +193,21 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
                        u32 *itag, u32 mark)
 {
        struct in_device *in_dev;
-       struct flowi fl = {
-               .fl4_dst = src,
-               .fl4_src = dst,
-               .fl4_tos = tos,
-               .mark = mark,
-               .iif = oif
-       };
+       struct flowi fl;
        struct fib_result res;
        int no_addr, rpf, accept_local;
        bool dev_match;
        int ret;
        struct net *net;
 
+       fl.oif = 0;
+       fl.iif = oif;
+       fl.mark = mark;
+       fl.fl4_dst = src;
+       fl.fl4_src = dst;
+       fl.fl4_tos = tos;
+       fl.fl4_scope = RT_SCOPE_UNIVERSE;
+
        no_addr = rpf = accept_local = 0;
        in_dev = __in_dev_get_rcu(dev);
        if (in_dev) {
@@ -945,10 +892,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
                fib_sync_up(dev);
 #endif
+               fib_update_nh_saddrs(dev);
                rt_cache_flush(dev_net(dev), -1);
                break;
        case NETDEV_DOWN:
                fib_del_ifaddr(ifa);
+               fib_update_nh_saddrs(dev);
                if (ifa->ifa_dev->ifa_list == NULL) {
                        /* Last address was deleted from this interface.
                         * Disable IP.
@@ -1101,5 +1050,5 @@ void __init ip_fib_init(void)
        register_netdevice_notifier(&fib_netdev_notifier);
        register_inetaddr_notifier(&fib_inetaddr_notifier);
 
-       fib_hash_init();
+       fib_trie_init();
 }
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
deleted file mode 100644 (file)
index b3acb04..0000000
+++ /dev/null
@@ -1,1133 +0,0 @@
-/*
- * INET                An implementation of the TCP/IP protocol suite for the LINUX
- *             operating system.  INET is implemented using the  BSD Socket
- *             interface as the means of communication with the user level.
- *
- *             IPv4 FIB: lookup engine and maintenance routines.
- *
- * Authors:    Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
- *
- *             This program is free software; you can redistribute it and/or
- *             modify it under the terms of the GNU General Public License
- *             as published by the Free Software Foundation; either version
- *             2 of the License, or (at your option) any later version.
- */
-
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/errno.h>
-#include <linux/in.h>
-#include <linux/inet.h>
-#include <linux/inetdevice.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/proc_fs.h>
-#include <linux/skbuff.h>
-#include <linux/netlink.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-
-#include <net/net_namespace.h>
-#include <net/ip.h>
-#include <net/protocol.h>
-#include <net/route.h>
-#include <net/tcp.h>
-#include <net/sock.h>
-#include <net/ip_fib.h>
-
-#include "fib_lookup.h"
-
-static struct kmem_cache *fn_hash_kmem __read_mostly;
-static struct kmem_cache *fn_alias_kmem __read_mostly;
-
-struct fib_node {
-       struct hlist_node       fn_hash;
-       struct list_head        fn_alias;
-       __be32                  fn_key;
-       struct fib_alias        fn_embedded_alias;
-};
-
-#define EMBEDDED_HASH_SIZE (L1_CACHE_BYTES / sizeof(struct hlist_head))
-
-struct fn_zone {
-       struct fn_zone __rcu    *fz_next;       /* Next not empty zone  */
-       struct hlist_head __rcu *fz_hash;       /* Hash table pointer   */
-       seqlock_t               fz_lock;
-       u32                     fz_hashmask;    /* (fz_divisor - 1)     */
-
-       u8                      fz_order;       /* Zone order (0..32)   */
-       u8                      fz_revorder;    /* 32 - fz_order        */
-       __be32                  fz_mask;        /* inet_make_mask(order) */
-#define FZ_MASK(fz)            ((fz)->fz_mask)
-
-       struct hlist_head       fz_embedded_hash[EMBEDDED_HASH_SIZE];
-
-       int                     fz_nent;        /* Number of entries    */
-       int                     fz_divisor;     /* Hash size (mask+1)   */
-};
-
-struct fn_hash {
-       struct fn_zone          *fn_zones[33];
-       struct fn_zone __rcu    *fn_zone_list;
-};
-
-static inline u32 fn_hash(__be32 key, struct fn_zone *fz)
-{
-       u32 h = ntohl(key) >> fz->fz_revorder;
-       h ^= (h>>20);
-       h ^= (h>>10);
-       h ^= (h>>5);
-       h &= fz->fz_hashmask;
-       return h;
-}
-
-static inline __be32 fz_key(__be32 dst, struct fn_zone *fz)
-{
-       return dst & FZ_MASK(fz);
-}
-
-static unsigned int fib_hash_genid;
-
-#define FZ_MAX_DIVISOR ((PAGE_SIZE<<MAX_ORDER) / sizeof(struct hlist_head))
-
-static struct hlist_head *fz_hash_alloc(int divisor)
-{
-       unsigned long size = divisor * sizeof(struct hlist_head);
-
-       if (size <= PAGE_SIZE)
-               return kzalloc(size, GFP_KERNEL);
-
-       return (struct hlist_head *)
-               __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(size));
-}
-
-/* The fib hash lock must be held when this is called. */
-static inline void fn_rebuild_zone(struct fn_zone *fz,
-                                  struct hlist_head *old_ht,
-                                  int old_divisor)
-{
-       int i;
-
-       for (i = 0; i < old_divisor; i++) {
-               struct hlist_node *node, *n;
-               struct fib_node *f;
-
-               hlist_for_each_entry_safe(f, node, n, &old_ht[i], fn_hash) {
-                       struct hlist_head *new_head;
-
-                       hlist_del_rcu(&f->fn_hash);
-
-                       new_head = rcu_dereference_protected(fz->fz_hash, 1) +
-                                  fn_hash(f->fn_key, fz);
-                       hlist_add_head_rcu(&f->fn_hash, new_head);
-               }
-       }
-}
-
-static void fz_hash_free(struct hlist_head *hash, int divisor)
-{
-       unsigned long size = divisor * sizeof(struct hlist_head);
-
-       if (size <= PAGE_SIZE)
-               kfree(hash);
-       else
-               free_pages((unsigned long)hash, get_order(size));
-}
-
-static void fn_rehash_zone(struct fn_zone *fz)
-{
-       struct hlist_head *ht, *old_ht;
-       int old_divisor, new_divisor;
-       u32 new_hashmask;
-
-       new_divisor = old_divisor = fz->fz_divisor;
-
-       switch (old_divisor) {
-       case EMBEDDED_HASH_SIZE:
-               new_divisor *= EMBEDDED_HASH_SIZE;
-               break;
-       case EMBEDDED_HASH_SIZE*EMBEDDED_HASH_SIZE:
-               new_divisor *= (EMBEDDED_HASH_SIZE/2);
-               break;
-       default:
-               if ((old_divisor << 1) > FZ_MAX_DIVISOR) {
-                       printk(KERN_CRIT "route.c: bad divisor %d!\n", old_divisor);
-                       return;
-               }
-               new_divisor = (old_divisor << 1);
-               break;
-       }
-
-       new_hashmask = (new_divisor - 1);
-
-#if RT_CACHE_DEBUG >= 2
-       printk(KERN_DEBUG "fn_rehash_zone: hash for zone %d grows from %d\n",
-              fz->fz_order, old_divisor);
-#endif
-
-       ht = fz_hash_alloc(new_divisor);
-
-       if (ht) {
-               struct fn_zone nfz;
-
-               memcpy(&nfz, fz, sizeof(nfz));
-
-               write_seqlock_bh(&fz->fz_lock);
-               old_ht = rcu_dereference_protected(fz->fz_hash, 1);
-               RCU_INIT_POINTER(nfz.fz_hash, ht);
-               nfz.fz_hashmask = new_hashmask;
-               nfz.fz_divisor = new_divisor;
-               fn_rebuild_zone(&nfz, old_ht, old_divisor);
-               fib_hash_genid++;
-               rcu_assign_pointer(fz->fz_hash, ht);
-               fz->fz_hashmask = new_hashmask;
-               fz->fz_divisor = new_divisor;
-               write_sequnlock_bh(&fz->fz_lock);
-
-               if (old_ht != fz->fz_embedded_hash) {
-                       synchronize_rcu();
-                       fz_hash_free(old_ht, old_divisor);
-               }
-       }
-}
-
-static void fn_free_node_rcu(struct rcu_head *head)
-{
-       struct fib_node *f = container_of(head, struct fib_node, fn_embedded_alias.rcu);
-
-       kmem_cache_free(fn_hash_kmem, f);
-}
-
-static inline void fn_free_node(struct fib_node *f)
-{
-       call_rcu(&f->fn_embedded_alias.rcu, fn_free_node_rcu);
-}
-
-static void fn_free_alias_rcu(struct rcu_head *head)
-{
-       struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
-
-       kmem_cache_free(fn_alias_kmem, fa);
-}
-
-static inline void fn_free_alias(struct fib_alias *fa, struct fib_node *f)
-{
-       fib_release_info(fa->fa_info);
-       if (fa == &f->fn_embedded_alias)
-               fa->fa_info = NULL;
-       else
-               call_rcu(&fa->rcu, fn_free_alias_rcu);
-}
-
-static struct fn_zone *
-fn_new_zone(struct fn_hash *table, int z)
-{
-       int i;
-       struct fn_zone *fz = kzalloc(sizeof(struct fn_zone), GFP_KERNEL);
-       if (!fz)
-               return NULL;
-
-       seqlock_init(&fz->fz_lock);
-       fz->fz_divisor = z ? EMBEDDED_HASH_SIZE : 1;
-       fz->fz_hashmask = fz->fz_divisor - 1;
-       RCU_INIT_POINTER(fz->fz_hash, fz->fz_embedded_hash);
-       fz->fz_order = z;
-       fz->fz_revorder = 32 - z;
-       fz->fz_mask = inet_make_mask(z);
-
-       /* Find the first not empty zone with more specific mask */
-       for (i = z + 1; i <= 32; i++)
-               if (table->fn_zones[i])
-                       break;
-       if (i > 32) {
-               /* No more specific masks, we are the first. */
-               rcu_assign_pointer(fz->fz_next,
-                                  rtnl_dereference(table->fn_zone_list));
-               rcu_assign_pointer(table->fn_zone_list, fz);
-       } else {
-               rcu_assign_pointer(fz->fz_next,
-                                  rtnl_dereference(table->fn_zones[i]->fz_next));
-               rcu_assign_pointer(table->fn_zones[i]->fz_next, fz);
-       }
-       table->fn_zones[z] = fz;
-       fib_hash_genid++;
-       return fz;
-}
-
-int fib_table_lookup(struct fib_table *tb,
-                    const struct flowi *flp, struct fib_result *res,
-                    int fib_flags)
-{
-       int err;
-       struct fn_zone *fz;
-       struct fn_hash *t = (struct fn_hash *)tb->tb_data;
-
-       rcu_read_lock();
-       for (fz = rcu_dereference(t->fn_zone_list);
-            fz != NULL;
-            fz = rcu_dereference(fz->fz_next)) {
-               struct hlist_head *head;
-               struct hlist_node *node;
-               struct fib_node *f;
-               __be32 k;
-               unsigned int seq;
-
-               do {
-                       seq = read_seqbegin(&fz->fz_lock);
-                       k = fz_key(flp->fl4_dst, fz);
-
-                       head = rcu_dereference(fz->fz_hash) + fn_hash(k, fz);
-                       hlist_for_each_entry_rcu(f, node, head, fn_hash) {
-                               if (f->fn_key != k)
-                                       continue;
-
-                               err = fib_semantic_match(&f->fn_alias,
-                                                flp, res,
-                                                fz->fz_order, fib_flags);
-                               if (err <= 0)
-                                       goto out;
-                       }
-               } while (read_seqretry(&fz->fz_lock, seq));
-       }
-       err = 1;
-out:
-       rcu_read_unlock();
-       return err;
-}
-
-void fib_table_select_default(struct fib_table *tb,
-                             const struct flowi *flp, struct fib_result *res)
-{
-       int order, last_idx;
-       struct hlist_node *node;
-       struct fib_node *f;
-       struct fib_info *fi = NULL;
-       struct fib_info *last_resort;
-       struct fn_hash *t = (struct fn_hash *)tb->tb_data;
-       struct fn_zone *fz = t->fn_zones[0];
-       struct hlist_head *head;
-
-       if (fz == NULL)
-               return;
-
-       last_idx = -1;
-       last_resort = NULL;
-       order = -1;
-
-       rcu_read_lock();
-       head = rcu_dereference(fz->fz_hash);
-       hlist_for_each_entry_rcu(f, node, head, fn_hash) {
-               struct fib_alias *fa;
-
-               list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
-                       struct fib_info *next_fi = fa->fa_info;
-
-                       if (fa->fa_scope != res->scope ||
-                           fa->fa_type != RTN_UNICAST)
-                               continue;
-
-                       if (next_fi->fib_priority > res->fi->fib_priority)
-                               break;
-                       if (!next_fi->fib_nh[0].nh_gw ||
-                           next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
-                               continue;
-
-                       fib_alias_accessed(fa);
-
-                       if (fi == NULL) {
-                               if (next_fi != res->fi)
-                                       break;
-                       } else if (!fib_detect_death(fi, order, &last_resort,
-                                               &last_idx, tb->tb_default)) {
-                               fib_result_assign(res, fi);
-                               tb->tb_default = order;
-                               goto out;
-                       }
-                       fi = next_fi;
-                       order++;
-               }
-       }
-
-       if (order <= 0 || fi == NULL) {
-               tb->tb_default = -1;
-               goto out;
-       }
-
-       if (!fib_detect_death(fi, order, &last_resort, &last_idx,
-                               tb->tb_default)) {
-               fib_result_assign(res, fi);
-               tb->tb_default = order;
-               goto out;
-       }
-
-       if (last_idx >= 0)
-               fib_result_assign(res, last_resort);
-       tb->tb_default = last_idx;
-out:
-       rcu_read_unlock();
-}
-
-/* Insert node F to FZ. */
-static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f)
-{
-       struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(f->fn_key, fz);
-
-       hlist_add_head_rcu(&f->fn_hash, head);
-}
-
-/* Return the node in FZ matching KEY. */
-static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key)
-{
-       struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(key, fz);
-       struct hlist_node *node;
-       struct fib_node *f;
-
-       hlist_for_each_entry_rcu(f, node, head, fn_hash) {
-               if (f->fn_key == key)
-                       return f;
-       }
-
-       return NULL;
-}
-
-
-static struct fib_alias *fib_fast_alloc(struct fib_node *f)
-{
-       struct fib_alias *fa = &f->fn_embedded_alias;
-
-       if (fa->fa_info != NULL)
-               fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
-       return fa;
-}
-
-/* Caller must hold RTNL. */
-int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
-{
-       struct fn_hash *table = (struct fn_hash *) tb->tb_data;
-       struct fib_node *new_f = NULL;
-       struct fib_node *f;
-       struct fib_alias *fa, *new_fa;
-       struct fn_zone *fz;
-       struct fib_info *fi;
-       u8 tos = cfg->fc_tos;
-       __be32 key;
-       int err;
-
-       if (cfg->fc_dst_len > 32)
-               return -EINVAL;
-
-       fz = table->fn_zones[cfg->fc_dst_len];
-       if (!fz && !(fz = fn_new_zone(table, cfg->fc_dst_len)))
-               return -ENOBUFS;
-
-       key = 0;
-       if (cfg->fc_dst) {
-               if (cfg->fc_dst & ~FZ_MASK(fz))
-                       return -EINVAL;
-               key = fz_key(cfg->fc_dst, fz);
-       }
-
-       fi = fib_create_info(cfg);
-       if (IS_ERR(fi))
-               return PTR_ERR(fi);
-
-       if (fz->fz_nent > (fz->fz_divisor<<1) &&
-           fz->fz_divisor < FZ_MAX_DIVISOR &&
-           (cfg->fc_dst_len == 32 ||
-            (1 << cfg->fc_dst_len) > fz->fz_divisor))
-               fn_rehash_zone(fz);
-
-       f = fib_find_node(fz, key);
-
-       if (!f)
-               fa = NULL;
-       else
-               fa = fib_find_alias(&f->fn_alias, tos, fi->fib_priority);
-
-       /* Now fa, if non-NULL, points to the first fib alias
-        * with the same keys [prefix,tos,priority], if such key already
-        * exists or to the node before which we will insert new one.
-        *
-        * If fa is NULL, we will need to allocate a new one and
-        * insert to the head of f.
-        *
-        * If f is NULL, no fib node matched the destination key
-        * and we need to allocate a new one of those as well.
-        */
-
-       if (fa && fa->fa_tos == tos &&
-           fa->fa_info->fib_priority == fi->fib_priority) {
-               struct fib_alias *fa_first, *fa_match;
-
-               err = -EEXIST;
-               if (cfg->fc_nlflags & NLM_F_EXCL)
-                       goto out;
-
-               /* We have 2 goals:
-                * 1. Find exact match for type, scope, fib_info to avoid
-                * duplicate routes
-                * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
-                */
-               fa_match = NULL;
-               fa_first = fa;
-               fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
-               list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
-                       if (fa->fa_tos != tos)
-                               break;
-                       if (fa->fa_info->fib_priority != fi->fib_priority)
-                               break;
-                       if (fa->fa_type == cfg->fc_type &&
-                           fa->fa_scope == cfg->fc_scope &&
-                           fa->fa_info == fi) {
-                               fa_match = fa;
-                               break;
-                       }
-               }
-
-               if (cfg->fc_nlflags & NLM_F_REPLACE) {
-                       u8 state;
-
-                       fa = fa_first;
-                       if (fa_match) {
-                               if (fa == fa_match)
-                                       err = 0;
-                               goto out;
-                       }
-                       err = -ENOBUFS;
-                       new_fa = fib_fast_alloc(f);
-                       if (new_fa == NULL)
-                               goto out;
-
-                       new_fa->fa_tos = fa->fa_tos;
-                       new_fa->fa_info = fi;
-                       new_fa->fa_type = cfg->fc_type;
-                       new_fa->fa_scope = cfg->fc_scope;
-                       state = fa->fa_state;
-                       new_fa->fa_state = state & ~FA_S_ACCESSED;
-                       fib_hash_genid++;
-                       list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
-
-                       fn_free_alias(fa, f);
-                       if (state & FA_S_ACCESSED)
-                               rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
-                       rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len,
-                                 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
-                       return 0;
-               }
-
-               /* Error if we find a perfect match which
-                * uses the same scope, type, and nexthop
-                * information.
-                */
-               if (fa_match)
-                       goto out;
-
-               if (!(cfg->fc_nlflags & NLM_F_APPEND))
-                       fa = fa_first;
-       }
-
-       err = -ENOENT;
-       if (!(cfg->fc_nlflags & NLM_F_CREATE))
-               goto out;
-
-       err = -ENOBUFS;
-
-       if (!f) {
-               new_f = kmem_cache_zalloc(fn_hash_kmem, GFP_KERNEL);
-               if (new_f == NULL)
-                       goto out;
-
-               INIT_HLIST_NODE(&new_f->fn_hash);
-               INIT_LIST_HEAD(&new_f->fn_alias);
-               new_f->fn_key = key;
-               f = new_f;
-       }
-
-       new_fa = fib_fast_alloc(f);
-       if (new_fa == NULL)
-               goto out;
-
-       new_fa->fa_info = fi;
-       new_fa->fa_tos = tos;
-       new_fa->fa_type = cfg->fc_type;
-       new_fa->fa_scope = cfg->fc_scope;
-       new_fa->fa_state = 0;
-
-       /*
-        * Insert new entry to the list.
-        */
-
-       if (new_f)
-               fib_insert_node(fz, new_f);
-       list_add_tail_rcu(&new_fa->fa_list,
-                (fa ? &fa->fa_list : &f->fn_alias));
-       fib_hash_genid++;
-
-       if (new_f)
-               fz->fz_nent++;
-       rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
-
-       rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len, tb->tb_id,
-                 &cfg->fc_nlinfo, 0);
-       return 0;
-
-out:
-       if (new_f)
-               kmem_cache_free(fn_hash_kmem, new_f);
-       fib_release_info(fi);
-       return err;
-}
-
-int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
-{
-       struct fn_hash *table = (struct fn_hash *)tb->tb_data;
-       struct fib_node *f;
-       struct fib_alias *fa, *fa_to_delete;
-       struct fn_zone *fz;
-       __be32 key;
-
-       if (cfg->fc_dst_len > 32)
-               return -EINVAL;
-
-       if ((fz  = table->fn_zones[cfg->fc_dst_len]) == NULL)
-               return -ESRCH;
-
-       key = 0;
-       if (cfg->fc_dst) {
-               if (cfg->fc_dst & ~FZ_MASK(fz))
-                       return -EINVAL;
-               key = fz_key(cfg->fc_dst, fz);
-       }
-
-       f = fib_find_node(fz, key);
-
-       if (!f)
-               fa = NULL;
-       else
-               fa = fib_find_alias(&f->fn_alias, cfg->fc_tos, 0);
-       if (!fa)
-               return -ESRCH;
-
-       fa_to_delete = NULL;
-       fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
-       list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
-               struct fib_info *fi = fa->fa_info;
-
-               if (fa->fa_tos != cfg->fc_tos)
-                       break;
-
-               if ((!cfg->fc_type ||
-                    fa->fa_type == cfg->fc_type) &&
-                   (cfg->fc_scope == RT_SCOPE_NOWHERE ||
-                    fa->fa_scope == cfg->fc_scope) &&
-                   (!cfg->fc_protocol ||
-                    fi->fib_protocol == cfg->fc_protocol) &&
-                   fib_nh_match(cfg, fi) == 0) {
-                       fa_to_delete = fa;
-                       break;
-               }
-       }
-
-       if (fa_to_delete) {
-               int kill_fn;
-
-               fa = fa_to_delete;
-               rtmsg_fib(RTM_DELROUTE, key, fa, cfg->fc_dst_len,
-                         tb->tb_id, &cfg->fc_nlinfo, 0);
-
-               kill_fn = 0;
-               list_del_rcu(&fa->fa_list);
-               if (list_empty(&f->fn_alias)) {
-                       hlist_del_rcu(&f->fn_hash);
-                       kill_fn = 1;
-               }
-               fib_hash_genid++;
-
-               if (fa->fa_state & FA_S_ACCESSED)
-                       rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
-               fn_free_alias(fa, f);
-               if (kill_fn) {
-                       fn_free_node(f);
-                       fz->fz_nent--;
-               }
-
-               return 0;
-       }
-       return -ESRCH;
-}
-
-static int fn_flush_list(struct fn_zone *fz, int idx)
-{
-       struct hlist_head *head = rtnl_dereference(fz->fz_hash) + idx;
-       struct hlist_node *node, *n;
-       struct fib_node *f;
-       int found = 0;
-
-       hlist_for_each_entry_safe(f, node, n, head, fn_hash) {
-               struct fib_alias *fa, *fa_node;
-               int kill_f;
-
-               kill_f = 0;
-               list_for_each_entry_safe(fa, fa_node, &f->fn_alias, fa_list) {
-                       struct fib_info *fi = fa->fa_info;
-
-                       if (fi && (fi->fib_flags&RTNH_F_DEAD)) {
-                               list_del_rcu(&fa->fa_list);
-                               if (list_empty(&f->fn_alias)) {
-                                       hlist_del_rcu(&f->fn_hash);
-                                       kill_f = 1;
-                               }
-                               fib_hash_genid++;
-
-                               fn_free_alias(fa, f);
-                               found++;
-                       }
-               }
-               if (kill_f) {
-                       fn_free_node(f);
-                       fz->fz_nent--;
-               }
-       }
-       return found;
-}
-
-/* caller must hold RTNL. */
-int fib_table_flush(struct fib_table *tb)
-{
-       struct fn_hash *table = (struct fn_hash *) tb->tb_data;
-       struct fn_zone *fz;
-       int found = 0;
-
-       for (fz = rtnl_dereference(table->fn_zone_list);
-            fz != NULL;
-            fz = rtnl_dereference(fz->fz_next)) {
-               int i;
-
-               for (i = fz->fz_divisor - 1; i >= 0; i--)
-                       found += fn_flush_list(fz, i);
-       }
-       return found;
-}
-
-void fib_free_table(struct fib_table *tb)
-{
-       struct fn_hash *table = (struct fn_hash *) tb->tb_data;
-       struct fn_zone *fz, *next;
-
-       next = table->fn_zone_list;
-       while (next != NULL) {
-               fz = next;
-               next = fz->fz_next;
-
-               if (fz->fz_hash != fz->fz_embedded_hash)
-                       fz_hash_free(fz->fz_hash, fz->fz_divisor);
-
-               kfree(fz);
-       }
-
-       kfree(tb);
-}
-
-static inline int
-fn_hash_dump_bucket(struct sk_buff *skb, struct netlink_callback *cb,
-                    struct fib_table *tb,
-                    struct fn_zone *fz,
-                    struct hlist_head *head)
-{
-       struct hlist_node *node;
-       struct fib_node *f;
-       int i, s_i;
-
-       s_i = cb->args[4];
-       i = 0;
-       hlist_for_each_entry_rcu(f, node, head, fn_hash) {
-               struct fib_alias *fa;
-
-               list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
-                       if (i < s_i)
-                               goto next;
-
-                       if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
-                                         cb->nlh->nlmsg_seq,
-                                         RTM_NEWROUTE,
-                                         tb->tb_id,
-                                         fa->fa_type,
-                                         fa->fa_scope,
-                                         f->fn_key,
-                                         fz->fz_order,
-                                         fa->fa_tos,
-                                         fa->fa_info,
-                                         NLM_F_MULTI) < 0) {
-                               cb->args[4] = i;
-                               return -1;
-                       }
-next:
-                       i++;
-               }
-       }
-       cb->args[4] = i;
-       return skb->len;
-}
-
-static inline int
-fn_hash_dump_zone(struct sk_buff *skb, struct netlink_callback *cb,
-                  struct fib_table *tb,
-                  struct fn_zone *fz)
-{
-       int h, s_h;
-       struct hlist_head *head = rcu_dereference(fz->fz_hash);
-
-       if (head == NULL)
-               return skb->len;
-       s_h = cb->args[3];
-       for (h = s_h; h < fz->fz_divisor; h++) {
-               if (hlist_empty(head + h))
-                       continue;
-               if (fn_hash_dump_bucket(skb, cb, tb, fz, head + h) < 0) {
-                       cb->args[3] = h;
-                       return -1;
-               }
-               memset(&cb->args[4], 0,
-                      sizeof(cb->args) - 4*sizeof(cb->args[0]));
-       }
-       cb->args[3] = h;
-       return skb->len;
-}
-
-int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
-                  struct netlink_callback *cb)
-{
-       int m = 0, s_m;
-       struct fn_zone *fz;
-       struct fn_hash *table = (struct fn_hash *)tb->tb_data;
-
-       s_m = cb->args[2];
-       rcu_read_lock();
-       for (fz = rcu_dereference(table->fn_zone_list);
-            fz != NULL;
-            fz = rcu_dereference(fz->fz_next), m++) {
-               if (m < s_m)
-                       continue;
-               if (fn_hash_dump_zone(skb, cb, tb, fz) < 0) {
-                       cb->args[2] = m;
-                       rcu_read_unlock();
-                       return -1;
-               }
-               memset(&cb->args[3], 0,
-                      sizeof(cb->args) - 3*sizeof(cb->args[0]));
-       }
-       rcu_read_unlock();
-       cb->args[2] = m;
-       return skb->len;
-}
-
-void __init fib_hash_init(void)
-{
-       fn_hash_kmem = kmem_cache_create("ip_fib_hash", sizeof(struct fib_node),
-                                        0, SLAB_PANIC, NULL);
-
-       fn_alias_kmem = kmem_cache_create("ip_fib_alias", sizeof(struct fib_alias),
-                                         0, SLAB_PANIC, NULL);
-
-}
-
-struct fib_table *fib_hash_table(u32 id)
-{
-       struct fib_table *tb;
-
-       tb = kmalloc(sizeof(struct fib_table) + sizeof(struct fn_hash),
-                    GFP_KERNEL);
-       if (tb == NULL)
-               return NULL;
-
-       tb->tb_id = id;
-       tb->tb_default = -1;
-
-       memset(tb->tb_data, 0, sizeof(struct fn_hash));
-       return tb;
-}
-
-/* ------------------------------------------------------------------------ */
-#ifdef CONFIG_PROC_FS
-
-struct fib_iter_state {
-       struct seq_net_private p;
-       struct fn_zone  *zone;
-       int             bucket;
-       struct hlist_head *hash_head;
-       struct fib_node *fn;
-       struct fib_alias *fa;
-       loff_t pos;
-       unsigned int genid;
-       int valid;
-};
-
-static struct fib_alias *fib_get_first(struct seq_file *seq)
-{
-       struct fib_iter_state *iter = seq->private;
-       struct fib_table *main_table;
-       struct fn_hash *table;
-
-       main_table = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
-       table = (struct fn_hash *)main_table->tb_data;
-
-       iter->bucket    = 0;
-       iter->hash_head = NULL;
-       iter->fn        = NULL;
-       iter->fa        = NULL;
-       iter->pos       = 0;
-       iter->genid     = fib_hash_genid;
-       iter->valid     = 1;
-
-       for (iter->zone = rcu_dereference(table->fn_zone_list);
-            iter->zone != NULL;
-            iter->zone = rcu_dereference(iter->zone->fz_next)) {
-               int maxslot;
-
-               if (!iter->zone->fz_nent)
-                       continue;
-
-               iter->hash_head = rcu_dereference(iter->zone->fz_hash);
-               maxslot = iter->zone->fz_divisor;
-
-               for (iter->bucket = 0; iter->bucket < maxslot;
-                    ++iter->bucket, ++iter->hash_head) {
-                       struct hlist_node *node;
-                       struct fib_node *fn;
-
-                       hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
-                               struct fib_alias *fa;
-
-                               list_for_each_entry(fa, &fn->fn_alias, fa_list) {
-                                       iter->fn = fn;
-                                       iter->fa = fa;
-                                       goto out;
-                               }
-                       }
-               }
-       }
-out:
-       return iter->fa;
-}
-
-static struct fib_alias *fib_get_next(struct seq_file *seq)
-{
-       struct fib_iter_state *iter = seq->private;
-       struct fib_node *fn;
-       struct fib_alias *fa;
-
-       /* Advance FA, if any. */
-       fn = iter->fn;
-       fa = iter->fa;
-       if (fa) {
-               BUG_ON(!fn);
-               list_for_each_entry_continue(fa, &fn->fn_alias, fa_list) {
-                       iter->fa = fa;
-                       goto out;
-               }
-       }
-
-       fa = iter->fa = NULL;
-
-       /* Advance FN. */
-       if (fn) {
-               struct hlist_node *node = &fn->fn_hash;
-               hlist_for_each_entry_continue(fn, node, fn_hash) {
-                       iter->fn = fn;
-
-                       list_for_each_entry(fa, &fn->fn_alias, fa_list) {
-                               iter->fa = fa;
-                               goto out;
-                       }
-               }
-       }
-
-       fn = iter->fn = NULL;
-
-       /* Advance hash chain. */
-       if (!iter->zone)
-               goto out;
-
-       for (;;) {
-               struct hlist_node *node;
-               int maxslot;
-
-               maxslot = iter->zone->fz_divisor;
-
-               while (++iter->bucket < maxslot) {
-                       iter->hash_head++;
-
-                       hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
-                               list_for_each_entry(fa, &fn->fn_alias, fa_list) {
-                                       iter->fn = fn;
-                                       iter->fa = fa;
-                                       goto out;
-                               }
-                       }
-               }
-
-               iter->zone = rcu_dereference(iter->zone->fz_next);
-
-               if (!iter->zone)
-                       goto out;
-
-               iter->bucket = 0;
-               iter->hash_head = rcu_dereference(iter->zone->fz_hash);
-
-               hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
-                       list_for_each_entry(fa, &fn->fn_alias, fa_list) {
-                               iter->fn = fn;
-                               iter->fa = fa;
-                               goto out;
-                       }
-               }
-       }
-out:
-       iter->pos++;
-       return fa;
-}
-
-static struct fib_alias *fib_get_idx(struct seq_file *seq, loff_t pos)
-{
-       struct fib_iter_state *iter = seq->private;
-       struct fib_alias *fa;
-
-       if (iter->valid && pos >= iter->pos && iter->genid == fib_hash_genid) {
-               fa   = iter->fa;
-               pos -= iter->pos;
-       } else
-               fa = fib_get_first(seq);
-
-       if (fa)
-               while (pos && (fa = fib_get_next(seq)))
-                       --pos;
-       return pos ? NULL : fa;
-}
-
-static void *fib_seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(RCU)
-{
-       void *v = NULL;
-
-       rcu_read_lock();
-       if (fib_get_table(seq_file_net(seq), RT_TABLE_MAIN))
-               v = *pos ? fib_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
-       return v;
-}
-
-static void *fib_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       ++*pos;
-       return v == SEQ_START_TOKEN ? fib_get_first(seq) : fib_get_next(seq);
-}
-
-static void fib_seq_stop(struct seq_file *seq, void *v)
-       __releases(RCU)
-{
-       rcu_read_unlock();
-}
-
-static unsigned fib_flag_trans(int type, __be32 mask, struct fib_info *fi)
-{
-       static const unsigned type2flags[RTN_MAX + 1] = {
-               [7] = RTF_REJECT,
-               [8] = RTF_REJECT,
-       };
-       unsigned flags = type2flags[type];
-
-       if (fi && fi->fib_nh->nh_gw)
-               flags |= RTF_GATEWAY;
-       if (mask == htonl(0xFFFFFFFF))
-               flags |= RTF_HOST;
-       flags |= RTF_UP;
-       return flags;
-}
-
-/*
- *     This outputs /proc/net/route.
- *
- *     It always works in backward compatibility mode.
- *     The format of the file is not supposed to be changed.
- */
-static int fib_seq_show(struct seq_file *seq, void *v)
-{
-       struct fib_iter_state *iter;
-       int len;
-       __be32 prefix, mask;
-       unsigned flags;
-       struct fib_node *f;
-       struct fib_alias *fa;
-       struct fib_info *fi;
-
-       if (v == SEQ_START_TOKEN) {
-               seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
-                          "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
-                          "\tWindow\tIRTT");
-               goto out;
-       }
-
-       iter    = seq->private;
-       f       = iter->fn;
-       fa      = iter->fa;
-       fi      = fa->fa_info;
-       prefix  = f->fn_key;
-       mask    = FZ_MASK(iter->zone);
-       flags   = fib_flag_trans(fa->fa_type, mask, fi);
-       if (fi)
-               seq_printf(seq,
-                        "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u%n",
-                        fi->fib_dev ? fi->fib_dev->name : "*", prefix,
-                        fi->fib_nh->nh_gw, flags, 0, 0, fi->fib_priority,
-                        mask, (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
-                        fi->fib_window,
-                        fi->fib_rtt >> 3, &len);
-       else
-               seq_printf(seq,
-                        "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u%n",
-                        prefix, 0, flags, 0, 0, 0, mask, 0, 0, 0, &len);
-
-       seq_printf(seq, "%*s\n", 127 - len, "");
-out:
-       return 0;
-}
-
-static const struct seq_operations fib_seq_ops = {
-       .start  = fib_seq_start,
-       .next   = fib_seq_next,
-       .stop   = fib_seq_stop,
-       .show   = fib_seq_show,
-};
-
-static int fib_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open_net(inode, file, &fib_seq_ops,
-                           sizeof(struct fib_iter_state));
-}
-
-static const struct file_operations fib_seq_fops = {
-       .owner          = THIS_MODULE,
-       .open           = fib_seq_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = seq_release_net,
-};
-
-int __net_init fib_proc_init(struct net *net)
-{
-       if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_seq_fops))
-               return -ENOMEM;
-       return 0;
-}
-
-void __net_exit fib_proc_exit(struct net *net)
-{
-       proc_net_remove(net, "route");
-}
-#endif /* CONFIG_PROC_FS */
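
The /proc/net/route code removed above follows the usual seq_file pattern: fib_get_idx() caches the last position together with fib_hash_genid and only restarts the walk when the table generation changed. A simplified, self-contained model of that resume-or-restart logic (all names invented, not kernel code):

    #include <stddef.h>
    #include <stdio.h>

    static unsigned table_genid;        /* bumped on every table change */
    static size_t table[] = { 10, 20, 30, 40, 50 };
    #define TABLE_LEN (sizeof(table) / sizeof(table[0]))

    struct iter_state {
        size_t   pos;       /* index reached on the previous read    */
        unsigned genid;     /* table generation when it was cached   */
        int      valid;
    };

    /* Mirror of the fib_get_idx() idea: resume from the cached slot if the
     * generation still matches, otherwise walk again from the start. */
    static int get_idx(struct iter_state *it, size_t pos, size_t *out)
    {
        size_t i = 0;

        if (it->valid && it->genid == table_genid && pos >= it->pos)
            i = it->pos;            /* resume */

        for (; i < TABLE_LEN; i++) {
            if (i == pos) {
                it->pos   = i;
                it->genid = table_genid;
                it->valid = 1;
                *out = table[i];
                return 0;
            }
        }
        return -1;
    }

    int main(void)
    {
        struct iter_state it = { 0 };
        size_t v;

        if (!get_idx(&it, 3, &v))
            printf("entry 3 = %zu\n", v);
        table_genid++;              /* simulate a route change */
        if (!get_idx(&it, 4, &v))   /* cache is stale: restart  */
            printf("entry 4 = %zu\n", v);
        return 0;
    }
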
index c079cc0ec6515fe212aecd27303c2b5671a5d60d..84db2da5c84810f169308cd203078026293601e9 100644
@@ -25,7 +25,7 @@ static inline void fib_alias_accessed(struct fib_alias *fa)
 }
 
 /* Exported by fib_semantics.c */
-extern int fib_semantic_match(struct list_head *head,
+extern int fib_semantic_match(struct fib_table *tb, struct list_head *head,
                              const struct flowi *flp,
                              struct fib_result *res, int prefixlen, int fib_flags);
 extern void fib_release_info(struct fib_info *);
@@ -51,4 +51,11 @@ static inline void fib_result_assign(struct fib_result *res,
        res->fi = fi;
 }
 
+struct fib_prop {
+       int     error;
+       u8      scope;
+};
+
+extern const struct fib_prop fib_props[RTN_MAX + 1];
+
 #endif /* _FIB_LOOKUP_H */
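
The fib_prop table exported here lets lookup code decide in one array access whether a route type is usable (check_leaf() later in this patch reads fib_props[fa->fa_type].error). A rough userspace sketch of the same table-driven check, with made-up types and error values:

    #include <stdio.h>

    enum rt_type { RT_UNSPEC, RT_UNICAST, RT_BLACKHOLE, RT_MAX = RT_BLACKHOLE };

    struct rt_prop {
        int           error;    /* 0 = usable, otherwise error to return */
        unsigned char scope;
    };

    static const struct rt_prop rt_props[RT_MAX + 1] = {
        [RT_UNSPEC]    = { .error = 0,                 .scope = 0 },
        [RT_UNICAST]   = { .error = 0,                 .scope = 1 },
        [RT_BLACKHOLE] = { .error = -22 /* -EINVAL */, .scope = 0 },
    };

    int main(void)
    {
        enum rt_type t = RT_BLACKHOLE;

        if (rt_props[t].error)
            printf("type %d rejected with %d\n", t, rt_props[t].error);
        else
            printf("type %d usable, scope %u\n", t, rt_props[t].scope);
        return 0;
    }
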
index 7981a24f5c7b3b51e5c23c825f98e12b02dfcd87..3018efbaea77ec32f8d42236fe534f6c8a8dc336 100644
@@ -41,13 +41,13 @@ struct fib4_rule {
        __be32                  srcmask;
        __be32                  dst;
        __be32                  dstmask;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        u32                     tclassid;
 #endif
 };
 
-#ifdef CONFIG_NET_CLS_ROUTE
-u32 fib_rules_tclass(struct fib_result *res)
+#ifdef CONFIG_IP_ROUTE_CLASSID
+u32 fib_rules_tclass(const struct fib_result *res)
 {
        return res->r ? ((struct fib4_rule *) res->r)->tclassid : 0;
 }
@@ -165,7 +165,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
        if (frh->dst_len)
                rule4->dst = nla_get_be32(tb[FRA_DST]);
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        if (tb[FRA_FLOW])
                rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
 #endif
@@ -195,7 +195,7 @@ static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
        if (frh->tos && (rule4->tos != frh->tos))
                return 0;
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        if (tb[FRA_FLOW] && (rule4->tclassid != nla_get_u32(tb[FRA_FLOW])))
                return 0;
 #endif
@@ -224,7 +224,7 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
        if (rule4->src_len)
                NLA_PUT_BE32(skb, FRA_SRC, rule4->src);
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        if (rule4->tclassid)
                NLA_PUT_U32(skb, FRA_FLOW, rule4->tclassid);
 #endif
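
These hunks only rename the guard from CONFIG_NET_CLS_ROUTE to CONFIG_IP_ROUTE_CLASSID; the underlying pattern is a field that exists only when the option is set, with an accessor that falls back to 0 otherwise (compare fib_rules_tclass() above). A small standalone illustration of that pattern; HAVE_CLASSID and struct rule are invented for the example:

    #include <stdio.h>

    #define HAVE_CLASSID 1          /* flip to 0 to compile the field out */

    struct rule {
        unsigned int prio;
    #if HAVE_CLASSID
        unsigned int tclassid;
    #endif
    };

    static unsigned int rule_tclass(const struct rule *r)
    {
    #if HAVE_CLASSID
        return r->tclassid;
    #else
        return 0;
    #endif
    }

    int main(void)
    {
        struct rule r = { .prio = 100,
    #if HAVE_CLASSID
                          .tclassid = 0x10,
    #endif
        };
        printf("tclassid = %#x\n", rule_tclass(&r));
        return 0;
    }
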
index 12d3dc3df1b7d683e94cbf42140a9ac40b35029d..d73d7581b51f42687fa8b4d0d3a1c1031802ef2c 100644
@@ -49,7 +49,7 @@
 static DEFINE_SPINLOCK(fib_info_lock);
 static struct hlist_head *fib_info_hash;
 static struct hlist_head *fib_info_laddrhash;
-static unsigned int fib_hash_size;
+static unsigned int fib_info_hash_size;
 static unsigned int fib_info_cnt;
 
 #define DEVINDEX_HASHBITS 8
@@ -90,11 +90,7 @@ static DEFINE_SPINLOCK(fib_multipath_lock);
 #define endfor_nexthops(fi) }
 
 
-static const struct
-{
-       int     error;
-       u8      scope;
-} fib_props[RTN_MAX + 1] = {
+const struct fib_prop fib_props[RTN_MAX + 1] = {
        [RTN_UNSPEC] = {
                .error  = 0,
                .scope  = RT_SCOPE_NOWHERE,
@@ -152,6 +148,8 @@ static void free_fib_info_rcu(struct rcu_head *head)
 {
        struct fib_info *fi = container_of(head, struct fib_info, rcu);
 
+       if (fi->fib_metrics != (u32 *) dst_default_metrics)
+               kfree(fi->fib_metrics);
        kfree(fi);
 }
 
@@ -200,7 +198,7 @@ static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
                    nh->nh_weight != onh->nh_weight ||
 #endif
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                    nh->nh_tclassid != onh->nh_tclassid ||
 #endif
                    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
@@ -221,7 +219,7 @@ static inline unsigned int fib_devindex_hashfn(unsigned int val)
 
 static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
 {
-       unsigned int mask = (fib_hash_size - 1);
+       unsigned int mask = (fib_info_hash_size - 1);
        unsigned int val = fi->fib_nhs;
 
        val ^= fi->fib_protocol;
@@ -422,7 +420,7 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
 
                        nla = nla_find(attrs, attrlen, RTA_GATEWAY);
                        nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                        nla = nla_find(attrs, attrlen, RTA_FLOW);
                        nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
 #endif
@@ -476,7 +474,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
                        nla = nla_find(attrs, attrlen, RTA_GATEWAY);
                        if (nla && nla_get_be32(nla) != nh->nh_gw)
                                return 1;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                        nla = nla_find(attrs, attrlen, RTA_FLOW);
                        if (nla && nla_get_u32(nla) != nh->nh_tclassid)
                                return 1;
@@ -613,14 +611,14 @@ out:
 
 static inline unsigned int fib_laddr_hashfn(__be32 val)
 {
-       unsigned int mask = (fib_hash_size - 1);
+       unsigned int mask = (fib_info_hash_size - 1);
 
        return ((__force u32)val ^
                ((__force u32)val >> 7) ^
                ((__force u32)val >> 14)) & mask;
 }
 
-static struct hlist_head *fib_hash_alloc(int bytes)
+static struct hlist_head *fib_info_hash_alloc(int bytes)
 {
        if (bytes <= PAGE_SIZE)
                return kzalloc(bytes, GFP_KERNEL);
@@ -630,7 +628,7 @@ static struct hlist_head *fib_hash_alloc(int bytes)
                                         get_order(bytes));
 }
 
-static void fib_hash_free(struct hlist_head *hash, int bytes)
+static void fib_info_hash_free(struct hlist_head *hash, int bytes)
 {
        if (!hash)
                return;
@@ -641,18 +639,18 @@ static void fib_hash_free(struct hlist_head *hash, int bytes)
                free_pages((unsigned long) hash, get_order(bytes));
 }
 
-static void fib_hash_move(struct hlist_head *new_info_hash,
-                         struct hlist_head *new_laddrhash,
-                         unsigned int new_size)
+static void fib_info_hash_move(struct hlist_head *new_info_hash,
+                              struct hlist_head *new_laddrhash,
+                              unsigned int new_size)
 {
        struct hlist_head *old_info_hash, *old_laddrhash;
-       unsigned int old_size = fib_hash_size;
+       unsigned int old_size = fib_info_hash_size;
        unsigned int i, bytes;
 
        spin_lock_bh(&fib_info_lock);
        old_info_hash = fib_info_hash;
        old_laddrhash = fib_info_laddrhash;
-       fib_hash_size = new_size;
+       fib_info_hash_size = new_size;
 
        for (i = 0; i < old_size; i++) {
                struct hlist_head *head = &fib_info_hash[i];
@@ -693,8 +691,8 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
        spin_unlock_bh(&fib_info_lock);
 
        bytes = old_size * sizeof(struct hlist_head *);
-       fib_hash_free(old_info_hash, bytes);
-       fib_hash_free(old_laddrhash, bytes);
+       fib_info_hash_free(old_info_hash, bytes);
+       fib_info_hash_free(old_laddrhash, bytes);
 }
 
 struct fib_info *fib_create_info(struct fib_config *cfg)
@@ -705,6 +703,9 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
        int nhs = 1;
        struct net *net = cfg->fc_nlinfo.nl_net;
 
+       if (cfg->fc_type > RTN_MAX)
+               goto err_inval;
+
        /* Fast check to catch the most weird cases */
        if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
                goto err_inval;
@@ -718,8 +719,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 #endif
 
        err = -ENOBUFS;
-       if (fib_info_cnt >= fib_hash_size) {
-               unsigned int new_size = fib_hash_size << 1;
+       if (fib_info_cnt >= fib_info_hash_size) {
+               unsigned int new_size = fib_info_hash_size << 1;
                struct hlist_head *new_info_hash;
                struct hlist_head *new_laddrhash;
                unsigned int bytes;
@@ -727,21 +728,27 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                if (!new_size)
                        new_size = 1;
                bytes = new_size * sizeof(struct hlist_head *);
-               new_info_hash = fib_hash_alloc(bytes);
-               new_laddrhash = fib_hash_alloc(bytes);
+               new_info_hash = fib_info_hash_alloc(bytes);
+               new_laddrhash = fib_info_hash_alloc(bytes);
                if (!new_info_hash || !new_laddrhash) {
-                       fib_hash_free(new_info_hash, bytes);
-                       fib_hash_free(new_laddrhash, bytes);
+                       fib_info_hash_free(new_info_hash, bytes);
+                       fib_info_hash_free(new_laddrhash, bytes);
                } else
-                       fib_hash_move(new_info_hash, new_laddrhash, new_size);
+                       fib_info_hash_move(new_info_hash, new_laddrhash, new_size);
 
-               if (!fib_hash_size)
+               if (!fib_info_hash_size)
                        goto failure;
        }
 
        fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
        if (fi == NULL)
                goto failure;
+       if (cfg->fc_mx) {
+               fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+               if (!fi->fib_metrics)
+                       goto failure;
+       } else
+               fi->fib_metrics = (u32 *) dst_default_metrics;
        fib_info_cnt++;
 
        fi->fib_net = hold_net(net);
@@ -779,7 +786,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                        goto err_inval;
                if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
                        goto err_inval;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
                        goto err_inval;
 #endif
@@ -792,7 +799,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                nh->nh_oif = cfg->fc_oif;
                nh->nh_gw = cfg->fc_gw;
                nh->nh_flags = cfg->fc_flags;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                nh->nh_tclassid = cfg->fc_flow;
 #endif
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -804,6 +811,17 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
                        goto err_inval;
                goto link_it;
+       } else {
+               switch (cfg->fc_type) {
+               case RTN_UNICAST:
+               case RTN_LOCAL:
+               case RTN_BROADCAST:
+               case RTN_ANYCAST:
+               case RTN_MULTICAST:
+                       break;
+               default:
+                       goto err_inval;
+               }
        }
 
        if (cfg->fc_scope > RT_SCOPE_HOST)
@@ -835,6 +853,13 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                                goto err_inval;
        }
 
+       change_nexthops(fi) {
+               nexthop_nh->nh_cfg_scope = cfg->fc_scope;
+               nexthop_nh->nh_saddr = inet_select_addr(nexthop_nh->nh_dev,
+                                                       nexthop_nh->nh_gw,
+                                                       nexthop_nh->nh_cfg_scope);
+       } endfor_nexthops(fi)
+
 link_it:
        ofi = fib_find_info(fi);
        if (ofi) {
@@ -880,84 +905,6 @@ failure:
        return ERR_PTR(err);
 }
 
-/* Note! fib_semantic_match intentionally uses  RCU list functions. */
-int fib_semantic_match(struct list_head *head, const struct flowi *flp,
-                      struct fib_result *res, int prefixlen, int fib_flags)
-{
-       struct fib_alias *fa;
-       int nh_sel = 0;
-
-       list_for_each_entry_rcu(fa, head, fa_list) {
-               int err;
-
-               if (fa->fa_tos &&
-                   fa->fa_tos != flp->fl4_tos)
-                       continue;
-
-               if (fa->fa_scope < flp->fl4_scope)
-                       continue;
-
-               fib_alias_accessed(fa);
-
-               err = fib_props[fa->fa_type].error;
-               if (err == 0) {
-                       struct fib_info *fi = fa->fa_info;
-
-                       if (fi->fib_flags & RTNH_F_DEAD)
-                               continue;
-
-                       switch (fa->fa_type) {
-                       case RTN_UNICAST:
-                       case RTN_LOCAL:
-                       case RTN_BROADCAST:
-                       case RTN_ANYCAST:
-                       case RTN_MULTICAST:
-                               for_nexthops(fi) {
-                                       if (nh->nh_flags & RTNH_F_DEAD)
-                                               continue;
-                                       if (!flp->oif || flp->oif == nh->nh_oif)
-                                               break;
-                               }
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
-                               if (nhsel < fi->fib_nhs) {
-                                       nh_sel = nhsel;
-                                       goto out_fill_res;
-                               }
-#else
-                               if (nhsel < 1)
-                                       goto out_fill_res;
-#endif
-                               endfor_nexthops(fi);
-                               continue;
-
-                       default:
-                               pr_warning("fib_semantic_match bad type %#x\n",
-                                          fa->fa_type);
-                               return -EINVAL;
-                       }
-               }
-               return err;
-       }
-       return 1;
-
-out_fill_res:
-       res->prefixlen = prefixlen;
-       res->nh_sel = nh_sel;
-       res->type = fa->fa_type;
-       res->scope = fa->fa_scope;
-       res->fi = fa->fa_info;
-       if (!(fib_flags & FIB_LOOKUP_NOREF))
-               atomic_inc(&res->fi->fib_clntref);
-       return 0;
-}
-
-/* Find appropriate source address to this destination */
-
-__be32 __fib_res_prefsrc(struct fib_result *res)
-{
-       return inet_select_addr(FIB_RES_DEV(*res), FIB_RES_GW(*res), res->scope);
-}
-
 int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
                  u32 tb_id, u8 type, u8 scope, __be32 dst, int dst_len, u8 tos,
                  struct fib_info *fi, unsigned int flags)
@@ -1002,7 +949,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 
                if (fi->fib_nh->nh_oif)
                        NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                if (fi->fib_nh[0].nh_tclassid)
                        NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid);
 #endif
@@ -1027,7 +974,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 
                        if (nh->nh_gw)
                                NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                        if (nh->nh_tclassid)
                                NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid);
 #endif
@@ -1125,6 +1072,80 @@ int fib_sync_down_dev(struct net_device *dev, int force)
        return ret;
 }
 
+/* Must be invoked inside of an RCU protected region.  */
+void fib_select_default(struct fib_result *res)
+{
+       struct fib_info *fi = NULL, *last_resort = NULL;
+       struct list_head *fa_head = res->fa_head;
+       struct fib_table *tb = res->table;
+       int order = -1, last_idx = -1;
+       struct fib_alias *fa;
+
+       list_for_each_entry_rcu(fa, fa_head, fa_list) {
+               struct fib_info *next_fi = fa->fa_info;
+
+               if (fa->fa_scope != res->scope ||
+                   fa->fa_type != RTN_UNICAST)
+                       continue;
+
+               if (next_fi->fib_priority > res->fi->fib_priority)
+                       break;
+               if (!next_fi->fib_nh[0].nh_gw ||
+                   next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
+                       continue;
+
+               fib_alias_accessed(fa);
+
+               if (fi == NULL) {
+                       if (next_fi != res->fi)
+                               break;
+               } else if (!fib_detect_death(fi, order, &last_resort,
+                                            &last_idx, tb->tb_default)) {
+                       fib_result_assign(res, fi);
+                       tb->tb_default = order;
+                       goto out;
+               }
+               fi = next_fi;
+               order++;
+       }
+
+       if (order <= 0 || fi == NULL) {
+               tb->tb_default = -1;
+               goto out;
+       }
+
+       if (!fib_detect_death(fi, order, &last_resort, &last_idx,
+                               tb->tb_default)) {
+               fib_result_assign(res, fi);
+               tb->tb_default = order;
+               goto out;
+       }
+
+       if (last_idx >= 0)
+               fib_result_assign(res, last_resort);
+       tb->tb_default = last_idx;
+out:
+       return;
+}
+
+void fib_update_nh_saddrs(struct net_device *dev)
+{
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct fib_nh *nh;
+       unsigned int hash;
+
+       hash = fib_devindex_hashfn(dev->ifindex);
+       head = &fib_info_devhash[hash];
+       hlist_for_each_entry(nh, node, head, nh_hash) {
+               if (nh->nh_dev != dev)
+                       continue;
+               nh->nh_saddr = inet_select_addr(nh->nh_dev,
+                                               nh->nh_gw,
+                                               nh->nh_cfg_scope);
+       }
+}
+
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 
 /*
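
Besides the renames, this file now lets a fib_info either share one read-only default metrics array or own a private allocation, and free_fib_info_rcu() frees the metrics only in the second case. A compact userspace model of that share-or-copy scheme, with invented names (struct entry, entry_init, entry_free):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define METRIC_MAX 4

    static const unsigned int default_metrics[METRIC_MAX]; /* all zero, shared */

    struct entry {
        unsigned int *metrics;
    };

    static int entry_init(struct entry *e, const unsigned int *cfg)
    {
        if (cfg) {
            e->metrics = calloc(METRIC_MAX, sizeof(*e->metrics));
            if (!e->metrics)
                return -1;
            memcpy(e->metrics, cfg, METRIC_MAX * sizeof(*cfg));
        } else {
            e->metrics = (unsigned int *)default_metrics;
        }
        return 0;
    }

    static void entry_free(struct entry *e)
    {
        if (e->metrics != (unsigned int *)default_metrics)
            free(e->metrics);       /* only private copies are freed */
    }

    int main(void)
    {
        unsigned int mtu_only[METRIC_MAX] = { 1500, 0, 0, 0 };
        struct entry a, b;

        entry_init(&a, NULL);       /* shares the default array  */
        entry_init(&b, mtu_only);   /* gets its own allocation   */
        printf("a: %u, b: %u\n", a.metrics[0], b.metrics[0]);
        entry_free(&a);
        entry_free(&b);
        return 0;
    }
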
index 0f280348e0fdad70adb37f3b33bdbe6fedd99025..a4109a54477826ca1758863c73046596c1fe9e09 100644
@@ -95,7 +95,7 @@ typedef unsigned int t_key;
 #define IS_TNODE(n) (!(n->parent & T_LEAF))
 #define IS_LEAF(n) (n->parent & T_LEAF)
 
-struct node {
+struct rt_trie_node {
        unsigned long parent;
        t_key key;
 };
@@ -126,7 +126,7 @@ struct tnode {
                struct work_struct work;
                struct tnode *tnode_free;
        };
-       struct node *child[0];
+       struct rt_trie_node *child[0];
 };
 
 #ifdef CONFIG_IP_FIB_TRIE_STATS
@@ -151,16 +151,16 @@ struct trie_stat {
 };
 
 struct trie {
-       struct node *trie;
+       struct rt_trie_node *trie;
 #ifdef CONFIG_IP_FIB_TRIE_STATS
        struct trie_use_stats stats;
 #endif
 };
 
-static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
-static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
+static void put_child(struct trie *t, struct tnode *tn, int i, struct rt_trie_node *n);
+static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
                                  int wasfull);
-static struct node *resize(struct trie *t, struct tnode *tn);
+static struct rt_trie_node *resize(struct trie *t, struct tnode *tn);
 static struct tnode *inflate(struct trie *t, struct tnode *tn);
 static struct tnode *halve(struct trie *t, struct tnode *tn);
 /* tnodes to free after resize(); protected by RTNL */
@@ -177,12 +177,12 @@ static const int sync_pages = 128;
 static struct kmem_cache *fn_alias_kmem __read_mostly;
 static struct kmem_cache *trie_leaf_kmem __read_mostly;
 
-static inline struct tnode *node_parent(struct node *node)
+static inline struct tnode *node_parent(struct rt_trie_node *node)
 {
        return (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
 }
 
-static inline struct tnode *node_parent_rcu(struct node *node)
+static inline struct tnode *node_parent_rcu(struct rt_trie_node *node)
 {
        struct tnode *ret = node_parent(node);
 
@@ -192,22 +192,22 @@ static inline struct tnode *node_parent_rcu(struct node *node)
 /* Same as rcu_assign_pointer
  * but that macro() assumes that value is a pointer.
  */
-static inline void node_set_parent(struct node *node, struct tnode *ptr)
+static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
 {
        smp_wmb();
        node->parent = (unsigned long)ptr | NODE_TYPE(node);
 }
 
-static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
+static inline struct rt_trie_node *tnode_get_child(struct tnode *tn, unsigned int i)
 {
        BUG_ON(i >= 1U << tn->bits);
 
        return tn->child[i];
 }
 
-static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
+static inline struct rt_trie_node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
 {
-       struct node *ret = tnode_get_child(tn, i);
+       struct rt_trie_node *ret = tnode_get_child(tn, i);
 
        return rcu_dereference_rtnl(ret);
 }
@@ -217,12 +217,12 @@ static inline int tnode_child_length(const struct tnode *tn)
        return 1 << tn->bits;
 }
 
-static inline t_key mask_pfx(t_key k, unsigned short l)
+static inline t_key mask_pfx(t_key k, unsigned int l)
 {
        return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
 }
 
-static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
+static inline t_key tkey_extract_bits(t_key a, unsigned int offset, unsigned int bits)
 {
        if (offset < KEYLENGTH)
                return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
@@ -378,7 +378,7 @@ static void __tnode_free_rcu(struct rcu_head *head)
 {
        struct tnode *tn = container_of(head, struct tnode, rcu);
        size_t size = sizeof(struct tnode) +
-                     (sizeof(struct node *) << tn->bits);
+                     (sizeof(struct rt_trie_node *) << tn->bits);
 
        if (size <= PAGE_SIZE)
                kfree(tn);
@@ -402,7 +402,7 @@ static void tnode_free_safe(struct tnode *tn)
        tn->tnode_free = tnode_free_head;
        tnode_free_head = tn;
        tnode_free_size += sizeof(struct tnode) +
-                          (sizeof(struct node *) << tn->bits);
+                          (sizeof(struct rt_trie_node *) << tn->bits);
 }
 
 static void tnode_free_flush(void)
@@ -443,7 +443,7 @@ static struct leaf_info *leaf_info_new(int plen)
 
 static struct tnode *tnode_new(t_key key, int pos, int bits)
 {
-       size_t sz = sizeof(struct tnode) + (sizeof(struct node *) << bits);
+       size_t sz = sizeof(struct tnode) + (sizeof(struct rt_trie_node *) << bits);
        struct tnode *tn = tnode_alloc(sz);
 
        if (tn) {
@@ -456,7 +456,7 @@ static struct tnode *tnode_new(t_key key, int pos, int bits)
        }
 
        pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
-                sizeof(struct node) << bits);
+                sizeof(struct rt_trie_node) << bits);
        return tn;
 }
 
@@ -465,7 +465,7 @@ static struct tnode *tnode_new(t_key key, int pos, int bits)
  * and no bits are skipped. See discussion in dyntree paper p. 6
  */
 
-static inline int tnode_full(const struct tnode *tn, const struct node *n)
+static inline int tnode_full(const struct tnode *tn, const struct rt_trie_node *n)
 {
        if (n == NULL || IS_LEAF(n))
                return 0;
@@ -474,7 +474,7 @@ static inline int tnode_full(const struct tnode *tn, const struct node *n)
 }
 
 static inline void put_child(struct trie *t, struct tnode *tn, int i,
-                            struct node *n)
+                            struct rt_trie_node *n)
 {
        tnode_put_child_reorg(tn, i, n, -1);
 }
@@ -484,10 +484,10 @@ static inline void put_child(struct trie *t, struct tnode *tn, int i,
   * Update the value of full_children and empty_children.
   */
 
-static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
+static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
                                  int wasfull)
 {
-       struct node *chi = tn->child[i];
+       struct rt_trie_node *chi = tn->child[i];
        int isfull;
 
        BUG_ON(i >= 1<<tn->bits);
@@ -515,7 +515,7 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
 }
 
 #define MAX_WORK 10
-static struct node *resize(struct trie *t, struct tnode *tn)
+static struct rt_trie_node *resize(struct trie *t, struct tnode *tn)
 {
        int i;
        struct tnode *old_tn;
@@ -605,7 +605,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
 
        /* Keep root node larger  */
 
-       if (!node_parent((struct node *)tn)) {
+       if (!node_parent((struct rt_trie_node *)tn)) {
                inflate_threshold_use = inflate_threshold_root;
                halve_threshold_use = halve_threshold_root;
        } else {
@@ -635,7 +635,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
 
        /* Return if at least one inflate is run */
        if (max_work != MAX_WORK)
-               return (struct node *) tn;
+               return (struct rt_trie_node *) tn;
 
        /*
         * Halve as long as the number of empty children in this
@@ -663,7 +663,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
        if (tn->empty_children == tnode_child_length(tn) - 1) {
 one_child:
                for (i = 0; i < tnode_child_length(tn); i++) {
-                       struct node *n;
+                       struct rt_trie_node *n;
 
                        n = tn->child[i];
                        if (!n)
@@ -676,7 +676,7 @@ one_child:
                        return n;
                }
        }
-       return (struct node *) tn;
+       return (struct rt_trie_node *) tn;
 }
 
 static struct tnode *inflate(struct trie *t, struct tnode *tn)
@@ -723,14 +723,14 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
                                goto nomem;
                        }
 
-                       put_child(t, tn, 2*i, (struct node *) left);
-                       put_child(t, tn, 2*i+1, (struct node *) right);
+                       put_child(t, tn, 2*i, (struct rt_trie_node *) left);
+                       put_child(t, tn, 2*i+1, (struct rt_trie_node *) right);
                }
        }
 
        for (i = 0; i < olen; i++) {
                struct tnode *inode;
-               struct node *node = tnode_get_child(oldtnode, i);
+               struct rt_trie_node *node = tnode_get_child(oldtnode, i);
                struct tnode *left, *right;
                int size, j;
 
@@ -825,7 +825,7 @@ nomem:
 static struct tnode *halve(struct trie *t, struct tnode *tn)
 {
        struct tnode *oldtnode = tn;
-       struct node *left, *right;
+       struct rt_trie_node *left, *right;
        int i;
        int olen = tnode_child_length(tn);
 
@@ -856,7 +856,7 @@ static struct tnode *halve(struct trie *t, struct tnode *tn)
                        if (!newn)
                                goto nomem;
 
-                       put_child(t, tn, i/2, (struct node *)newn);
+                       put_child(t, tn, i/2, (struct rt_trie_node *)newn);
                }
 
        }
@@ -958,7 +958,7 @@ fib_find_node(struct trie *t, u32 key)
 {
        int pos;
        struct tnode *tn;
-       struct node *n;
+       struct rt_trie_node *n;
 
        pos = 0;
        n = rcu_dereference_rtnl(t->trie);
@@ -993,17 +993,17 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
 
        key = tn->key;
 
-       while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
+       while (tn != NULL && (tp = node_parent((struct rt_trie_node *)tn)) != NULL) {
                cindex = tkey_extract_bits(key, tp->pos, tp->bits);
                wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
                tn = (struct tnode *) resize(t, (struct tnode *)tn);
 
                tnode_put_child_reorg((struct tnode *)tp, cindex,
-                                     (struct node *)tn, wasfull);
+                                     (struct rt_trie_node *)tn, wasfull);
 
-               tp = node_parent((struct node *) tn);
+               tp = node_parent((struct rt_trie_node *) tn);
                if (!tp)
-                       rcu_assign_pointer(t->trie, (struct node *)tn);
+                       rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
 
                tnode_free_flush();
                if (!tp)
@@ -1015,7 +1015,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
        if (IS_TNODE(tn))
                tn = (struct tnode *)resize(t, (struct tnode *)tn);
 
-       rcu_assign_pointer(t->trie, (struct node *)tn);
+       rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
        tnode_free_flush();
 }
 
@@ -1025,7 +1025,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
 {
        int pos, newpos;
        struct tnode *tp = NULL, *tn = NULL;
-       struct node *n;
+       struct rt_trie_node *n;
        struct leaf *l;
        int missbit;
        struct list_head *fa_head = NULL;
@@ -1111,10 +1111,10 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
        if (t->trie && n == NULL) {
                /* Case 2: n is NULL, and will just insert a new leaf */
 
-               node_set_parent((struct node *)l, tp);
+               node_set_parent((struct rt_trie_node *)l, tp);
 
                cindex = tkey_extract_bits(key, tp->pos, tp->bits);
-               put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
+               put_child(t, (struct tnode *)tp, cindex, (struct rt_trie_node *)l);
        } else {
                /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
                /*
@@ -1141,18 +1141,18 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
                        return NULL;
                }
 
-               node_set_parent((struct node *)tn, tp);
+               node_set_parent((struct rt_trie_node *)tn, tp);
 
                missbit = tkey_extract_bits(key, newpos, 1);
-               put_child(t, tn, missbit, (struct node *)l);
+               put_child(t, tn, missbit, (struct rt_trie_node *)l);
                put_child(t, tn, 1-missbit, n);
 
                if (tp) {
                        cindex = tkey_extract_bits(key, tp->pos, tp->bits);
                        put_child(t, (struct tnode *)tp, cindex,
-                                 (struct node *)tn);
+                                 (struct rt_trie_node *)tn);
                } else {
-                       rcu_assign_pointer(t->trie, (struct node *)tn);
+                       rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
                        tp = tn;
                }
        }
@@ -1340,7 +1340,7 @@ err:
 }
 
 /* should be called with rcu_read_lock */
-static int check_leaf(struct trie *t, struct leaf *l,
+static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
                      t_key key,  const struct flowi *flp,
                      struct fib_result *res, int fib_flags)
 {
@@ -1349,23 +1349,58 @@ static int check_leaf(struct trie *t, struct leaf *l,
        struct hlist_node *node;
 
        hlist_for_each_entry_rcu(li, node, hhead, hlist) {
-               int err;
+               struct fib_alias *fa;
                int plen = li->plen;
                __be32 mask = inet_make_mask(plen);
 
                if (l->key != (key & ntohl(mask)))
                        continue;
 
-               err = fib_semantic_match(&li->falh, flp, res, plen, fib_flags);
+               list_for_each_entry_rcu(fa, &li->falh, fa_list) {
+                       struct fib_info *fi = fa->fa_info;
+                       int nhsel, err;
 
+                       if (fa->fa_tos && fa->fa_tos != flp->fl4_tos)
+                               continue;
+                       if (fa->fa_scope < flp->fl4_scope)
+                               continue;
+                       fib_alias_accessed(fa);
+                       err = fib_props[fa->fa_type].error;
+                       if (err) {
 #ifdef CONFIG_IP_FIB_TRIE_STATS
-               if (err <= 0)
-                       t->stats.semantic_match_passed++;
-               else
-                       t->stats.semantic_match_miss++;
+                               t->stats.semantic_match_miss++;
+#endif
+                               return 1;
+                       }
+                       if (fi->fib_flags & RTNH_F_DEAD)
+                               continue;
+                       for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
+                               const struct fib_nh *nh = &fi->fib_nh[nhsel];
+
+                               if (nh->nh_flags & RTNH_F_DEAD)
+                                       continue;
+                               if (flp->oif && flp->oif != nh->nh_oif)
+                                       continue;
+
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+                               t->stats.semantic_match_passed++;
+#endif
+                               res->prefixlen = plen;
+                               res->nh_sel = nhsel;
+                               res->type = fa->fa_type;
+                               res->scope = fa->fa_scope;
+                               res->fi = fi;
+                               res->table = tb;
+                               res->fa_head = &li->falh;
+                               if (!(fib_flags & FIB_LOOKUP_NOREF))
+                                       atomic_inc(&res->fi->fib_clntref);
+                               return 0;
+                       }
+               }
+
+#ifdef CONFIG_IP_FIB_TRIE_STATS
+               t->stats.semantic_match_miss++;
 #endif
-               if (err <= 0)
-                       return err;
        }
 
        return 1;
@@ -1376,13 +1411,13 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
 {
        struct trie *t = (struct trie *) tb->tb_data;
        int ret;
-       struct node *n;
+       struct rt_trie_node *n;
        struct tnode *pn;
-       int pos, bits;
+       unsigned int pos, bits;
        t_key key = ntohl(flp->fl4_dst);
-       int chopped_off;
+       unsigned int chopped_off;
        t_key cindex = 0;
-       int current_prefix_length = KEYLENGTH;
+       unsigned int current_prefix_length = KEYLENGTH;
        struct tnode *cn;
        t_key pref_mismatch;
 
@@ -1398,7 +1433,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
 
        /* Just a leaf? */
        if (IS_LEAF(n)) {
-               ret = check_leaf(t, (struct leaf *)n, key, flp, res, fib_flags);
+               ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags);
                goto found;
        }
 
@@ -1423,7 +1458,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
                }
 
                if (IS_LEAF(n)) {
-                       ret = check_leaf(t, (struct leaf *)n, key, flp, res, fib_flags);
+                       ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags);
                        if (ret > 0)
                                goto backtrace;
                        goto found;
@@ -1541,7 +1576,7 @@ backtrace:
                if (chopped_off <= pn->bits) {
                        cindex &= ~(1 << (chopped_off-1));
                } else {
-                       struct tnode *parent = node_parent_rcu((struct node *) pn);
+                       struct tnode *parent = node_parent_rcu((struct rt_trie_node *) pn);
                        if (!parent)
                                goto failed;
 
@@ -1568,7 +1603,7 @@ found:
  */
 static void trie_leaf_remove(struct trie *t, struct leaf *l)
 {
-       struct tnode *tp = node_parent((struct node *) l);
+       struct tnode *tp = node_parent((struct rt_trie_node *) l);
 
        pr_debug("entering trie_leaf_remove(%p)\n", l);
 
@@ -1706,7 +1741,7 @@ static int trie_flush_leaf(struct leaf *l)
  * Scan for the next right leaf starting at node p->child[idx]
  * Since we have back pointer, no recursion necessary.
  */
-static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)
+static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
 {
        do {
                t_key idx;
@@ -1732,7 +1767,7 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)
                }
 
                /* Node empty, walk back up to parent */
-               c = (struct node *) p;
+               c = (struct rt_trie_node *) p;
        } while ((p = node_parent_rcu(c)) != NULL);
 
        return NULL; /* Root of trie */
@@ -1753,7 +1788,7 @@ static struct leaf *trie_firstleaf(struct trie *t)
 
 static struct leaf *trie_nextleaf(struct leaf *l)
 {
-       struct node *c = (struct node *) l;
+       struct rt_trie_node *c = (struct rt_trie_node *) l;
        struct tnode *p = node_parent_rcu(c);
 
        if (!p)
@@ -1802,80 +1837,6 @@ void fib_free_table(struct fib_table *tb)
        kfree(tb);
 }
 
-void fib_table_select_default(struct fib_table *tb,
-                             const struct flowi *flp,
-                             struct fib_result *res)
-{
-       struct trie *t = (struct trie *) tb->tb_data;
-       int order, last_idx;
-       struct fib_info *fi = NULL;
-       struct fib_info *last_resort;
-       struct fib_alias *fa = NULL;
-       struct list_head *fa_head;
-       struct leaf *l;
-
-       last_idx = -1;
-       last_resort = NULL;
-       order = -1;
-
-       rcu_read_lock();
-
-       l = fib_find_node(t, 0);
-       if (!l)
-               goto out;
-
-       fa_head = get_fa_head(l, 0);
-       if (!fa_head)
-               goto out;
-
-       if (list_empty(fa_head))
-               goto out;
-
-       list_for_each_entry_rcu(fa, fa_head, fa_list) {
-               struct fib_info *next_fi = fa->fa_info;
-
-               if (fa->fa_scope != res->scope ||
-                   fa->fa_type != RTN_UNICAST)
-                       continue;
-
-               if (next_fi->fib_priority > res->fi->fib_priority)
-                       break;
-               if (!next_fi->fib_nh[0].nh_gw ||
-                   next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
-                       continue;
-
-               fib_alias_accessed(fa);
-
-               if (fi == NULL) {
-                       if (next_fi != res->fi)
-                               break;
-               } else if (!fib_detect_death(fi, order, &last_resort,
-                                            &last_idx, tb->tb_default)) {
-                       fib_result_assign(res, fi);
-                       tb->tb_default = order;
-                       goto out;
-               }
-               fi = next_fi;
-               order++;
-       }
-       if (order <= 0 || fi == NULL) {
-               tb->tb_default = -1;
-               goto out;
-       }
-
-       if (!fib_detect_death(fi, order, &last_resort, &last_idx,
-                               tb->tb_default)) {
-               fib_result_assign(res, fi);
-               tb->tb_default = order;
-               goto out;
-       }
-       if (last_idx >= 0)
-               fib_result_assign(res, last_resort);
-       tb->tb_default = last_idx;
-out:
-       rcu_read_unlock();
-}
-
 static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
                           struct fib_table *tb,
                           struct sk_buff *skb, struct netlink_callback *cb)
@@ -1990,7 +1951,7 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
        return skb->len;
 }
 
-void __init fib_hash_init(void)
+void __init fib_trie_init(void)
 {
        fn_alias_kmem = kmem_cache_create("ip_fib_alias",
                                          sizeof(struct fib_alias),
@@ -2003,8 +1964,7 @@ void __init fib_hash_init(void)
 }
 
 
-/* Fix more generic FIB names for init later */
-struct fib_table *fib_hash_table(u32 id)
+struct fib_table *fib_trie_table(u32 id)
 {
        struct fib_table *tb;
        struct trie *t;
@@ -2036,7 +1996,7 @@ struct fib_trie_iter {
        unsigned int depth;
 };
 
-static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
+static struct rt_trie_node *fib_trie_get_next(struct fib_trie_iter *iter)
 {
        struct tnode *tn = iter->tnode;
        unsigned int cindex = iter->index;
@@ -2050,7 +2010,7 @@ static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
                 iter->tnode, iter->index, iter->depth);
 rescan:
        while (cindex < (1<<tn->bits)) {
-               struct node *n = tnode_get_child_rcu(tn, cindex);
+               struct rt_trie_node *n = tnode_get_child_rcu(tn, cindex);
 
                if (n) {
                        if (IS_LEAF(n)) {
@@ -2069,7 +2029,7 @@ rescan:
        }
 
        /* Current node exhausted, pop back up */
-       p = node_parent_rcu((struct node *)tn);
+       p = node_parent_rcu((struct rt_trie_node *)tn);
        if (p) {
                cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
                tn = p;
@@ -2081,10 +2041,10 @@ rescan:
        return NULL;
 }
 
-static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
+static struct rt_trie_node *fib_trie_get_first(struct fib_trie_iter *iter,
                                       struct trie *t)
 {
-       struct node *n;
+       struct rt_trie_node *n;
 
        if (!t)
                return NULL;
@@ -2108,7 +2068,7 @@ static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
 
 static void trie_collect_stats(struct trie *t, struct trie_stat *s)
 {
-       struct node *n;
+       struct rt_trie_node *n;
        struct fib_trie_iter iter;
 
        memset(s, 0, sizeof(*s));
@@ -2181,7 +2141,7 @@ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
        seq_putc(seq, '\n');
        seq_printf(seq, "\tPointers: %u\n", pointers);
 
-       bytes += sizeof(struct node *) * pointers;
+       bytes += sizeof(struct rt_trie_node *) * pointers;
        seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
        seq_printf(seq, "Total size: %u  kB\n", (bytes + 1023) / 1024);
 }
@@ -2262,7 +2222,7 @@ static const struct file_operations fib_triestat_fops = {
        .release = single_release_net,
 };
 
-static struct node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
+static struct rt_trie_node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
 {
        struct fib_trie_iter *iter = seq->private;
        struct net *net = seq_file_net(seq);
@@ -2275,7 +2235,7 @@ static struct node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
                struct fib_table *tb;
 
                hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
-                       struct node *n;
+                       struct rt_trie_node *n;
 
                        for (n = fib_trie_get_first(iter,
                                                    (struct trie *) tb->tb_data);
@@ -2304,7 +2264,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        struct fib_table *tb = iter->tb;
        struct hlist_node *tb_node;
        unsigned int h;
-       struct node *n;
+       struct rt_trie_node *n;
 
        ++*pos;
        /* next node in same table */
@@ -2390,7 +2350,7 @@ static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
 static int fib_trie_seq_show(struct seq_file *seq, void *v)
 {
        const struct fib_trie_iter *iter = seq->private;
-       struct node *n = v;
+       struct rt_trie_node *n = v;
 
        if (!node_parent_rcu(n))
                fib_table_print(seq, iter->tb);
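
The struct node -> rt_trie_node rename touches the trie's tagged parent pointers: the node type lives in the low bit of ->parent (T_LEAF behind NODE_TYPE_MASK), so IS_LEAF() and node_parent() are a mask away from each other. A standalone sketch of that encoding, using invented names and assuming pointer alignment leaves the low bit free:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define T_LEAF    1UL
    #define TYPE_MASK 1UL

    struct tnode;                       /* internal node, used only by pointer */

    struct trie_node {
        uintptr_t parent;               /* tagged pointer to struct tnode */
        uint32_t  key;
    };

    static struct tnode *node_parent(const struct trie_node *n)
    {
        return (struct tnode *)(n->parent & ~TYPE_MASK);
    }

    static void node_set_parent(struct trie_node *n, struct tnode *p, int leaf)
    {
        assert(((uintptr_t)p & TYPE_MASK) == 0);  /* relies on alignment */
        n->parent = (uintptr_t)p | (leaf ? T_LEAF : 0);
    }

    static int is_leaf(const struct trie_node *n)
    {
        return n->parent & T_LEAF;
    }

    int main(void)
    {
        struct trie_node leaf = { .key = 42 };

        node_set_parent(&leaf, NULL, 1);
        printf("leaf=%d parent=%p\n", is_leaf(&leaf),
               (void *)node_parent(&leaf));
        return 0;
    }
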
index 4aa1b7f01ea0c18a5718061fe31eee71f4de883f..1771ce662548d510c0eb4a783fc75866010b2f13 100644
@@ -233,48 +233,11 @@ static inline void icmp_xmit_unlock(struct sock *sk)
  *     Send an ICMP frame.
  */
 
-/*
- *     Check transmit rate limitation for given message.
- *     The rate information is held in the destination cache now.
- *     This function is generic and could be used for other purposes
- *     too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
- *
- *     Note that the same dst_entry fields are modified by functions in
- *     route.c too, but these work for packet destinations while xrlim_allow
- *     works for icmp destinations. This means the rate limiting information
- *     for one "ip object" is shared - and these ICMPs are twice limited:
- *     by source and by destination.
- *
- *     RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
- *                       SHOULD allow setting of rate limits
- *
- *     Shared between ICMPv4 and ICMPv6.
- */
-#define XRLIM_BURST_FACTOR 6
-int xrlim_allow(struct dst_entry *dst, int timeout)
-{
-       unsigned long now, token = dst->rate_tokens;
-       int rc = 0;
-
-       now = jiffies;
-       token += now - dst->rate_last;
-       dst->rate_last = now;
-       if (token > XRLIM_BURST_FACTOR * timeout)
-               token = XRLIM_BURST_FACTOR * timeout;
-       if (token >= timeout) {
-               token -= timeout;
-               rc = 1;
-       }
-       dst->rate_tokens = token;
-       return rc;
-}
-EXPORT_SYMBOL(xrlim_allow);
-
-static inline int icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
+static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
                int type, int code)
 {
        struct dst_entry *dst = &rt->dst;
-       int rc = 1;
+       bool rc = true;
 
        if (type > NR_ICMP_TYPES)
                goto out;
@@ -288,8 +251,12 @@ static inline int icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
                goto out;
 
        /* Limit if icmp type is enabled in ratemask. */
-       if ((1 << type) & net->ipv4.sysctl_icmp_ratemask)
-               rc = xrlim_allow(dst, net->ipv4.sysctl_icmp_ratelimit);
+       if ((1 << type) & net->ipv4.sysctl_icmp_ratemask) {
+               if (!rt->peer)
+                       rt_bind_peer(rt, 1);
+               rc = inet_peer_xrlim_allow(rt->peer,
+                                          net->ipv4.sysctl_icmp_ratelimit);
+       }
 out:
        return rc;
 }
@@ -391,7 +358,8 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
                                    .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
                                    .proto = IPPROTO_ICMP };
                security_skb_classify_flow(skb, &fl);
-               if (ip_route_output_key(net, &rt, &fl))
+               rt = ip_route_output_key(net, &fl);
+               if (IS_ERR(rt))
                        goto out_unlock;
        }
        if (icmpv4_xrlim_allow(net, rt, icmp_param->data.icmph.type,
@@ -402,6 +370,94 @@ out_unlock:
        icmp_xmit_unlock(sk);
 }
 
+static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in,
+                                       struct iphdr *iph,
+                                       __be32 saddr, u8 tos,
+                                       int type, int code,
+                                       struct icmp_bxm *param)
+{
+       struct flowi fl = {
+               .fl4_dst = (param->replyopts.srr ?
+                           param->replyopts.faddr : iph->saddr),
+               .fl4_src = saddr,
+               .fl4_tos = RT_TOS(tos),
+               .proto = IPPROTO_ICMP,
+               .fl_icmp_type = type,
+               .fl_icmp_code = code,
+       };
+       struct rtable *rt, *rt2;
+       int err;
+
+       security_skb_classify_flow(skb_in, &fl);
+       rt = __ip_route_output_key(net, &fl);
+       if (IS_ERR(rt))
+               return rt;
+
+       /* No need to clone since we're just using its address. */
+       rt2 = rt;
+
+       if (!fl.fl4_src)
+               fl.fl4_src = rt->rt_src;
+
+       rt = (struct rtable *) xfrm_lookup(net, &rt->dst, &fl, NULL, 0);
+       if (!IS_ERR(rt)) {
+               if (rt != rt2)
+                       return rt;
+       } else if (PTR_ERR(rt) == -EPERM) {
+               rt = NULL;
+       } else
+               return rt;
+
+       err = xfrm_decode_session_reverse(skb_in, &fl, AF_INET);
+       if (err)
+               goto relookup_failed;
+
+       if (inet_addr_type(net, fl.fl4_src) == RTN_LOCAL) {
+               rt2 = __ip_route_output_key(net, &fl);
+               if (IS_ERR(rt2))
+                       err = PTR_ERR(rt2);
+       } else {
+               struct flowi fl2 = {};
+               unsigned long orefdst;
+
+               fl2.fl4_dst = fl.fl4_src;
+               rt2 = ip_route_output_key(net, &fl2);
+               if (IS_ERR(rt2)) {
+                       err = PTR_ERR(rt2);
+                       goto relookup_failed;
+               }
+               /* Ugh! */
+               orefdst = skb_in->_skb_refdst; /* save old refdst */
+               err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src,
+                                    RT_TOS(tos), rt2->dst.dev);
+
+               dst_release(&rt2->dst);
+               rt2 = skb_rtable(skb_in);
+               skb_in->_skb_refdst = orefdst; /* restore old refdst */
+       }
+
+       if (err)
+               goto relookup_failed;
+
+       rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst, &fl, NULL, XFRM_LOOKUP_ICMP);
+       if (!IS_ERR(rt2)) {
+               dst_release(&rt->dst);
+               rt = rt2;
+       } else if (PTR_ERR(rt2) == -EPERM) {
+               if (rt)
+                       dst_release(&rt->dst);
+               return rt2;
+       } else {
+               err = PTR_ERR(rt2);
+               goto relookup_failed;
+       }
+       return rt;
+
+relookup_failed:
+       if (rt)
+               return rt;
+       return ERR_PTR(err);
+}
 
 /*
  *     Send an ICMP message in response to a situation
@@ -507,7 +563,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
                rcu_read_lock();
                if (rt_is_input_route(rt) &&
                    net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
-                       dev = dev_get_by_index_rcu(net, rt->fl.iif);
+                       dev = dev_get_by_index_rcu(net, rt->rt_iif);
 
                if (dev)
                        saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
@@ -539,86 +595,11 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
        ipc.opt = &icmp_param.replyopts;
        ipc.tx_flags = 0;
 
-       {
-               struct flowi fl = {
-                       .fl4_dst = icmp_param.replyopts.srr ?
-                                  icmp_param.replyopts.faddr : iph->saddr,
-                       .fl4_src = saddr,
-                       .fl4_tos = RT_TOS(tos),
-                       .proto = IPPROTO_ICMP,
-                       .fl_icmp_type = type,
-                       .fl_icmp_code = code,
-               };
-               int err;
-               struct rtable *rt2;
-
-               security_skb_classify_flow(skb_in, &fl);
-               if (__ip_route_output_key(net, &rt, &fl))
-                       goto out_unlock;
-
-               /* No need to clone since we're just using its address. */
-               rt2 = rt;
-
-               if (!fl.nl_u.ip4_u.saddr)
-                       fl.nl_u.ip4_u.saddr = rt->rt_src;
-
-               err = xfrm_lookup(net, (struct dst_entry **)&rt, &fl, NULL, 0);
-               switch (err) {
-               case 0:
-                       if (rt != rt2)
-                               goto route_done;
-                       break;
-               case -EPERM:
-                       rt = NULL;
-                       break;
-               default:
-                       goto out_unlock;
-               }
-
-               if (xfrm_decode_session_reverse(skb_in, &fl, AF_INET))
-                       goto relookup_failed;
-
-               if (inet_addr_type(net, fl.fl4_src) == RTN_LOCAL)
-                       err = __ip_route_output_key(net, &rt2, &fl);
-               else {
-                       struct flowi fl2 = {};
-                       unsigned long orefdst;
-
-                       fl2.fl4_dst = fl.fl4_src;
-                       if (ip_route_output_key(net, &rt2, &fl2))
-                               goto relookup_failed;
-
-                       /* Ugh! */
-                       orefdst = skb_in->_skb_refdst; /* save old refdst */
-                       err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src,
-                                            RT_TOS(tos), rt2->dst.dev);
-
-                       dst_release(&rt2->dst);
-                       rt2 = skb_rtable(skb_in);
-                       skb_in->_skb_refdst = orefdst; /* restore old refdst */
-               }
-
-               if (err)
-                       goto relookup_failed;
-
-               err = xfrm_lookup(net, (struct dst_entry **)&rt2, &fl, NULL,
-                                 XFRM_LOOKUP_ICMP);
-               switch (err) {
-               case 0:
-                       dst_release(&rt->dst);
-                       rt = rt2;
-                       break;
-               case -EPERM:
-                       goto ende;
-               default:
-relookup_failed:
-                       if (!rt)
-                               goto out_unlock;
-                       break;
-               }
-       }
+       rt = icmp_route_lookup(net, skb_in, iph, saddr, tos,
+                              type, code, &icmp_param);
+       if (IS_ERR(rt))
+               goto out_unlock;
 
-route_done:
        if (!icmpv4_xrlim_allow(net, rt, type, code))
                goto ende;
 
index e0e77e297de32148356da4c309856dacabee2387..44ba9068b72f9b9136d8126a9b1b76b4cca1ce66 100644 (file)
@@ -325,7 +325,8 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
                struct flowi fl = { .oif = dev->ifindex,
                                    .fl4_dst = IGMPV3_ALL_MCR,
                                    .proto = IPPROTO_IGMP };
-               if (ip_route_output_key(net, &rt, &fl)) {
+               rt = ip_route_output_key(net, &fl);
+               if (IS_ERR(rt)) {
                        kfree_skb(skb);
                        return NULL;
                }
@@ -670,7 +671,8 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
                struct flowi fl = { .oif = dev->ifindex,
                                    .fl4_dst = dst,
                                    .proto = IPPROTO_IGMP };
-               if (ip_route_output_key(net, &rt, &fl))
+               rt = ip_route_output_key(net, &fl);
+               if (IS_ERR(rt))
                        return -1;
        }
        if (rt->rt_src == 0) {
@@ -1440,7 +1442,6 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
 static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
 {
        struct flowi fl = { .fl4_dst = imr->imr_multiaddr.s_addr };
-       struct rtable *rt;
        struct net_device *dev = NULL;
        struct in_device *idev = NULL;
 
@@ -1454,9 +1455,12 @@ static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
                        return NULL;
        }
 
-       if (!dev && !ip_route_output_key(net, &rt, &fl)) {
-               dev = rt->dst.dev;
-               ip_rt_put(rt);
+       if (!dev) {
+               struct rtable *rt = ip_route_output_key(net, &fl);
+               if (!IS_ERR(rt)) {
+                       dev = rt->dst.dev;
+                       ip_rt_put(rt);
+               }
        }
        if (dev) {
                imr->imr_ifindex = dev->ifindex;
index 97e5fb76526500d6355b921be6359680bf7a8720..e4e301a61c5bd7f3a362e5445dc18b396afefbf4 100644 (file)
@@ -369,7 +369,8 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
        struct net *net = sock_net(sk);
 
        security_req_classify_flow(req, &fl);
-       if (ip_route_output_flow(net, &rt, &fl, sk, 0))
+       rt = ip_route_output_flow(net, &fl, sk);
+       if (IS_ERR(rt))
                goto no_route;
        if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
                goto route_err;
index a96e65674ac3e694e8e70fec1855324a694ccbf3..6442c35edb0b68a68c1a60aa225e7cb9758ac4ca 100644 (file)
@@ -81,19 +81,19 @@ static const struct inet_peer peer_fake_node = {
 
 struct inet_peer_base {
        struct inet_peer __rcu *root;
-       spinlock_t      lock;
+       seqlock_t       lock;
        int             total;
 };
 
 static struct inet_peer_base v4_peers = {
        .root           = peer_avl_empty_rcu,
-       .lock           = __SPIN_LOCK_UNLOCKED(v4_peers.lock),
+       .lock           = __SEQLOCK_UNLOCKED(v4_peers.lock),
        .total          = 0,
 };
 
 static struct inet_peer_base v6_peers = {
        .root           = peer_avl_empty_rcu,
-       .lock           = __SPIN_LOCK_UNLOCKED(v6_peers.lock),
+       .lock           = __SEQLOCK_UNLOCKED(v6_peers.lock),
        .total          = 0,
 };
 
@@ -167,9 +167,9 @@ static int addr_compare(const struct inetpeer_addr *a,
        int i, n = (a->family == AF_INET ? 1 : 4);
 
        for (i = 0; i < n; i++) {
-               if (a->a6[i] == b->a6[i])
+               if (a->addr.a6[i] == b->addr.a6[i])
                        continue;
-               if (a->a6[i] < b->a6[i])
+               if (a->addr.a6[i] < b->addr.a6[i])
                        return -1;
                return 1;
        }
@@ -177,6 +177,9 @@ static int addr_compare(const struct inetpeer_addr *a,
        return 0;
 }
 
+#define rcu_deref_locked(X, BASE)                              \
+       rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))
+
 /*
  * Called with local BH disabled and the pool lock held.
  */
@@ -187,8 +190,7 @@ static int addr_compare(const struct inetpeer_addr *a,
                                                                \
        stackptr = _stack;                                      \
        *stackptr++ = &_base->root;                             \
-       for (u = rcu_dereference_protected(_base->root,         \
-                       lockdep_is_held(&_base->lock));         \
+       for (u = rcu_deref_locked(_base->root, _base);          \
             u != peer_avl_empty; ) {                           \
                int cmp = addr_compare(_daddr, &u->daddr);      \
                if (cmp == 0)                                   \
@@ -198,23 +200,22 @@ static int addr_compare(const struct inetpeer_addr *a,
                else                                            \
                        v = &u->avl_right;                      \
                *stackptr++ = v;                                \
-               u = rcu_dereference_protected(*v,               \
-                       lockdep_is_held(&_base->lock));         \
+               u = rcu_deref_locked(*v, _base);                \
        }                                                       \
        u;                                                      \
 })
 
 /*
- * Called with rcu_read_lock_bh()
+ * Called with rcu_read_lock()
  * Because we hold no lock against a writer, its quite possible we fall
  * in an endless loop.
  * But every pointer we follow is guaranteed to be valid thanks to RCU.
  * We exit from this function if number of links exceeds PEER_MAXDEPTH
  */
-static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
-                                      struct inet_peer_base *base)
+static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
+                                   struct inet_peer_base *base)
 {
-       struct inet_peer *u = rcu_dereference_bh(base->root);
+       struct inet_peer *u = rcu_dereference(base->root);
        int count = 0;
 
        while (u != peer_avl_empty) {
@@ -230,9 +231,9 @@ static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
                        return u;
                }
                if (cmp == -1)
-                       u = rcu_dereference_bh(u->avl_left);
+                       u = rcu_dereference(u->avl_left);
                else
-                       u = rcu_dereference_bh(u->avl_right);
+                       u = rcu_dereference(u->avl_right);
                if (unlikely(++count == PEER_MAXDEPTH))
                        break;
        }
@@ -246,13 +247,11 @@ static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
        struct inet_peer __rcu **v;                             \
        *stackptr++ = &start->avl_left;                         \
        v = &start->avl_left;                                   \
-       for (u = rcu_dereference_protected(*v,                  \
-                       lockdep_is_held(&base->lock));          \
+       for (u = rcu_deref_locked(*v, base);                    \
             u->avl_right != peer_avl_empty_rcu; ) {            \
                v = &u->avl_right;                              \
                *stackptr++ = v;                                \
-               u = rcu_dereference_protected(*v,               \
-                       lockdep_is_held(&base->lock));          \
+               u = rcu_deref_locked(*v, base);                 \
        }                                                       \
        u;                                                      \
 })
@@ -271,21 +270,16 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
 
        while (stackend > stack) {
                nodep = *--stackend;
-               node = rcu_dereference_protected(*nodep,
-                               lockdep_is_held(&base->lock));
-               l = rcu_dereference_protected(node->avl_left,
-                               lockdep_is_held(&base->lock));
-               r = rcu_dereference_protected(node->avl_right,
-                               lockdep_is_held(&base->lock));
+               node = rcu_deref_locked(*nodep, base);
+               l = rcu_deref_locked(node->avl_left, base);
+               r = rcu_deref_locked(node->avl_right, base);
                lh = node_height(l);
                rh = node_height(r);
                if (lh > rh + 1) { /* l: RH+2 */
                        struct inet_peer *ll, *lr, *lrl, *lrr;
                        int lrh;
-                       ll = rcu_dereference_protected(l->avl_left,
-                               lockdep_is_held(&base->lock));
-                       lr = rcu_dereference_protected(l->avl_right,
-                               lockdep_is_held(&base->lock));
+                       ll = rcu_deref_locked(l->avl_left, base);
+                       lr = rcu_deref_locked(l->avl_right, base);
                        lrh = node_height(lr);
                        if (lrh <= node_height(ll)) {   /* ll: RH+1 */
                                RCU_INIT_POINTER(node->avl_left, lr);   /* lr: RH or RH+1 */
@@ -296,10 +290,8 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
                                l->avl_height = node->avl_height + 1;
                                RCU_INIT_POINTER(*nodep, l);
                        } else { /* ll: RH, lr: RH+1 */
-                               lrl = rcu_dereference_protected(lr->avl_left,
-                                       lockdep_is_held(&base->lock));  /* lrl: RH or RH-1 */
-                               lrr = rcu_dereference_protected(lr->avl_right,
-                                       lockdep_is_held(&base->lock));  /* lrr: RH or RH-1 */
+                               lrl = rcu_deref_locked(lr->avl_left, base);/* lrl: RH or RH-1 */
+                               lrr = rcu_deref_locked(lr->avl_right, base);/* lrr: RH or RH-1 */
                                RCU_INIT_POINTER(node->avl_left, lrr);  /* lrr: RH or RH-1 */
                                RCU_INIT_POINTER(node->avl_right, r);   /* r: RH */
                                node->avl_height = rh + 1; /* node: RH+1 */
@@ -314,10 +306,8 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
                } else if (rh > lh + 1) { /* r: LH+2 */
                        struct inet_peer *rr, *rl, *rlr, *rll;
                        int rlh;
-                       rr = rcu_dereference_protected(r->avl_right,
-                               lockdep_is_held(&base->lock));
-                       rl = rcu_dereference_protected(r->avl_left,
-                               lockdep_is_held(&base->lock));
+                       rr = rcu_deref_locked(r->avl_right, base);
+                       rl = rcu_deref_locked(r->avl_left, base);
                        rlh = node_height(rl);
                        if (rlh <= node_height(rr)) {   /* rr: LH+1 */
                                RCU_INIT_POINTER(node->avl_right, rl);  /* rl: LH or LH+1 */
@@ -328,10 +318,8 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
                                r->avl_height = node->avl_height + 1;
                                RCU_INIT_POINTER(*nodep, r);
                        } else { /* rr: RH, rl: RH+1 */
-                               rlr = rcu_dereference_protected(rl->avl_right,
-                                       lockdep_is_held(&base->lock));  /* rlr: LH or LH-1 */
-                               rll = rcu_dereference_protected(rl->avl_left,
-                                       lockdep_is_held(&base->lock));  /* rll: LH or LH-1 */
+                               rlr = rcu_deref_locked(rl->avl_right, base);/* rlr: LH or LH-1 */
+                               rll = rcu_deref_locked(rl->avl_left, base);/* rll: LH or LH-1 */
                                RCU_INIT_POINTER(node->avl_right, rll); /* rll: LH or LH-1 */
                                RCU_INIT_POINTER(node->avl_left, l);    /* l: LH */
                                node->avl_height = lh + 1; /* node: LH+1 */
@@ -372,7 +360,7 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
 
        do_free = 0;
 
-       spin_lock_bh(&base->lock);
+       write_seqlock_bh(&base->lock);
        /* Check the reference counter.  It was artificially incremented by 1
         * in cleanup() function to prevent sudden disappearing.  If we can
         * atomically (because of lockless readers) take this last reference,
@@ -392,8 +380,7 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
                        /* look for a node to insert instead of p */
                        struct inet_peer *t;
                        t = lookup_rightempty(p, base);
-                       BUG_ON(rcu_dereference_protected(*stackptr[-1],
-                                       lockdep_is_held(&base->lock)) != t);
+                       BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
                        **--stackptr = t->avl_left;
                        /* t is removed, t->daddr > x->daddr for any
                         * x in p->avl_left subtree.
@@ -409,7 +396,7 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
                base->total--;
                do_free = 1;
        }
-       spin_unlock_bh(&base->lock);
+       write_sequnlock_bh(&base->lock);
 
        if (do_free)
                call_rcu_bh(&p->rcu, inetpeer_free_rcu);
@@ -477,13 +464,17 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
        struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
        struct inet_peer_base *base = family_to_base(daddr->family);
        struct inet_peer *p;
+       unsigned int sequence;
+       int invalidated;
 
        /* Look up for the address quickly, lockless.
         * Because of a concurrent writer, we might not find an existing entry.
         */
-       rcu_read_lock_bh();
-       p = lookup_rcu_bh(daddr, base);
-       rcu_read_unlock_bh();
+       rcu_read_lock();
+       sequence = read_seqbegin(&base->lock);
+       p = lookup_rcu(daddr, base);
+       invalidated = read_seqretry(&base->lock, sequence);
+       rcu_read_unlock();
 
        if (p) {
                /* The existing node has been found.
@@ -493,14 +484,18 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
                return p;
        }
 
+       /* If no writer did a change during our lookup, we can return early. */
+       if (!create && !invalidated)
+               return NULL;
+
        /* retry an exact lookup, taking the lock before.
         * At least, nodes should be hot in our cache.
         */
-       spin_lock_bh(&base->lock);
+       write_seqlock_bh(&base->lock);
        p = lookup(daddr, stack, base);
        if (p != peer_avl_empty) {
                atomic_inc(&p->refcnt);
-               spin_unlock_bh(&base->lock);
+               write_sequnlock_bh(&base->lock);
                /* Remove the entry from unused list if it was there. */
                unlink_from_unused(p);
                return p;
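
The inetpeer conversion from a plain spinlock to a seqlock lets inet_getpeer() do a completely lockless lookup and fall back to the write-locked path only when the caller wants to create an entry or read_seqretry() reports that a writer raced with the lookup. A rough userspace sketch of that reader/writer shape, assuming a single writer and using C11 atomics (the plain int read is a simplification; the kernel pairs the sequence counter with RCU and its own barriers):

    #include <stdatomic.h>
    #include <stdio.h>

    struct seq_protected {
            atomic_uint seq;        /* even: stable, odd: write in progress */
            int value;
    };

    /* Writer side: bump the counter to odd, update, bump back to even.
     * The real seqlock_t also embeds a spinlock so concurrent writers
     * serialize; that part is omitted here. */
    static void writer_update(struct seq_protected *p, int v)
    {
            atomic_fetch_add_explicit(&p->seq, 1, memory_order_acq_rel);
            p->value = v;
            atomic_fetch_add_explicit(&p->seq, 1, memory_order_release);
    }

    /* Reader side: sample the counter, read, and retry if a write was in
     * flight or completed in between -- the same read_seqbegin()/
     * read_seqretry() shape inet_getpeer() now wraps around lookup_rcu(). */
    static int reader_read(struct seq_protected *p)
    {
            unsigned int start;
            int v;

            do {
                    start = atomic_load_explicit(&p->seq, memory_order_acquire);
                    v = p->value;
            } while ((start & 1u) ||
                     start != atomic_load_explicit(&p->seq, memory_order_acquire));
            return v;
    }

    int main(void)
    {
            struct seq_protected p;

            atomic_init(&p.seq, 0);
            p.value = 1;
            writer_update(&p, 42);
            printf("read %d\n", reader_read(&p));
            return 0;
    }

In the patch, a retry of the lookup is only taken under the write lock, and the "!create && !invalidated" early return is what makes a lockless miss trustworthy.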
@@ -510,8 +505,13 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
                p->daddr = *daddr;
                atomic_set(&p->refcnt, 1);
                atomic_set(&p->rid, 0);
-               atomic_set(&p->ip_id_count, secure_ip_id(daddr->a4));
+               atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
                p->tcp_ts_stamp = 0;
+               p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
+               p->rate_tokens = 0;
+               p->rate_last = 0;
+               p->pmtu_expires = 0;
+               memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
                INIT_LIST_HEAD(&p->unused);
 
 
@@ -519,7 +519,7 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
                link_to_pool(p, base);
                base->total++;
        }
-       spin_unlock_bh(&base->lock);
+       write_sequnlock_bh(&base->lock);
 
        if (base->total >= inet_peer_threshold)
                /* Remove one less-recently-used entry. */
@@ -579,3 +579,44 @@ void inet_putpeer(struct inet_peer *p)
        local_bh_enable();
 }
 EXPORT_SYMBOL_GPL(inet_putpeer);
+
+/*
+ *     Check transmit rate limitation for given message.
+ *     The rate information is held in the inet_peer entries now.
+ *     This function is generic and could be used for other purposes
+ *     too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
+ *
+ *     Note that the same inet_peer fields are modified by functions in
+ *     route.c too, but these work for packet destinations while xrlim_allow
+ *     works for icmp destinations. This means the rate limiting information
+ *     for one "ip object" is shared - and these ICMPs are twice limited:
+ *     by source and by destination.
+ *
+ *     RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
+ *                       SHOULD allow setting of rate limits
+ *
+ *     Shared between ICMPv4 and ICMPv6.
+ */
+#define XRLIM_BURST_FACTOR 6
+bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
+{
+       unsigned long now, token;
+       bool rc = false;
+
+       if (!peer)
+               return true;
+
+       token = peer->rate_tokens;
+       now = jiffies;
+       token += now - peer->rate_last;
+       peer->rate_last = now;
+       if (token > XRLIM_BURST_FACTOR * timeout)
+               token = XRLIM_BURST_FACTOR * timeout;
+       if (token >= timeout) {
+               token -= timeout;
+               rc = true;
+       }
+       peer->rate_tokens = token;
+       return rc;
+}
+EXPORT_SYMBOL(inet_peer_xrlim_allow);
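
inet_peer_xrlim_allow() is a classic token bucket: elapsed time tops the bucket up, the burst is capped at XRLIM_BURST_FACTOR * timeout, and each permitted message spends one timeout worth of tokens. A standalone sketch of the same arithmetic, with time() standing in for jiffies and a plain struct in place of the inet_peer fields:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    #define XRLIM_BURST_FACTOR 6

    struct ratelimit {
            unsigned long tokens;
            unsigned long last;     /* seconds; analogous to rate_last in jiffies */
    };

    static bool xrlim_allow(struct ratelimit *rl, unsigned long timeout)
    {
            unsigned long now = (unsigned long)time(NULL);
            unsigned long token = rl->tokens + (now - rl->last);
            bool ok = false;

            rl->last = now;
            if (token > XRLIM_BURST_FACTOR * timeout)
                    token = XRLIM_BURST_FACTOR * timeout;
            if (token >= timeout) {
                    token -= timeout;
                    ok = true;
            }
            rl->tokens = token;
            return ok;
    }

    int main(void)
    {
            struct ratelimit rl = {
                    .tokens = 1,
                    .last = (unsigned long)time(NULL),
            };

            /* One token banked: the first send passes, the following ones
             * are limited until wall-clock time refills the bucket. */
            for (int i = 0; i < 3; i++)
                    printf("send %d: %s\n", i,
                           xrlim_allow(&rl, 1) ? "allowed" : "limited");
            return 0;
    }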
index d1d0e2c256fc4080033a01a621f73b1c3b080b7e..71465955520bb2a4d79d7fa37eb8545b1d94889f 100644 (file)
@@ -778,7 +778,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                        .proto = IPPROTO_GRE,
                        .fl_gre_key = tunnel->parms.o_key
                };
-               if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
+               rt = ip_route_output_key(dev_net(dev), &fl);
+               if (IS_ERR(rt)) {
                        dev->stats.tx_carrier_errors++;
                        goto tx_error;
                }
@@ -953,9 +954,9 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
                        .proto = IPPROTO_GRE,
                        .fl_gre_key = tunnel->parms.o_key
                };
-               struct rtable *rt;
+               struct rtable *rt = ip_route_output_key(dev_net(dev), &fl);
 
-               if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
+               if (!IS_ERR(rt)) {
                        tdev = rt->dst.dev;
                        ip_rt_put(rt);
                }
@@ -1215,9 +1216,9 @@ static int ipgre_open(struct net_device *dev)
                        .proto = IPPROTO_GRE,
                        .fl_gre_key = t->parms.o_key
                };
-               struct rtable *rt;
+               struct rtable *rt = ip_route_output_key(dev_net(dev), &fl);
 
-               if (ip_route_output_key(dev_net(dev), &rt, &fl))
+               if (IS_ERR(rt))
                        return -EADDRNOTAVAIL;
                dev = rt->dst.dev;
                ip_rt_put(rt);
index d859bcc26cb7e568b60261e8a0703add32a92798..d7b2b0987a3b723739c17ef93df99992d5afddb0 100644 (file)
@@ -340,7 +340,7 @@ static int ip_rcv_finish(struct sk_buff *skb)
                }
        }
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        if (unlikely(skb_dst(skb)->tclassid)) {
                struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
                u32 idx = skb_dst(skb)->tclassid;
index 04c7b3ba6b39118ed47c1778f3b10e76fd31e599..171f483b21d535f9b8fa2735fc578036ff518910 100644 (file)
@@ -355,7 +355,8 @@ int ip_queue_xmit(struct sk_buff *skb)
                         * itself out.
                         */
                        security_sk_classify_flow(sk, &fl);
-                       if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
+                       rt = ip_route_output_flow(sock_net(sk), &fl, sk);
+                       if (IS_ERR(rt))
                                goto no_route;
                }
                sk_setup_caps(sk, &rt->dst);
@@ -733,6 +734,7 @@ csum_page(struct page *page, int offset, int copy)
 }
 
 static inline int ip_ufo_append_data(struct sock *sk,
+                       struct sk_buff_head *queue,
                        int getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb),
                        void *from, int length, int hh_len, int fragheaderlen,
@@ -745,7 +747,7 @@ static inline int ip_ufo_append_data(struct sock *sk,
         * device, so create one single skb packet containing complete
         * udp datagram
         */
-       if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
+       if ((skb = skb_peek_tail(queue)) == NULL) {
                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
                        (flags & MSG_DONTWAIT), &err);
@@ -767,40 +769,28 @@ static inline int ip_ufo_append_data(struct sock *sk,
 
                skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;
-               sk->sk_sndmsg_off = 0;
 
                /* specify the length of each IP datagram fragment */
                skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
-               __skb_queue_tail(&sk->sk_write_queue, skb);
+               __skb_queue_tail(queue, skb);
        }
 
        return skb_append_datato_frags(sk, skb, getfrag, from,
                                       (length - transhdrlen));
 }
 
-/*
- *     ip_append_data() and ip_append_page() can make one large IP datagram
- *     from many pieces of data. Each pieces will be holded on the socket
- *     until ip_push_pending_frames() is called. Each piece can be a page
- *     or non-page data.
- *
- *     Not only UDP, other transport protocols - e.g. raw sockets - can use
- *     this interface potentially.
- *
- *     LATER: length must be adjusted by pad at tail, when it is required.
- */
-int ip_append_data(struct sock *sk,
-                  int getfrag(void *from, char *to, int offset, int len,
-                              int odd, struct sk_buff *skb),
-                  void *from, int length, int transhdrlen,
-                  struct ipcm_cookie *ipc, struct rtable **rtp,
-                  unsigned int flags)
+static int __ip_append_data(struct sock *sk, struct sk_buff_head *queue,
+                           struct inet_cork *cork,
+                           int getfrag(void *from, char *to, int offset,
+                                       int len, int odd, struct sk_buff *skb),
+                           void *from, int length, int transhdrlen,
+                           unsigned int flags)
 {
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;
 
-       struct ip_options *opt = NULL;
+       struct ip_options *opt = cork->opt;
        int hh_len;
        int exthdrlen;
        int mtu;
@@ -809,58 +799,19 @@ int ip_append_data(struct sock *sk,
        int offset = 0;
        unsigned int maxfraglen, fragheaderlen;
        int csummode = CHECKSUM_NONE;
-       struct rtable *rt;
+       struct rtable *rt = (struct rtable *)cork->dst;
 
-       if (flags&MSG_PROBE)
-               return 0;
-
-       if (skb_queue_empty(&sk->sk_write_queue)) {
-               /*
-                * setup for corking.
-                */
-               opt = ipc->opt;
-               if (opt) {
-                       if (inet->cork.opt == NULL) {
-                               inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
-                               if (unlikely(inet->cork.opt == NULL))
-                                       return -ENOBUFS;
-                       }
-                       memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
-                       inet->cork.flags |= IPCORK_OPT;
-                       inet->cork.addr = ipc->addr;
-               }
-               rt = *rtp;
-               if (unlikely(!rt))
-                       return -EFAULT;
-               /*
-                * We steal reference to this route, caller should not release it
-                */
-               *rtp = NULL;
-               inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
-                                           rt->dst.dev->mtu :
-                                           dst_mtu(rt->dst.path);
-               inet->cork.dst = &rt->dst;
-               inet->cork.length = 0;
-               sk->sk_sndmsg_page = NULL;
-               sk->sk_sndmsg_off = 0;
-               exthdrlen = rt->dst.header_len;
-               length += exthdrlen;
-               transhdrlen += exthdrlen;
-       } else {
-               rt = (struct rtable *)inet->cork.dst;
-               if (inet->cork.flags & IPCORK_OPT)
-                       opt = inet->cork.opt;
+       exthdrlen = transhdrlen ? rt->dst.header_len : 0;
+       length += exthdrlen;
+       transhdrlen += exthdrlen;
+       mtu = cork->fragsize;
 
-               transhdrlen = 0;
-               exthdrlen = 0;
-               mtu = inet->cork.fragsize;
-       }
        hh_len = LL_RESERVED_SPACE(rt->dst.dev);
 
        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
 
-       if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
+       if (cork->length + length > 0xFFFF - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
                               mtu-exthdrlen);
                return -EMSGSIZE;
@@ -876,15 +827,15 @@ int ip_append_data(struct sock *sk,
            !exthdrlen)
                csummode = CHECKSUM_PARTIAL;
 
-       skb = skb_peek_tail(&sk->sk_write_queue);
+       skb = skb_peek_tail(queue);
 
-       inet->cork.length += length;
+       cork->length += length;
        if (((length > mtu) || (skb && skb_is_gso(skb))) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO)) {
-               err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
-                                        fragheaderlen, transhdrlen, mtu,
-                                        flags);
+               err = ip_ufo_append_data(sk, queue, getfrag, from, length,
+                                        hh_len, fragheaderlen, transhdrlen,
+                                        mtu, flags);
                if (err)
                        goto error;
                return 0;
@@ -961,7 +912,7 @@ alloc_new_skb:
                                else
                                        /* only the initial fragment is
                                           time stamped */
-                                       ipc->tx_flags = 0;
+                                       cork->tx_flags = 0;
                        }
                        if (skb == NULL)
                                goto error;
@@ -972,7 +923,7 @@ alloc_new_skb:
                        skb->ip_summed = csummode;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);
-                       skb_shinfo(skb)->tx_flags = ipc->tx_flags;
+                       skb_shinfo(skb)->tx_flags = cork->tx_flags;
 
                        /*
                         *      Find where to start putting bytes.
@@ -1009,7 +960,7 @@ alloc_new_skb:
                        /*
                         * Put the packet on the pending queue.
                         */
-                       __skb_queue_tail(&sk->sk_write_queue, skb);
+                       __skb_queue_tail(queue, skb);
                        continue;
                }
 
@@ -1029,8 +980,8 @@ alloc_new_skb:
                } else {
                        int i = skb_shinfo(skb)->nr_frags;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
-                       struct page *page = sk->sk_sndmsg_page;
-                       int off = sk->sk_sndmsg_off;
+                       struct page *page = cork->page;
+                       int off = cork->off;
                        unsigned int left;
 
                        if (page && (left = PAGE_SIZE - off) > 0) {
@@ -1042,7 +993,7 @@ alloc_new_skb:
                                                goto error;
                                        }
                                        get_page(page);
-                                       skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
+                                       skb_fill_page_desc(skb, i, page, off, 0);
                                        frag = &skb_shinfo(skb)->frags[i];
                                }
                        } else if (i < MAX_SKB_FRAGS) {
@@ -1053,8 +1004,8 @@ alloc_new_skb:
                                        err = -ENOMEM;
                                        goto error;
                                }
-                               sk->sk_sndmsg_page = page;
-                               sk->sk_sndmsg_off = 0;
+                               cork->page = page;
+                               cork->off = 0;
 
                                skb_fill_page_desc(skb, i, page, 0, 0);
                                frag = &skb_shinfo(skb)->frags[i];
@@ -1066,7 +1017,7 @@ alloc_new_skb:
                                err = -EFAULT;
                                goto error;
                        }
-                       sk->sk_sndmsg_off += copy;
+                       cork->off += copy;
                        frag->size += copy;
                        skb->len += copy;
                        skb->data_len += copy;
@@ -1080,11 +1031,87 @@ alloc_new_skb:
        return 0;
 
 error:
-       inet->cork.length -= length;
+       cork->length -= length;
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
        return err;
 }
 
+static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
+                        struct ipcm_cookie *ipc, struct rtable **rtp)
+{
+       struct inet_sock *inet = inet_sk(sk);
+       struct ip_options *opt;
+       struct rtable *rt;
+
+       /*
+        * setup for corking.
+        */
+       opt = ipc->opt;
+       if (opt) {
+               if (cork->opt == NULL) {
+                       cork->opt = kmalloc(sizeof(struct ip_options) + 40,
+                                           sk->sk_allocation);
+                       if (unlikely(cork->opt == NULL))
+                               return -ENOBUFS;
+               }
+               memcpy(cork->opt, opt, sizeof(struct ip_options) + opt->optlen);
+               cork->flags |= IPCORK_OPT;
+               cork->addr = ipc->addr;
+       }
+       rt = *rtp;
+       if (unlikely(!rt))
+               return -EFAULT;
+       /*
+        * We steal reference to this route, caller should not release it
+        */
+       *rtp = NULL;
+       cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ?
+                        rt->dst.dev->mtu : dst_mtu(rt->dst.path);
+       cork->dst = &rt->dst;
+       cork->length = 0;
+       cork->tx_flags = ipc->tx_flags;
+       cork->page = NULL;
+       cork->off = 0;
+
+       return 0;
+}
+
+/*
+ *     ip_append_data() and ip_append_page() can make one large IP datagram
+ *     from many pieces of data. Each pieces will be holded on the socket
+ *     until ip_push_pending_frames() is called. Each piece can be a page
+ *     or non-page data.
+ *
+ *     Not only UDP, other transport protocols - e.g. raw sockets - can use
+ *     this interface potentially.
+ *
+ *     LATER: length must be adjusted by pad at tail, when it is required.
+ */
+int ip_append_data(struct sock *sk,
+                  int getfrag(void *from, char *to, int offset, int len,
+                              int odd, struct sk_buff *skb),
+                  void *from, int length, int transhdrlen,
+                  struct ipcm_cookie *ipc, struct rtable **rtp,
+                  unsigned int flags)
+{
+       struct inet_sock *inet = inet_sk(sk);
+       int err;
+
+       if (flags&MSG_PROBE)
+               return 0;
+
+       if (skb_queue_empty(&sk->sk_write_queue)) {
+               err = ip_setup_cork(sk, &inet->cork, ipc, rtp);
+               if (err)
+                       return err;
+       } else {
+               transhdrlen = 0;
+       }
+
+       return __ip_append_data(sk, &sk->sk_write_queue, &inet->cork, getfrag,
+                               from, length, transhdrlen, flags);
+}
+
 ssize_t        ip_append_page(struct sock *sk, struct page *page,
                       int offset, size_t size, int flags)
 {
@@ -1228,40 +1255,41 @@ error:
        return err;
 }
 
-static void ip_cork_release(struct inet_sock *inet)
+static void ip_cork_release(struct inet_cork *cork)
 {
-       inet->cork.flags &= ~IPCORK_OPT;
-       kfree(inet->cork.opt);
-       inet->cork.opt = NULL;
-       dst_release(inet->cork.dst);
-       inet->cork.dst = NULL;
+       cork->flags &= ~IPCORK_OPT;
+       kfree(cork->opt);
+       cork->opt = NULL;
+       dst_release(cork->dst);
+       cork->dst = NULL;
 }
 
 /*
  *     Combined all pending IP fragments on the socket as one IP datagram
  *     and push them out.
  */
-int ip_push_pending_frames(struct sock *sk)
+struct sk_buff *__ip_make_skb(struct sock *sk,
+                             struct sk_buff_head *queue,
+                             struct inet_cork *cork)
 {
        struct sk_buff *skb, *tmp_skb;
        struct sk_buff **tail_skb;
        struct inet_sock *inet = inet_sk(sk);
        struct net *net = sock_net(sk);
        struct ip_options *opt = NULL;
-       struct rtable *rt = (struct rtable *)inet->cork.dst;
+       struct rtable *rt = (struct rtable *)cork->dst;
        struct iphdr *iph;
        __be16 df = 0;
        __u8 ttl;
-       int err = 0;
 
-       if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
+       if ((skb = __skb_dequeue(queue)) == NULL)
                goto out;
        tail_skb = &(skb_shinfo(skb)->frag_list);
 
        /* move skb->data to ip header from ext header */
        if (skb->data < skb_network_header(skb))
                __skb_pull(skb, skb_network_offset(skb));
-       while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
+       while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
                __skb_pull(tmp_skb, skb_network_header_len(skb));
                *tail_skb = tmp_skb;
                tail_skb = &(tmp_skb->next);
@@ -1287,8 +1315,8 @@ int ip_push_pending_frames(struct sock *sk)
             ip_dont_fragment(sk, &rt->dst)))
                df = htons(IP_DF);
 
-       if (inet->cork.flags & IPCORK_OPT)
-               opt = inet->cork.opt;
+       if (cork->flags & IPCORK_OPT)
+               opt = cork->opt;
 
        if (rt->rt_type == RTN_MULTICAST)
                ttl = inet->mc_ttl;
@@ -1300,7 +1328,7 @@ int ip_push_pending_frames(struct sock *sk)
        iph->ihl = 5;
        if (opt) {
                iph->ihl += opt->optlen>>2;
-               ip_options_build(skb, opt, inet->cork.addr, rt, 0);
+               ip_options_build(skb, opt, cork->addr, rt, 0);
        }
        iph->tos = inet->tos;
        iph->frag_off = df;
@@ -1316,44 +1344,95 @@ int ip_push_pending_frames(struct sock *sk)
         * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
         * on dst refcount
         */
-       inet->cork.dst = NULL;
+       cork->dst = NULL;
        skb_dst_set(skb, &rt->dst);
 
        if (iph->protocol == IPPROTO_ICMP)
                icmp_out_count(net, ((struct icmphdr *)
                        skb_transport_header(skb))->type);
 
-       /* Netfilter gets whole the not fragmented skb. */
+       ip_cork_release(cork);
+out:
+       return skb;
+}
+
+int ip_send_skb(struct sk_buff *skb)
+{
+       struct net *net = sock_net(skb->sk);
+       int err;
+
        err = ip_local_out(skb);
        if (err) {
                if (err > 0)
                        err = net_xmit_errno(err);
                if (err)
-                       goto error;
+                       IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
        }
 
-out:
-       ip_cork_release(inet);
        return err;
+}
 
-error:
-       IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
-       goto out;
+int ip_push_pending_frames(struct sock *sk)
+{
+       struct sk_buff *skb;
+
+       skb = ip_finish_skb(sk);
+       if (!skb)
+               return 0;
+
+       /* Netfilter gets whole the not fragmented skb. */
+       return ip_send_skb(skb);
 }
 
 /*
  *     Throw away all pending data on the socket.
  */
-void ip_flush_pending_frames(struct sock *sk)
+static void __ip_flush_pending_frames(struct sock *sk,
+                                     struct sk_buff_head *queue,
+                                     struct inet_cork *cork)
 {
        struct sk_buff *skb;
 
-       while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
+       while ((skb = __skb_dequeue_tail(queue)) != NULL)
                kfree_skb(skb);
 
-       ip_cork_release(inet_sk(sk));
+       ip_cork_release(cork);
+}
+
+void ip_flush_pending_frames(struct sock *sk)
+{
+       __ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork);
 }
 
+struct sk_buff *ip_make_skb(struct sock *sk,
+                           int getfrag(void *from, char *to, int offset,
+                                       int len, int odd, struct sk_buff *skb),
+                           void *from, int length, int transhdrlen,
+                           struct ipcm_cookie *ipc, struct rtable **rtp,
+                           unsigned int flags)
+{
+       struct inet_cork cork = {};
+       struct sk_buff_head queue;
+       int err;
+
+       if (flags & MSG_PROBE)
+               return NULL;
+
+       __skb_queue_head_init(&queue);
+
+       err = ip_setup_cork(sk, &cork, ipc, rtp);
+       if (err)
+               return ERR_PTR(err);
+
+       err = __ip_append_data(sk, &queue, &cork, getfrag,
+                              from, length, transhdrlen, flags);
+       if (err) {
+               __ip_flush_pending_frames(sk, &queue, &cork);
+               return ERR_PTR(err);
+       }
+
+       return __ip_make_skb(sk, &queue, &cork);
+}
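
The point of the __ip_append_data()/ip_setup_cork() split is that the append logic no longer assumes sk->sk_write_queue and inet->cork: ip_append_data() keeps feeding the per-socket queue as before, while the new ip_make_skb() builds a datagram on a private, stack-allocated queue and cork. A toy sketch of that parameterization follows; the types and the append helper are hypothetical stand-ins, not kernel APIs:

    #include <stdio.h>
    #include <string.h>

    struct frag {
            char data[32];
            struct frag *next;
    };

    struct queue {
            struct frag *head, *tail;
    };

    struct cork {
            size_t length;          /* total bytes corked so far */
    };

    /* Shared worker, analogous to __ip_append_data(): it is handed the
     * queue and cork state explicitly and knows nothing about where
     * they live. */
    static void append_data(struct queue *q, struct cork *cork,
                            struct frag *f, const char *payload)
    {
            snprintf(f->data, sizeof(f->data), "%s", payload);
            f->next = NULL;
            if (q->tail)
                    q->tail->next = f;
            else
                    q->head = f;
            q->tail = f;
            cork->length += strlen(payload);
    }

    int main(void)
    {
            /* Private queue + private cork, as ip_make_skb() does. */
            struct queue q = { NULL, NULL };
            struct cork cork = { 0 };
            struct frag a, b;

            append_data(&q, &cork, &a, "hdr");
            append_data(&q, &cork, &b, "payload");

            for (struct frag *f = q.head; f; f = f->next)
                    printf("frag: %s\n", f->data);
            printf("corked %zu bytes\n", cork.length);
            return 0;
    }

The same factoring is why __ip_flush_pending_frames() and __ip_make_skb() take the queue and cork as arguments: the push/flush side works identically whether the fragments came from the socket's pending queue or from a caller-local one.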
 
 /*
  *     Fetch data from kernel space and fill in checksum if needed.
@@ -1411,7 +1490,8 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
                                    .proto = sk->sk_protocol,
                                    .flags = ip_reply_arg_flowi_flags(arg) };
                security_skb_classify_flow(skb, &fl);
-               if (ip_route_output_key(sock_net(sk), &rt, &fl))
+               rt = ip_route_output_key(sock_net(sk), &fl);
+               if (IS_ERR(rt))
                        return;
        }
 
index a5f58e7cbb26eec188786ff1324cfc540aba1ce1..65008f45addcc608fcc92bb3978be3af04a7233d 100644 (file)
@@ -469,7 +469,8 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
                        .proto = IPPROTO_IPIP
                };
 
-               if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
+               rt = ip_route_output_key(dev_net(dev), &fl);
+               if (IS_ERR(rt)) {
                        dev->stats.tx_carrier_errors++;
                        goto tx_error_icmp;
                }
@@ -590,9 +591,9 @@ static void ipip_tunnel_bind_dev(struct net_device *dev)
                        .fl4_tos = RT_TOS(iph->tos),
                        .proto = IPPROTO_IPIP
                };
-               struct rtable *rt;
+               struct rtable *rt = ip_route_output_key(dev_net(dev), &fl);
 
-               if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
+               if (!IS_ERR(rt)) {
                        tdev = rt->dst.dev;
                        ip_rt_put(rt);
                }
index 8b65a12654e73db7589a8fc56ee3f7d8d688c461..74909bac88171b076a6305a1a3a3bc4b7fc23ef9 100644 (file)
@@ -1618,8 +1618,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
                        .fl4_tos = RT_TOS(iph->tos),
                        .proto = IPPROTO_IPIP
                };
-
-               if (ip_route_output_key(net, &rt, &fl))
+               rt = ip_route_output_key(net, &fl);
+               if (IS_ERR(rt))
                        goto out_free;
                encap = sizeof(struct iphdr);
        } else {
@@ -1629,8 +1629,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
                        .fl4_tos = RT_TOS(iph->tos),
                        .proto = IPPROTO_IPIP
                };
-
-               if (ip_route_output_key(net, &rt, &fl))
+               rt = ip_route_output_key(net, &fl);
+               if (IS_ERR(rt))
                        goto out_free;
        }
 
@@ -1793,6 +1793,24 @@ dont_forward:
        return 0;
 }
 
+static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct rtable *rt)
+{
+       struct flowi fl = {
+               .fl4_dst = rt->rt_key_dst,
+               .fl4_src = rt->rt_key_src,
+               .fl4_tos = rt->rt_tos,
+               .oif = rt->rt_oif,
+               .iif = rt->rt_iif,
+               .mark = rt->rt_mark,
+       };
+       struct mr_table *mrt;
+       int err;
+
+       err = ipmr_fib_lookup(net, &fl, &mrt);
+       if (err)
+               return ERR_PTR(err);
+       return mrt;
+}
 
 /*
  *     Multicast packets for forwarding arrive here
@@ -1805,7 +1823,6 @@ int ip_mr_input(struct sk_buff *skb)
        struct net *net = dev_net(skb->dev);
        int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
        struct mr_table *mrt;
-       int err;
 
        /* Packet is looped back after forward, it should not be
         * forwarded second time, but still can be delivered locally.
@@ -1813,12 +1830,11 @@ int ip_mr_input(struct sk_buff *skb)
        if (IPCB(skb)->flags & IPSKB_FORWARDED)
                goto dont_forward;
 
-       err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
-       if (err < 0) {
+       mrt = ipmr_rt_fib_lookup(net, skb_rtable(skb));
+       if (IS_ERR(mrt)) {
                kfree_skb(skb);
-               return err;
+               return PTR_ERR(mrt);
        }
-
        if (!local) {
                if (IPCB(skb)->opt.router_alert) {
                        if (ip_call_ra_chain(skb))
@@ -1946,9 +1962,9 @@ int pim_rcv_v1(struct sk_buff *skb)
 
        pim = igmp_hdr(skb);
 
-       if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)
+       mrt = ipmr_rt_fib_lookup(net, skb_rtable(skb));
+       if (IS_ERR(mrt))
                goto drop;
-
        if (!mrt->mroute_do_pim ||
            pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
                goto drop;
@@ -1978,9 +1994,9 @@ static int pim_rcv(struct sk_buff *skb)
             csum_fold(skb_checksum(skb, 0, skb->len, 0))))
                goto drop;
 
-       if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)
+       mrt = ipmr_rt_fib_lookup(net, skb_rtable(skb));
+       if (IS_ERR(mrt))
                goto drop;
-
        if (__pim_rcv(mrt, skb, sizeof(*pim))) {
 drop:
                kfree_skb(skb);
index 994a1f29ebbcf062caffde6c42de446673e08de7..67bf709180de0410842c91047d8bcf0a8f62ed05 100644 (file)
@@ -38,7 +38,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
                fl.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
                fl.mark = skb->mark;
                fl.flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
-               if (ip_route_output_key(net, &rt, &fl) != 0)
+               rt = ip_route_output_key(net, &fl);
+               if (IS_ERR(rt))
                        return -1;
 
                /* Drop old route. */
@@ -48,7 +49,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
                /* non-local src, find valid iif to satisfy
                 * rp-filter when calling ip_route_input. */
                fl.fl4_dst = iph->saddr;
-               if (ip_route_output_key(net, &rt, &fl) != 0)
+               rt = ip_route_output_key(net, &fl);
+               if (IS_ERR(rt))
                        return -1;
 
                orefdst = skb->_skb_refdst;
@@ -69,7 +71,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
            xfrm_decode_session(skb, &fl, AF_INET) == 0) {
                struct dst_entry *dst = skb_dst(skb);
                skb_dst_set(skb, NULL);
-               if (xfrm_lookup(net, &dst, &fl, skb->sk, 0))
+               dst = xfrm_lookup(net, dst, &fl, skb->sk, 0);
+               if (IS_ERR(dst))
                        return -1;
                skb_dst_set(skb, dst);
        }
@@ -102,7 +105,8 @@ int ip_xfrm_me_harder(struct sk_buff *skb)
                dst = ((struct xfrm_dst *)dst)->route;
        dst_hold(dst);
 
-       if (xfrm_lookup(dev_net(dst->dev), &dst, &fl, skb->sk, 0) < 0)
+       dst = xfrm_lookup(dev_net(dst->dev), dst, &fl, skb->sk, 0);
+       if (IS_ERR(dst))
                return -1;
 
        skb_dst_drop(skb);
@@ -219,7 +223,11 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
 
 static int nf_ip_route(struct dst_entry **dst, struct flowi *fl)
 {
-       return ip_route_output_key(&init_net, (struct rtable **)dst, fl);
+       struct rtable *rt = ip_route_output_key(&init_net, fl);
+       if (IS_ERR(rt))
+               return PTR_ERR(rt);
+       *dst = &rt->dst;
+       return 0;
 }
 
 static const struct nf_afinfo nf_ip_afinfo = {
index babd1a2bae5f1b6b35499d5e32a2a6c43c8c7fec..f926a310075d16aa4d286f76018d072dbdffe7a5 100644 (file)
@@ -206,8 +206,9 @@ config IP_NF_TARGET_REDIRECT
 
 config NF_NAT_SNMP_BASIC
        tristate "Basic SNMP-ALG support"
-       depends on NF_NAT
+       depends on NF_CONNTRACK_SNMP && NF_NAT
        depends on NETFILTER_ADVANCED
+       default NF_NAT && NF_CONNTRACK_SNMP
        ---help---
 
          This module implements an Application Layer Gateway (ALG) for
index e855fffaed95ba384a9b5bc2145fee888fcf6274..e95054c690c6523fbe3ebb2ddddd59f07d4e1de8 100644 (file)
@@ -866,6 +866,7 @@ static int compat_table_info(const struct xt_table_info *info,
        memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
        newinfo->initial_entries = 0;
        loc_cpu_entry = info->entries[raw_smp_processor_id()];
+       xt_compat_init_offsets(NFPROTO_ARP, info->number);
        xt_entry_foreach(iter, loc_cpu_entry, info->size) {
                ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
                if (ret != 0)
@@ -1333,6 +1334,7 @@ static int translate_compat_table(const char *name,
        duprintf("translate_compat_table: size %u\n", info->size);
        j = 0;
        xt_compat_lock(NFPROTO_ARP);
+       xt_compat_init_offsets(NFPROTO_ARP, number);
        /* Walk through entries, checking offsets. */
        xt_entry_foreach(iter0, entry0, total_size) {
                ret = check_compat_entry_size_and_hooks(iter0, info, &size,
index 652efea013dcc8bb8364397071086318d44ef9bf..ef7d7b9680eaefd3c6abea61afb3d4d0fc9b4964 100644 (file)
@@ -1063,6 +1063,7 @@ static int compat_table_info(const struct xt_table_info *info,
        memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
        newinfo->initial_entries = 0;
        loc_cpu_entry = info->entries[raw_smp_processor_id()];
+       xt_compat_init_offsets(AF_INET, info->number);
        xt_entry_foreach(iter, loc_cpu_entry, info->size) {
                ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
                if (ret != 0)
@@ -1664,6 +1665,7 @@ translate_compat_table(struct net *net,
        duprintf("translate_compat_table: size %u\n", info->size);
        j = 0;
        xt_compat_lock(AF_INET);
+       xt_compat_init_offsets(AF_INET, number);
        /* Walk through entries, checking offsets. */
        xt_entry_foreach(iter0, entry0, total_size) {
                ret = check_compat_entry_size_and_hooks(iter0, info, &size,
index 1e26a4897655bddd2720730956ebe2d15d265ebf..403ca57f60118544a99b29028edae70ec160e7de 100644 (file)
@@ -300,13 +300,8 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
         * that the ->target() function isn't called after ->destroy() */
 
        ct = nf_ct_get(skb, &ctinfo);
-       if (ct == NULL) {
-               pr_info("no conntrack!\n");
-                       /* FIXME: need to drop invalid ones, since replies
-                        * to outgoing connections of other nodes will be
-                        * marked as INVALID */
+       if (ct == NULL)
                return NF_DROP;
-       }
 
        /* special case: ICMP error handling. conntrack distinguishes between
         * error messages (RELATED) and information requests (see below) */
index 72ffc8fda2e9faca3ab8c4cf3682df0de4a44faa..d76d6c9ed9468263547a918936a76a1a9526b368 100644 (file)
@@ -442,8 +442,7 @@ ipt_log_packet(u_int8_t pf,
        }
 #endif
 
-       /* MAC logging for input path only. */
-       if (in && !out)
+       if (in != NULL)
                dump_mac_header(m, loginfo, skb);
 
        dump_packet(m, loginfo, skb, 0);
index 294a2a32f29345e1ceb39f13afbcc605e586bc4f..aef5d1fbe77dc39b5e6b951a97897e02ead47a62 100644 (file)
@@ -60,7 +60,7 @@ ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
        ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
                           dev_net(out)->ipv4.iptable_mangle);
        /* Reroute for ANY change. */
-       if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) {
+       if (ret != NF_DROP && ret != NF_STOLEN) {
                iph = ip_hdr(skb);
 
                if (iph->saddr != saddr ||
index 63f60fc5d26a49ea844c106e8570260b29445a88..5585980fce2e351b35d2e6f0f741531f6a87d199 100644 (file)
@@ -20,6 +20,7 @@
 #include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_expect.h>
 #include <net/netfilter/nf_conntrack_acct.h>
+#include <linux/rculist_nulls.h>
 
 struct ct_iter_state {
        struct seq_net_private p;
@@ -35,7 +36,8 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
        for (st->bucket = 0;
             st->bucket < net->ct.htable_size;
             st->bucket++) {
-               n = rcu_dereference(net->ct.hash[st->bucket].first);
+               n = rcu_dereference(
+                       hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
                if (!is_a_nulls(n))
                        return n;
        }
@@ -48,13 +50,14 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
        struct net *net = seq_file_net(seq);
        struct ct_iter_state *st = seq->private;
 
-       head = rcu_dereference(head->next);
+       head = rcu_dereference(hlist_nulls_next_rcu(head));
        while (is_a_nulls(head)) {
                if (likely(get_nulls_value(head) == st->bucket)) {
                        if (++st->bucket >= net->ct.htable_size)
                                return NULL;
                }
-               head = rcu_dereference(net->ct.hash[st->bucket].first);
+               head = rcu_dereference(
+                       hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
        }
        return head;
 }
@@ -217,7 +220,8 @@ static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
        struct hlist_node *n;
 
        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
-               n = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+               n = rcu_dereference(
+                       hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
                if (n)
                        return n;
        }
@@ -230,11 +234,12 @@ static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;
 
-       head = rcu_dereference(head->next);
+       head = rcu_dereference(hlist_next_rcu(head));
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
-               head = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+               head = rcu_dereference(
+                       hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
        }
        return head;
 }
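
The seq_file walkers above switch to the hlist_nulls/hlist RCU accessors instead of touching ->first/->next directly. The nulls scheme they iterate ends each chain not with NULL but with a tagged value encoding the bucket number, so a lockless reader can detect that an entry was moved to another chain mid-walk and restart. A small userspace sketch of just that encoding, mirroring is_a_nulls()/get_nulls_value(); NULLS_MARKER here is a local stand-in for the kernel's marker construction:

    #include <stdint.h>
    #include <stdio.h>

    /* Low bit set marks "end of chain"; the remaining bits carry the
     * bucket index the chain belongs to. */
    #define NULLS_MARKER(v) ((void *)(((uintptr_t)(v) << 1) | 1))

    static int is_a_nulls(const void *p)
    {
            return (uintptr_t)p & 1;
    }

    static unsigned long get_nulls_value(const void *p)
    {
            return (uintptr_t)p >> 1;
    }

    int main(void)
    {
            void *end_of_bucket_3 = NULLS_MARKER(3);

            if (is_a_nulls(end_of_bucket_3))
                    printf("hit end marker of bucket %lu\n",
                           get_nulls_value(end_of_bucket_3));
            return 0;
    }

That is exactly the check ct_get_next() performs: when it hits a nulls marker whose value differs from the bucket it started in, it knows the walk strayed and moves on to the next bucket.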
index 0f23b3f06df05e7643e1cd337325955dc6942794..703f366fd2358a3dd32c813700511ffb5129f351 100644 (file)
@@ -44,13 +44,13 @@ static unsigned int help(struct sk_buff *skb,
 
        /* Try to get same port: if not, try to change it. */
        for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
-               int ret;
+               int res;
 
                exp->tuple.dst.u.tcp.port = htons(port);
-               ret = nf_ct_expect_related(exp);
-               if (ret == 0)
+               res = nf_ct_expect_related(exp);
+               if (res == 0)
                        break;
-               else if (ret != -EBUSY) {
+               else if (res != -EBUSY) {
                        port = 0;
                        break;
                }
index c04787ce1a71203e1346830450b0a130e358defc..21bcf471b25a022615ff3878da3b495df5b3ef07 100644 (file)
@@ -221,7 +221,14 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
           manips not an issue.  */
        if (maniptype == IP_NAT_MANIP_SRC &&
            !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
-               if (find_appropriate_src(net, zone, orig_tuple, tuple, range)) {
+               /* try the original tuple first */
+               if (in_range(orig_tuple, range)) {
+                       if (!nf_nat_used_tuple(orig_tuple, ct)) {
+                               *tuple = *orig_tuple;
+                               return;
+                       }
+               } else if (find_appropriate_src(net, zone, orig_tuple, tuple,
+                          range)) {
                        pr_debug("get_unique_tuple: Found current src map\n");
                        if (!nf_nat_used_tuple(tuple, ct))
                                return;
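
This get_unique_tuple() change makes SNAT prefer the packet's original source tuple whenever it already lies inside the requested range and is not in use, before falling back to the by-source hash and, last, to per-protocol port selection. A condensed restatement of that ordering (not a drop-in function; the PROTO_RANDOM case and the caller's locking are omitted):

	/* Condensed restatement of the selection order in the hunk above. */
	static void pick_src_tuple(struct nf_conntrack_tuple *tuple,
				   const struct nf_conntrack_tuple *orig_tuple,
				   const struct nf_nat_range *range,
				   struct nf_conn *ct, struct net *net, u16 zone)
	{
		/* 1. Reuse the original tuple if it already satisfies the range. */
		if (in_range(orig_tuple, range) &&
		    !nf_nat_used_tuple(orig_tuple, ct)) {
			*tuple = *orig_tuple;
			return;
		}
		/* 2. Otherwise fall back to an existing mapping for this source. */
		if (find_appropriate_src(net, zone, orig_tuple, tuple, range) &&
		    !nf_nat_used_tuple(tuple, ct))
			return;
		/* 3. Last resort: let the L4 protocol pick a free port in range. */
		/* ... unique_tuple()/protocol handling elided ... */
	}
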
@@ -266,7 +273,6 @@ nf_nat_setup_info(struct nf_conn *ct,
        struct net *net = nf_ct_net(ct);
        struct nf_conntrack_tuple curr_tuple, new_tuple;
        struct nf_conn_nat *nat;
-       int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK);
 
        /* nat helper or nfctnetlink also setup binding */
        nat = nfct_nat(ct);
@@ -306,8 +312,7 @@ nf_nat_setup_info(struct nf_conn *ct,
                        ct->status |= IPS_DST_NAT;
        }
 
-       /* Place in source hash if this is the first time. */
-       if (have_to_hash) {
+       if (maniptype == IP_NAT_MANIP_SRC) {
                unsigned int srchash;
 
                srchash = hash_by_src(net, nf_ct_zone(ct),
@@ -323,9 +328,9 @@ nf_nat_setup_info(struct nf_conn *ct,
 
        /* It's done. */
        if (maniptype == IP_NAT_MANIP_DST)
-               set_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
+               ct->status |= IPS_DST_NAT_DONE;
        else
-               set_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
+               ct->status |= IPS_SRC_NAT_DONE;
 
        return NF_ACCEPT;
 }
@@ -502,7 +507,10 @@ int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
        int ret = 0;
 
        spin_lock_bh(&nf_nat_lock);
-       if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) {
+       if (rcu_dereference_protected(
+                       nf_nat_protos[proto->protonum],
+                       lockdep_is_held(&nf_nat_lock)
+                       ) != &nf_nat_unknown_protocol) {
                ret = -EBUSY;
                goto out;
        }
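
The registration check above now reads nf_nat_protos[] through rcu_dereference_protected() with lockdep_is_held(&nf_nat_lock), documenting that this update-side access is serialized by the spinlock rather than by rcu_read_lock(). The idiom on a hypothetical registry (struct my_ops, MAX_SLOTS and register_ops() are made-up names):

	#include <linux/errno.h>
	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>

	#define MAX_SLOTS 256
	struct my_ops;

	static DEFINE_SPINLOCK(reg_lock);
	static const struct my_ops __rcu *slots[MAX_SLOTS];

	int register_ops(unsigned int n, const struct my_ops *ops)
	{
		int ret = 0;

		spin_lock_bh(&reg_lock);
		/* Update-side read: legal because reg_lock is held, and
		 * rcu_dereference_protected() tells sparse/lockdep exactly that. */
		if (rcu_dereference_protected(slots[n],
					      lockdep_is_held(&reg_lock)) != NULL) {
			ret = -EBUSY;
			goto out;
		}
		rcu_assign_pointer(slots[n], ops);
	out:
		spin_unlock_bh(&reg_lock);
		return ret;
	}
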
@@ -532,7 +540,7 @@ static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
        if (nat == NULL || nat->ct == NULL)
                return;
 
-       NF_CT_ASSERT(nat->ct->status & IPS_NAT_DONE_MASK);
+       NF_CT_ASSERT(nat->ct->status & IPS_SRC_NAT_DONE);
 
        spin_lock_bh(&nf_nat_lock);
        hlist_del_rcu(&nat->bysource);
@@ -545,11 +553,10 @@ static void nf_nat_move_storage(void *new, void *old)
        struct nf_conn_nat *old_nat = old;
        struct nf_conn *ct = old_nat->ct;
 
-       if (!ct || !(ct->status & IPS_NAT_DONE_MASK))
+       if (!ct || !(ct->status & IPS_SRC_NAT_DONE))
                return;
 
        spin_lock_bh(&nf_nat_lock);
-       new_nat->ct = ct;
        hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
        spin_unlock_bh(&nf_nat_lock);
 }
@@ -679,8 +686,7 @@ static int __net_init nf_nat_net_init(struct net *net)
 {
        /* Leave them the same for the moment. */
        net->ipv4.nat_htable_size = net->ct.htable_size;
-       net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
-                                                      &net->ipv4.nat_vmalloced, 0);
+       net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, 0);
        if (!net->ipv4.nat_bysource)
                return -ENOMEM;
        return 0;
@@ -702,8 +708,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
 {
        nf_ct_iterate_cleanup(net, &clean_nat, NULL);
        synchronize_rcu();
-       nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
-                            net->ipv4.nat_htable_size);
+       nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_htable_size);
 }
 
 static struct pernet_operations nf_nat_net_ops = {
index ee5f419d0a56d01c6533d6ee45f70a7405480150..8812a02078ab4e04aa9da689a18d04f2e66c709a 100644 (file)
@@ -54,6 +54,7 @@
 #include <net/netfilter/nf_conntrack_expect.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_nat_helper.h>
+#include <linux/netfilter/nf_conntrack_snmp.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
@@ -1310,9 +1311,9 @@ static int __init nf_nat_snmp_basic_init(void)
 {
        int ret = 0;
 
-       ret = nf_conntrack_helper_register(&snmp_helper);
-       if (ret < 0)
-               return ret;
+       BUG_ON(nf_nat_snmp_hook != NULL);
+       rcu_assign_pointer(nf_nat_snmp_hook, help);
+
        ret = nf_conntrack_helper_register(&snmp_trap_helper);
        if (ret < 0) {
                nf_conntrack_helper_unregister(&snmp_helper);
@@ -1323,7 +1324,7 @@ static int __init nf_nat_snmp_basic_init(void)
 
 static void __exit nf_nat_snmp_basic_fini(void)
 {
-       nf_conntrack_helper_unregister(&snmp_helper);
+       rcu_assign_pointer(nf_nat_snmp_hook, NULL);
        nf_conntrack_helper_unregister(&snmp_trap_helper);
 }
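
Rather than registering a second helper, the SNMP NAT module now publishes its help() routine through the RCU-protected nf_nat_snmp_hook function pointer and withdraws it on unload. The shape of that provider/consumer split, with illustrative names (demo_hook, provider_*, maybe_mangle) standing in for the real symbols:

	#include <linux/rcupdate.h>
	#include <linux/skbuff.h>

	typedef int (*snmp_hook_t)(struct sk_buff *skb, unsigned int protoff);
	static snmp_hook_t __rcu demo_hook;

	/* Provider module: publish / withdraw the implementation. */
	void provider_init(snmp_hook_t fn)
	{
		rcu_assign_pointer(demo_hook, fn);
	}

	void provider_exit(void)
	{
		rcu_assign_pointer(demo_hook, NULL);
		synchronize_rcu();	/* no reader still runs the old fn */
	}

	/* Consumer: call through the hook only if a provider is loaded. */
	int maybe_mangle(struct sk_buff *skb, unsigned int protoff)
	{
		snmp_hook_t fn;
		int ret = 0;

		rcu_read_lock();
		fn = rcu_dereference(demo_hook);
		if (fn)
			ret = fn(skb, protoff);
		rcu_read_unlock();
		return ret;
	}

Publishing with rcu_assign_pointer() and reading under rcu_read_lock() lets the consumer tolerate the provider being absent; the synchronize_rcu() in the sketch is the usual teardown barrier before the provider's code can disappear.
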
 
index 6390ba299b3da53cefaf1daa7668bbfcd46d441f..467d570d087a93f4dbaa49220bc1b36138020057 100644 (file)
@@ -555,7 +555,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                                    .fl4_tos = tos,
                                    .proto = inet->hdrincl ? IPPROTO_RAW :
                                                             sk->sk_protocol,
-                                 };
+                                   .flags = FLOWI_FLAG_CAN_SLEEP,
+               };
                if (!inet->hdrincl) {
                        err = raw_probe_proto_opt(&fl, msg);
                        if (err)
@@ -563,10 +564,12 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                }
 
                security_sk_classify_flow(sk, &fl);
-               err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 1);
+               rt = ip_route_output_flow(sock_net(sk), &fl, sk);
+               if (IS_ERR(rt)) {
+                       err = PTR_ERR(rt);
+                       goto done;
+               }
        }
-       if (err)
-               goto done;
 
        err = -EACCES;
        if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST))
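
raw_sendmsg() now receives the route straight from ip_route_output_flow(), which reports failure in the returned pointer rather than through an int plus a struct rtable ** output argument; most of the route.c changes later in this diff follow the same conversion. The ERR_PTR()/IS_ERR()/PTR_ERR() convention in isolation (the demo_* names are made up):

	#include <linux/err.h>
	#include <net/route.h>

	/* Hypothetical lookup showing the pointer-encoded error convention. */
	static struct rtable *demo_output_route(struct net *net, struct flowi *flp)
	{
		struct rtable *rt = NULL;
		int err = -ENETUNREACH;		/* pretend the lookup failed */

		/* ... a real lookup would set rt or err here ... */
		if (!rt)
			return ERR_PTR(err);	/* fold the errno into the pointer */
		return rt;
	}

	static int demo_caller(struct net *net, struct flowi *flp)
	{
		struct rtable *rt = demo_output_route(net, flp);

		if (IS_ERR(rt))
			return PTR_ERR(rt);	/* recover the errno */
		/* ... transmit using rt ... */
		ip_rt_put(rt);
		return 0;
	}
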
index 6ed6603c2f6db41e5a73d4fe39d7489f0c560130..ac32d8f3d68f14e822724c22023281f97515cb95 100644 (file)
@@ -131,9 +131,6 @@ static int ip_rt_min_pmtu __read_mostly             = 512 + 20 + 20;
 static int ip_rt_min_advmss __read_mostly      = 256;
 static int rt_chain_length_max __read_mostly   = 20;
 
-static struct delayed_work expires_work;
-static unsigned long expires_ljiffies;
-
 /*
  *     Interface to generic destination cache.
  */
@@ -152,6 +149,41 @@ static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
 {
 }
 
+static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
+{
+       struct rtable *rt = (struct rtable *) dst;
+       struct inet_peer *peer;
+       u32 *p = NULL;
+
+       if (!rt->peer)
+               rt_bind_peer(rt, 1);
+
+       peer = rt->peer;
+       if (peer) {
+               u32 *old_p = __DST_METRICS_PTR(old);
+               unsigned long prev, new;
+
+               p = peer->metrics;
+               if (inet_metrics_new(peer))
+                       memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+
+               new = (unsigned long) p;
+               prev = cmpxchg(&dst->_metrics, old, new);
+
+               if (prev != old) {
+                       p = __DST_METRICS_PTR(prev);
+                       if (prev & DST_METRICS_READ_ONLY)
+                               p = NULL;
+               } else {
+                       if (rt->fi) {
+                               fib_info_put(rt->fi);
+                               rt->fi = NULL;
+                       }
+               }
+       }
+       return p;
+}
+
 static struct dst_ops ipv4_dst_ops = {
        .family =               AF_INET,
        .protocol =             cpu_to_be16(ETH_P_IP),
@@ -159,6 +191,7 @@ static struct dst_ops ipv4_dst_ops = {
        .check =                ipv4_dst_check,
        .default_advmss =       ipv4_default_advmss,
        .default_mtu =          ipv4_default_mtu,
+       .cow_metrics =          ipv4_cow_metrics,
        .destroy =              ipv4_dst_destroy,
        .ifdown =               ipv4_dst_ifdown,
        .negative_advice =      ipv4_negative_advice,
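
ipv4_cow_metrics() implements copy-on-write for the newly shared metrics: dst->_metrics initially points at read-only data (flagged by DST_METRICS_READ_ONLY), and the first writer copies it into the inet_peer and swings the pointer with cmpxchg(), so a racing writer simply adopts whichever copy won. The core exchange with the dst and peer plumbing stripped away (the helper and parameter names are illustrative):

	#include <net/dst.h>

	/* Sketch of the install-writable-copy-once step used above. */
	static u32 *install_writable_metrics(unsigned long *slot,
					     unsigned long old_ro, u32 *writable)
	{
		unsigned long prev;

		prev = cmpxchg(slot, old_ro, (unsigned long)writable);
		if (prev == old_ro)
			return writable;	/* we won the race */
		/* somebody else switched the pointer first; use theirs,
		 * unless it is somehow still marked read-only */
		if (prev & DST_METRICS_READ_ONLY)
			return NULL;
		return (u32 *)(prev & ~DST_METRICS_READ_ONLY);
	}
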
@@ -391,7 +424,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
                        dst_metric(&r->dst, RTAX_WINDOW),
                        (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
                              dst_metric(&r->dst, RTAX_RTTVAR)),
-                       r->fl.fl4_tos,
+                       r->rt_tos,
                        r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
                        r->dst.hh ? (r->dst.hh->hh_output ==
                                       dev_queue_xmit) : 0,
@@ -514,7 +547,7 @@ static const struct file_operations rt_cpu_seq_fops = {
        .release = seq_release,
 };
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 static int rt_acct_proc_show(struct seq_file *m, void *v)
 {
        struct ip_rt_acct *dst, *src;
@@ -567,14 +600,14 @@ static int __net_init ip_rt_do_proc_init(struct net *net)
        if (!pde)
                goto err2;
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
        if (!pde)
                goto err3;
 #endif
        return 0;
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 err3:
        remove_proc_entry("rt_cache", net->proc_net_stat);
 #endif
@@ -588,7 +621,7 @@ static void __net_exit ip_rt_do_proc_exit(struct net *net)
 {
        remove_proc_entry("rt_cache", net->proc_net_stat);
        remove_proc_entry("rt_cache", net->proc_net);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        remove_proc_entry("rt_acct", net->proc_net);
 #endif
 }
@@ -632,7 +665,7 @@ static inline int rt_fast_clean(struct rtable *rth)
 static inline int rt_valuable(struct rtable *rth)
 {
        return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
-               rth->dst.expires;
+               (rth->peer && rth->peer->pmtu_expires);
 }
 
 static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
@@ -643,13 +676,7 @@ static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long t
        if (atomic_read(&rth->dst.__refcnt))
                goto out;
 
-       ret = 1;
-       if (rth->dst.expires &&
-           time_after_eq(jiffies, rth->dst.expires))
-               goto out;
-
        age = jiffies - rth->dst.lastuse;
-       ret = 0;
        if ((age <= tmo1 && !rt_fast_clean(rth)) ||
            (age <= tmo2 && rt_valuable(rth)))
                goto out;
@@ -684,22 +711,22 @@ static inline bool rt_caching(const struct net *net)
                net->ipv4.sysctl_rt_cache_rebuild_count;
 }
 
-static inline bool compare_hash_inputs(const struct flowi *fl1,
-                                       const struct flowi *fl2)
+static inline bool compare_hash_inputs(const struct rtable *rt1,
+                                      const struct rtable *rt2)
 {
-       return ((((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
-               ((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
-               (fl1->iif ^ fl2->iif)) == 0);
+       return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
+               ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
+               (rt1->rt_iif ^ rt2->rt_iif)) == 0);
 }
 
-static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
+static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
 {
-       return (((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
-               ((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
-               (fl1->mark ^ fl2->mark) |
-               (*(u16 *)&fl1->fl4_tos ^ *(u16 *)&fl2->fl4_tos) |
-               (fl1->oif ^ fl2->oif) |
-               (fl1->iif ^ fl2->iif)) == 0;
+       return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
+               ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
+               (rt1->rt_mark ^ rt2->rt_mark) |
+               (rt1->rt_tos ^ rt2->rt_tos) |
+               (rt1->rt_oif ^ rt2->rt_oif) |
+               (rt1->rt_iif ^ rt2->rt_iif)) == 0;
 }
 
 static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
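
compare_keys() and compare_hash_inputs() now read the lookup key cached directly in struct rtable instead of the embedded struct flowi that this series removes; the comparison keeps the branch-free style in which all field differences are XORed, ORed together and tested once. The idiom in a self-contained form:

	#include <stdbool.h>
	#include <stdint.h>

	/* Stand-alone illustration of the branch-free multi-field compare. */
	struct key {
		uint32_t dst, src, mark;
		uint8_t  tos;
		int	 oif, iif;
	};

	static bool keys_equal(const struct key *a, const struct key *b)
	{
		/* Each XOR is zero iff that field matches; OR-ing the results
		 * makes the whole expression zero only when every field matches. */
		return ((a->dst ^ b->dst) |
			(a->src ^ b->src) |
			(a->mark ^ b->mark) |
			(uint32_t)(a->tos ^ b->tos) |
			(uint32_t)(a->oif ^ b->oif) |
			(uint32_t)(a->iif ^ b->iif)) == 0;
	}

A single final test of the accumulated word usually compiles to straight-line code, which matters on the per-packet cache lookup path.
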
@@ -786,104 +813,13 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
        const struct rtable *aux = head;
 
        while (aux != rth) {
-               if (compare_hash_inputs(&aux->fl, &rth->fl))
+               if (compare_hash_inputs(aux, rth))
                        return 0;
                aux = rcu_dereference_protected(aux->dst.rt_next, 1);
        }
        return ONE;
 }
 
-static void rt_check_expire(void)
-{
-       static unsigned int rover;
-       unsigned int i = rover, goal;
-       struct rtable *rth;
-       struct rtable __rcu **rthp;
-       unsigned long samples = 0;
-       unsigned long sum = 0, sum2 = 0;
-       unsigned long delta;
-       u64 mult;
-
-       delta = jiffies - expires_ljiffies;
-       expires_ljiffies = jiffies;
-       mult = ((u64)delta) << rt_hash_log;
-       if (ip_rt_gc_timeout > 1)
-               do_div(mult, ip_rt_gc_timeout);
-       goal = (unsigned int)mult;
-       if (goal > rt_hash_mask)
-               goal = rt_hash_mask + 1;
-       for (; goal > 0; goal--) {
-               unsigned long tmo = ip_rt_gc_timeout;
-               unsigned long length;
-
-               i = (i + 1) & rt_hash_mask;
-               rthp = &rt_hash_table[i].chain;
-
-               if (need_resched())
-                       cond_resched();
-
-               samples++;
-
-               if (rcu_dereference_raw(*rthp) == NULL)
-                       continue;
-               length = 0;
-               spin_lock_bh(rt_hash_lock_addr(i));
-               while ((rth = rcu_dereference_protected(*rthp,
-                                       lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
-                       prefetch(rth->dst.rt_next);
-                       if (rt_is_expired(rth)) {
-                               *rthp = rth->dst.rt_next;
-                               rt_free(rth);
-                               continue;
-                       }
-                       if (rth->dst.expires) {
-                               /* Entry is expired even if it is in use */
-                               if (time_before_eq(jiffies, rth->dst.expires)) {
-nofree:
-                                       tmo >>= 1;
-                                       rthp = &rth->dst.rt_next;
-                                       /*
-                                        * We only count entries on
-                                        * a chain with equal hash inputs once
-                                        * so that entries for different QOS
-                                        * levels, and other non-hash input
-                                        * attributes don't unfairly skew
-                                        * the length computation
-                                        */
-                                       length += has_noalias(rt_hash_table[i].chain, rth);
-                                       continue;
-                               }
-                       } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
-                               goto nofree;
-
-                       /* Cleanup aged off entries. */
-                       *rthp = rth->dst.rt_next;
-                       rt_free(rth);
-               }
-               spin_unlock_bh(rt_hash_lock_addr(i));
-               sum += length;
-               sum2 += length*length;
-       }
-       if (samples) {
-               unsigned long avg = sum / samples;
-               unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
-               rt_chain_length_max = max_t(unsigned long,
-                                       ip_rt_gc_elasticity,
-                                       (avg + 4*sd) >> FRACT_BITS);
-       }
-       rover = i;
-}
-
-/*
- * rt_worker_func() is run in process context.
- * we call rt_check_expire() to scan part of the hash table
- */
-static void rt_worker_func(struct work_struct *work)
-{
-       rt_check_expire();
-       schedule_delayed_work(&expires_work, ip_rt_gc_interval);
-}
-
 /*
  * Perturbation of rt_genid by a small quantity [1..256]
  * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()

@@ -1078,8 +1014,8 @@ static int slow_chain_length(const struct rtable *head)
        return length >> FRACT_BITS;
 }
 
-static int rt_intern_hash(unsigned hash, struct rtable *rt,
-                         struct rtable **rp, struct sk_buff *skb, int ifindex)
+static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt,
+                                    struct sk_buff *skb, int ifindex)
 {
        struct rtable   *rth, *cand;
        struct rtable __rcu **rthp, **candp;
@@ -1120,7 +1056,7 @@ restart:
                                        printk(KERN_WARNING
                                            "Neighbour table failure & not caching routes.\n");
                                ip_rt_put(rt);
-                               return err;
+                               return ERR_PTR(err);
                        }
                }
 
@@ -1137,7 +1073,7 @@ restart:
                        rt_free(rth);
                        continue;
                }
-               if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
+               if (compare_keys(rth, rt) && compare_netns(rth, rt)) {
                        /* Put it first */
                        *rthp = rth->dst.rt_next;
                        /*
@@ -1157,11 +1093,9 @@ restart:
                        spin_unlock_bh(rt_hash_lock_addr(hash));
 
                        rt_drop(rt);
-                       if (rp)
-                               *rp = rth;
-                       else
+                       if (skb)
                                skb_dst_set(skb, &rth->dst);
-                       return 0;
+                       return rth;
                }
 
                if (!atomic_read(&rth->dst.__refcnt)) {
@@ -1202,7 +1136,7 @@ restart:
                        rt_emergency_hash_rebuild(net);
                        spin_unlock_bh(rt_hash_lock_addr(hash));
 
-                       hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
+                       hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
                                        ifindex, rt_genid(net));
                        goto restart;
                }
@@ -1218,7 +1152,7 @@ restart:
 
                        if (err != -ENOBUFS) {
                                rt_drop(rt);
-                               return err;
+                               return ERR_PTR(err);
                        }
 
                        /* Neighbour tables are full and nothing
@@ -1239,7 +1173,7 @@ restart:
                        if (net_ratelimit())
                                printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
                        rt_drop(rt);
-                       return -ENOBUFS;
+                       return ERR_PTR(-ENOBUFS);
                }
        }
 
@@ -1265,11 +1199,16 @@ restart:
        spin_unlock_bh(rt_hash_lock_addr(hash));
 
 skip_hashing:
-       if (rp)
-               *rp = rt;
-       else
+       if (skb)
                skb_dst_set(skb, &rt->dst);
-       return 0;
+       return rt;
+}
+
+static atomic_t __rt_peer_genid = ATOMIC_INIT(0);
+
+static u32 rt_peer_genid(void)
+{
+       return atomic_read(&__rt_peer_genid);
 }
 
 void rt_bind_peer(struct rtable *rt, int create)
@@ -1280,6 +1219,8 @@ void rt_bind_peer(struct rtable *rt, int create)
 
        if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
                inet_putpeer(peer);
+       else
+               rt->rt_peer_genid = rt_peer_genid();
 }
 
 /*
@@ -1349,13 +1290,8 @@ static void rt_del(unsigned hash, struct rtable *rt)
 void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                    __be32 saddr, struct net_device *dev)
 {
-       int i, k;
        struct in_device *in_dev = __in_dev_get_rcu(dev);
-       struct rtable *rth;
-       struct rtable __rcu **rthp;
-       __be32  skeys[2] = { saddr, 0 };
-       int  ikeys[2] = { dev->ifindex, 0 };
-       struct netevent_redirect netevent;
+       struct inet_peer *peer;
        struct net *net;
 
        if (!in_dev)
@@ -1367,9 +1303,6 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
            ipv4_is_zeronet(new_gw))
                goto reject_redirect;
 
-       if (!rt_caching(net))
-               goto reject_redirect;
-
        if (!IN_DEV_SHARED_MEDIA(in_dev)) {
                if (!inet_addr_onlink(in_dev, new_gw, old_gw))
                        goto reject_redirect;
@@ -1380,91 +1313,13 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                        goto reject_redirect;
        }
 
-       for (i = 0; i < 2; i++) {
-               for (k = 0; k < 2; k++) {
-                       unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
-                                               rt_genid(net));
-
-                       rthp = &rt_hash_table[hash].chain;
-
-                       while ((rth = rcu_dereference(*rthp)) != NULL) {
-                               struct rtable *rt;
-
-                               if (rth->fl.fl4_dst != daddr ||
-                                   rth->fl.fl4_src != skeys[i] ||
-                                   rth->fl.oif != ikeys[k] ||
-                                   rt_is_input_route(rth) ||
-                                   rt_is_expired(rth) ||
-                                   !net_eq(dev_net(rth->dst.dev), net)) {
-                                       rthp = &rth->dst.rt_next;
-                                       continue;
-                               }
-
-                               if (rth->rt_dst != daddr ||
-                                   rth->rt_src != saddr ||
-                                   rth->dst.error ||
-                                   rth->rt_gateway != old_gw ||
-                                   rth->dst.dev != dev)
-                                       break;
-
-                               dst_hold(&rth->dst);
-
-                               rt = dst_alloc(&ipv4_dst_ops);
-                               if (rt == NULL) {
-                                       ip_rt_put(rth);
-                                       return;
-                               }
-
-                               /* Copy all the information. */
-                               *rt = *rth;
-                               rt->dst.__use           = 1;
-                               atomic_set(&rt->dst.__refcnt, 1);
-                               rt->dst.child           = NULL;
-                               if (rt->dst.dev)
-                                       dev_hold(rt->dst.dev);
-                               rt->dst.obsolete        = -1;
-                               rt->dst.lastuse = jiffies;
-                               rt->dst.path            = &rt->dst;
-                               rt->dst.neighbour       = NULL;
-                               rt->dst.hh              = NULL;
-#ifdef CONFIG_XFRM
-                               rt->dst.xfrm            = NULL;
-#endif
-                               rt->rt_genid            = rt_genid(net);
-                               rt->rt_flags            |= RTCF_REDIRECTED;
-
-                               /* Gateway is different ... */
-                               rt->rt_gateway          = new_gw;
-
-                               /* Redirect received -> path was valid */
-                               dst_confirm(&rth->dst);
-
-                               if (rt->peer)
-                                       atomic_inc(&rt->peer->refcnt);
-
-                               if (arp_bind_neighbour(&rt->dst) ||
-                                   !(rt->dst.neighbour->nud_state &
-                                           NUD_VALID)) {
-                                       if (rt->dst.neighbour)
-                                               neigh_event_send(rt->dst.neighbour, NULL);
-                                       ip_rt_put(rth);
-                                       rt_drop(rt);
-                                       goto do_next;
-                               }
+       peer = inet_getpeer_v4(daddr, 1);
+       if (peer) {
+               peer->redirect_learned.a4 = new_gw;
 
-                               netevent.old = &rth->dst;
-                               netevent.new = &rt->dst;
-                               call_netevent_notifiers(NETEVENT_REDIRECT,
-                                                       &netevent);
+               inet_putpeer(peer);
 
-                               rt_del(hash, rth);
-                               if (!rt_intern_hash(hash, rt, &rt, NULL, rt->fl.oif))
-                                       ip_rt_put(rt);
-                               goto do_next;
-                       }
-               do_next:
-                       ;
-               }
+               atomic_inc(&__rt_peer_genid);
        }
        return;
 
@@ -1488,18 +1343,24 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
                if (dst->obsolete > 0) {
                        ip_rt_put(rt);
                        ret = NULL;
-               } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
-                          (rt->dst.expires &&
-                           time_after_eq(jiffies, rt->dst.expires))) {
-                       unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
-                                               rt->fl.oif,
+               } else if (rt->rt_flags & RTCF_REDIRECTED) {
+                       unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
+                                               rt->rt_oif,
                                                rt_genid(dev_net(dst->dev)));
 #if RT_CACHE_DEBUG >= 1
                        printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
-                               &rt->rt_dst, rt->fl.fl4_tos);
+                               &rt->rt_dst, rt->rt_tos);
 #endif
                        rt_del(hash, rt);
                        ret = NULL;
+               } else if (rt->peer &&
+                          rt->peer->pmtu_expires &&
+                          time_after_eq(jiffies, rt->peer->pmtu_expires)) {
+                       unsigned long orig = rt->peer->pmtu_expires;
+
+                       if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
+                               dst_metric_set(dst, RTAX_MTU,
+                                              rt->peer->pmtu_orig);
                }
        }
        return ret;
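
With dst->expires gone, an expired learned PMTU is rolled back to peer->pmtu_orig here, and the cmpxchg() on pmtu_expires ensures only one of several racing CPUs performs that rollback; check_peer_pmtu() and ipv4_link_failure() further down use the same one-shot pattern. Reduced to its core (field names follow the patch, the helper itself is illustrative):

	/* One-shot "claim and reset" on a timestamp: whichever CPU wins the
	 * cmpxchg does the rollback exactly once. */
	static void maybe_expire_pmtu(struct dst_entry *dst, struct inet_peer *peer)
	{
		unsigned long orig = peer->pmtu_expires;

		if (!orig || time_before(jiffies, orig))
			return;			/* nothing learned, or not expired yet */
		if (cmpxchg(&peer->pmtu_expires, orig, 0) == orig)
			dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
	}
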
@@ -1525,6 +1386,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 {
        struct rtable *rt = skb_rtable(skb);
        struct in_device *in_dev;
+       struct inet_peer *peer;
        int log_martians;
 
        rcu_read_lock();
@@ -1536,33 +1398,41 @@ void ip_rt_send_redirect(struct sk_buff *skb)
        log_martians = IN_DEV_LOG_MARTIANS(in_dev);
        rcu_read_unlock();
 
+       if (!rt->peer)
+               rt_bind_peer(rt, 1);
+       peer = rt->peer;
+       if (!peer) {
+               icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
+               return;
+       }
+
        /* No redirected packets during ip_rt_redirect_silence;
         * reset the algorithm.
         */
-       if (time_after(jiffies, rt->dst.rate_last + ip_rt_redirect_silence))
-               rt->dst.rate_tokens = 0;
+       if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
+               peer->rate_tokens = 0;
 
        /* Too many ignored redirects; do not send anything
         * set dst.rate_last to the last seen redirected packet.
         */
-       if (rt->dst.rate_tokens >= ip_rt_redirect_number) {
-               rt->dst.rate_last = jiffies;
+       if (peer->rate_tokens >= ip_rt_redirect_number) {
+               peer->rate_last = jiffies;
                return;
        }
 
        /* Check for load limit; set rate_last to the latest sent
         * redirect.
         */
-       if (rt->dst.rate_tokens == 0 ||
+       if (peer->rate_tokens == 0 ||
            time_after(jiffies,
-                      (rt->dst.rate_last +
-                       (ip_rt_redirect_load << rt->dst.rate_tokens)))) {
+                      (peer->rate_last +
+                       (ip_rt_redirect_load << peer->rate_tokens)))) {
                icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
-               rt->dst.rate_last = jiffies;
-               ++rt->dst.rate_tokens;
+               peer->rate_last = jiffies;
+               ++peer->rate_tokens;
 #ifdef CONFIG_IP_ROUTE_VERBOSE
                if (log_martians &&
-                   rt->dst.rate_tokens == ip_rt_redirect_number &&
+                   peer->rate_tokens == ip_rt_redirect_number &&
                    net_ratelimit())
                        printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
                                &rt->rt_src, rt->rt_iif,
@@ -1574,7 +1444,9 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 static int ip_error(struct sk_buff *skb)
 {
        struct rtable *rt = skb_rtable(skb);
+       struct inet_peer *peer;
        unsigned long now;
+       bool send;
        int code;
 
        switch (rt->dst.error) {
@@ -1594,15 +1466,24 @@ static int ip_error(struct sk_buff *skb)
                        break;
        }
 
-       now = jiffies;
-       rt->dst.rate_tokens += now - rt->dst.rate_last;
-       if (rt->dst.rate_tokens > ip_rt_error_burst)
-               rt->dst.rate_tokens = ip_rt_error_burst;
-       rt->dst.rate_last = now;
-       if (rt->dst.rate_tokens >= ip_rt_error_cost) {
-               rt->dst.rate_tokens -= ip_rt_error_cost;
-               icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
+       if (!rt->peer)
+               rt_bind_peer(rt, 1);
+       peer = rt->peer;
+
+       send = true;
+       if (peer) {
+               now = jiffies;
+               peer->rate_tokens += now - peer->rate_last;
+               if (peer->rate_tokens > ip_rt_error_burst)
+                       peer->rate_tokens = ip_rt_error_burst;
+               peer->rate_last = now;
+               if (peer->rate_tokens >= ip_rt_error_cost)
+                       peer->rate_tokens -= ip_rt_error_cost;
+               else
+                       send = false;
        }
+       if (send)
+               icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
 
 out:   kfree_skb(skb);
        return 0;
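
ip_error() keeps its ICMP rate limiting, but the token bucket now lives in the shared inet_peer, so every cached route to the same host draws from one budget instead of each carrying its own counters. The bucket on its own, as a sketch (struct err_bucket and may_send_error() are made-up names):

	#include <linux/jiffies.h>
	#include <linux/types.h>

	/* Tokens accrue with elapsed jiffies, are capped at `burst`, and one
	 * `cost` is spent per ICMP error actually sent. */
	struct err_bucket {
		unsigned long rate_tokens;
		unsigned long rate_last;
	};

	static bool may_send_error(struct err_bucket *b,
				   unsigned long burst, unsigned long cost)
	{
		unsigned long now = jiffies;

		b->rate_tokens += now - b->rate_last;
		if (b->rate_tokens > burst)
			b->rate_tokens = burst;
		b->rate_last = now;
		if (b->rate_tokens < cost)
			return false;		/* budget exhausted, drop silently */
		b->rate_tokens -= cost;
		return true;
	}

In the hunk above, burst and cost correspond to ip_rt_error_burst and ip_rt_error_cost, and a false return simply means the unreachable error is not sent.
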
@@ -1630,88 +1511,130 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
                                 unsigned short new_mtu,
                                 struct net_device *dev)
 {
-       int i, k;
        unsigned short old_mtu = ntohs(iph->tot_len);
-       struct rtable *rth;
-       int  ikeys[2] = { dev->ifindex, 0 };
-       __be32  skeys[2] = { iph->saddr, 0, };
-       __be32  daddr = iph->daddr;
        unsigned short est_mtu = 0;
+       struct inet_peer *peer;
 
-       for (k = 0; k < 2; k++) {
-               for (i = 0; i < 2; i++) {
-                       unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
-                                               rt_genid(net));
-
-                       rcu_read_lock();
-                       for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-                            rth = rcu_dereference(rth->dst.rt_next)) {
-                               unsigned short mtu = new_mtu;
-
-                               if (rth->fl.fl4_dst != daddr ||
-                                   rth->fl.fl4_src != skeys[i] ||
-                                   rth->rt_dst != daddr ||
-                                   rth->rt_src != iph->saddr ||
-                                   rth->fl.oif != ikeys[k] ||
-                                   rt_is_input_route(rth) ||
-                                   dst_metric_locked(&rth->dst, RTAX_MTU) ||
-                                   !net_eq(dev_net(rth->dst.dev), net) ||
-                                   rt_is_expired(rth))
-                                       continue;
-
-                               if (new_mtu < 68 || new_mtu >= old_mtu) {
+       peer = inet_getpeer_v4(iph->daddr, 1);
+       if (peer) {
+               unsigned short mtu = new_mtu;
 
-                                       /* BSD 4.2 compatibility hack :-( */
-                                       if (mtu == 0 &&
-                                           old_mtu >= dst_mtu(&rth->dst) &&
-                                           old_mtu >= 68 + (iph->ihl << 2))
-                                               old_mtu -= iph->ihl << 2;
+               if (new_mtu < 68 || new_mtu >= old_mtu) {
+                       /* BSD 4.2 derived systems incorrectly adjust
+                        * tot_len by the IP header length, and report
+                        * a zero MTU in the ICMP message.
+                        */
+                       if (mtu == 0 &&
+                           old_mtu >= 68 + (iph->ihl << 2))
+                               old_mtu -= iph->ihl << 2;
+                       mtu = guess_mtu(old_mtu);
+               }
 
-                                       mtu = guess_mtu(old_mtu);
-                               }
-                               if (mtu <= dst_mtu(&rth->dst)) {
-                                       if (mtu < dst_mtu(&rth->dst)) {
-                                               dst_confirm(&rth->dst);
-                                               if (mtu < ip_rt_min_pmtu) {
-                                                       u32 lock = dst_metric(&rth->dst,
-                                                                             RTAX_LOCK);
-                                                       mtu = ip_rt_min_pmtu;
-                                                       lock |= (1 << RTAX_MTU);
-                                                       dst_metric_set(&rth->dst, RTAX_LOCK,
-                                                                      lock);
-                                               }
-                                               dst_metric_set(&rth->dst, RTAX_MTU, mtu);
-                                               dst_set_expires(&rth->dst,
-                                                       ip_rt_mtu_expires);
-                                       }
-                                       est_mtu = mtu;
-                               }
-                       }
-                       rcu_read_unlock();
+               if (mtu < ip_rt_min_pmtu)
+                       mtu = ip_rt_min_pmtu;
+               if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
+                       est_mtu = mtu;
+                       peer->pmtu_learned = mtu;
+                       peer->pmtu_expires = jiffies + ip_rt_mtu_expires;
                }
+
+               inet_putpeer(peer);
+
+               atomic_inc(&__rt_peer_genid);
        }
        return est_mtu ? : new_mtu;
 }
 
+static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
+{
+       unsigned long expires = peer->pmtu_expires;
+
+       if (time_before(expires, jiffies)) {
+               u32 orig_dst_mtu = dst_mtu(dst);
+               if (peer->pmtu_learned < orig_dst_mtu) {
+                       if (!peer->pmtu_orig)
+                               peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
+                       dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
+               }
+       } else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
+               dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
+}
+
 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
-       if (dst_mtu(dst) > mtu && mtu >= 68 &&
-           !(dst_metric_locked(dst, RTAX_MTU))) {
-               if (mtu < ip_rt_min_pmtu) {
-                       u32 lock = dst_metric(dst, RTAX_LOCK);
+       struct rtable *rt = (struct rtable *) dst;
+       struct inet_peer *peer;
+
+       dst_confirm(dst);
+
+       if (!rt->peer)
+               rt_bind_peer(rt, 1);
+       peer = rt->peer;
+       if (peer) {
+               if (mtu < ip_rt_min_pmtu)
                        mtu = ip_rt_min_pmtu;
-                       dst_metric_set(dst, RTAX_LOCK, lock | (1 << RTAX_MTU));
+               if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
+                       peer->pmtu_learned = mtu;
+                       peer->pmtu_expires = jiffies + ip_rt_mtu_expires;
+
+                       atomic_inc(&__rt_peer_genid);
+                       rt->rt_peer_genid = rt_peer_genid();
+
+                       check_peer_pmtu(dst, peer);
                }
-               dst_metric_set(dst, RTAX_MTU, mtu);
-               dst_set_expires(dst, ip_rt_mtu_expires);
-               call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
+               inet_putpeer(peer);
+       }
+}
+
+static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
+{
+       struct rtable *rt = (struct rtable *) dst;
+       __be32 orig_gw = rt->rt_gateway;
+
+       dst_confirm(&rt->dst);
+
+       neigh_release(rt->dst.neighbour);
+       rt->dst.neighbour = NULL;
+
+       rt->rt_gateway = peer->redirect_learned.a4;
+       if (arp_bind_neighbour(&rt->dst) ||
+           !(rt->dst.neighbour->nud_state & NUD_VALID)) {
+               if (rt->dst.neighbour)
+                       neigh_event_send(rt->dst.neighbour, NULL);
+               rt->rt_gateway = orig_gw;
+               return -EAGAIN;
+       } else {
+               rt->rt_flags |= RTCF_REDIRECTED;
+               call_netevent_notifiers(NETEVENT_NEIGH_UPDATE,
+                                       rt->dst.neighbour);
        }
+       return 0;
 }
 
 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 {
-       if (rt_is_expired((struct rtable *)dst))
+       struct rtable *rt = (struct rtable *) dst;
+
+       if (rt_is_expired(rt))
                return NULL;
+       if (rt->rt_peer_genid != rt_peer_genid()) {
+               struct inet_peer *peer;
+
+               if (!rt->peer)
+                       rt_bind_peer(rt, 0);
+
+               peer = rt->peer;
+               if (peer && peer->pmtu_expires)
+                       check_peer_pmtu(dst, peer);
+
+               if (peer && peer->redirect_learned.a4 &&
+                   peer->redirect_learned.a4 != rt->rt_gateway) {
+                       if (check_peer_redir(dst, peer))
+                               return NULL;
+               }
+
+               rt->rt_peer_genid = rt_peer_genid();
+       }
        return dst;
 }
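
ipv4_dst_check() is where the lazy scheme pays off: each rtable caches the peer generation it last saw, and a mismatch with __rt_peer_genid means some peer has learned a new PMTU or redirect since the route was cached, so peer state is re-applied before the route is handed out. A condensed restatement of that revalidation:

	/* Condensed restatement of the genid check in ipv4_dst_check() above. */
	static struct dst_entry *revalidate(struct rtable *rt)
	{
		if (rt->rt_peer_genid == rt_peer_genid())
			return &rt->dst;	/* nothing changed globally */

		if (!rt->peer)
			rt_bind_peer(rt, 0);	/* look up only, do not create */
		if (rt->peer) {
			if (rt->peer->pmtu_expires)
				check_peer_pmtu(&rt->dst, rt->peer);
			if (rt->peer->redirect_learned.a4 &&
			    rt->peer->redirect_learned.a4 != rt->rt_gateway &&
			    check_peer_redir(&rt->dst, rt->peer))
				return NULL;	/* could not rebind the neighbour */
		}
		rt->rt_peer_genid = rt_peer_genid();
		return &rt->dst;
	}
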
 
@@ -1720,6 +1643,10 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
        struct rtable *rt = (struct rtable *) dst;
        struct inet_peer *peer = rt->peer;
 
+       if (rt->fi) {
+               fib_info_put(rt->fi);
+               rt->fi = NULL;
+       }
        if (peer) {
                rt->peer = NULL;
                inet_putpeer(peer);
@@ -1734,8 +1661,14 @@ static void ipv4_link_failure(struct sk_buff *skb)
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
 
        rt = skb_rtable(skb);
-       if (rt)
-               dst_set_expires(&rt->dst, 0);
+       if (rt &&
+           rt->peer &&
+           rt->peer->pmtu_expires) {
+               unsigned long orig = rt->peer->pmtu_expires;
+
+               if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
+                       dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
+       }
 }
 
 static int ip_rt_bug(struct sk_buff *skb)
@@ -1764,8 +1697,17 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
        if (rt_is_output_route(rt))
                src = rt->rt_src;
        else {
+               struct flowi fl = {
+                       .fl4_dst = rt->rt_key_dst,
+                       .fl4_src = rt->rt_key_src,
+                       .fl4_tos = rt->rt_tos,
+                       .oif = rt->rt_oif,
+                       .iif = rt->rt_iif,
+                       .mark = rt->rt_mark,
+               };
+
                rcu_read_lock();
-               if (fib_lookup(dev_net(rt->dst.dev), &rt->fl, &res) == 0)
+               if (fib_lookup(dev_net(rt->dst.dev), &fl, &res) == 0)
                        src = FIB_RES_PREFSRC(res);
                else
                        src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
@@ -1775,7 +1717,7 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
        memcpy(addr, &src, 4);
 }
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 static void set_class_tag(struct rtable *rt, u32 tag)
 {
        if (!(rt->dst.tclassid & 0xFFFF))
@@ -1815,17 +1757,54 @@ static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
        return mtu;
 }
 
-static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
+static void rt_init_metrics(struct rtable *rt, const struct flowi *oldflp,
+                           struct fib_info *fi)
+{
+       struct inet_peer *peer;
+       int create = 0;
+
+       /* If a peer entry exists for this destination, we must hook
+        * it up in order to get at cached metrics.
+        */
+       if (oldflp && (oldflp->flags & FLOWI_FLAG_PRECOW_METRICS))
+               create = 1;
+
+       rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
+       if (peer) {
+               rt->rt_peer_genid = rt_peer_genid();
+               if (inet_metrics_new(peer))
+                       memcpy(peer->metrics, fi->fib_metrics,
+                              sizeof(u32) * RTAX_MAX);
+               dst_init_metrics(&rt->dst, peer->metrics, false);
+
+               if (peer->pmtu_expires)
+                       check_peer_pmtu(&rt->dst, peer);
+               if (peer->redirect_learned.a4 &&
+                   peer->redirect_learned.a4 != rt->rt_gateway) {
+                       rt->rt_gateway = peer->redirect_learned.a4;
+                       rt->rt_flags |= RTCF_REDIRECTED;
+               }
+       } else {
+               if (fi->fib_metrics != (u32 *) dst_default_metrics) {
+                       rt->fi = fi;
+                       atomic_inc(&fi->fib_clntref);
+               }
+               dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+       }
+}
+
+static void rt_set_nexthop(struct rtable *rt, const struct flowi *oldflp,
+                          const struct fib_result *res,
+                          struct fib_info *fi, u16 type, u32 itag)
 {
        struct dst_entry *dst = &rt->dst;
-       struct fib_info *fi = res->fi;
 
        if (fi) {
                if (FIB_RES_GW(*res) &&
                    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
                        rt->rt_gateway = FIB_RES_GW(*res);
-               dst_import_metrics(dst, fi->fib_metrics);
-#ifdef CONFIG_NET_CLS_ROUTE
+               rt_init_metrics(rt, oldflp, fi);
+#ifdef CONFIG_IP_ROUTE_CLASSID
                dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
 #endif
        }
@@ -1835,13 +1814,26 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
        if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
                dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 #ifdef CONFIG_IP_MULTIPLE_TABLES
        set_class_tag(rt, fib_rules_tclass(res));
 #endif
        set_class_tag(rt, itag);
 #endif
-       rt->rt_type = res->type;
+       rt->rt_type = type;
+}
+
+static struct rtable *rt_dst_alloc(bool nopolicy, bool noxfrm)
+{
+       struct rtable *rt = dst_alloc(&ipv4_dst_ops, 1);
+       if (rt) {
+               rt->dst.obsolete = -1;
+
+               rt->dst.flags = DST_HOST |
+                       (nopolicy ? DST_NOPOLICY : 0) |
+                       (noxfrm ? DST_NOXFRM : 0);
+       }
+       return rt;
 }
 
 /* called in rcu_read_lock() section */
@@ -1874,31 +1866,25 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                if (err < 0)
                        goto e_err;
        }
-       rth = dst_alloc(&ipv4_dst_ops);
+       rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
        if (!rth)
                goto e_nobufs;
 
        rth->dst.output = ip_rt_bug;
-       rth->dst.obsolete = -1;
 
-       atomic_set(&rth->dst.__refcnt, 1);
-       rth->dst.flags= DST_HOST;
-       if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-               rth->dst.flags |= DST_NOPOLICY;
-       rth->fl.fl4_dst = daddr;
+       rth->rt_key_dst = daddr;
        rth->rt_dst     = daddr;
-       rth->fl.fl4_tos = tos;
-       rth->fl.mark    = skb->mark;
-       rth->fl.fl4_src = saddr;
+       rth->rt_tos     = tos;
+       rth->rt_mark    = skb->mark;
+       rth->rt_key_src = saddr;
        rth->rt_src     = saddr;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        rth->dst.tclassid = itag;
 #endif
-       rth->rt_iif     =
-       rth->fl.iif     = dev->ifindex;
+       rth->rt_iif     = dev->ifindex;
        rth->dst.dev    = init_net.loopback_dev;
        dev_hold(rth->dst.dev);
-       rth->fl.oif     = 0;
+       rth->rt_oif     = 0;
        rth->rt_gateway = daddr;
        rth->rt_spec_dst= spec_dst;
        rth->rt_genid   = rt_genid(dev_net(dev));
@@ -1916,7 +1902,10 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        RT_CACHE_STAT_INC(in_slow_mc);
 
        hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
-       return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex);
+       rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
+       err = 0;
+       if (IS_ERR(rth))
+               err = PTR_ERR(rth);
 
 e_nobufs:
        return -ENOBUFS;
@@ -1959,7 +1948,7 @@ static void ip_handle_martian_source(struct net_device *dev,
 
 /* called in rcu_read_lock() section */
 static int __mkroute_input(struct sk_buff *skb,
-                          struct fib_result *res,
+                          const struct fib_result *res,
                           struct in_device *in_dev,
                           __be32 daddr, __be32 saddr, u32 tos,
                           struct rtable **result)
@@ -2013,39 +2002,31 @@ static int __mkroute_input(struct sk_buff *skb,
                }
        }
 
-
-       rth = dst_alloc(&ipv4_dst_ops);
+       rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY),
+                          IN_DEV_CONF_GET(out_dev, NOXFRM));
        if (!rth) {
                err = -ENOBUFS;
                goto cleanup;
        }
 
-       atomic_set(&rth->dst.__refcnt, 1);
-       rth->dst.flags= DST_HOST;
-       if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-               rth->dst.flags |= DST_NOPOLICY;
-       if (IN_DEV_CONF_GET(out_dev, NOXFRM))
-               rth->dst.flags |= DST_NOXFRM;
-       rth->fl.fl4_dst = daddr;
+       rth->rt_key_dst = daddr;
        rth->rt_dst     = daddr;
-       rth->fl.fl4_tos = tos;
-       rth->fl.mark    = skb->mark;
-       rth->fl.fl4_src = saddr;
+       rth->rt_tos     = tos;
+       rth->rt_mark    = skb->mark;
+       rth->rt_key_src = saddr;
        rth->rt_src     = saddr;
        rth->rt_gateway = daddr;
-       rth->rt_iif     =
-               rth->fl.iif     = in_dev->dev->ifindex;
+       rth->rt_iif     = in_dev->dev->ifindex;
        rth->dst.dev    = (out_dev)->dev;
        dev_hold(rth->dst.dev);
-       rth->fl.oif     = 0;
+       rth->rt_oif     = 0;
        rth->rt_spec_dst= spec_dst;
 
-       rth->dst.obsolete = -1;
        rth->dst.input = ip_forward;
        rth->dst.output = ip_output;
        rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
 
-       rt_set_nexthop(rth, res, itag);
+       rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);
 
        rth->rt_flags = flags;
 
@@ -2078,7 +2059,10 @@ static int ip_mkroute_input(struct sk_buff *skb,
        /* put it into the cache */
        hash = rt_hash(daddr, saddr, fl->iif,
                       rt_genid(dev_net(rth->dst.dev)));
-       return rt_intern_hash(hash, rth, NULL, skb, fl->iif);
+       rth = rt_intern_hash(hash, rth, skb, fl->iif);
+       if (IS_ERR(rth))
+               return PTR_ERR(rth);
+       return 0;
 }
 
 /*
@@ -2097,12 +2081,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 {
        struct fib_result res;
        struct in_device *in_dev = __in_dev_get_rcu(dev);
-       struct flowi fl = { .fl4_dst    = daddr,
-                           .fl4_src    = saddr,
-                           .fl4_tos    = tos,
-                           .fl4_scope  = RT_SCOPE_UNIVERSE,
-                           .mark = skb->mark,
-                           .iif = dev->ifindex };
+       struct flowi fl;
        unsigned        flags = 0;
        u32             itag = 0;
        struct rtable * rth;
@@ -2139,6 +2118,13 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        /*
         *      Now we are ready to route packet.
         */
+       fl.oif = 0;
+       fl.iif = dev->ifindex;
+       fl.mark = skb->mark;
+       fl.fl4_dst = daddr;
+       fl.fl4_src = saddr;
+       fl.fl4_tos = tos;
+       fl.fl4_scope = RT_SCOPE_UNIVERSE;
        err = fib_lookup(net, &fl, &res);
        if (err != 0) {
                if (!IN_DEV_FORWARD(in_dev))
@@ -2190,29 +2176,23 @@ brd_input:
        RT_CACHE_STAT_INC(in_brd);
 
 local_input:
-       rth = dst_alloc(&ipv4_dst_ops);
+       rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
        if (!rth)
                goto e_nobufs;
 
        rth->dst.output= ip_rt_bug;
-       rth->dst.obsolete = -1;
        rth->rt_genid = rt_genid(net);
 
-       atomic_set(&rth->dst.__refcnt, 1);
-       rth->dst.flags= DST_HOST;
-       if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-               rth->dst.flags |= DST_NOPOLICY;
-       rth->fl.fl4_dst = daddr;
+       rth->rt_key_dst = daddr;
        rth->rt_dst     = daddr;
-       rth->fl.fl4_tos = tos;
-       rth->fl.mark    = skb->mark;
-       rth->fl.fl4_src = saddr;
+       rth->rt_tos     = tos;
+       rth->rt_mark    = skb->mark;
+       rth->rt_key_src = saddr;
        rth->rt_src     = saddr;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        rth->dst.tclassid = itag;
 #endif
-       rth->rt_iif     =
-       rth->fl.iif     = dev->ifindex;
+       rth->rt_iif     = dev->ifindex;
        rth->dst.dev    = net->loopback_dev;
        dev_hold(rth->dst.dev);
        rth->rt_gateway = daddr;
@@ -2226,7 +2206,10 @@ local_input:
        }
        rth->rt_type    = res.type;
        hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
-       err = rt_intern_hash(hash, rth, NULL, skb, fl.iif);
+       rth = rt_intern_hash(hash, rth, skb, fl.iif);
+       err = 0;
+       if (IS_ERR(rth))
+               err = PTR_ERR(rth);
        goto out;
 
 no_route:
@@ -2288,12 +2271,12 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
        for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
             rth = rcu_dereference(rth->dst.rt_next)) {
-               if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) |
-                    ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) |
-                    (rth->fl.iif ^ iif) |
-                    rth->fl.oif |
-                    (rth->fl.fl4_tos ^ tos)) == 0 &&
-                   rth->fl.mark == skb->mark &&
+               if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
+                    ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
+                    (rth->rt_iif ^ iif) |
+                    rth->rt_oif |
+                    (rth->rt_tos ^ tos)) == 0 &&
+                   rth->rt_mark == skb->mark &&
                    net_eq(dev_net(rth->dst.dev), net) &&
                    !rt_is_expired(rth)) {
                        if (noref) {
@@ -2351,38 +2334,39 @@ skip_cache:
 EXPORT_SYMBOL(ip_route_input_common);
 
 /* called with rcu_read_lock() */
-static int __mkroute_output(struct rtable **result,
-                           struct fib_result *res,
-                           const struct flowi *fl,
-                           const struct flowi *oldflp,
-                           struct net_device *dev_out,
-                           unsigned flags)
+static struct rtable *__mkroute_output(const struct fib_result *res,
+                                      const struct flowi *fl,
+                                      const struct flowi *oldflp,
+                                      struct net_device *dev_out,
+                                      unsigned int flags)
 {
-       struct rtable *rth;
-       struct in_device *in_dev;
+       struct fib_info *fi = res->fi;
        u32 tos = RT_FL_TOS(oldflp);
+       struct in_device *in_dev;
+       u16 type = res->type;
+       struct rtable *rth;
 
        if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags & IFF_LOOPBACK))
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
 
        if (ipv4_is_lbcast(fl->fl4_dst))
-               res->type = RTN_BROADCAST;
+               type = RTN_BROADCAST;
        else if (ipv4_is_multicast(fl->fl4_dst))
-               res->type = RTN_MULTICAST;
+               type = RTN_MULTICAST;
        else if (ipv4_is_zeronet(fl->fl4_dst))
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
 
        if (dev_out->flags & IFF_LOOPBACK)
                flags |= RTCF_LOCAL;
 
        in_dev = __in_dev_get_rcu(dev_out);
        if (!in_dev)
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
 
-       if (res->type == RTN_BROADCAST) {
+       if (type == RTN_BROADCAST) {
                flags |= RTCF_BROADCAST | RTCF_LOCAL;
-               res->fi = NULL;
-       } else if (res->type == RTN_MULTICAST) {
+               fi = NULL;
+       } else if (type == RTN_MULTICAST) {
                flags |= RTCF_MULTICAST | RTCF_LOCAL;
                if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
                                 oldflp->proto))
@@ -2391,30 +2375,23 @@ static int __mkroute_output(struct rtable **result,
                 * default one, but do not gateway in this case.
                 * Yes, it is hack.
                 */
-               if (res->fi && res->prefixlen < 4)
-                       res->fi = NULL;
+               if (fi && res->prefixlen < 4)
+                       fi = NULL;
        }
 
-
-       rth = dst_alloc(&ipv4_dst_ops);
+       rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY),
+                          IN_DEV_CONF_GET(in_dev, NOXFRM));
        if (!rth)
-               return -ENOBUFS;
-
-       atomic_set(&rth->dst.__refcnt, 1);
-       rth->dst.flags= DST_HOST;
-       if (IN_DEV_CONF_GET(in_dev, NOXFRM))
-               rth->dst.flags |= DST_NOXFRM;
-       if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-               rth->dst.flags |= DST_NOPOLICY;
-
-       rth->fl.fl4_dst = oldflp->fl4_dst;
-       rth->fl.fl4_tos = tos;
-       rth->fl.fl4_src = oldflp->fl4_src;
-       rth->fl.oif     = oldflp->oif;
-       rth->fl.mark    = oldflp->mark;
+               return ERR_PTR(-ENOBUFS);
+
+       rth->rt_key_dst = oldflp->fl4_dst;
+       rth->rt_tos     = tos;
+       rth->rt_key_src = oldflp->fl4_src;
+       rth->rt_oif     = oldflp->oif;
+       rth->rt_mark    = oldflp->mark;
        rth->rt_dst     = fl->fl4_dst;
        rth->rt_src     = fl->fl4_src;
-       rth->rt_iif     = oldflp->oif ? : dev_out->ifindex;
+       rth->rt_iif     = 0;
 	/* get references to the devices that are to be held by the routing
 	   cache entry */
        rth->dst.dev    = dev_out;
@@ -2423,7 +2400,6 @@ static int __mkroute_output(struct rtable **result,
        rth->rt_spec_dst= fl->fl4_src;
 
        rth->dst.output=ip_output;
-       rth->dst.obsolete = -1;
        rth->rt_genid = rt_genid(dev_net(dev_out));
 
        RT_CACHE_STAT_INC(out_slow_tot);
@@ -2440,7 +2416,7 @@ static int __mkroute_output(struct rtable **result,
                        RT_CACHE_STAT_INC(out_slow_mc);
                }
 #ifdef CONFIG_IP_MROUTE
-               if (res->type == RTN_MULTICAST) {
+               if (type == RTN_MULTICAST) {
                        if (IN_DEV_MFORWARD(in_dev) &&
                            !ipv4_is_local_multicast(oldflp->fl4_dst)) {
                                rth->dst.input = ip_mr_input;
@@ -2450,31 +2426,10 @@ static int __mkroute_output(struct rtable **result,
 #endif
        }
 
-       rt_set_nexthop(rth, res, 0);
+       rt_set_nexthop(rth, oldflp, res, fi, type, 0);
 
        rth->rt_flags = flags;
-       *result = rth;
-       return 0;
-}
-
-/* called with rcu_read_lock() */
-static int ip_mkroute_output(struct rtable **rp,
-                            struct fib_result *res,
-                            const struct flowi *fl,
-                            const struct flowi *oldflp,
-                            struct net_device *dev_out,
-                            unsigned flags)
-{
-       struct rtable *rth = NULL;
-       int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
-       unsigned hash;
-       if (err == 0) {
-               hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
-                              rt_genid(dev_net(dev_out)));
-               err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif);
-       }
-
-       return err;
+       return rth;
 }
 
 /*
@@ -2482,31 +2437,33 @@ static int ip_mkroute_output(struct rtable **rp,
  * called with rcu_read_lock();
  */
 
-static int ip_route_output_slow(struct net *net, struct rtable **rp,
-                               const struct flowi *oldflp)
+static struct rtable *ip_route_output_slow(struct net *net,
+                                          const struct flowi *oldflp)
 {
        u32 tos = RT_FL_TOS(oldflp);
-       struct flowi fl = { .fl4_dst = oldflp->fl4_dst,
-                           .fl4_src = oldflp->fl4_src,
-                           .fl4_tos = tos & IPTOS_RT_MASK,
-                           .fl4_scope = ((tos & RTO_ONLINK) ?
-                                         RT_SCOPE_LINK : RT_SCOPE_UNIVERSE),
-                           .mark = oldflp->mark,
-                           .iif = net->loopback_dev->ifindex,
-                           .oif = oldflp->oif };
+       struct flowi fl;
        struct fib_result res;
        unsigned int flags = 0;
        struct net_device *dev_out = NULL;
-       int err;
-
+       struct rtable *rth;
 
        res.fi          = NULL;
 #ifdef CONFIG_IP_MULTIPLE_TABLES
        res.r           = NULL;
 #endif
 
+       fl.oif = oldflp->oif;
+       fl.iif = net->loopback_dev->ifindex;
+       fl.mark = oldflp->mark;
+       fl.fl4_dst = oldflp->fl4_dst;
+       fl.fl4_src = oldflp->fl4_src;
+       fl.fl4_tos = tos & IPTOS_RT_MASK;
+       fl.fl4_scope = ((tos & RTO_ONLINK) ?
+                       RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
+
+       rcu_read_lock();
        if (oldflp->fl4_src) {
-               err = -EINVAL;
+               rth = ERR_PTR(-EINVAL);
                if (ipv4_is_multicast(oldflp->fl4_src) ||
                    ipv4_is_lbcast(oldflp->fl4_src) ||
                    ipv4_is_zeronet(oldflp->fl4_src))
@@ -2557,13 +2514,13 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
 
        if (oldflp->oif) {
                dev_out = dev_get_by_index_rcu(net, oldflp->oif);
-               err = -ENODEV;
+               rth = ERR_PTR(-ENODEV);
                if (dev_out == NULL)
                        goto out;
 
                /* RACE: Check return value of inet_select_addr instead. */
                if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
-                       err = -ENETUNREACH;
+                       rth = ERR_PTR(-ENETUNREACH);
                        goto out;
                }
                if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
@@ -2621,7 +2578,7 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
                        res.type = RTN_UNICAST;
                        goto make_route;
                }
-               err = -ENETUNREACH;
+               rth = ERR_PTR(-ENETUNREACH);
                goto out;
        }
 
@@ -2645,7 +2602,7 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
        else
 #endif
        if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
-               fib_select_default(net, &fl, &res);
+               fib_select_default(&res);
 
        if (!fl.fl4_src)
                fl.fl4_src = FIB_RES_PREFSRC(res);
@@ -2655,17 +2612,24 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
 
 
 make_route:
-       err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);
+       rth = __mkroute_output(&res, &fl, oldflp, dev_out, flags);
+       if (!IS_ERR(rth)) {
+               unsigned int hash;
 
-out:   return err;
+               hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
+                              rt_genid(dev_net(dev_out)));
+               rth = rt_intern_hash(hash, rth, NULL, oldflp->oif);
+       }
+
+out:
+       rcu_read_unlock();
+       return rth;
 }
 
-int __ip_route_output_key(struct net *net, struct rtable **rp,
-                         const struct flowi *flp)
+struct rtable *__ip_route_output_key(struct net *net, const struct flowi *flp)
 {
-       unsigned int hash;
-       int res;
        struct rtable *rth;
+       unsigned int hash;
 
        if (!rt_caching(net))
                goto slow_output;
@@ -2675,30 +2639,26 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
        rcu_read_lock_bh();
        for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
                rth = rcu_dereference_bh(rth->dst.rt_next)) {
-               if (rth->fl.fl4_dst == flp->fl4_dst &&
-                   rth->fl.fl4_src == flp->fl4_src &&
+               if (rth->rt_key_dst == flp->fl4_dst &&
+                   rth->rt_key_src == flp->fl4_src &&
                    rt_is_output_route(rth) &&
-                   rth->fl.oif == flp->oif &&
-                   rth->fl.mark == flp->mark &&
-                   !((rth->fl.fl4_tos ^ flp->fl4_tos) &
+                   rth->rt_oif == flp->oif &&
+                   rth->rt_mark == flp->mark &&
+                   !((rth->rt_tos ^ flp->fl4_tos) &
                            (IPTOS_RT_MASK | RTO_ONLINK)) &&
                    net_eq(dev_net(rth->dst.dev), net) &&
                    !rt_is_expired(rth)) {
                        dst_use(&rth->dst, jiffies);
                        RT_CACHE_STAT_INC(out_hit);
                        rcu_read_unlock_bh();
-                       *rp = rth;
-                       return 0;
+                       return rth;
                }
                RT_CACHE_STAT_INC(out_hlist_search);
        }
        rcu_read_unlock_bh();
 
 slow_output:
-       rcu_read_lock();
-       res = ip_route_output_slow(net, rp, flp);
-       rcu_read_unlock();
-       return res;
+       return ip_route_output_slow(net, flp);
 }
 EXPORT_SYMBOL_GPL(__ip_route_output_key);
 
@@ -2726,17 +2686,14 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
        .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
 };
 
-
-static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi *flp)
+struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
 {
-       struct rtable *ort = *rp;
-       struct rtable *rt = (struct rtable *)
-               dst_alloc(&ipv4_dst_blackhole_ops);
+       struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, 1);
+       struct rtable *ort = (struct rtable *) dst_orig;
 
        if (rt) {
                struct dst_entry *new = &rt->dst;
 
-               atomic_set(&new->__refcnt, 1);
                new->__use = 1;
                new->input = dst_discard;
                new->output = dst_discard;
@@ -2746,7 +2703,12 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
                if (new->dev)
                        dev_hold(new->dev);
 
-               rt->fl = ort->fl;
+               rt->rt_key_dst = ort->rt_key_dst;
+               rt->rt_key_src = ort->rt_key_src;
+               rt->rt_tos = ort->rt_tos;
+               rt->rt_iif = ort->rt_iif;
+               rt->rt_oif = ort->rt_oif;
+               rt->rt_mark = ort->rt_mark;
 
                rt->rt_genid = rt_genid(net);
                rt->rt_flags = ort->rt_flags;
@@ -2759,46 +2721,38 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
                rt->peer = ort->peer;
                if (rt->peer)
                        atomic_inc(&rt->peer->refcnt);
+               rt->fi = ort->fi;
+               if (rt->fi)
+                       atomic_inc(&rt->fi->fib_clntref);
 
                dst_free(new);
        }
 
-       dst_release(&(*rp)->dst);
-       *rp = rt;
-       return rt ? 0 : -ENOMEM;
+       dst_release(dst_orig);
+
+       return rt ? &rt->dst : ERR_PTR(-ENOMEM);
 }
 
-int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
-                        struct sock *sk, int flags)
+struct rtable *ip_route_output_flow(struct net *net, struct flowi *flp,
+                                   struct sock *sk)
 {
-       int err;
+       struct rtable *rt = __ip_route_output_key(net, flp);
 
-       if ((err = __ip_route_output_key(net, rp, flp)) != 0)
-               return err;
+       if (IS_ERR(rt))
+               return rt;
 
        if (flp->proto) {
                if (!flp->fl4_src)
-                       flp->fl4_src = (*rp)->rt_src;
+                       flp->fl4_src = rt->rt_src;
                if (!flp->fl4_dst)
-                       flp->fl4_dst = (*rp)->rt_dst;
-               err = __xfrm_lookup(net, (struct dst_entry **)rp, flp, sk,
-                                   flags ? XFRM_LOOKUP_WAIT : 0);
-               if (err == -EREMOTE)
-                       err = ipv4_dst_blackhole(net, rp, flp);
-
-               return err;
+                       flp->fl4_dst = rt->rt_dst;
+               rt = (struct rtable *) xfrm_lookup(net, &rt->dst, flp, sk, 0);
        }
 
-       return 0;
+       return rt;
 }
 EXPORT_SYMBOL_GPL(ip_route_output_flow);
 
-int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
-{
-       return ip_route_output_flow(net, rp, flp, NULL, 0);
-}
-EXPORT_SYMBOL(ip_route_output_key);
-
 static int rt_fill_info(struct net *net,
                        struct sk_buff *skb, u32 pid, u32 seq, int event,
                        int nowait, unsigned int flags)
@@ -2817,7 +2771,7 @@ static int rt_fill_info(struct net *net,
        r->rtm_family    = AF_INET;
        r->rtm_dst_len  = 32;
        r->rtm_src_len  = 0;
-       r->rtm_tos      = rt->fl.fl4_tos;
+       r->rtm_tos      = rt->rt_tos;
        r->rtm_table    = RT_TABLE_MAIN;
        NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
        r->rtm_type     = rt->rt_type;
@@ -2829,19 +2783,19 @@ static int rt_fill_info(struct net *net,
 
        NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
 
-       if (rt->fl.fl4_src) {
+       if (rt->rt_key_src) {
                r->rtm_src_len = 32;
-               NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
+               NLA_PUT_BE32(skb, RTA_SRC, rt->rt_key_src);
        }
        if (rt->dst.dev)
                NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        if (rt->dst.tclassid)
                NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
 #endif
        if (rt_is_input_route(rt))
                NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
-       else if (rt->rt_src != rt->fl.fl4_src)
+       else if (rt->rt_src != rt->rt_key_src)
                NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
 
        if (rt->rt_dst != rt->rt_gateway)
@@ -2850,11 +2804,12 @@ static int rt_fill_info(struct net *net,
        if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
                goto nla_put_failure;
 
-       if (rt->fl.mark)
-               NLA_PUT_BE32(skb, RTA_MARK, rt->fl.mark);
+       if (rt->rt_mark)
+               NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);
 
        error = rt->dst.error;
-       expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
+       expires = (rt->peer && rt->peer->pmtu_expires) ?
+               rt->peer->pmtu_expires - jiffies : 0;
        if (rt->peer) {
                inet_peer_refcheck(rt->peer);
                id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
@@ -2884,7 +2839,7 @@ static int rt_fill_info(struct net *net,
                        }
                } else
 #endif
-                       NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
+                       NLA_PUT_U32(skb, RTA_IIF, rt->rt_iif);
        }
 
        if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
@@ -2965,7 +2920,11 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
                        .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
                        .mark = mark,
                };
-               err = ip_route_output_key(net, &rt, &fl);
+               rt = ip_route_output_key(net, &fl);
+
+               err = 0;
+               if (IS_ERR(rt))
+                       err = PTR_ERR(rt);
        }
 
        if (err)
@@ -3256,9 +3215,9 @@ static __net_initdata struct pernet_operations rt_genid_ops = {
 };
 
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
-#endif /* CONFIG_NET_CLS_ROUTE */
+#endif /* CONFIG_IP_ROUTE_CLASSID */
 
 static __initdata unsigned long rhash_entries;
 static int __init set_rhash_entries(char *str)
@@ -3274,7 +3233,7 @@ int __init ip_rt_init(void)
 {
        int rc = 0;
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
        if (!ip_rt_acct)
                panic("IP: failed to allocate ip_rt_acct\n");
@@ -3311,14 +3270,6 @@ int __init ip_rt_init(void)
        devinet_init();
        ip_fib_init();
 
-       /* All the timers, started at system startup tend
-          to synchronize. Perturb it a bit.
-        */
-       INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
-       expires_ljiffies = jiffies;
-       schedule_delayed_work(&expires_work,
-               net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
-
        if (ip_rt_proc_init())
                printk(KERN_ERR "Unable to create route proc files\n");
 #ifdef CONFIG_XFRM
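
The route.c hunks above convert the IPv4 output-route lookups from an int return with a struct rtable ** out-parameter to returning the struct rtable * itself, with failures carried as ERR_PTR() values. A minimal sketch of a caller written against that convention follows; the function and its arguments are hypothetical, and only ip_route_output_key(), IS_ERR()/PTR_ERR() and ip_rt_put() are taken from the callers shown in this diff.

	/* Hypothetical caller; the flow key still uses the pre-flowi4
	 * field names (fl4_dst, fl4_src) that this tree uses. */
	static int resolve_output_route(struct net *net, __be32 daddr, __be32 saddr)
	{
		struct flowi fl = {
			.fl4_dst = daddr,
			.fl4_src = saddr,
		};
		struct rtable *rt;

		rt = ip_route_output_key(net, &fl);
		if (IS_ERR(rt))
			return PTR_ERR(rt);	/* the errno now travels in the pointer */

		/* ... use rt->dst, rt->rt_src, ... */
		ip_rt_put(rt);
		return 0;
	}
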
index 47519205a014a773e2df5b21f6fdf27c43fe6cb1..0ad6ddf638a7ef41f33de2f13bfbe92aebbc16dc 100644 (file)
@@ -355,7 +355,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
                                    .fl_ip_sport = th->dest,
                                    .fl_ip_dport = th->source };
                security_req_classify_flow(req, &fl);
-               if (ip_route_output_key(sock_net(sk), &rt, &fl)) {
+               rt = ip_route_output_key(sock_net(sk), &fl);
+               if (IS_ERR(rt)) {
                        reqsk_free(req);
                        goto out;
                }
index 6c11eece262cf77a377a9fa901f4031c46745205..b22d450105450ae089d42a06486938fcaa2c80dc 100644 (file)
@@ -505,6 +505,15 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
                else
                        answ = tp->write_seq - tp->snd_una;
                break;
+       case SIOCOUTQNSD:
+               if (sk->sk_state == TCP_LISTEN)
+                       return -EINVAL;
+
+               if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
+                       answ = 0;
+               else
+                       answ = tp->write_seq - tp->snd_nxt;
+               break;
        default:
                return -ENOIOCTLCMD;
        }
@@ -873,9 +882,7 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset,
                                        flags);
 
        lock_sock(sk);
-       TCP_CHECK_TIMER(sk);
        res = do_tcp_sendpages(sk, &page, offset, size, flags);
-       TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return res;
 }
@@ -916,7 +923,6 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        long timeo;
 
        lock_sock(sk);
-       TCP_CHECK_TIMER(sk);
 
        flags = msg->msg_flags;
        timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
@@ -1104,7 +1110,6 @@ wait_for_memory:
 out:
        if (copied)
                tcp_push(sk, flags, mss_now, tp->nonagle);
-       TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return copied;
 
@@ -1123,7 +1128,6 @@ do_error:
                goto out;
 out_err:
        err = sk_stream_error(sk, flags, err);
-       TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return err;
 }
@@ -1415,8 +1419,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
        lock_sock(sk);
 
-       TCP_CHECK_TIMER(sk);
-
        err = -ENOTCONN;
        if (sk->sk_state == TCP_LISTEN)
                goto out;
@@ -1767,12 +1769,10 @@ skip_copy:
        /* Clean up data we have read: This will do ACK frames. */
        tcp_cleanup_rbuf(sk, copied);
 
-       TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return copied;
 
 out:
-       TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return err;
 
@@ -2653,7 +2653,7 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
 EXPORT_SYMBOL(compat_tcp_getsockopt);
 #endif
 
-struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
+struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct tcphdr *th;
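
The first tcp.c hunk above adds a SIOCOUTQNSD case to tcp_ioctl(): it reports bytes sitting in the send queue that have not yet been transmitted (write_seq - snd_nxt), whereas SIOCOUTQ reports bytes not yet acknowledged. A hedged userspace sketch of exercising the new ioctl follows; it assumes SIOCOUTQNSD is exposed through <linux/sockios.h>, and the socket setup is left out.

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/sockios.h>	/* assumed location of SIOCOUTQNSD */

	/* Print how much of the socket's send queue has not yet been sent. */
	static void print_unsent(int tcp_fd)
	{
		int unsent = 0;

		if (ioctl(tcp_fd, SIOCOUTQNSD, &unsent) == 0)
			printf("queued but not yet sent: %d bytes\n", unsent);
		else
			perror("SIOCOUTQNSD");
	}
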
index 3b53fd1af23f04bb2a9023b56204ca45051fa81f..6187eb4d1dcfe1e66dc90a799c50df496c2d8974 100644 (file)
@@ -209,7 +209,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt)
 }
 
 
-static struct tcp_congestion_ops bictcp = {
+static struct tcp_congestion_ops bictcp __read_mostly = {
        .init           = bictcp_init,
        .ssthresh       = bictcp_recalc_ssthresh,
        .cong_avoid     = bictcp_cong_avoid,
index 71d5f2f29fa62b92cb4ae4d4762b5322d017f5d3..62f775cb0863d6e17e89a6328d408224d0b70819 100644 (file)
@@ -405,7 +405,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
                hystart_update(sk, delay);
 }
 
-static struct tcp_congestion_ops cubictcp = {
+static struct tcp_congestion_ops cubictcp __read_mostly = {
        .init           = bictcp_init,
        .ssthresh       = bictcp_recalc_ssthresh,
        .cong_avoid     = bictcp_cong_avoid,
index 8b6caaf75bb9877648052b97209c7b2bca2f132b..30f27f6b3655fb030b8f501c700c90ad7fb91b38 100644 (file)
@@ -158,7 +158,7 @@ static u32 hstcp_ssthresh(struct sock *sk)
 }
 
 
-static struct tcp_congestion_ops tcp_highspeed = {
+static struct tcp_congestion_ops tcp_highspeed __read_mostly = {
        .init           = hstcp_init,
        .ssthresh       = hstcp_ssthresh,
        .cong_avoid     = hstcp_cong_avoid,
index 7c94a49554167a7315c22faf7cbaafa13824c434..c1a8175361e896a37c515b93ed64f23f0cd311ee 100644 (file)
@@ -284,7 +284,7 @@ static void htcp_state(struct sock *sk, u8 new_state)
        }
 }
 
-static struct tcp_congestion_ops htcp = {
+static struct tcp_congestion_ops htcp __read_mostly = {
        .init           = htcp_init,
        .ssthresh       = htcp_recalc_ssthresh,
        .cong_avoid     = htcp_cong_avoid,
index 377bc93493712f3037ae1cb2e12eb6e421366a4e..fe3ecf484b4471969ef51b27d7fc21e7fa309ff1 100644 (file)
@@ -162,7 +162,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
        tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp);
 }
 
-static struct tcp_congestion_ops tcp_hybla = {
+static struct tcp_congestion_ops tcp_hybla __read_mostly = {
        .init           = hybla_init,
        .ssthresh       = tcp_reno_ssthresh,
        .min_cwnd       = tcp_reno_min_cwnd,
index 00ca688d89644b5c9456db8839a8ff36b7e562a3..813b43a76fece71e2614878a5830962d55f35a22 100644 (file)
@@ -322,7 +322,7 @@ static void tcp_illinois_info(struct sock *sk, u32 ext,
        }
 }
 
-static struct tcp_congestion_ops tcp_illinois = {
+static struct tcp_congestion_ops tcp_illinois __read_mostly = {
        .flags          = TCP_CONG_RTT_STAMP,
        .init           = tcp_illinois_init,
        .ssthresh       = tcp_illinois_ssthresh,
index 65f6c04062453aefdffa2317781921990616e796..08ea735b9d7299ca7391634dccde443589387246 100644 (file)
@@ -817,7 +817,7 @@ __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
        __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
 
        if (!cwnd)
-               cwnd = rfc3390_bytes_to_packets(tp->mss_cache);
+               cwnd = TCP_INIT_CWND;
        return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
 }
 
index 02f583b3744a7b633743e8c998ba1b3b0081a3fb..f7e6c2c2d2bbca0d3c953322af44541c6ab9fae8 100644 (file)
@@ -149,9 +149,9 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        struct inet_sock *inet = inet_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
+       __be16 orig_sport, orig_dport;
        struct rtable *rt;
        __be32 daddr, nexthop;
-       int tmp;
        int err;
 
        if (addr_len < sizeof(struct sockaddr_in))
@@ -167,14 +167,17 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                nexthop = inet->opt->faddr;
        }
 
-       tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr,
-                              RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
-                              IPPROTO_TCP,
-                              inet->inet_sport, usin->sin_port, sk, 1);
-       if (tmp < 0) {
-               if (tmp == -ENETUNREACH)
+       orig_sport = inet->inet_sport;
+       orig_dport = usin->sin_port;
+       rt = ip_route_connect(nexthop, inet->inet_saddr,
+                             RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
+                             IPPROTO_TCP,
+                             orig_sport, orig_dport, sk, true);
+       if (IS_ERR(rt)) {
+               err = PTR_ERR(rt);
+               if (err == -ENETUNREACH)
                        IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
-               return tmp;
+               return err;
        }
 
        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
@@ -233,11 +236,14 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        if (err)
                goto failure;
 
-       err = ip_route_newports(&rt, IPPROTO_TCP,
-                               inet->inet_sport, inet->inet_dport, sk);
-       if (err)
+       rt = ip_route_newports(rt, IPPROTO_TCP,
+                              orig_sport, orig_dport,
+                              inet->inet_sport, inet->inet_dport, sk);
+       if (IS_ERR(rt)) {
+               err = PTR_ERR(rt);
+               rt = NULL;
                goto failure;
-
+       }
        /* OK, now commit destination to socket.  */
        sk->sk_gso_type = SKB_GSO_TCPV4;
        sk_setup_caps(sk, &rt->dst);
@@ -1341,7 +1347,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                    tcp_death_row.sysctl_tw_recycle &&
                    (dst = inet_csk_route_req(sk, req)) != NULL &&
                    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
-                   peer->daddr.a4 == saddr) {
+                   peer->daddr.addr.a4 == saddr) {
                        inet_peer_refcheck(peer);
                        if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
                            (s32)(peer->tcp_ts - req->ts_recent) >
@@ -1556,12 +1562,10 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 
        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                sock_rps_save_rxhash(sk, skb->rxhash);
-               TCP_CHECK_TIMER(sk);
                if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
                        rsk = sk;
                        goto reset;
                }
-               TCP_CHECK_TIMER(sk);
                return 0;
        }
 
@@ -1583,13 +1587,10 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
        } else
                sock_rps_save_rxhash(sk, skb->rxhash);
 
-
-       TCP_CHECK_TIMER(sk);
        if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
                rsk = sk;
                goto reset;
        }
-       TCP_CHECK_TIMER(sk);
        return 0;
 
 reset:
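
In the tcp_v4_connect() hunks above, ip_route_connect() and ip_route_newports() now hand the route back as a pointer (or an ERR_PTR), and ip_route_newports() receives both the ports used for the initial lookup and the ones finally bound, presumably so the route is refreshed only when an ephemeral port was assigned in between. A condensed, hypothetical fragment of that pattern; locking, address validation and the failure: unwinding of the real function are omitted.

	static int example_connect_route(struct sock *sk, __be32 nexthop,
					 __be16 orig_sport, __be16 orig_dport)
	{
		struct inet_sock *inet = inet_sk(sk);
		struct rtable *rt;

		rt = ip_route_connect(nexthop, inet->inet_saddr, RT_CONN_FLAGS(sk),
				      sk->sk_bound_dev_if, IPPROTO_TCP,
				      orig_sport, orig_dport, sk, true);
		if (IS_ERR(rt))
			return PTR_ERR(rt);

		/* ... port hashing may change inet->inet_sport here ... */

		rt = ip_route_newports(rt, IPPROTO_TCP, orig_sport, orig_dport,
				       inet->inet_sport, inet->inet_dport, sk);
		if (IS_ERR(rt))
			return PTR_ERR(rt);

		/* a real caller commits the route with sk_setup_caps(sk, &rt->dst) */
		ip_rt_put(rt);
		return 0;
	}
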
index de870377fbba9b5d1f38827fc65197424968ca37..656d431c99adcad51bf6a85d13d427729ba1ff5d 100644 (file)
@@ -313,7 +313,7 @@ static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
        lp->last_drop = tcp_time_stamp;
 }
 
-static struct tcp_congestion_ops tcp_lp = {
+static struct tcp_congestion_ops tcp_lp __read_mostly = {
        .flags = TCP_CONG_RTT_STAMP,
        .init = tcp_lp_init,
        .ssthresh = tcp_reno_ssthresh,
index a76513779e2be92fdefbfba84091685bcdf3eaf3..8ce55b8aaec8476e3b8292ddd7bf10c98ac402b2 100644 (file)
@@ -35,7 +35,7 @@ static u32 tcp_scalable_ssthresh(struct sock *sk)
 }
 
 
-static struct tcp_congestion_ops tcp_scalable = {
+static struct tcp_congestion_ops tcp_scalable __read_mostly = {
        .ssthresh       = tcp_scalable_ssthresh,
        .cong_avoid     = tcp_scalable_cong_avoid,
        .min_cwnd       = tcp_reno_min_cwnd,
index 74a6aa003657392d45d97f040ba6a987ee413004..ecd44b0c45f160eca2f6679437fc62c5c31ec8b9 100644 (file)
@@ -259,7 +259,6 @@ static void tcp_delack_timer(unsigned long data)
                tcp_send_ack(sk);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
        }
-       TCP_CHECK_TIMER(sk);
 
 out:
        if (tcp_memory_pressure)
@@ -481,7 +480,6 @@ static void tcp_write_timer(unsigned long data)
                tcp_probe_timer(sk);
                break;
        }
-       TCP_CHECK_TIMER(sk);
 
 out:
        sk_mem_reclaim(sk);
@@ -589,7 +587,6 @@ static void tcp_keepalive_timer (unsigned long data)
                elapsed = keepalive_time_when(tp) - elapsed;
        }
 
-       TCP_CHECK_TIMER(sk);
        sk_mem_reclaim(sk);
 
 resched:
index c6743eec9b7d0c555db5e32fdf73c82d6fa7422e..80fa2bfd7edef91e309119d2b5bda953342e1e1b 100644 (file)
@@ -304,7 +304,7 @@ void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
 }
 EXPORT_SYMBOL_GPL(tcp_vegas_get_info);
 
-static struct tcp_congestion_ops tcp_vegas = {
+static struct tcp_congestion_ops tcp_vegas __read_mostly = {
        .flags          = TCP_CONG_RTT_STAMP,
        .init           = tcp_vegas_init,
        .ssthresh       = tcp_reno_ssthresh,
index 38bc0b52d745b3a4fc9c7bc1eddcb3ff901a9a31..ac43cd747bcebdd83ce98fb26c3f7b1496ea3c69 100644 (file)
@@ -201,7 +201,7 @@ static u32 tcp_veno_ssthresh(struct sock *sk)
                return max(tp->snd_cwnd >> 1U, 2U);
 }
 
-static struct tcp_congestion_ops tcp_veno = {
+static struct tcp_congestion_ops tcp_veno __read_mostly = {
        .flags          = TCP_CONG_RTT_STAMP,
        .init           = tcp_veno_init,
        .ssthresh       = tcp_veno_ssthresh,
index a534dda5456e4bbf0a4dfca5b0731dc660985ec8..1b91bf48e277bcc982e8e34e4eca3d67db26f16f 100644 (file)
@@ -272,7 +272,7 @@ static void tcp_westwood_info(struct sock *sk, u32 ext,
 }
 
 
-static struct tcp_congestion_ops tcp_westwood = {
+static struct tcp_congestion_ops tcp_westwood __read_mostly = {
        .init           = tcp_westwood_init,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
index a0f2403588922654afcbbdf1777a40af99d5ad3a..dc7f43179c9a15fb858295fb0c6c960ac416959f 100644 (file)
@@ -225,7 +225,7 @@ static u32 tcp_yeah_ssthresh(struct sock *sk) {
        return tp->snd_cwnd - reduction;
 }
 
-static struct tcp_congestion_ops tcp_yeah = {
+static struct tcp_congestion_ops tcp_yeah __read_mostly = {
        .flags          = TCP_CONG_RTT_STAMP,
        .init           = tcp_yeah_init,
        .ssthresh       = tcp_yeah_ssthresh,
index 8157b17959eeb78ce317b3f0ce82668fde0a70fd..c9a73e5b26a39289169d3018139c94fd1d88648b 100644 (file)
@@ -663,75 +663,72 @@ void udp_flush_pending_frames(struct sock *sk)
 EXPORT_SYMBOL(udp_flush_pending_frames);
 
 /**
- *     udp4_hwcsum_outgoing  -  handle outgoing HW checksumming
- *     @sk:    socket we are sending on
+ *     udp4_hwcsum  -  handle outgoing HW checksumming
  *     @skb:   sk_buff containing the filled-in UDP header
  *             (checksum field must be zeroed out)
+ *     @src:   source IP address
+ *     @dst:   destination IP address
  */
-static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
-                                __be32 src, __be32 dst, int len)
+static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
 {
-       unsigned int offset;
        struct udphdr *uh = udp_hdr(skb);
+       struct sk_buff *frags = skb_shinfo(skb)->frag_list;
+       int offset = skb_transport_offset(skb);
+       int len = skb->len - offset;
+       int hlen = len;
        __wsum csum = 0;
 
-       if (skb_queue_len(&sk->sk_write_queue) == 1) {
+       if (!frags) {
                /*
                 * Only one fragment on the socket.
                 */
                skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct udphdr, check);
-               uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
+               uh->check = ~csum_tcpudp_magic(src, dst, len,
+                                              IPPROTO_UDP, 0);
        } else {
                /*
                 * HW checksumming won't work when there are two or more
                 * fragments on the socket, so the csums of all sk_buffs
                 * have to be combined here
                 */
-               offset = skb_transport_offset(skb);
-               skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+               do {
+                       csum = csum_add(csum, frags->csum);
+                       hlen -= frags->len;
+               } while ((frags = frags->next));
 
+               csum = skb_checksum(skb, offset, hlen, csum);
                skb->ip_summed = CHECKSUM_NONE;
 
-               skb_queue_walk(&sk->sk_write_queue, skb) {
-                       csum = csum_add(csum, skb->csum);
-               }
-
                uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
                if (uh->check == 0)
                        uh->check = CSUM_MANGLED_0;
        }
 }
 
-/*
- * Push out all pending data as one UDP datagram. Socket is locked.
- */
-static int udp_push_pending_frames(struct sock *sk)
+static int udp_send_skb(struct sk_buff *skb, __be32 daddr, __be32 dport)
 {
-       struct udp_sock  *up = udp_sk(sk);
+       struct sock *sk = skb->sk;
        struct inet_sock *inet = inet_sk(sk);
-       struct flowi *fl = &inet->cork.fl;
-       struct sk_buff *skb;
        struct udphdr *uh;
+       struct rtable *rt = (struct rtable *)skb_dst(skb);
        int err = 0;
        int is_udplite = IS_UDPLITE(sk);
+       int offset = skb_transport_offset(skb);
+       int len = skb->len - offset;
        __wsum csum = 0;
 
-       /* Grab the skbuff where UDP header space exists. */
-       if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
-               goto out;
-
        /*
         * Create a UDP header
         */
        uh = udp_hdr(skb);
-       uh->source = fl->fl_ip_sport;
-       uh->dest = fl->fl_ip_dport;
-       uh->len = htons(up->len);
+       uh->source = inet->inet_sport;
+       uh->dest = dport;
+       uh->len = htons(len);
        uh->check = 0;
 
        if (is_udplite)                                  /*     UDP-Lite      */
-               csum  = udplite_csum_outgoing(sk, skb);
+               csum = udplite_csum(skb);
 
        else if (sk->sk_no_check == UDP_CSUM_NOXMIT) {   /* UDP csum disabled */
 
@@ -740,20 +737,20 @@ static int udp_push_pending_frames(struct sock *sk)
 
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
 
-               udp4_hwcsum_outgoing(sk, skb, fl->fl4_src, fl->fl4_dst, up->len);
+               udp4_hwcsum(skb, rt->rt_src, daddr);
                goto send;
 
-       } else                                           /*   `normal' UDP    */
-               csum = udp_csum_outgoing(sk, skb);
+       } else
+               csum = udp_csum(skb);
 
        /* add protocol-dependent pseudo-header */
-       uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len,
+       uh->check = csum_tcpudp_magic(rt->rt_src, daddr, len,
                                      sk->sk_protocol, csum);
        if (uh->check == 0)
                uh->check = CSUM_MANGLED_0;
 
 send:
-       err = ip_push_pending_frames(sk);
+       err = ip_send_skb(skb);
        if (err) {
                if (err == -ENOBUFS && !inet->recverr) {
                        UDP_INC_STATS_USER(sock_net(sk),
@@ -763,6 +760,26 @@ send:
        } else
                UDP_INC_STATS_USER(sock_net(sk),
                                   UDP_MIB_OUTDATAGRAMS, is_udplite);
+       return err;
+}
+
+/*
+ * Push out all pending data as one UDP datagram. Socket is locked.
+ */
+static int udp_push_pending_frames(struct sock *sk)
+{
+       struct udp_sock  *up = udp_sk(sk);
+       struct inet_sock *inet = inet_sk(sk);
+       struct flowi *fl = &inet->cork.fl;
+       struct sk_buff *skb;
+       int err = 0;
+
+       skb = ip_finish_skb(sk);
+       if (!skb)
+               goto out;
+
+       err = udp_send_skb(skb, fl->fl4_dst, fl->fl_ip_dport);
+
 out:
        up->len = 0;
        up->pending = 0;
@@ -785,6 +802,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        int err, is_udplite = IS_UDPLITE(sk);
        int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
        int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
+       struct sk_buff *skb;
 
        if (len > 0xFFFF)
                return -EMSGSIZE;
@@ -799,6 +817,8 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        ipc.opt = NULL;
        ipc.tx_flags = 0;
 
+       getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
+
        if (up->pending) {
                /*
                 * There are pending frames.
@@ -894,14 +914,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                                    .fl4_src = saddr,
                                    .fl4_tos = tos,
                                    .proto = sk->sk_protocol,
-                                   .flags = inet_sk_flowi_flags(sk),
+                                   .flags = (inet_sk_flowi_flags(sk) |
+                                             FLOWI_FLAG_CAN_SLEEP),
                                    .fl_ip_sport = inet->inet_sport,
-                                   .fl_ip_dport = dport };
+                                   .fl_ip_dport = dport
+               };
                struct net *net = sock_net(sk);
 
                security_sk_classify_flow(sk, &fl);
-               err = ip_route_output_flow(net, &rt, &fl, sk, 1);
-               if (err) {
+               rt = ip_route_output_flow(net, &fl, sk);
+               if (IS_ERR(rt)) {
+                       err = PTR_ERR(rt);
+                       rt = NULL;
                        if (err == -ENETUNREACH)
                                IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
                        goto out;
@@ -923,6 +947,17 @@ back_from_confirm:
        if (!ipc.addr)
                daddr = ipc.addr = rt->rt_dst;
 
+       /* Lockless fast path for the non-corking case. */
+       if (!corkreq) {
+               skb = ip_make_skb(sk, getfrag, msg->msg_iov, ulen,
+                                 sizeof(struct udphdr), &ipc, &rt,
+                                 msg->msg_flags);
+               err = PTR_ERR(skb);
+               if (skb && !IS_ERR(skb))
+                       err = udp_send_skb(skb, daddr, dport);
+               goto out;
+       }
+
        lock_sock(sk);
        if (unlikely(up->pending)) {
                /* The socket is already corked while preparing it. */
@@ -944,7 +979,6 @@ back_from_confirm:
 
 do_append_data:
        up->len += ulen;
-       getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
        err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
                        sizeof(struct udphdr), &ipc, &rt,
                        corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
@@ -2199,7 +2233,7 @@ int udp4_ufo_send_check(struct sk_buff *skb)
        return 0;
 }
 
-struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features)
+struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int mss;
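
The udp.c changes above split transmission into udp_send_skb() and add a lockless fast path: when the socket is neither corked nor sending with MSG_MORE, udp_sendmsg() builds the complete datagram with ip_make_skb() and passes it straight to udp_send_skb(), never taking lock_sock(). From userspace the two paths look like this; a hedged sketch assuming UDP_CORK is available via <netinet/udp.h>, with error handling omitted.

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/udp.h>	/* UDP_CORK; assumed to be exported here */

	static void send_two_ways(int fd, const struct sockaddr *dst, socklen_t dlen)
	{
		int on = 1, off = 0;

		/* Uncorked single write: takes the new ip_make_skb() fast path. */
		sendto(fd, "ping", 4, 0, dst, dlen);

		/* Corked writes: stay on the ip_append_data()/
		 * udp_push_pending_frames() path and leave as one datagram. */
		setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
		sendto(fd, "part1-", 6, 0, dst, dlen);
		sendto(fd, "part2", 5, 0, dst, dlen);
		setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
	}
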
index b057d40addec3ef2ffab44413421e502894fe60e..c70c42e7e77b8cb95a1be26deab0116f904f5fd9 100644 (file)
 static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
 
 static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
-                                         xfrm_address_t *saddr,
-                                         xfrm_address_t *daddr)
+                                         const xfrm_address_t *saddr,
+                                         const xfrm_address_t *daddr)
 {
        struct flowi fl = {
                .fl4_dst = daddr->a4,
                .fl4_tos = tos,
        };
-       struct dst_entry *dst;
        struct rtable *rt;
-       int err;
 
        if (saddr)
                fl.fl4_src = saddr->a4;
 
-       err = __ip_route_output_key(net, &rt, &fl);
-       dst = &rt->dst;
-       if (err)
-               dst = ERR_PTR(err);
-       return dst;
+       rt = __ip_route_output_key(net, &fl);
+       if (!IS_ERR(rt))
+               return &rt->dst;
+
+       return ERR_CAST(rt);
 }
 
 static int xfrm4_get_saddr(struct net *net,
@@ -56,7 +54,7 @@ static int xfrm4_get_saddr(struct net *net,
        return 0;
 }
 
-static int xfrm4_get_tos(struct flowi *fl)
+static int xfrm4_get_tos(const struct flowi *fl)
 {
        return IPTOS_RT_MASK & fl->fl4_tos; /* Strip ECN bits */
 }
@@ -68,11 +66,16 @@ static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
 }
 
 static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
-                         struct flowi *fl)
+                         const struct flowi *fl)
 {
        struct rtable *rt = (struct rtable *)xdst->route;
 
-       xdst->u.rt.fl = *fl;
+       rt->rt_key_dst = fl->fl4_dst;
+       rt->rt_key_src = fl->fl4_src;
+       rt->rt_tos = fl->fl4_tos;
+       rt->rt_iif = fl->iif;
+       rt->rt_oif = fl->oif;
+       rt->rt_mark = fl->mark;
 
        xdst->u.dst.dev = dev;
        dev_hold(dev);
@@ -196,8 +199,11 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
 {
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
 
+       dst_destroy_metrics_generic(dst);
+
        if (likely(xdst->u.rt.peer))
                inet_putpeer(xdst->u.rt.peer);
+
        xfrm_dst_destroy(xdst);
 }
 
@@ -215,6 +221,7 @@ static struct dst_ops xfrm4_dst_ops = {
        .protocol =             cpu_to_be16(ETH_P_IP),
        .gc =                   xfrm4_garbage_collect,
        .update_pmtu =          xfrm4_update_pmtu,
+       .cow_metrics =          dst_cow_metrics_generic,
        .destroy =              xfrm4_dst_destroy,
        .ifdown =               xfrm4_dst_ifdown,
        .local_out =            __ip_local_out,
@@ -230,6 +237,7 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
        .get_tos =              xfrm4_get_tos,
        .init_path =            xfrm4_init_path,
        .fill_dst =             xfrm4_fill_dst,
+       .blackhole_route =      ipv4_blackhole_route,
 };
 
 #ifdef CONFIG_SYSCTL
index 47947624eccc58fe7fcfcdbff19fb655a07ddedc..983eff248988e855798f24c9ca78119f24f96d8f 100644 (file)
@@ -21,7 +21,7 @@ static int xfrm4_init_flags(struct xfrm_state *x)
 }
 
 static void
-__xfrm4_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
+__xfrm4_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
 {
        sel->daddr.a4 = fl->fl4_dst;
        sel->saddr.a4 = fl->fl4_src;
@@ -37,8 +37,8 @@ __xfrm4_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
 }
 
 static void
-xfrm4_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
-                  xfrm_address_t *daddr, xfrm_address_t *saddr)
+xfrm4_init_temprop(struct xfrm_state *x, const struct xfrm_tmpl *tmpl,
+                  const xfrm_address_t *daddr, const xfrm_address_t *saddr)
 {
        x->id = tmpl->id;
        if (x->id.daddr.a4 == 0)
index fd6782e3a038a03b2a17f9da188ae1bc8d181f3f..3daaf3c7703c7354f13268aac9f8c267bb2d6ba1 100644 (file)
@@ -718,12 +718,9 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
        struct inet6_ifaddr *ifa, *ifn;
        struct inet6_dev *idev = ifp->idev;
        int state;
-       int hash;
        int deleted = 0, onlink = 0;
        unsigned long expires = jiffies;
 
-       hash = ipv6_addr_hash(&ifp->addr);
-
        spin_lock_bh(&ifp->state_lock);
        state = ifp->state;
        ifp->state = INET6_IFADDR_STATE_DEAD;
index 978e80e2c4a8a08e7731150d2a43a42fe9827e68..a88b2e9d25f1ab955935991b6d841dc59def9a28 100644 (file)
@@ -644,9 +644,8 @@ EXPORT_SYMBOL(inet6_unregister_protosw);
 
 int inet6_sk_rebuild_header(struct sock *sk)
 {
-       int err;
-       struct dst_entry *dst;
        struct ipv6_pinfo *np = inet6_sk(sk);
+       struct dst_entry *dst;
 
        dst = __sk_dst_check(sk, np->dst_cookie);
 
@@ -668,17 +667,11 @@ int inet6_sk_rebuild_header(struct sock *sk)
 
                final_p = fl6_update_dst(&fl, np->opt, &final);
 
-               err = ip6_dst_lookup(sk, &dst, &fl);
-               if (err) {
+               dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
+               if (IS_ERR(dst)) {
                        sk->sk_route_caps = 0;
-                       return err;
-               }
-               if (final_p)
-                       ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-               if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0) {
-                       sk->sk_err_soft = -err;
-                       return err;
+                       sk->sk_err_soft = -PTR_ERR(dst);
+                       return PTR_ERR(dst);
                }
 
                __ip6_dst_store(sk, dst, NULL, NULL);
@@ -772,7 +765,7 @@ out:
        return err;
 }
 
-static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
+static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, u32 features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct ipv6hdr *ipv6h;
index 320bdb877eed2ff61da25c6c9a1ac2f6cc00baac..be3a781c00855563fc29c21d2645dc869ef6233b 100644 (file)
@@ -162,18 +162,11 @@ ipv4_connected:
        opt = flowlabel ? flowlabel->opt : np->opt;
        final_p = fl6_update_dst(&fl, opt, &final);
 
-       err = ip6_dst_lookup(sk, &dst, &fl);
-       if (err)
+       dst = ip6_dst_lookup_flow(sk, &fl, final_p, true);
+       err = 0;
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
                goto out;
-       if (final_p)
-               ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-       err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
-       if (err < 0) {
-               if (err == -EREMOTE)
-                       err = ip6_dst_blackhole(sk, &dst, &fl);
-               if (err < 0)
-                       goto out;
        }
 
        /* source address lookup done in ip6_dst_lookup */
index 03e62f94ff8efb35a5c865322d72e623b539eeef..55665956b3a821af682f0460d5e19e682567f9ae 100644 (file)
@@ -157,20 +157,20 @@ static int is_ineligible(struct sk_buff *skb)
 /*
  * Check the ICMP output rate limit
  */
-static inline int icmpv6_xrlim_allow(struct sock *sk, u8 type,
-                                    struct flowi *fl)
+static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
+                                     struct flowi *fl)
 {
        struct dst_entry *dst;
        struct net *net = sock_net(sk);
-       int res = 0;
+       bool res = false;
 
        /* Informational messages are not limited. */
        if (type & ICMPV6_INFOMSG_MASK)
-               return 1;
+               return true;
 
        /* Do not limit pmtu discovery, it would break it. */
        if (type == ICMPV6_PKT_TOOBIG)
-               return 1;
+               return true;
 
        /*
         * Look up the output route.
@@ -182,7 +182,7 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, u8 type,
                IP6_INC_STATS(net, ip6_dst_idev(dst),
                              IPSTATS_MIB_OUTNOROUTES);
        } else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
-               res = 1;
+               res = true;
        } else {
                struct rt6_info *rt = (struct rt6_info *)dst;
                int tmo = net->ipv6.sysctl.icmpv6_time;
@@ -191,7 +191,9 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, u8 type,
                if (rt->rt6i_dst.plen < 128)
                        tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
 
-               res = xrlim_allow(dst, tmo);
+               if (!rt->rt6i_peer)
+                       rt6_bind_peer(rt, 1);
+               res = inet_peer_xrlim_allow(rt->rt6i_peer, tmo);
        }
        dst_release(dst);
        return res;
@@ -298,6 +300,68 @@ static void mip6_addr_swap(struct sk_buff *skb)
 static inline void mip6_addr_swap(struct sk_buff *skb) {}
 #endif
 
+static struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
+                                            struct sock *sk, struct flowi *fl)
+{
+       struct dst_entry *dst, *dst2;
+       struct flowi fl2;
+       int err;
+
+       err = ip6_dst_lookup(sk, &dst, fl);
+       if (err)
+               return ERR_PTR(err);
+
+       /*
+        * We won't send icmp if the destination is known
+        * anycast.
+        */
+       if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
+               LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n");
+               dst_release(dst);
+               return ERR_PTR(-EINVAL);
+       }
+
+       /* No need to clone since we're just using its address. */
+       dst2 = dst;
+
+       dst = xfrm_lookup(net, dst, fl, sk, 0);
+       if (!IS_ERR(dst)) {
+               if (dst != dst2)
+                       return dst;
+       } else {
+               if (PTR_ERR(dst) == -EPERM)
+                       dst = NULL;
+               else
+                       return dst;
+       }
+
+       err = xfrm_decode_session_reverse(skb, &fl2, AF_INET6);
+       if (err)
+               goto relookup_failed;
+
+       err = ip6_dst_lookup(sk, &dst2, &fl2);
+       if (err)
+               goto relookup_failed;
+
+       dst2 = xfrm_lookup(net, dst2, &fl2, sk, XFRM_LOOKUP_ICMP);
+       if (!IS_ERR(dst2)) {
+               dst_release(dst);
+               dst = dst2;
+       } else {
+               err = PTR_ERR(dst2);
+               if (err == -EPERM) {
+                       dst_release(dst);
+                       return dst2;
+               } else
+                       goto relookup_failed;
+       }
+
+relookup_failed:
+       if (dst)
+               return dst;
+       return ERR_PTR(err);
+}
+
 /*
  *     Send an ICMP message in response to a packet in error
  */
@@ -310,10 +374,8 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        struct ipv6_pinfo *np;
        struct in6_addr *saddr = NULL;
        struct dst_entry *dst;
-       struct dst_entry *dst2;
        struct icmp6hdr tmp_hdr;
        struct flowi fl;
-       struct flowi fl2;
        struct icmpv6_msg msg;
        int iif = 0;
        int addr_type = 0;
@@ -406,57 +468,10 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
                fl.oif = np->mcast_oif;
 
-       err = ip6_dst_lookup(sk, &dst, &fl);
-       if (err)
-               goto out;
-
-       /*
-        * We won't send icmp if the destination is known
-        * anycast.
-        */
-       if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
-               LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n");
-               goto out_dst_release;
-       }
-
-       /* No need to clone since we're just using its address. */
-       dst2 = dst;
-
-       err = xfrm_lookup(net, &dst, &fl, sk, 0);
-       switch (err) {
-       case 0:
-               if (dst != dst2)
-                       goto route_done;
-               break;
-       case -EPERM:
-               dst = NULL;
-               break;
-       default:
+       dst = icmpv6_route_lookup(net, skb, sk, &fl);
+       if (IS_ERR(dst))
                goto out;
-       }
-
-       if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6))
-               goto relookup_failed;
-
-       if (ip6_dst_lookup(sk, &dst2, &fl2))
-               goto relookup_failed;
-
-       err = xfrm_lookup(net, &dst2, &fl2, sk, XFRM_LOOKUP_ICMP);
-       switch (err) {
-       case 0:
-               dst_release(dst);
-               dst = dst2;
-               break;
-       case -EPERM:
-               goto out_dst_release;
-       default:
-relookup_failed:
-               if (!dst)
-                       goto out;
-               break;
-       }
 
-route_done:
        if (ipv6_addr_is_multicast(&fl.fl6_dst))
                hlimit = np->mcast_hops;
        else
@@ -543,7 +558,8 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        err = ip6_dst_lookup(sk, &dst, &fl);
        if (err)
                goto out;
-       if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0)
+       dst = xfrm_lookup(net, dst, &fl, sk, 0);
+       if (IS_ERR(dst))
                goto out;
 
        if (ipv6_addr_is_multicast(&fl.fl6_dst))
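
The icmpv6 hunks above track the new xfrm_lookup() convention used throughout this series: instead of filling a struct dst_entry ** and returning 0 or a negative errno (with -EREMOTE selecting the blackhole route), it now takes the looked-up dst and returns either the transformed dst or an ERR_PTR. A minimal, hypothetical wrapper in the new style; ownership details beyond what the hunks show are not implied.

	/* Hypothetical helper; producing the initial dst is elided. */
	static struct dst_entry *lookup_with_xfrm(struct net *net,
						  struct dst_entry *dst,
						  struct flowi *fl,
						  struct sock *sk)
	{
		dst = xfrm_lookup(net, dst, fl, sk, 0);
		if (IS_ERR(dst))
			return NULL;		/* errno is available via PTR_ERR() */

		return dst;			/* released later with dst_release() */
	}
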
index d144e629d2b43d1091800df484ca98ff590cc71d..d687e139733392945b33b74b9ab4ce66e7d3148a 100644 (file)
@@ -74,13 +74,8 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
        fl.fl_ip_sport = inet_rsk(req)->loc_port;
        security_req_classify_flow(req, &fl);
 
-       if (ip6_dst_lookup(sk, &dst, &fl))
-               return NULL;
-
-       if (final_p)
-               ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-       if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
+       dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
+       if (IS_ERR(dst))
                return NULL;
 
        return dst;
@@ -234,21 +229,13 @@ int inet6_csk_xmit(struct sk_buff *skb)
        dst = __inet6_csk_dst_check(sk, np->dst_cookie);
 
        if (dst == NULL) {
-               int err = ip6_dst_lookup(sk, &dst, &fl);
-
-               if (err) {
-                       sk->sk_err_soft = -err;
-                       kfree_skb(skb);
-                       return err;
-               }
-
-               if (final_p)
-                       ipv6_addr_copy(&fl.fl6_dst, final_p);
+               dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
 
-               if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0) {
+               if (IS_ERR(dst)) {
+                       sk->sk_err_soft = -PTR_ERR(dst);
                        sk->sk_route_caps = 0;
                        kfree_skb(skb);
-                       return err;
+                       return PTR_ERR(dst);
                }
 
                __inet6_csk_dst_store(sk, dst, NULL, NULL);
index 5f8d242be3f3016592678d09e9cf4654be9234a7..adaffaf84555d21ddb6a4dd6f5ed2ef156b9d25f 100644 (file)
@@ -274,13 +274,10 @@ int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6hdr *hdr;
-       int totlen;
 
        skb->protocol = htons(ETH_P_IPV6);
        skb->dev = dev;
 
-       totlen = len + sizeof(struct ipv6hdr);
-
        skb_reset_network_header(skb);
        skb_put(skb, sizeof(struct ipv6hdr));
        hdr = ipv6_hdr(skb);
@@ -479,10 +476,13 @@ int ip6_forward(struct sk_buff *skb)
                else
                        target = &hdr->daddr;
 
+               if (!rt->rt6i_peer)
+                       rt6_bind_peer(rt, 1);
+
                /* Limit redirects both by destination (here)
                   and by source (inside ndisc_send_redirect)
                 */
-               if (xrlim_allow(dst, 1*HZ))
+               if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
                        ndisc_send_redirect(skb, n, target);
        } else {
                int addrtype = ipv6_addr_type(&hdr->saddr);
@@ -1002,29 +1002,71 @@ int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
 EXPORT_SYMBOL_GPL(ip6_dst_lookup);
 
 /**
- *     ip6_sk_dst_lookup - perform socket cached route lookup on flow
+ *     ip6_dst_lookup_flow - perform route lookup on flow with ipsec
+ *     @sk: socket which provides route info
+ *     @fl: flow to lookup
+ *     @final_dst: final destination address for ipsec lookup
+ *     @can_sleep: we are in a sleepable context
+ *
+ *     This function performs a route lookup on the given flow.
+ *
+ *     It returns a valid dst pointer on success, or a pointer encoded
+ *     error code.
+ */
+struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi *fl,
+                                     const struct in6_addr *final_dst,
+                                     bool can_sleep)
+{
+       struct dst_entry *dst = NULL;
+       int err;
+
+       err = ip6_dst_lookup_tail(sk, &dst, fl);
+       if (err)
+               return ERR_PTR(err);
+       if (final_dst)
+               ipv6_addr_copy(&fl->fl6_dst, final_dst);
+       if (can_sleep)
+               fl->flags |= FLOWI_FLAG_CAN_SLEEP;
+
+       return xfrm_lookup(sock_net(sk), dst, fl, sk, 0);
+}
+EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
+
+/**
+ *     ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
  *     @sk: socket which provides the dst cache and route info
- *     @dst: pointer to dst_entry * for result
  *     @fl: flow to lookup
+ *     @final_dst: final destination address for ipsec lookup
+ *     @can_sleep: we are in a sleepable context
  *
  *     This function performs a route lookup on the given flow with the
  *     possibility of using the cached route in the socket if it is valid.
  *     It will take the socket dst lock when operating on the dst cache.
  *     As a result, this function can only be used in process context.
  *
- *     It returns zero on success, or a standard errno code on error.
+ *     It returns a valid dst pointer on success, or a pointer encoded
+ *     error code.
  */
-int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
+struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi *fl,
+                                        const struct in6_addr *final_dst,
+                                        bool can_sleep)
 {
-       *dst = NULL;
-       if (sk) {
-               *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
-               *dst = ip6_sk_dst_check(sk, *dst, fl);
-       }
+       struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
+       int err;
 
-       return ip6_dst_lookup_tail(sk, dst, fl);
+       dst = ip6_sk_dst_check(sk, dst, fl);
+
+       err = ip6_dst_lookup_tail(sk, &dst, fl);
+       if (err)
+               return ERR_PTR(err);
+       if (final_dst)
+               ipv6_addr_copy(&fl->fl6_dst, final_dst);
+       if (can_sleep)
+               fl->flags |= FLOWI_FLAG_CAN_SLEEP;
+
+       return xfrm_lookup(sock_net(sk), dst, fl, sk, 0);
 }
-EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup);
+EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
 
 static inline int ip6_ufo_append_data(struct sock *sk,
                        int getfrag(void *from, char *to, int offset, int len,
@@ -1061,7 +1103,6 @@ static inline int ip6_ufo_append_data(struct sock *sk,
 
                skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;
-               sk->sk_sndmsg_off = 0;
        }
 
        err = skb_append_datato_frags(sk,skb, getfrag, from,
@@ -1118,6 +1159,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
        int err;
        int offset = 0;
        int csummode = CHECKSUM_NONE;
+       __u8 tx_flags = 0;
 
        if (flags&MSG_PROBE)
                return 0;
@@ -1202,6 +1244,13 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                }
        }
 
+       /* For UDP, check if TX timestamp is enabled */
+       if (sk->sk_type == SOCK_DGRAM) {
+               err = sock_tx_timestamp(sk, &tx_flags);
+               if (err)
+                       goto error;
+       }
+
        /*
         * Let's try using as much space as possible.
         * Use MTU if total length of the message fits into the MTU.
@@ -1306,6 +1355,12 @@ alloc_new_skb:
                                                           sk->sk_allocation);
                                if (unlikely(skb == NULL))
                                        err = -ENOBUFS;
+                               else {
+                                       /* Only the initial fragment
+                                        * is time stamped.
+                                        */
+                                       tx_flags = 0;
+                               }
                        }
                        if (skb == NULL)
                                goto error;
@@ -1317,6 +1372,9 @@ alloc_new_skb:
                        /* reserve for fragmentation */
                        skb_reserve(skb, hh_len+sizeof(struct frag_hdr));
 
+                       if (sk->sk_type == SOCK_DGRAM)
+                               skb_shinfo(skb)->tx_flags = tx_flags;
+
                        /*
                         *      Find where to start putting bytes
                         */
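
The ip6_output.c hunks above replace the old int-returning lookup helpers (ip6_dst_lookup()/ip6_sk_dst_lookup() plus a separate xfrm_lookup() step) with ip6_dst_lookup_flow()/ip6_sk_dst_lookup_flow(), which hand back either a usable dst_entry or an errno encoded in the pointer itself. A minimal userspace sketch of that pointer-encoded-error convention follows; the toy lookup helper and its names are illustrative only, not kernel API:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative userspace re-creation of the pointer-encoded errno idiom. */
    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long err)      { return (void *)err; }
    static inline long  PTR_ERR(const void *p) { return (long)p; }
    static inline int   IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    struct dst { int id; };

    /* Toy stand-in for a lookup helper: returns a dst or an ERR_PTR(-errno). */
    static struct dst *lookup_flow(int fail)
    {
            struct dst *d;

            if (fail)
                    return ERR_PTR(-ENETUNREACH);
            d = malloc(sizeof(*d));
            if (!d)
                    return ERR_PTR(-ENOMEM);
            d->id = 42;
            return d;
    }

    int main(void)
    {
            struct dst *dst = lookup_flow(1);

            if (IS_ERR(dst)) {      /* one test replaces the old err + output-arg pair */
                    printf("lookup failed: %ld\n", PTR_ERR(dst));
                    dst = lookup_flow(0);
            }
            if (!IS_ERR(dst)) {
                    printf("got dst %d\n", dst->id);
                    free(dst);
            }
            return 0;
    }

Callers then need a single IS_ERR() test instead of checking a return code and an output argument separately, which is the shape the converted call sites below take.
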
index e528a42a52be2114e2e2df72519802faa4ce0aab..ea8d5e8128a9d3f7675b560654239af0541ccb16 100644 (file)
@@ -582,7 +582,8 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        fl.fl4_dst = eiph->saddr;
        fl.fl4_tos = RT_TOS(eiph->tos);
        fl.proto = IPPROTO_IPIP;
-       if (ip_route_output_key(dev_net(skb->dev), &rt, &fl))
+       rt = ip_route_output_key(dev_net(skb->dev), &fl);
+       if (IS_ERR(rt))
                goto out;
 
        skb2->dev = rt->dst.dev;
@@ -594,12 +595,14 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                fl.fl4_dst = eiph->daddr;
                fl.fl4_src = eiph->saddr;
                fl.fl4_tos = eiph->tos;
-               if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) ||
+               rt = ip_route_output_key(dev_net(skb->dev), &fl);
+               if (IS_ERR(rt) ||
                    rt->dst.dev->type != ARPHRD_TUNNEL) {
-                       ip_rt_put(rt);
+                       if (!IS_ERR(rt))
+                               ip_rt_put(rt);
                        goto out;
                }
-               skb_dst_set(skb2, (struct dst_entry *)rt);
+               skb_dst_set(skb2, &rt->dst);
        } else {
                ip_rt_put(rt);
                if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
@@ -904,8 +907,14 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
        else {
                dst = ip6_route_output(net, NULL, fl);
 
-               if (dst->error || xfrm_lookup(net, &dst, fl, NULL, 0) < 0)
+               if (dst->error)
                        goto tx_err_link_failure;
+               dst = xfrm_lookup(net, dst, fl, NULL, 0);
+               if (IS_ERR(dst)) {
+                       err = PTR_ERR(dst);
+                       dst = NULL;
+                       goto tx_err_link_failure;
+               }
        }
 
        tdev = dst->dev;
index 0e1d53bcf1e03626061aa75011dd7eccc6a89d6e..618f67ccda315ee5af0e61e6ec1ae4af34898233 100644 (file)
@@ -1039,7 +1039,6 @@ static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
 
        while((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
                if (ipv6_hdr(skb)->version == 0) {
-                       int err;
                        struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
 
                        if (__ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
@@ -1050,7 +1049,7 @@ static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
                                skb_trim(skb, nlh->nlmsg_len);
                                ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
                        }
-                       err = rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
+                       rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
                } else
                        ip6_mr_forward(net, mrt, skb, c);
        }
index 49f986d626a09370fe8f442326195f8f2dcc776f..f2c9b6930ffc5d11b22222fc921555b409c0250c 100644 (file)
@@ -319,7 +319,6 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
 {
        struct in6_addr *source, *group;
        struct ipv6_mc_socklist *pmc;
-       struct net_device *dev;
        struct inet6_dev *idev;
        struct ipv6_pinfo *inet6 = inet6_sk(sk);
        struct ip6_sf_socklist *psl;
@@ -341,7 +340,6 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
                rcu_read_unlock();
                return -ENODEV;
        }
-       dev = idev->dev;
 
        err = -EADDRNOTAVAIL;
 
@@ -455,7 +453,6 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
 {
        struct in6_addr *group;
        struct ipv6_mc_socklist *pmc;
-       struct net_device *dev;
        struct inet6_dev *idev;
        struct ipv6_pinfo *inet6 = inet6_sk(sk);
        struct ip6_sf_socklist *newpsl, *psl;
@@ -478,7 +475,6 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
                rcu_read_unlock();
                return -ENODEV;
        }
-       dev = idev->dev;
 
        err = 0;
 
@@ -549,7 +545,6 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
        struct in6_addr *group;
        struct ipv6_mc_socklist *pmc;
        struct inet6_dev *idev;
-       struct net_device *dev;
        struct ipv6_pinfo *inet6 = inet6_sk(sk);
        struct ip6_sf_socklist *psl;
        struct net *net = sock_net(sk);
@@ -566,7 +561,6 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
                rcu_read_unlock();
                return -ENODEV;
        }
-       dev = idev->dev;
 
        err = -EADDRNOTAVAIL;
        /*
@@ -1429,7 +1423,12 @@ static void mld_sendpack(struct sk_buff *skb)
                         &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
                         skb->dev->ifindex);
 
-       err = xfrm_lookup(net, &dst, &fl, NULL, 0);
+       dst = xfrm_lookup(net, dst, &fl, NULL, 0);
+       err = 0;
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
+               dst = NULL;
+       }
        skb_dst_set(skb, dst);
        if (err)
                goto err_out;
@@ -1796,9 +1795,11 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
                         &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
                         skb->dev->ifindex);
 
-       err = xfrm_lookup(net, &dst, &fl, NULL, 0);
-       if (err)
+       dst = xfrm_lookup(net, dst, &fl, NULL, 0);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
                goto err_out;
+       }
 
        skb_dst_set(skb, dst);
        err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
index d6e9599d0705a5b79749a0566cbd97dfaa9993c0..f3e3ca938a546179dcabab931e6b928752b0356e 100644 (file)
@@ -203,7 +203,8 @@ static inline int mip6_report_rl_allow(struct timeval *stamp,
        return allow;
 }
 
-static int mip6_destopt_reject(struct xfrm_state *x, struct sk_buff *skb, struct flowi *fl)
+static int mip6_destopt_reject(struct xfrm_state *x, struct sk_buff *skb,
+                              const struct flowi *fl)
 {
        struct net *net = xs_net(x);
        struct inet6_skb_parm *opt = (struct inet6_skb_parm *)skb->cb;
index 2342545a5ee9bfe125ff3030bac07cafcb24a00b..9360d3be94f006a09b33a53a1bc14e466db4a50a 100644 (file)
@@ -529,8 +529,8 @@ void ndisc_send_skb(struct sk_buff *skb,
                return;
        }
 
-       err = xfrm_lookup(net, &dst, &fl, NULL, 0);
-       if (err < 0) {
+       dst = xfrm_lookup(net, dst, &fl, NULL, 0);
+       if (IS_ERR(dst)) {
                kfree_skb(skb);
                return;
        }
@@ -1542,8 +1542,8 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
        if (dst == NULL)
                return;
 
-       err = xfrm_lookup(net, &dst, &fl, NULL, 0);
-       if (err)
+       dst = xfrm_lookup(net, dst, &fl, NULL, 0);
+       if (IS_ERR(dst))
                return;
 
        rt = (struct rt6_info *) dst;
@@ -1553,7 +1553,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
                           "ICMPv6 Redirect: destination is not a neighbour.\n");
                goto release;
        }
-       if (!xrlim_allow(dst, 1*HZ))
+       if (!rt->rt6i_peer)
+               rt6_bind_peer(rt, 1);
+       if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
                goto release;
 
        if (dev->addr_len) {
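
ndisc_send_redirect() above now rate-limits redirects against the destination's inet_peer entry rather than against the dst. The sketch below is a generic per-peer "at most one event per interval" check in the same spirit; it is an illustration with made-up names, not the kernel's inet_peer_xrlim_allow():

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* One rate-limit state per peer (the patch moves equivalent state into
     * struct inet_peer). */
    struct peer {
            time_t next_allowed;    /* earliest time the next event may be sent */
    };

    /* Allow at most one event per 'interval' seconds for this peer.
     * Callers send only when this returns true, i.e. they bail out on !allow. */
    static bool peer_rate_allow(struct peer *p, int interval)
    {
            time_t now = time(NULL);

            if (now < p->next_allowed)
                    return false;           /* still inside the quiet period */
            p->next_allowed = now + interval;
            return true;
    }

    int main(void)
    {
            struct peer p = { .next_allowed = 0 };

            for (int i = 0; i < 3; i++)
                    printf("attempt %d: %s\n", i,
                           peer_rate_allow(&p, 1) ? "send redirect" : "rate limited");
            return 0;
    }
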
index 35915e8617f08ccf274855378bce1d4705aea6b0..8d74116ae27de1c946a94931c536a959b48efcbc 100644 (file)
@@ -39,7 +39,8 @@ int ip6_route_me_harder(struct sk_buff *skb)
        if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
            xfrm_decode_session(skb, &fl, AF_INET6) == 0) {
                skb_dst_set(skb, NULL);
-               if (xfrm_lookup(net, &dst, &fl, skb->sk, 0))
+               dst = xfrm_lookup(net, dst, &fl, skb->sk, 0);
+               if (IS_ERR(dst))
                        return -1;
                skb_dst_set(skb, dst);
        }
index 7d227c644f7275266624c44aaa305b6c35165b2e..47b7b8df7fac8578f2c226a7989c150464353414 100644 (file)
@@ -1076,6 +1076,7 @@ static int compat_table_info(const struct xt_table_info *info,
        memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
        newinfo->initial_entries = 0;
        loc_cpu_entry = info->entries[raw_smp_processor_id()];
+       xt_compat_init_offsets(AF_INET6, info->number);
        xt_entry_foreach(iter, loc_cpu_entry, info->size) {
                ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
                if (ret != 0)
@@ -1679,6 +1680,7 @@ translate_compat_table(struct net *net,
        duprintf("translate_compat_table: size %u\n", info->size);
        j = 0;
        xt_compat_lock(AF_INET6);
+       xt_compat_init_offsets(AF_INET6, number);
        /* Walk through entries, checking offsets. */
        xt_entry_foreach(iter0, entry0, total_size) {
                ret = check_compat_entry_size_and_hooks(iter0, info, &size,
index de338037a7362cf55255b35f8653d21a219b6915..e6af8d72f26be3cfb509b45bffb9198a151ae395 100644 (file)
@@ -452,8 +452,7 @@ ip6t_log_packet(u_int8_t pf,
               in ? in->name : "",
               out ? out->name : "");
 
-       /* MAC logging for input path only. */
-       if (in && !out)
+       if (in != NULL)
                dump_mac_header(m, loginfo, skb);
 
        dump_packet(m, loginfo, skb, skb_network_offset(skb), 1);
index bf998feac14e04251542c430f3888ab3d05a2fb3..91f6a61cefab102285cf7f98305f8a783c0f4839 100644 (file)
@@ -101,7 +101,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
                dst_release(dst);
                return;
        }
-       if (xfrm_lookup(net, &dst, &fl, NULL, 0))
+       dst = xfrm_lookup(net, dst, &fl, NULL, 0);
+       if (IS_ERR(dst))
                return;
 
        hh_len = (dst->dev->hard_header_len + 15)&~15;
index 79d43aa8fa8da80b405689bf7c751d7a9cc89ad4..085727263812e7f589ccd50f2c6510e3c33169c0 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/netfilter_ipv6.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
 
 
 struct nf_ct_frag6_skb_cb
@@ -73,7 +74,7 @@ static struct inet_frags nf_frags;
 static struct netns_frags nf_init_frags;
 
 #ifdef CONFIG_SYSCTL
-struct ctl_table nf_ct_frag6_sysctl_table[] = {
+static struct ctl_table nf_ct_frag6_sysctl_table[] = {
        {
                .procname       = "nf_conntrack_frag6_timeout",
                .data           = &nf_init_frags.timeout,
index c5b0915d106bfe788cdf6d691e1c6c371831c86c..dc29b07caf42a5ea09d59f40db53480f965cd463 100644 (file)
@@ -124,18 +124,18 @@ static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
 }
 
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
-static int (*mh_filter)(struct sock *sock, struct sk_buff *skb);
+typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb);
 
-int rawv6_mh_filter_register(int (*filter)(struct sock *sock,
-                                          struct sk_buff *skb))
+static mh_filter_t __rcu *mh_filter __read_mostly;
+
+int rawv6_mh_filter_register(mh_filter_t filter)
 {
        rcu_assign_pointer(mh_filter, filter);
        return 0;
 }
 EXPORT_SYMBOL(rawv6_mh_filter_register);
 
-int rawv6_mh_filter_unregister(int (*filter)(struct sock *sock,
-                                            struct sk_buff *skb))
+int rawv6_mh_filter_unregister(mh_filter_t filter)
 {
        rcu_assign_pointer(mh_filter, NULL);
        synchronize_rcu();
@@ -193,10 +193,10 @@ static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
                         * policy is placed in rawv6_rcv() because it is
                         * required for each socket.
                         */
-                       int (*filter)(struct sock *sock, struct sk_buff *skb);
+                       mh_filter_t *filter;
 
                        filter = rcu_dereference(mh_filter);
-                       filtered = filter ? filter(sk, skb) : 0;
+                       filtered = filter ? (*filter)(sk, skb) : 0;
                        break;
                }
 #endif
@@ -856,20 +856,11 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
                fl.oif = np->mcast_oif;
        security_sk_classify_flow(sk, &fl);
 
-       err = ip6_dst_lookup(sk, &dst, &fl);
-       if (err)
+       dst = ip6_dst_lookup_flow(sk, &fl, final_p, true);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
                goto out;
-       if (final_p)
-               ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-       err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
-       if (err < 0) {
-               if (err == -EREMOTE)
-                       err = ip6_dst_blackhole(sk, &dst, &fl);
-               if (err < 0)
-                       goto out;
        }
-
        if (hlimit < 0) {
                if (ipv6_addr_is_multicast(&fl.fl6_dst))
                        hlimit = np->mcast_hops;
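
The raw socket hunk above turns the mobility-header filter hook into an __rcu-annotated function pointer that is published with rcu_assign_pointer() and read with rcu_dereference(). A rough userspace analogue of that publish/consume pattern, using C11 release/acquire atomics instead of RCU (an analogy only, not the kernel API):

    #include <stdatomic.h>
    #include <stdio.h>

    typedef int filter_fn(int pkt);

    /* The published hook; NULL means "no filter registered".
     * Stands in for the kernel's __rcu-annotated mh_filter pointer. */
    static _Atomic(filter_fn *) filter_hook = NULL;

    static int drop_odd(int pkt) { return pkt & 1; }

    static void register_filter(filter_fn *f)
    {
            /* release: the callee is fully set up before the pointer becomes visible */
            atomic_store_explicit(&filter_hook, f, memory_order_release);
    }

    static int run_filter(int pkt)
    {
            /* acquire pairs with the release store in register_filter() */
            filter_fn *f = atomic_load_explicit(&filter_hook, memory_order_acquire);

            return f ? (*f)(pkt) : 0;       /* same shape as "filter ? (*filter)(...) : 0" */
    }

    int main(void)
    {
            printf("before registration: %d\n", run_filter(3));
            register_filter(drop_odd);
            printf("after registration:  %d\n", run_filter(3));
            return 0;
    }
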
index e7db7014e89f949dc5683b104d3ccf6afb797d5f..001276055a6b7bcbb52aa545f0c0b767c9f29787 100644 (file)
@@ -97,6 +97,36 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
                                           struct in6_addr *gwaddr, int ifindex);
 #endif
 
+static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
+{
+       struct rt6_info *rt = (struct rt6_info *) dst;
+       struct inet_peer *peer;
+       u32 *p = NULL;
+
+       if (!rt->rt6i_peer)
+               rt6_bind_peer(rt, 1);
+
+       peer = rt->rt6i_peer;
+       if (peer) {
+               u32 *old_p = __DST_METRICS_PTR(old);
+               unsigned long prev, new;
+
+               p = peer->metrics;
+               if (inet_metrics_new(peer))
+                       memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+
+               new = (unsigned long) p;
+               prev = cmpxchg(&dst->_metrics, old, new);
+
+               if (prev != old) {
+                       p = __DST_METRICS_PTR(prev);
+                       if (prev & DST_METRICS_READ_ONLY)
+                               p = NULL;
+               }
+       }
+       return p;
+}
+
 static struct dst_ops ip6_dst_ops_template = {
        .family                 =       AF_INET6,
        .protocol               =       cpu_to_be16(ETH_P_IPV6),
@@ -105,6 +135,7 @@ static struct dst_ops ip6_dst_ops_template = {
        .check                  =       ip6_dst_check,
        .default_advmss         =       ip6_default_advmss,
        .default_mtu            =       ip6_default_mtu,
+       .cow_metrics            =       ipv6_cow_metrics,
        .destroy                =       ip6_dst_destroy,
        .ifdown                 =       ip6_dst_ifdown,
        .negative_advice        =       ip6_negative_advice,
@@ -132,6 +163,10 @@ static struct dst_ops ip6_dst_blackhole_ops = {
        .update_pmtu            =       ip6_rt_blackhole_update_pmtu,
 };
 
+static const u32 ip6_template_metrics[RTAX_MAX] = {
+       [RTAX_HOPLIMIT - 1] = 255,
+};
+
 static struct rt6_info ip6_null_entry_template = {
        .dst = {
                .__refcnt       = ATOMIC_INIT(1),
@@ -187,7 +222,7 @@ static struct rt6_info ip6_blk_hole_entry_template = {
 /* allocate dst with ip6_dst_ops */
 static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops)
 {
-       return (struct rt6_info *)dst_alloc(ops);
+       return (struct rt6_info *)dst_alloc(ops, 0);
 }
 
 static void ip6_dst_destroy(struct dst_entry *dst)
@@ -206,6 +241,13 @@ static void ip6_dst_destroy(struct dst_entry *dst)
        }
 }
 
+static atomic_t __rt6_peer_genid = ATOMIC_INIT(0);
+
+static u32 rt6_peer_genid(void)
+{
+       return atomic_read(&__rt6_peer_genid);
+}
+
 void rt6_bind_peer(struct rt6_info *rt, int create)
 {
        struct inet_peer *peer;
@@ -213,6 +255,8 @@ void rt6_bind_peer(struct rt6_info *rt, int create)
        peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
        if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
                inet_putpeer(peer);
+       else
+               rt->rt6i_peer_genid = rt6_peer_genid();
 }
 
 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -828,17 +872,15 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
 
 EXPORT_SYMBOL(ip6_route_output);
 
-int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl)
+struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
 {
-       struct rt6_info *ort = (struct rt6_info *) *dstp;
-       struct rt6_info *rt = (struct rt6_info *)
-               dst_alloc(&ip6_dst_blackhole_ops);
+       struct rt6_info *rt = dst_alloc(&ip6_dst_blackhole_ops, 1);
+       struct rt6_info *ort = (struct rt6_info *) dst_orig;
        struct dst_entry *new = NULL;
 
        if (rt) {
                new = &rt->dst;
 
-               atomic_set(&new->__refcnt, 1);
                new->__use = 1;
                new->input = dst_discard;
                new->output = dst_discard;
@@ -864,11 +906,9 @@ int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl
                dst_free(new);
        }
 
-       dst_release(*dstp);
-       *dstp = new;
-       return new ? 0 : -ENOMEM;
+       dst_release(dst_orig);
+       return new ? new : ERR_PTR(-ENOMEM);
 }
-EXPORT_SYMBOL_GPL(ip6_dst_blackhole);
 
 /*
  *     Destination cache support functions
@@ -880,9 +920,14 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
 
        rt = (struct rt6_info *) dst;
 
-       if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
+       if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) {
+               if (rt->rt6i_peer_genid != rt6_peer_genid()) {
+                       if (!rt->rt6i_peer)
+                               rt6_bind_peer(rt, 0);
+                       rt->rt6i_peer_genid = rt6_peer_genid();
+               }
                return dst;
-
+       }
        return NULL;
 }
 
@@ -933,7 +978,6 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
                        dst_metric_set(dst, RTAX_FEATURES, features);
                }
                dst_metric_set(dst, RTAX_MTU, mtu);
-               call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
        }
 }
 
@@ -1030,11 +1074,9 @@ out:
 
 int icmp6_dst_gc(void)
 {
-       struct dst_entry *dst, *next, **pprev;
+       struct dst_entry *dst, **pprev;
        int more = 0;
 
-       next = NULL;
-
        spin_lock_bh(&icmp6_dst_lock);
        pprev = &icmp6_dst_gc_list;
 
@@ -1982,12 +2024,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
        if (IS_ERR(neigh)) {
                dst_free(&rt->dst);
 
-               /* We are casting this because that is the return
-                * value type.  But an errno encoded pointer is the
-                * same regardless of the underlying pointer type,
-                * and that's what we are returning.  So this is OK.
-                */
-               return (struct rt6_info *) neigh;
+               return ERR_CAST(neigh);
        }
        rt->rt6i_nexthop = neigh;
 
@@ -2689,7 +2726,8 @@ static int __net_init ip6_route_net_init(struct net *net)
        net->ipv6.ip6_null_entry->dst.path =
                (struct dst_entry *)net->ipv6.ip6_null_entry;
        net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
-       dst_metric_set(&net->ipv6.ip6_null_entry->dst, RTAX_HOPLIMIT, 255);
+       dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
+                        ip6_template_metrics, true);
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
        net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
@@ -2700,7 +2738,8 @@ static int __net_init ip6_route_net_init(struct net *net)
        net->ipv6.ip6_prohibit_entry->dst.path =
                (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
        net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
-       dst_metric_set(&net->ipv6.ip6_prohibit_entry->dst, RTAX_HOPLIMIT, 255);
+       dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
+                        ip6_template_metrics, true);
 
        net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
                                               sizeof(*net->ipv6.ip6_blk_hole_entry),
@@ -2710,7 +2749,8 @@ static int __net_init ip6_route_net_init(struct net *net)
        net->ipv6.ip6_blk_hole_entry->dst.path =
                (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
        net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
-       dst_metric_set(&net->ipv6.ip6_blk_hole_entry->dst, RTAX_HOPLIMIT, 255);
+       dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
+                        ip6_template_metrics, true);
 #endif
 
        net->ipv6.sysctl.flush_delay = 0;
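
ipv6_cow_metrics() above installs a writable per-peer metrics array into the dst with a single cmpxchg(), and simply adopts whatever pointer won if another CPU raced ahead. Below is a userspace sketch of that "install once, lose gracefully" copy-on-write step using C11 compare-and-swap; the names are invented, and the real code additionally keeps a read-only flag in the low bits of the metrics word:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <string.h>

    #define NMETRICS 4

    /* Read-only template values shared until some path needs to write them. */
    static unsigned int default_metrics[NMETRICS] = { 1500, 64, 0, 0 };
    static unsigned int private_metrics[NMETRICS];

    /* Pointer-sized slot playing the role of dst->_metrics. */
    static _Atomic(unsigned int *) metrics_slot = default_metrics;

    static unsigned int *cow_metrics(void)
    {
            unsigned int *old = atomic_load(&metrics_slot);
            unsigned int *expected = old;

            memcpy(private_metrics, old, sizeof(private_metrics));  /* copy first */

            /* Try to install the writable copy with one compare-and-swap; if
             * another thread already installed its own copy, just use that one. */
            if (!atomic_compare_exchange_strong(&metrics_slot, &expected,
                                                private_metrics))
                    return expected;
            return private_metrics;
    }

    int main(void)
    {
            unsigned int *m = cow_metrics();

            m[0] = 1280;            /* now safe: nobody else writes this array */
            printf("metric[0] is now %u\n", atomic_load(&metrics_slot)[0]);
            return 0;
    }
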
index d2c16e10f650807ec32cc0502efa32cea8bc93b6..3534ceaa4fbac443032781224414ebb61a970116 100644 (file)
@@ -412,7 +412,7 @@ static void prl_list_destroy_rcu(struct rcu_head *head)
 
        p = container_of(head, struct ip_tunnel_prl_entry, rcu_head);
        do {
-               n = p->next;
+               n = rcu_dereference_protected(p->next, 1);
                kfree(p);
                p = n;
        } while (p);
@@ -421,15 +421,17 @@ static void prl_list_destroy_rcu(struct rcu_head *head)
 static int
 ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
 {
-       struct ip_tunnel_prl_entry *x, **p;
+       struct ip_tunnel_prl_entry *x;
+       struct ip_tunnel_prl_entry __rcu **p;
        int err = 0;
 
        ASSERT_RTNL();
 
        if (a && a->addr != htonl(INADDR_ANY)) {
-               for (p = &t->prl; *p; p = &(*p)->next) {
-                       if ((*p)->addr == a->addr) {
-                               x = *p;
+               for (p = &t->prl;
+                    (x = rtnl_dereference(*p)) != NULL;
+                    p = &x->next) {
+                       if (x->addr == a->addr) {
                                *p = x->next;
                                call_rcu(&x->rcu_head, prl_entry_destroy_rcu);
                                t->prl_count--;
@@ -438,9 +440,9 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
                }
                err = -ENXIO;
        } else {
-               if (t->prl) {
+               x = rtnl_dereference(t->prl);
+               if (x) {
                        t->prl_count = 0;
-                       x = t->prl;
                        call_rcu(&x->rcu_head, prl_list_destroy_rcu);
                        t->prl = NULL;
                }
@@ -736,7 +738,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                                    .fl4_tos = RT_TOS(tos),
                                    .oif = tunnel->parms.link,
                                    .proto = IPPROTO_IPV6 };
-               if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
+               rt = ip_route_output_key(dev_net(dev), &fl);
+               if (IS_ERR(rt)) {
                        dev->stats.tx_carrier_errors++;
                        goto tx_error_icmp;
                }
@@ -860,8 +863,9 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
                                    .fl4_tos = RT_TOS(iph->tos),
                                    .oif = tunnel->parms.link,
                                    .proto = IPPROTO_IPV6 };
-               struct rtable *rt;
-               if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
+               struct rtable *rt = ip_route_output_key(dev_net(dev), &fl);
+
+               if (!IS_ERR(rt)) {
                        tdev = rt->dst.dev;
                        ip_rt_put(rt);
                }
@@ -1179,7 +1183,7 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
        if (!dev->tstats)
                return -ENOMEM;
        dev_hold(dev);
-       sitn->tunnels_wc[0]     = tunnel;
+       rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
        return 0;
 }
 
@@ -1196,11 +1200,12 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
        for (prio = 1; prio < 4; prio++) {
                int h;
                for (h = 0; h < HASH_SIZE; h++) {
-                       struct ip_tunnel *t = sitn->tunnels[prio][h];
+                       struct ip_tunnel *t;
 
+                       t = rtnl_dereference(sitn->tunnels[prio][h]);
                        while (t != NULL) {
                                unregister_netdevice_queue(t->dev, head);
-                               t = t->next;
+                               t = rtnl_dereference(t->next);
                        }
                }
        }
index 09fd34f0dbf2e18efbc7c1d3b3681eee0ded6f3e..0b4cf350631beabcd42db20ce0e02cd544f2fdeb 100644 (file)
@@ -243,12 +243,9 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
                fl.fl_ip_dport = inet_rsk(req)->rmt_port;
                fl.fl_ip_sport = inet_sk(sk)->inet_sport;
                security_req_classify_flow(req, &fl);
-               if (ip6_dst_lookup(sk, &dst, &fl))
-                       goto out_free;
 
-               if (final_p)
-                       ipv6_addr_copy(&fl.fl6_dst, final_p);
-               if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
+               dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
+               if (IS_ERR(dst))
                        goto out_free;
        }
 
index 20aa95e373599bc6d98e8f451ffeb7393c9a695c..e59a31c48baf821d48c561e1a351fff00e8df3ff 100644 (file)
@@ -255,18 +255,10 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
        security_sk_classify_flow(sk, &fl);
 
-       err = ip6_dst_lookup(sk, &dst, &fl);
-       if (err)
+       dst = ip6_dst_lookup_flow(sk, &fl, final_p, true);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
                goto failure;
-       if (final_p)
-               ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-       err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
-       if (err < 0) {
-               if (err == -EREMOTE)
-                       err = ip6_dst_blackhole(sk, &dst, &fl);
-               if (err < 0)
-                       goto failure;
        }
 
        if (saddr == NULL) {
@@ -385,7 +377,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        np = inet6_sk(sk);
 
        if (type == ICMPV6_PKT_TOOBIG) {
-               struct dst_entry *dst = NULL;
+               struct dst_entry *dst;
 
                if (sock_owned_by_user(sk))
                        goto out;
@@ -413,13 +405,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                        fl.fl_ip_sport = inet->inet_sport;
                        security_skb_classify_flow(skb, &fl);
 
-                       if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
-                               sk->sk_err_soft = -err;
-                               goto out;
-                       }
-
-                       if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0) {
-                               sk->sk_err_soft = -err;
+                       dst = ip6_dst_lookup_flow(sk, &fl, NULL, false);
+                       if (IS_ERR(dst)) {
+                               sk->sk_err_soft = -PTR_ERR(dst);
                                goto out;
                        }
 
@@ -496,7 +484,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
        struct in6_addr * final_p, final;
        struct flowi fl;
        struct dst_entry *dst;
-       int err = -1;
+       int err;
 
        memset(&fl, 0, sizeof(fl));
        fl.proto = IPPROTO_TCP;
@@ -512,15 +500,13 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
        opt = np->opt;
        final_p = fl6_update_dst(&fl, opt, &final);
 
-       err = ip6_dst_lookup(sk, &dst, &fl);
-       if (err)
+       dst = ip6_dst_lookup_flow(sk, &fl, final_p, false);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
                goto done;
-       if (final_p)
-               ipv6_addr_copy(&fl.fl6_dst, final_p);
-       if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
-               goto done;
-
+       }
        skb = tcp_make_synack(sk, dst, req, rvp);
+       err = -ENOMEM;
        if (skb) {
                __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 
@@ -1079,15 +1065,14 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
         * Underlying function will use this to retrieve the network
         * namespace
         */
-       if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
-               if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
-                       skb_dst_set(buff, dst);
-                       ip6_xmit(ctl_sk, buff, &fl, NULL);
-                       TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
-                       if (rst)
-                               TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
-                       return;
-               }
+       dst = ip6_dst_lookup_flow(ctl_sk, &fl, NULL, false);
+       if (!IS_ERR(dst)) {
+               skb_dst_set(buff, dst);
+               ip6_xmit(ctl_sk, buff, &fl, NULL);
+               TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+               if (rst)
+                       TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
+               return;
        }
 
        kfree_skb(buff);
@@ -1323,7 +1308,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                    tcp_death_row.sysctl_tw_recycle &&
                    (dst = inet6_csk_route_req(sk, req)) != NULL &&
                    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
-                   ipv6_addr_equal((struct in6_addr *)peer->daddr.a6,
+                   ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
                                    &treq->rmt_addr)) {
                        inet_peer_refcheck(peer);
                        if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
@@ -1636,10 +1621,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
                opt_skb = skb_clone(skb, GFP_ATOMIC);
 
        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
-               TCP_CHECK_TIMER(sk);
                if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
                        goto reset;
-               TCP_CHECK_TIMER(sk);
                if (opt_skb)
                        goto ipv6_pktoptions;
                return 0;
@@ -1667,10 +1650,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
                }
        }
 
-       TCP_CHECK_TIMER(sk);
        if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
                goto reset;
-       TCP_CHECK_TIMER(sk);
        if (opt_skb)
                goto ipv6_pktoptions;
        return 0;
index 9a009c66c8a3dd549262aeeb392bf68ec60c5fd4..d86d7f67a597fb2e228c0ca8c349cb3e3cd32e23 100644 (file)
@@ -1125,18 +1125,11 @@ do_udp_sendmsg:
 
        security_sk_classify_flow(sk, &fl);
 
-       err = ip6_sk_dst_lookup(sk, &dst, &fl);
-       if (err)
+       dst = ip6_sk_dst_lookup_flow(sk, &fl, final_p, true);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
+               dst = NULL;
                goto out;
-       if (final_p)
-               ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-       err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
-       if (err < 0) {
-               if (err == -EREMOTE)
-                       err = ip6_dst_blackhole(sk, &dst, &fl);
-               if (err < 0)
-                       goto out;
        }
 
        if (hlimit < 0) {
@@ -1299,7 +1292,7 @@ static int udp6_ufo_send_check(struct sk_buff *skb)
        return 0;
 }
 
-static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, int features)
+static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int mss;
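
The UDPv6 sendmsg path above ends up in ip6_append_data(), which (per the earlier ip6_output.c hunks) now calls sock_tx_timestamp() for SOCK_DGRAM sockets and stamps only the first fragment. From userspace, such transmit timestamps are typically requested as sketched below; exact flag support depends on the kernel and driver, and the completed timestamps are read back from the socket error queue:

    #include <linux/net_tstamp.h>   /* SOF_TIMESTAMPING_* flags (Linux only) */
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = socket(AF_INET6, SOCK_DGRAM, 0);
            if (fd < 0) {
                    perror("socket");
                    return 1;
            }

            /* Ask for software transmit timestamps, reported as software stamps.
             * Which SOF_TIMESTAMPING_* bits work depends on kernel and driver. */
            int flags = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;
            if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
                           &flags, sizeof(flags)) < 0)
                    perror("SO_TIMESTAMPING");

            /* ... sendto()/sendmsg() datagrams here; the completed timestamps come
             * back as SCM_TIMESTAMPING control messages when reading the error
             * queue with recvmsg(fd, ..., MSG_ERRQUEUE). */

            close(fd);
            return 0;
    }
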
index da87428681cccc9d955fdc80ee6f8bd4998356dd..48ce496802fdc83cc7f20712bee044ed4cf8d394 100644 (file)
@@ -27,8 +27,8 @@
 static struct xfrm_policy_afinfo xfrm6_policy_afinfo;
 
 static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
-                                         xfrm_address_t *saddr,
-                                         xfrm_address_t *daddr)
+                                         const xfrm_address_t *saddr,
+                                         const xfrm_address_t *daddr)
 {
        struct flowi fl = {};
        struct dst_entry *dst;
@@ -67,7 +67,7 @@ static int xfrm6_get_saddr(struct net *net,
        return 0;
 }
 
-static int xfrm6_get_tos(struct flowi *fl)
+static int xfrm6_get_tos(const struct flowi *fl)
 {
        return 0;
 }
@@ -87,7 +87,7 @@ static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,
 }
 
 static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
-                         struct flowi *fl)
+                         const struct flowi *fl)
 {
        struct rt6_info *rt = (struct rt6_info*)xdst->route;
 
@@ -220,6 +220,7 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
 
        if (likely(xdst->u.rt6.rt6i_idev))
                in6_dev_put(xdst->u.rt6.rt6i_idev);
+       dst_destroy_metrics_generic(dst);
        if (likely(xdst->u.rt6.rt6i_peer))
                inet_putpeer(xdst->u.rt6.rt6i_peer);
        xfrm_dst_destroy(xdst);
@@ -257,6 +258,7 @@ static struct dst_ops xfrm6_dst_ops = {
        .protocol =             cpu_to_be16(ETH_P_IPV6),
        .gc =                   xfrm6_garbage_collect,
        .update_pmtu =          xfrm6_update_pmtu,
+       .cow_metrics =          dst_cow_metrics_generic,
        .destroy =              xfrm6_dst_destroy,
        .ifdown =               xfrm6_dst_ifdown,
        .local_out =            __ip6_local_out,
@@ -272,6 +274,7 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
        .get_tos =              xfrm6_get_tos,
        .init_path =            xfrm6_init_path,
        .fill_dst =             xfrm6_fill_dst,
+       .blackhole_route =      ip6_blackhole_route,
 };
 
 static int __init xfrm6_policy_init(void)
index a67575d472a320f306002a88959f0d0f9a73af09..a02598e0079a6ea72315d35951dae879c0807a84 100644 (file)
@@ -20,7 +20,7 @@
 #include <net/addrconf.h>
 
 static void
-__xfrm6_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
+__xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
 {
        /* Initialize temporary selector matching only
         * to current session. */
@@ -38,8 +38,8 @@ __xfrm6_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
 }
 
 static void
-xfrm6_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
-                  xfrm_address_t *daddr, xfrm_address_t *saddr)
+xfrm6_init_temprop(struct xfrm_state *x, const struct xfrm_tmpl *tmpl,
+                  const xfrm_address_t *daddr, const xfrm_address_t *saddr)
 {
        x->id = tmpl->id;
        if (ipv6_addr_any((struct in6_addr*)&x->id.daddr))
index d87c22df6f1eb3e7f7d8daa3704a612f9e032179..7db86ffcf0705be6a5eaf02002fc2a90fd58bedf 100644 (file)
@@ -70,7 +70,7 @@ static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
        return (struct pfkey_sock *)sk;
 }
 
-static int pfkey_can_dump(struct sock *sk)
+static int pfkey_can_dump(const struct sock *sk)
 {
        if (3 * atomic_read(&sk->sk_rmem_alloc) <= 2 * sk->sk_rcvbuf)
                return 1;
@@ -303,12 +303,13 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
        return rc;
 }
 
-static inline void pfkey_hdr_dup(struct sadb_msg *new, struct sadb_msg *orig)
+static inline void pfkey_hdr_dup(struct sadb_msg *new,
+                                const struct sadb_msg *orig)
 {
        *new = *orig;
 }
 
-static int pfkey_error(struct sadb_msg *orig, int err, struct sock *sk)
+static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
 {
        struct sk_buff *skb = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_KERNEL);
        struct sadb_msg *hdr;
@@ -369,13 +370,13 @@ static u8 sadb_ext_min_len[] = {
 };
 
 /* Verify sadb_address_{len,prefixlen} against sa_family.  */
-static int verify_address_len(void *p)
+static int verify_address_len(const void *p)
 {
-       struct sadb_address *sp = p;
-       struct sockaddr *addr = (struct sockaddr *)(sp + 1);
-       struct sockaddr_in *sin;
+       const struct sadb_address *sp = p;
+       const struct sockaddr *addr = (const struct sockaddr *)(sp + 1);
+       const struct sockaddr_in *sin;
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-       struct sockaddr_in6 *sin6;
+       const struct sockaddr_in6 *sin6;
 #endif
        int len;
 
@@ -411,16 +412,16 @@ static int verify_address_len(void *p)
        return 0;
 }
 
-static inline int pfkey_sec_ctx_len(struct sadb_x_sec_ctx *sec_ctx)
+static inline int pfkey_sec_ctx_len(const struct sadb_x_sec_ctx *sec_ctx)
 {
        return DIV_ROUND_UP(sizeof(struct sadb_x_sec_ctx) +
                            sec_ctx->sadb_x_ctx_len,
                            sizeof(uint64_t));
 }
 
-static inline int verify_sec_ctx_len(void *p)
+static inline int verify_sec_ctx_len(const void *p)
 {
-       struct sadb_x_sec_ctx *sec_ctx = (struct sadb_x_sec_ctx *)p;
+       const struct sadb_x_sec_ctx *sec_ctx = p;
        int len = sec_ctx->sadb_x_ctx_len;
 
        if (len > PAGE_SIZE)
@@ -434,7 +435,7 @@ static inline int verify_sec_ctx_len(void *p)
        return 0;
 }
 
-static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(struct sadb_x_sec_ctx *sec_ctx)
+static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(const struct sadb_x_sec_ctx *sec_ctx)
 {
        struct xfrm_user_sec_ctx *uctx = NULL;
        int ctx_size = sec_ctx->sadb_x_ctx_len;
@@ -455,16 +456,16 @@ static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(struct sadb
        return uctx;
 }
 
-static int present_and_same_family(struct sadb_address *src,
-                                  struct sadb_address *dst)
+static int present_and_same_family(const struct sadb_address *src,
+                                  const struct sadb_address *dst)
 {
-       struct sockaddr *s_addr, *d_addr;
+       const struct sockaddr *s_addr, *d_addr;
 
        if (!src || !dst)
                return 0;
 
-       s_addr = (struct sockaddr *)(src + 1);
-       d_addr = (struct sockaddr *)(dst + 1);
+       s_addr = (const struct sockaddr *)(src + 1);
+       d_addr = (const struct sockaddr *)(dst + 1);
        if (s_addr->sa_family != d_addr->sa_family)
                return 0;
        if (s_addr->sa_family != AF_INET
@@ -477,15 +478,15 @@ static int present_and_same_family(struct sadb_address *src,
        return 1;
 }
 
-static int parse_exthdrs(struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void **ext_hdrs)
 {
-       char *p = (char *) hdr;
+       const char *p = (char *) hdr;
        int len = skb->len;
 
        len -= sizeof(*hdr);
        p += sizeof(*hdr);
        while (len > 0) {
-               struct sadb_ext *ehdr = (struct sadb_ext *) p;
+               const struct sadb_ext *ehdr = (const struct sadb_ext *) p;
                uint16_t ext_type;
                int ext_len;
 
@@ -514,7 +515,7 @@ static int parse_exthdrs(struct sk_buff *skb, struct sadb_msg *hdr, void **ext_h
                                if (verify_sec_ctx_len(p))
                                        return -EINVAL;
                        }
-                       ext_hdrs[ext_type-1] = p;
+                       ext_hdrs[ext_type-1] = (void *) p;
                }
                p   += ext_len;
                len -= ext_len;
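
parse_exthdrs() above walks the PF_KEY message as a sequence of typed, length-prefixed extension headers and rejects any extension whose advertised length does not fit the remaining buffer. A simplified, self-contained walker over the same kind of type/length stream (a generic TLV layout, not the real sadb_ext wire format):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct ext_hdr {
            uint16_t type;
            uint16_t len;           /* total size of this extension, in bytes */
    };

    /* Walk extensions in buf[0..buflen); reject any length that overruns it. */
    static int parse_exts(const unsigned char *buf, size_t buflen)
    {
            size_t off = 0;

            while (buflen - off >= sizeof(struct ext_hdr)) {
                    struct ext_hdr eh;

                    memcpy(&eh, buf + off, sizeof(eh));     /* no unaligned loads */
                    if (eh.len < sizeof(eh) || eh.len > buflen - off)
                            return -1;                      /* malformed length */
                    printf("extension type %u, %u bytes\n", eh.type, eh.len);
                    off += eh.len;
            }
            return off == buflen ? 0 : -1;                  /* trailing garbage? */
    }

    int main(void)
    {
            unsigned char msg[12];
            struct ext_hdr a = { .type = 1, .len = 8 };
            struct ext_hdr b = { .type = 2, .len = 4 };

            memcpy(msg, &a, sizeof(a));
            memset(msg + sizeof(a), 0xab, 8 - sizeof(a));   /* payload of ext 1 */
            memcpy(msg + 8, &b, sizeof(b));

            return parse_exts(msg, sizeof(msg)) ? 1 : 0;
    }
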
@@ -606,21 +607,21 @@ int pfkey_sockaddr_extract(const struct sockaddr *sa, xfrm_address_t *xaddr)
 }
 
 static
-int pfkey_sadb_addr2xfrm_addr(struct sadb_address *addr, xfrm_address_t *xaddr)
+int pfkey_sadb_addr2xfrm_addr(const struct sadb_address *addr, xfrm_address_t *xaddr)
 {
        return pfkey_sockaddr_extract((struct sockaddr *)(addr + 1),
                                      xaddr);
 }
 
-static struct  xfrm_state *pfkey_xfrm_state_lookup(struct net *net, struct sadb_msg *hdr, void **ext_hdrs)
+static struct  xfrm_state *pfkey_xfrm_state_lookup(struct net *net, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
-       struct sadb_sa *sa;
-       struct sadb_address *addr;
+       const struct sadb_sa *sa;
+       const struct sadb_address *addr;
        uint16_t proto;
        unsigned short family;
        xfrm_address_t *xaddr;
 
-       sa = (struct sadb_sa *) ext_hdrs[SADB_EXT_SA-1];
+       sa = (const struct sadb_sa *) ext_hdrs[SADB_EXT_SA-1];
        if (sa == NULL)
                return NULL;
 
@@ -629,18 +630,18 @@ static struct  xfrm_state *pfkey_xfrm_state_lookup(struct net *net, struct sadb_
                return NULL;
 
        /* sadb_address_len should be checked by caller */
-       addr = (struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_DST-1];
+       addr = (const struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_DST-1];
        if (addr == NULL)
                return NULL;
 
-       family = ((struct sockaddr *)(addr + 1))->sa_family;
+       family = ((const struct sockaddr *)(addr + 1))->sa_family;
        switch (family) {
        case AF_INET:
-               xaddr = (xfrm_address_t *)&((struct sockaddr_in *)(addr + 1))->sin_addr;
+               xaddr = (xfrm_address_t *)&((const struct sockaddr_in *)(addr + 1))->sin_addr;
                break;
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        case AF_INET6:
-               xaddr = (xfrm_address_t *)&((struct sockaddr_in6 *)(addr + 1))->sin6_addr;
+               xaddr = (xfrm_address_t *)&((const struct sockaddr_in6 *)(addr + 1))->sin6_addr;
                break;
 #endif
        default:
@@ -690,9 +691,9 @@ static inline int pfkey_mode_to_xfrm(int mode)
        }
 }
 
-static unsigned int pfkey_sockaddr_fill(xfrm_address_t *xaddr, __be16 port,
-                                      struct sockaddr *sa,
-                                      unsigned short family)
+static unsigned int pfkey_sockaddr_fill(const xfrm_address_t *xaddr, __be16 port,
+                                       struct sockaddr *sa,
+                                       unsigned short family)
 {
        switch (family) {
        case AF_INET:
@@ -720,7 +721,7 @@ static unsigned int pfkey_sockaddr_fill(xfrm_address_t *xaddr, __be16 port,
        return 0;
 }
 
-static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x,
+static struct sk_buff *__pfkey_xfrm_state2msg(const struct xfrm_state *x,
                                              int add_keys, int hsc)
 {
        struct sk_buff *skb;
@@ -1010,7 +1011,7 @@ static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x,
 }
 
 
-static inline struct sk_buff *pfkey_xfrm_state2msg(struct xfrm_state *x)
+static inline struct sk_buff *pfkey_xfrm_state2msg(const struct xfrm_state *x)
 {
        struct sk_buff *skb;
 
@@ -1019,26 +1020,26 @@ static inline struct sk_buff *pfkey_xfrm_state2msg(struct xfrm_state *x)
        return skb;
 }
 
-static inline struct sk_buff *pfkey_xfrm_state2msg_expire(struct xfrm_state *x,
+static inline struct sk_buff *pfkey_xfrm_state2msg_expire(const struct xfrm_state *x,
                                                          int hsc)
 {
        return __pfkey_xfrm_state2msg(x, 0, hsc);
 }
 
 static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
-                                               struct sadb_msg *hdr,
-                                               void **ext_hdrs)
+                                               const struct sadb_msg *hdr,
+                                               void * const *ext_hdrs)
 {
        struct xfrm_state *x;
-       struct sadb_lifetime *lifetime;
-       struct sadb_sa *sa;
-       struct sadb_key *key;
-       struct sadb_x_sec_ctx *sec_ctx;
+       const struct sadb_lifetime *lifetime;
+       const struct sadb_sa *sa;
+       const struct sadb_key *key;
+       const struct sadb_x_sec_ctx *sec_ctx;
        uint16_t proto;
        int err;
 
 
-       sa = (struct sadb_sa *) ext_hdrs[SADB_EXT_SA-1];
-       sa = (struct sadb_sa *) ext_hdrs[SADB_EXT_SA-1];
+       sa = (const struct sadb_sa *) ext_hdrs[SADB_EXT_SA-1];
        if (!sa ||
            !present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
                                     ext_hdrs[SADB_EXT_ADDRESS_DST-1]))
@@ -1077,7 +1078,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
             sa->sadb_sa_encrypt > SADB_X_CALG_MAX) ||
            sa->sadb_sa_encrypt > SADB_EALG_MAX)
                return ERR_PTR(-EINVAL);
-       key = (struct sadb_key*) ext_hdrs[SADB_EXT_KEY_AUTH-1];
+       key = (const struct sadb_key*) ext_hdrs[SADB_EXT_KEY_AUTH-1];
        if (key != NULL &&
            sa->sadb_sa_auth != SADB_X_AALG_NULL &&
            ((key->sadb_key_bits+7) / 8 == 0 ||
@@ -1104,14 +1105,14 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
        if (sa->sadb_sa_flags & SADB_SAFLAGS_NOPMTUDISC)
                x->props.flags |= XFRM_STATE_NOPMTUDISC;
 
-       lifetime = (struct sadb_lifetime*) ext_hdrs[SADB_EXT_LIFETIME_HARD-1];
+       lifetime = (const struct sadb_lifetime*) ext_hdrs[SADB_EXT_LIFETIME_HARD-1];
        if (lifetime != NULL) {
                x->lft.hard_packet_limit = _KEY2X(lifetime->sadb_lifetime_allocations);
                x->lft.hard_byte_limit = _KEY2X(lifetime->sadb_lifetime_bytes);
                x->lft.hard_add_expires_seconds = lifetime->sadb_lifetime_addtime;
                x->lft.hard_use_expires_seconds = lifetime->sadb_lifetime_usetime;
        }
-       lifetime = (struct sadb_lifetime*) ext_hdrs[SADB_EXT_LIFETIME_SOFT-1];
+       lifetime = (const struct sadb_lifetime*) ext_hdrs[SADB_EXT_LIFETIME_SOFT-1];
        if (lifetime != NULL) {
                x->lft.soft_packet_limit = _KEY2X(lifetime->sadb_lifetime_allocations);
                x->lft.soft_byte_limit = _KEY2X(lifetime->sadb_lifetime_bytes);
@@ -1119,7 +1120,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
                x->lft.soft_use_expires_seconds = lifetime->sadb_lifetime_usetime;
        }
 
-       sec_ctx = (struct sadb_x_sec_ctx *) ext_hdrs[SADB_X_EXT_SEC_CTX-1];
+       sec_ctx = (const struct sadb_x_sec_ctx *) ext_hdrs[SADB_X_EXT_SEC_CTX-1];
        if (sec_ctx != NULL) {
                struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
 
@@ -1133,7 +1134,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
                        goto out;
        }
 
-       key = (struct sadb_key*) ext_hdrs[SADB_EXT_KEY_AUTH-1];
+       key = (const struct sadb_key*) ext_hdrs[SADB_EXT_KEY_AUTH-1];
        if (sa->sadb_sa_auth) {
                int keysize = 0;
                struct xfrm_algo_desc *a = xfrm_aalg_get_byid(sa->sadb_sa_auth);
@@ -1202,7 +1203,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
                                  &x->id.daddr);
 
        if (ext_hdrs[SADB_X_EXT_SA2-1]) {
-               struct sadb_x_sa2 *sa2 = (void*)ext_hdrs[SADB_X_EXT_SA2-1];
+               const struct sadb_x_sa2 *sa2 = ext_hdrs[SADB_X_EXT_SA2-1];
                int mode = pfkey_mode_to_xfrm(sa2->sadb_x_sa2_mode);
                if (mode < 0) {
                        err = -EINVAL;
@@ -1213,7 +1214,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
        }
 
        if (ext_hdrs[SADB_EXT_ADDRESS_PROXY-1]) {
-               struct sadb_address *addr = ext_hdrs[SADB_EXT_ADDRESS_PROXY-1];
+               const struct sadb_address *addr = ext_hdrs[SADB_EXT_ADDRESS_PROXY-1];
 
                /* Nobody uses this, but we try. */
                x->sel.family = pfkey_sadb_addr2xfrm_addr(addr, &x->sel.saddr);
@@ -1224,7 +1225,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
                x->sel.family = x->props.family;
 
        if (ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]) {
-               struct sadb_x_nat_t_type* n_type;
+               const struct sadb_x_nat_t_type* n_type;
                struct xfrm_encap_tmpl *natt;
 
                x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL);
@@ -1236,12 +1237,12 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
                natt->encap_type = n_type->sadb_x_nat_t_type_type;
 
                if (ext_hdrs[SADB_X_EXT_NAT_T_SPORT-1]) {
-                       struct sadb_x_nat_t_port* n_port =
+                       const struct sadb_x_nat_t_port *n_port =
                                ext_hdrs[SADB_X_EXT_NAT_T_SPORT-1];
                        natt->encap_sport = n_port->sadb_x_nat_t_port_port;
                }
                if (ext_hdrs[SADB_X_EXT_NAT_T_DPORT-1]) {
-                       struct sadb_x_nat_t_port* n_port =
+                       const struct sadb_x_nat_t_port *n_port =
                                ext_hdrs[SADB_X_EXT_NAT_T_DPORT-1];
                        natt->encap_dport = n_port->sadb_x_nat_t_port_port;
                }
@@ -1261,12 +1262,12 @@ out:
        return ERR_PTR(err);
 }
 
-static int pfkey_reserved(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_reserved(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        return -EOPNOTSUPP;
 }
 
-static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct net *net = sock_net(sk);
        struct sk_buff *resp_skb;
@@ -1365,7 +1366,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
        return 0;
 }
 
-static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct net *net = sock_net(sk);
        struct xfrm_state *x;
@@ -1429,7 +1430,7 @@ static inline int event2keytype(int event)
 }
 
 /* ADD/UPD/DEL */
-static int key_notify_sa(struct xfrm_state *x, struct km_event *c)
+static int key_notify_sa(struct xfrm_state *x, const struct km_event *c)
 {
        struct sk_buff *skb;
        struct sadb_msg *hdr;
@@ -1453,7 +1454,7 @@ static int key_notify_sa(struct xfrm_state *x, struct km_event *c)
        return 0;
 }
 
-static int pfkey_add(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_add(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct net *net = sock_net(sk);
        struct xfrm_state *x;
@@ -1492,7 +1493,7 @@ out:
        return err;
 }
 
-static int pfkey_delete(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_delete(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct net *net = sock_net(sk);
        struct xfrm_state *x;
@@ -1534,7 +1535,7 @@ out:
        return err;
 }
 
-static int pfkey_get(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct net *net = sock_net(sk);
        __u8 proto;
@@ -1570,7 +1571,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr,
        return 0;
 }
 
-static struct sk_buff *compose_sadb_supported(struct sadb_msg *orig,
+static struct sk_buff *compose_sadb_supported(const struct sadb_msg *orig,
                                              gfp_t allocation)
 {
        struct sk_buff *skb;
@@ -1642,7 +1643,7 @@ out_put_algs:
        return skb;
 }
 
-static int pfkey_register(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct pfkey_sock *pfk = pfkey_sk(sk);
        struct sk_buff *supp_skb;
@@ -1671,7 +1672,7 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, struct sadb_msg
        return 0;
 }
 
-static int unicast_flush_resp(struct sock *sk, struct sadb_msg *ihdr)
+static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr)
 {
        struct sk_buff *skb;
        struct sadb_msg *hdr;
@@ -1688,7 +1689,7 @@ static int unicast_flush_resp(struct sock *sk, struct sadb_msg *ihdr)
        return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
 }
 
-static int key_notify_sa_flush(struct km_event *c)
+static int key_notify_sa_flush(const struct km_event *c)
 {
        struct sk_buff *skb;
        struct sadb_msg *hdr;
@@ -1710,7 +1711,7 @@ static int key_notify_sa_flush(struct km_event *c)
        return 0;
 }
 
-static int pfkey_flush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct net *net = sock_net(sk);
        unsigned proto;
@@ -1784,7 +1785,7 @@ static void pfkey_dump_sa_done(struct pfkey_sock *pfk)
        xfrm_state_walk_done(&pfk->dump.u.state);
 }
 
-static int pfkey_dump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        u8 proto;
        struct pfkey_sock *pfk = pfkey_sk(sk);
@@ -1805,19 +1806,29 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr
        return pfkey_do_dump(pfk);
 }
 
-static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct pfkey_sock *pfk = pfkey_sk(sk);
        int satype = hdr->sadb_msg_satype;
+       bool reset_errno = false;
 
        if (hdr->sadb_msg_len == (sizeof(*hdr) / sizeof(uint64_t))) {
-               /* XXX we mangle packet... */
-               hdr->sadb_msg_errno = 0;
+               reset_errno = true;
                if (satype != 0 && satype != 1)
                        return -EINVAL;
                pfk->promisc = satype;
        }
-       pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
+       if (reset_errno && skb_cloned(skb))
+               skb = skb_copy(skb, GFP_KERNEL);
+       else
+               skb = skb_clone(skb, GFP_KERNEL);
+
+       if (reset_errno && skb) {
+               struct sadb_msg *new_hdr = (struct sadb_msg *) skb->data;
+               new_hdr->sadb_msg_errno = 0;
+       }
+
+       pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
        return 0;
 }
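
pfkey_promisc() above no longer clears sadb_msg_errno in the caller's skb: when a write is needed and the data is shared (skb_cloned()), it takes a private copy with skb_copy(), otherwise a cheap skb_clone() suffices. The same copy-before-write decision for a refcounted buffer, sketched with a hypothetical structure rather than the real sk_buff:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf {
            int refs;                       /* how many users share the data */
            unsigned char data[16];
    };

    /* Return a buffer the caller may scribble on, copying only when shared. */
    static struct buf *buf_for_writing(struct buf *b)
    {
            struct buf *copy;

            if (b->refs == 1)
                    return b;               /* sole owner: write in place */

            copy = malloc(sizeof(*copy));   /* shared: take a private copy first */
            if (!copy)
                    return NULL;
            memcpy(copy->data, b->data, sizeof(copy->data));
            copy->refs = 1;
            b->refs--;
            return copy;
    }

    int main(void)
    {
            struct buf shared = { .refs = 2, .data = { 0x11 } };
            struct buf *w = buf_for_writing(&shared);

            if (w) {
                    w->data[0] = 0;         /* like clearing sadb_msg_errno */
                    printf("original %#x, writable copy %#x\n",
                           shared.data[0], w->data[0]);
                    if (w != &shared)
                            free(w);
            }
            return 0;
    }
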
 
@@ -1921,7 +1932,7 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
        return 0;
 }
 
-static inline int pfkey_xfrm_policy2sec_ctx_size(struct xfrm_policy *xp)
+static inline int pfkey_xfrm_policy2sec_ctx_size(const struct xfrm_policy *xp)
 {
   struct xfrm_sec_ctx *xfrm_ctx = xp->security;
 
@@ -1933,9 +1944,9 @@ static inline int pfkey_xfrm_policy2sec_ctx_size(struct xfrm_policy *xp)
        return 0;
 }
 
-static int pfkey_xfrm_policy2msg_size(struct xfrm_policy *xp)
+static int pfkey_xfrm_policy2msg_size(const struct xfrm_policy *xp)
 {
-       struct xfrm_tmpl *t;
+       const struct xfrm_tmpl *t;
        int sockaddr_size = pfkey_sockaddr_size(xp->family);
        int socklen = 0;
        int i;
@@ -1955,7 +1966,7 @@ static int pfkey_xfrm_policy2msg_size(struct xfrm_policy *xp)
                pfkey_xfrm_policy2sec_ctx_size(xp);
 }
 
-static struct sk_buff * pfkey_xfrm_policy2msg_prep(struct xfrm_policy *xp)
+static struct sk_buff * pfkey_xfrm_policy2msg_prep(const struct xfrm_policy *xp)
 {
        struct sk_buff *skb;
        int size;
@@ -1969,7 +1980,7 @@ static struct sk_buff * pfkey_xfrm_policy2msg_prep(struct xfrm_policy *xp)
        return skb;
 }
 
-static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, int dir)
+static int pfkey_xfrm_policy2msg(struct sk_buff *skb, const struct xfrm_policy *xp, int dir)
 {
        struct sadb_msg *hdr;
        struct sadb_address *addr;
@@ -2065,8 +2076,8 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
        pol->sadb_x_policy_priority = xp->priority;
 
        for (i=0; i<xp->xfrm_nr; i++) {
+               const struct xfrm_tmpl *t = xp->xfrm_vec + i;
                struct sadb_x_ipsecrequest *rq;
-               struct xfrm_tmpl *t = xp->xfrm_vec + i;
                int req_size;
                int mode;
 
@@ -2123,7 +2134,7 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
        return 0;
 }
 
-static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
+static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
 {
        struct sk_buff *out_skb;
        struct sadb_msg *out_hdr;
@@ -2152,7 +2163,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c
 
 }
 
-static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct net *net = sock_net(sk);
        int err = 0;
@@ -2273,7 +2284,7 @@ out:
        return err;
 }
 
-static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct net *net = sock_net(sk);
        int err;
@@ -2350,7 +2361,7 @@ out:
        return err;
 }
 
-static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, struct sadb_msg *hdr, int dir)
+static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struct sadb_msg *hdr, int dir)
 {
        int err;
        struct sk_buff *out_skb;
@@ -2458,7 +2469,7 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
 }
 
 static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
-                        struct sadb_msg *hdr, void **ext_hdrs)
+                        const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        int i, len, ret, err = -EINVAL;
        u8 dir;
@@ -2549,14 +2560,14 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
 }
 #else
 static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
-                        struct sadb_msg *hdr, void **ext_hdrs)
+                        const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        return -ENOPROTOOPT;
 }
 #endif
 
 
-static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct net *net = sock_net(sk);
        unsigned int dir;
@@ -2644,7 +2655,7 @@ static void pfkey_dump_sp_done(struct pfkey_sock *pfk)
        xfrm_policy_walk_done(&pfk->dump.u.policy);
 }
 
-static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct pfkey_sock *pfk = pfkey_sk(sk);
 
@@ -2660,7 +2671,7 @@ static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *
        return pfkey_do_dump(pfk);
 }
 
-static int key_notify_policy_flush(struct km_event *c)
+static int key_notify_policy_flush(const struct km_event *c)
 {
        struct sk_buff *skb_out;
        struct sadb_msg *hdr;
@@ -2680,7 +2691,7 @@ static int key_notify_policy_flush(struct km_event *c)
 
 }
 
-static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
+static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        struct net *net = sock_net(sk);
        struct km_event c;
@@ -2709,7 +2720,7 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, struct sadb_msg
 }
 
 typedef int (*pfkey_handler)(struct sock *sk, struct sk_buff *skb,
-                            struct sadb_msg *hdr, void **ext_hdrs);
+                            const struct sadb_msg *hdr, void * const *ext_hdrs);
 static pfkey_handler pfkey_funcs[SADB_MAX + 1] = {
        [SADB_RESERVED]         = pfkey_reserved,
        [SADB_GETSPI]           = pfkey_getspi,
@@ -2736,7 +2747,7 @@ static pfkey_handler pfkey_funcs[SADB_MAX + 1] = {
        [SADB_X_MIGRATE]        = pfkey_migrate,
 };
 
-static int pfkey_process(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr)
+static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr)
 {
        void *ext_hdrs[SADB_EXT_MAX];
        int err;
@@ -2781,7 +2792,8 @@ static struct sadb_msg *pfkey_get_base_msg(struct sk_buff *skb, int *errp)
        return hdr;
 }
 
-static inline int aalg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d)
+static inline int aalg_tmpl_set(const struct xfrm_tmpl *t,
+                               const struct xfrm_algo_desc *d)
 {
        unsigned int id = d->desc.sadb_alg_id;
 
@@ -2791,7 +2803,8 @@ static inline int aalg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d)
        return (t->aalgos >> id) & 1;
 }
 
-static inline int ealg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d)
+static inline int ealg_tmpl_set(const struct xfrm_tmpl *t,
+                               const struct xfrm_algo_desc *d)
 {
        unsigned int id = d->desc.sadb_alg_id;
 
@@ -2801,12 +2814,12 @@ static inline int ealg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d)
        return (t->ealgos >> id) & 1;
 }
 
-static int count_ah_combs(struct xfrm_tmpl *t)
+static int count_ah_combs(const struct xfrm_tmpl *t)
 {
        int i, sz = 0;
 
        for (i = 0; ; i++) {
-               struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
+               const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
                if (!aalg)
                        break;
                if (aalg_tmpl_set(t, aalg) && aalg->available)
@@ -2815,12 +2828,12 @@ static int count_ah_combs(struct xfrm_tmpl *t)
        return sz + sizeof(struct sadb_prop);
 }
 
-static int count_esp_combs(struct xfrm_tmpl *t)
+static int count_esp_combs(const struct xfrm_tmpl *t)
 {
        int i, k, sz = 0;
 
        for (i = 0; ; i++) {
-               struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
+               const struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
                if (!ealg)
                        break;
 
@@ -2828,7 +2841,7 @@ static int count_esp_combs(struct xfrm_tmpl *t)
                        continue;
 
                for (k = 1; ; k++) {
-                       struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
+                       const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
                        if (!aalg)
                                break;
 
@@ -2839,7 +2852,7 @@ static int count_esp_combs(struct xfrm_tmpl *t)
        return sz + sizeof(struct sadb_prop);
 }
 
-static void dump_ah_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
+static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
 {
        struct sadb_prop *p;
        int i;
@@ -2851,7 +2864,7 @@ static void dump_ah_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
        memset(p->sadb_prop_reserved, 0, sizeof(p->sadb_prop_reserved));
 
        for (i = 0; ; i++) {
-               struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
+               const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
                if (!aalg)
                        break;
 
@@ -2871,7 +2884,7 @@ static void dump_ah_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
        }
 }
 
-static void dump_esp_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
+static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
 {
        struct sadb_prop *p;
        int i, k;
@@ -2883,7 +2896,7 @@ static void dump_esp_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
        memset(p->sadb_prop_reserved, 0, sizeof(p->sadb_prop_reserved));
 
        for (i=0; ; i++) {
-               struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
+               const struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
                if (!ealg)
                        break;
 
@@ -2892,7 +2905,7 @@ static void dump_esp_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
 
                for (k = 1; ; k++) {
                        struct sadb_comb *c;
-                       struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
+                       const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
                        if (!aalg)
                                break;
                        if (!(aalg_tmpl_set(t, aalg) && aalg->available))
@@ -2914,12 +2927,12 @@ static void dump_esp_combs(struct sk_buff *skb, struct xfrm_tmpl *t)
        }
 }
 
-static int key_notify_policy_expire(struct xfrm_policy *xp, struct km_event *c)
+static int key_notify_policy_expire(struct xfrm_policy *xp, const struct km_event *c)
 {
        return 0;
 }
 
-static int key_notify_sa_expire(struct xfrm_state *x, struct km_event *c)
+static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c)
 {
        struct sk_buff *out_skb;
        struct sadb_msg *out_hdr;
@@ -2949,7 +2962,7 @@ static int key_notify_sa_expire(struct xfrm_state *x, struct km_event *c)
        return 0;
 }
 
-static int pfkey_send_notify(struct xfrm_state *x, struct km_event *c)
+static int pfkey_send_notify(struct xfrm_state *x, const struct km_event *c)
 {
        struct net *net = x ? xs_net(x) : c->net;
        struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
@@ -2976,7 +2989,7 @@ static int pfkey_send_notify(struct xfrm_state *x, struct km_event *c)
        return 0;
 }
 
-static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
+static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
 {
        if (xp && xp->type != XFRM_POLICY_TYPE_MAIN)
                return 0;
@@ -3318,7 +3331,7 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
 
 #ifdef CONFIG_NET_KEY_MIGRATE
 static int set_sadb_address(struct sk_buff *skb, int sasize, int type,
-                           struct xfrm_selector *sel)
+                           const struct xfrm_selector *sel)
 {
        struct sadb_address *addr;
        addr = (struct sadb_address *)skb_put(skb, sizeof(struct sadb_address) + sasize);
@@ -3348,7 +3361,7 @@ static int set_sadb_address(struct sk_buff *skb, int sasize, int type,
 }
 
 
-static int set_sadb_kmaddress(struct sk_buff *skb, struct xfrm_kmaddress *k)
+static int set_sadb_kmaddress(struct sk_buff *skb, const struct xfrm_kmaddress *k)
 {
        struct sadb_x_kmaddress *kma;
        u8 *sa;
@@ -3376,7 +3389,7 @@ static int set_sadb_kmaddress(struct sk_buff *skb, struct xfrm_kmaddress *k)
 static int set_ipsecrequest(struct sk_buff *skb,
                            uint8_t proto, uint8_t mode, int level,
                            uint32_t reqid, uint8_t family,
-                           xfrm_address_t *src, xfrm_address_t *dst)
+                           const xfrm_address_t *src, const xfrm_address_t *dst)
 {
        struct sadb_x_ipsecrequest *rq;
        u8 *sa;
@@ -3404,9 +3417,9 @@ static int set_ipsecrequest(struct sk_buff *skb,
 #endif
 
 #ifdef CONFIG_NET_KEY_MIGRATE
-static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
-                             struct xfrm_migrate *m, int num_bundles,
-                             struct xfrm_kmaddress *k)
+static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+                             const struct xfrm_migrate *m, int num_bundles,
+                             const struct xfrm_kmaddress *k)
 {
        int i;
        int sasize_sel;
@@ -3415,7 +3428,7 @@ static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
        struct sk_buff *skb;
        struct sadb_msg *hdr;
        struct sadb_x_policy *pol;
-       struct xfrm_migrate *mp;
+       const struct xfrm_migrate *mp;
 
        if (type != XFRM_POLICY_TYPE_MAIN)
                return 0;
@@ -3513,9 +3526,9 @@ err:
        return -EINVAL;
 }
 #else
-static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
-                             struct xfrm_migrate *m, int num_bundles,
-                             struct xfrm_kmaddress *k)
+static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+                             const struct xfrm_migrate *m, int num_bundles,
+                             const struct xfrm_kmaddress *k)
 {
        return -ENOPROTOOPT;
 }
@@ -3655,6 +3668,7 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
 }
 
 static void *pfkey_seq_start(struct seq_file *f, loff_t *ppos)
+       __acquires(rcu)
 {
        struct net *net = seq_file_net(f);
        struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
@@ -3672,6 +3686,7 @@ static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos)
 }
 
 static void pfkey_seq_stop(struct seq_file *f, void *v)
+       __releases(rcu)
 {
        rcu_read_unlock();
 }
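
The pfkey_promisc() hunk stops mangling a possibly shared packet: when the errno field must be cleared and the skb is already cloned, a private copy is taken before writing; otherwise a plain clone is broadcast as before. A minimal userspace sketch of that copy-before-write decision, using a hypothetical refcounted buffer rather than the kernel's sk_buff API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
        int refcnt;             /* >1 means the data is shared with another owner */
        unsigned char *data;
        size_t len;
};

/* cheap "clone": share the data, bump the reference count */
static struct buf *buf_clone(struct buf *b)
{
        b->refcnt++;
        return b;
}

/* deep copy: private data we are free to modify */
static struct buf *buf_copy(const struct buf *b)
{
        struct buf *n = calloc(1, sizeof(*n));

        if (!n || !(n->data = malloc(b->len))) {
                free(n);
                return NULL;
        }
        n->refcnt = 1;
        n->len = b->len;
        memcpy(n->data, b->data, b->len);
        return n;
}

/* mirror of the pfkey_promisc() decision: copy when already shared, else clone;
 * byte 0 stands in for sadb_msg_errno */
static struct buf *prepare_for_broadcast(struct buf *b, int reset_errno)
{
        struct buf *out = (reset_errno && b->refcnt > 1) ? buf_copy(b)
                                                         : buf_clone(b);
        if (reset_errno && out)
                out->data[0] = 0;
        return out;
}

int main(void)
{
        unsigned char payload[4] = { 42, 1, 2, 3 };
        struct buf shared = { .refcnt = 2, .data = payload, .len = sizeof(payload) };
        struct buf *out = prepare_for_broadcast(&shared, 1);

        printf("original errno byte: %d, broadcast copy: %d\n",
               payload[0], out ? out->data[0] : 0);
        return 0;
}
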
index 110efb704c9b93eca1d86db0d252592f117de6b0..2a698ff89db69d3bf31f484a6028aeb089a753dc 100644 (file)
@@ -320,11 +320,12 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
        if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
                goto out;
 
-       rc = ip_route_connect(&rt, lsa->l2tp_addr.s_addr, saddr,
+       rt = ip_route_connect(lsa->l2tp_addr.s_addr, saddr,
                              RT_CONN_FLAGS(sk), oif,
                              IPPROTO_L2TP,
-                             0, 0, sk, 1);
-       if (rc) {
+                             0, 0, sk, true);
+       if (IS_ERR(rt)) {
+               rc = PTR_ERR(rt);
                if (rc == -ENETUNREACH)
                        IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
                goto out;
@@ -489,7 +490,8 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
                         * itself out.
                         */
                        security_sk_classify_flow(sk, &fl);
-                       if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
+                       rt = ip_route_output_flow(sock_net(sk), &fl, sk);
+                       if (IS_ERR(rt))
                                goto no_route;
                }
                sk_setup_caps(sk, &rt->dst);
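
Both l2tp hunks follow the tree-wide conversion of the routing calls from an int return plus output parameter to a pointer return that encodes the error, tested with IS_ERR() and decoded with PTR_ERR(). A self-contained sketch of that error-in-pointer convention, with simplified stand-ins for the kernel's err.h helpers:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* simplified versions of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct route { int id; };

/* returns a route on success, or an encoded -errno on failure */
static struct route *route_connect(int reachable)
{
        struct route *rt;

        if (!reachable)
                return ERR_PTR(-ENETUNREACH);

        rt = malloc(sizeof(*rt));
        if (!rt)
                return ERR_PTR(-ENOMEM);
        rt->id = 1;
        return rt;
}

int main(void)
{
        struct route *rt = route_connect(0);

        if (IS_ERR(rt)) {
                printf("connect failed: %ld\n", PTR_ERR(rt));
                return 1;
        }
        printf("got route %d\n", rt->id);
        free(rt);
        return 0;
}
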
index f99687439139faf68f2464a39bd48d6100481dca..058f1e9a91281859bf39501619da6eb5ece8d16a 100644 (file)
@@ -181,25 +181,26 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
         * LLC functionality
         */
        rcv = rcu_dereference(sap->rcv_func);
-       if (rcv) {
-               struct sk_buff *cskb = skb_clone(skb, GFP_ATOMIC);
-               if (cskb)
-                       rcv(cskb, dev, pt, orig_dev);
-       }
        dest = llc_pdu_type(skb);
-       if (unlikely(!dest || !llc_type_handlers[dest - 1]))
-               goto drop_put;
-       llc_type_handlers[dest - 1](sap, skb);
-out_put:
+       if (unlikely(!dest || !llc_type_handlers[dest - 1])) {
+               if (rcv)
+                       rcv(skb, dev, pt, orig_dev);
+               else
+                       kfree_skb(skb);
+       } else {
+               if (rcv) {
+                       struct sk_buff *cskb = skb_clone(skb, GFP_ATOMIC);
+                       if (cskb)
+                               rcv(cskb, dev, pt, orig_dev);
+               }
+               llc_type_handlers[dest - 1](sap, skb);
+       }
        llc_sap_put(sap);
 out:
        return 0;
 drop:
        kfree_skb(skb);
        goto out;
-drop_put:
-       kfree_skb(skb);
-       goto out_put;
 handle_station:
        if (!llc_station_handler)
                goto drop;
index c766056d0488eb2826def69d7ba9e923e171304f..513f85cc2ae1b1ad707f8a878fcf2fd5e6e93a91 100644 (file)
@@ -17,7 +17,7 @@ comment "CFG80211 needs to be enabled for MAC80211"
 if MAC80211 != n
 
 config MAC80211_HAS_RC
-       def_bool n
+       bool
 
 config MAC80211_RC_PID
        bool "PID controller based rate control algorithm" if EXPERT
@@ -78,7 +78,7 @@ config MAC80211_RC_DEFAULT
 endif
 
 comment "Some wireless drivers require a rate control algorithm"
-       depends on MAC80211_HAS_RC=n
+       depends on MAC80211 && MAC80211_HAS_RC=n
 
 config MAC80211_MESH
        bool "Enable mac80211 mesh networking (pre-802.11s) support"
index 227ca82eef72178253b3f4adf831ce5ae297dd28..0c9d0c07eae6cae6ffc6f3ef1f0e91ae64107e2b 100644 (file)
@@ -76,7 +76,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 
        if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
-                            &sta->sta, tid, NULL))
+                            &sta->sta, tid, NULL, 0))
                printk(KERN_DEBUG "HW problem - can not stop rx "
                                "aggregation for tid %d\n", tid);
 
@@ -232,6 +232,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
        if (buf_size == 0)
                buf_size = IEEE80211_MAX_AMPDU_BUF;
 
+       /* make sure the size doesn't exceed the maximum supported by the hw */
+       if (buf_size > local->hw.max_rx_aggregation_subframes)
+               buf_size = local->hw.max_rx_aggregation_subframes;
 
        /* examine state machine */
        mutex_lock(&sta->ampdu_mlme.mtx);
@@ -287,7 +290,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
        }
 
        ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
-                              &sta->sta, tid, &start_seq_num);
+                              &sta->sta, tid, &start_seq_num, 0);
 #ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
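
The agg-rx.c hunk clamps the peer's requested reorder buffer to what the hardware advertises in max_rx_aggregation_subframes, after first substituting the spec maximum for a zero request. The sanitation reduces to two comparisons; a small standalone sketch (IEEE80211_MAX_AMPDU_BUF is 0x40 in this tree, the hardware limit below is hypothetical):

#include <stdio.h>

#define IEEE80211_MAX_AMPDU_BUF 0x40    /* spec maximum: 64 subframes */

/* mirror of the buf_size sanitation done for an incoming ADDBA request */
static unsigned int sanitize_rx_buf_size(unsigned int requested,
                                         unsigned int hw_max)
{
        if (requested == 0)                     /* 0 means "no preference" */
                requested = IEEE80211_MAX_AMPDU_BUF;
        if (requested > hw_max)                 /* never exceed the hardware */
                requested = hw_max;
        return requested;
}

int main(void)
{
        unsigned int hw_max = 32;               /* hypothetical device limit */

        printf("%u %u %u\n",
               sanitize_rx_buf_size(0, hw_max),   /* -> 32 */
               sanitize_rx_buf_size(8, hw_max),   /* ->  8 */
               sanitize_rx_buf_size(64, hw_max)); /* -> 32 */
        return 0;
}
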
index 9cc472c6a6a5497dcadea0af44fe011f14c2f5a7..63d852cb4ca2e55e3a6f70721e4487ec656862ce 100644 (file)
@@ -190,7 +190,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 
        ret = drv_ampdu_action(local, sta->sdata,
                               IEEE80211_AMPDU_TX_STOP,
-                              &sta->sta, tid, NULL);
+                              &sta->sta, tid, NULL, 0);
 
        /* HW shall not deny going back to legacy */
        if (WARN_ON(ret)) {
@@ -311,7 +311,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
        start_seq_num = sta->tid_seq[tid] >> 4;
 
        ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
-                              &sta->sta, tid, &start_seq_num);
+                              &sta->sta, tid, &start_seq_num, 0);
        if (ret) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
                printk(KERN_DEBUG "BA request denied - HW unavailable for"
@@ -342,7 +342,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
        /* send AddBA request */
        ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
                                     tid_tx->dialog_token, start_seq_num,
-                                    0x40, tid_tx->timeout);
+                                    local->hw.max_tx_aggregation_subframes,
+                                    tid_tx->timeout);
 }
 
 int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
@@ -487,7 +488,8 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
 
        drv_ampdu_action(local, sta->sdata,
                         IEEE80211_AMPDU_TX_OPERATIONAL,
-                        &sta->sta, tid, NULL);
+                        &sta->sta, tid, NULL,
+                        sta->ampdu_mlme.tid_tx[tid]->buf_size);
 
        /*
         * synchronize with TX path, while splicing the TX path
@@ -742,9 +744,11 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 {
        struct tid_ampdu_tx *tid_tx;
        u16 capab, tid;
+       u8 buf_size;
 
        capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
        tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
+       buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
 
        mutex_lock(&sta->ampdu_mlme.mtx);
 
@@ -767,12 +771,23 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 
        if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
                        == WLAN_STATUS_SUCCESS) {
+               /*
+                * IEEE 802.11-2007 7.3.1.14:
+                * In an ADDBA Response frame, when the Status Code field
+                * is set to 0, the Buffer Size subfield is set to a value
+                * of at least 1.
+                */
+               if (!buf_size)
+                       goto out;
+
                if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
                                     &tid_tx->state)) {
                        /* ignore duplicate response */
                        goto out;
                }
 
+               tid_tx->buf_size = buf_size;
+
                if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
                        ieee80211_agg_tx_operational(local, sta, tid);
 
index 9cd73b11506e85b1a0d5ecf26563da8d1d439d18..7b701dcddb50cb308d6b4f7d80b5da908583649f 100644 (file)
@@ -316,6 +316,17 @@ static int ieee80211_config_default_mgmt_key(struct wiphy *wiphy,
        return 0;
 }
 
+static void rate_idx_to_bitrate(struct rate_info *rate, struct sta_info *sta, int idx)
+{
+       if (!(rate->flags & RATE_INFO_FLAGS_MCS)) {
+               struct ieee80211_supported_band *sband;
+               sband = sta->local->hw.wiphy->bands[
+                               sta->local->hw.conf.channel->band];
+               rate->legacy = sband->bitrates[idx].bitrate;
+       } else
+               rate->mcs = idx;
+}
+
 static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -330,6 +341,7 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
                        STATION_INFO_TX_RETRIES |
                        STATION_INFO_TX_FAILED |
                        STATION_INFO_TX_BITRATE |
+                       STATION_INFO_RX_BITRATE |
                        STATION_INFO_RX_DROP_MISC;
 
        sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx);
@@ -355,15 +367,16 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
                sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
        if (sta->last_tx_rate.flags & IEEE80211_TX_RC_SHORT_GI)
                sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+       rate_idx_to_bitrate(&sinfo->txrate, sta, sta->last_tx_rate.idx);
 
-       if (!(sta->last_tx_rate.flags & IEEE80211_TX_RC_MCS)) {
-               struct ieee80211_supported_band *sband;
-               sband = sta->local->hw.wiphy->bands[
-                               sta->local->hw.conf.channel->band];
-               sinfo->txrate.legacy =
-                       sband->bitrates[sta->last_tx_rate.idx].bitrate;
-       } else
-               sinfo->txrate.mcs = sta->last_tx_rate.idx;
+       sinfo->rxrate.flags = 0;
+       if (sta->last_rx_rate_flag & RX_FLAG_HT)
+               sinfo->rxrate.flags |= RATE_INFO_FLAGS_MCS;
+       if (sta->last_rx_rate_flag & RX_FLAG_40MHZ)
+               sinfo->rxrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
+       if (sta->last_rx_rate_flag & RX_FLAG_SHORT_GI)
+               sinfo->rxrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+       rate_idx_to_bitrate(&sinfo->rxrate, sta, sta->last_rx_rate_idx);
 
        if (ieee80211_vif_is_mesh(&sdata->vif)) {
 #ifdef CONFIG_MAC80211_MESH
@@ -1215,6 +1228,9 @@ static int ieee80211_set_channel(struct wiphy *wiphy,
 {
        struct ieee80211_local *local = wiphy_priv(wiphy);
        struct ieee80211_sub_if_data *sdata = NULL;
+       struct ieee80211_channel *old_oper;
+       enum nl80211_channel_type old_oper_type;
+       enum nl80211_channel_type old_vif_oper_type = NL80211_CHAN_NO_HT;
 
        if (netdev)
                sdata = IEEE80211_DEV_TO_SUB_IF(netdev);
@@ -1232,13 +1248,23 @@ static int ieee80211_set_channel(struct wiphy *wiphy,
                break;
        }
 
-       local->oper_channel = chan;
+       if (sdata)
+               old_vif_oper_type = sdata->vif.bss_conf.channel_type;
+       old_oper_type = local->_oper_channel_type;
 
        if (!ieee80211_set_channel_type(local, sdata, channel_type))
                return -EBUSY;
 
-       ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
-       if (sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR)
+       old_oper = local->oper_channel;
+       local->oper_channel = chan;
+
+       /* Update driver if changes were actually made. */
+       if ((old_oper != local->oper_channel) ||
+           (old_oper_type != local->_oper_channel_type))
+               ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+
+       if ((sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR) &&
+           old_vif_oper_type != sdata->vif.bss_conf.channel_type)
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
 
        return 0;
@@ -1274,8 +1300,11 @@ static int ieee80211_scan(struct wiphy *wiphy,
        case NL80211_IFTYPE_P2P_GO:
                if (sdata->local->ops->hw_scan)
                        break;
-               /* FIXME: implement NoA while scanning in software */
-               return -EOPNOTSUPP;
+               /*
+                * FIXME: implement NoA while scanning in software,
+                * for now fall through to allow scanning only when
+                * beaconing hasn't been configured yet
+                */
        case NL80211_IFTYPE_AP:
                if (sdata->u.ap.beacon)
                        return -EOPNOTSUPP;
@@ -1784,6 +1813,33 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
 
        *cookie = (unsigned long) skb;
 
+       if (is_offchan && local->ops->offchannel_tx) {
+               int ret;
+
+               IEEE80211_SKB_CB(skb)->band = chan->band;
+
+               mutex_lock(&local->mtx);
+
+               if (local->hw_offchan_tx_cookie) {
+                       mutex_unlock(&local->mtx);
+                       return -EBUSY;
+               }
+
+               /* TODO: bitrate control, TX processing? */
+               ret = drv_offchannel_tx(local, skb, chan, channel_type, wait);
+
+               if (ret == 0)
+                       local->hw_offchan_tx_cookie = *cookie;
+               mutex_unlock(&local->mtx);
+
+               /*
+                * Allow driver to return 1 to indicate it wants to have the
+                * frame transmitted with a remain_on_channel + regular TX.
+                */
+               if (ret != 1)
+                       return ret;
+       }
+
        if (is_offchan && local->ops->remain_on_channel) {
                unsigned int duration;
                int ret;
@@ -1847,6 +1903,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
 
        wk->type = IEEE80211_WORK_OFFCHANNEL_TX;
        wk->chan = chan;
+       wk->chan_type = channel_type;
        wk->sdata = sdata;
        wk->done = ieee80211_offchan_tx_done;
        wk->offchan_tx.frame = skb;
@@ -1869,6 +1926,18 @@ static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
 
        mutex_lock(&local->mtx);
 
+       if (local->ops->offchannel_tx_cancel_wait &&
+           local->hw_offchan_tx_cookie == cookie) {
+               ret = drv_offchannel_tx_cancel_wait(local);
+
+               if (!ret)
+                       local->hw_offchan_tx_cookie = 0;
+
+               mutex_unlock(&local->mtx);
+
+               return ret;
+       }
+
        if (local->ops->cancel_remain_on_channel) {
                cookie ^= 2;
                ret = ieee80211_cancel_remain_on_channel_hw(local, cookie);
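
The cfg.c hunk factors the index-to-rate translation into rate_idx_to_bitrate() so the same code can fill both the TX rate and the newly reported RX rate. A userspace approximation of that helper, using a made-up rate table (mac80211 keeps legacy bitrates in units of 100 kbit/s):

#include <stdio.h>

#define RATE_FLAG_MCS 0x1

struct rate_report {
        unsigned int flags;
        int mcs;        /* valid when RATE_FLAG_MCS is set */
        int legacy;     /* otherwise: bitrate in units of 100 kbit/s */
};

/* 2.4 GHz legacy rates in 100 kbit/s units (1, 2, 5.5, 11, 6, 9, 12, 18 Mbit/s) */
static const int band_bitrates[] = { 10, 20, 55, 110, 60, 90, 120, 180 };

static void rate_idx_to_report(struct rate_report *r, int idx)
{
        if (r->flags & RATE_FLAG_MCS)
                r->mcs = idx;                   /* HT rate: report the MCS index */
        else
                r->legacy = band_bitrates[idx]; /* legacy rate: look up the bitrate */
}

int main(void)
{
        struct rate_report rx = { .flags = 0 };
        struct rate_report tx = { .flags = RATE_FLAG_MCS };

        rate_idx_to_report(&rx, 3);     /* legacy index 3 -> 11 Mbit/s */
        rate_idx_to_report(&tx, 7);     /* HT MCS 7 */
        printf("rx %d.%d Mbit/s, tx MCS %d\n",
               rx.legacy / 10, rx.legacy % 10, tx.mcs);
        return 0;
}
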
index 1f02e599a3185c06de0b214711eece19ff82c136..51f0d780dafadfd300719a3b2f19514940824383 100644 (file)
@@ -60,6 +60,10 @@ static const struct file_operations name## _ops = {                  \
        debugfs_create_file(#name, mode, phyd, local, &name## _ops);
 
 
+DEBUGFS_READONLY_FILE(user_power, "%d",
+                     local->user_power_level);
+DEBUGFS_READONLY_FILE(power, "%d",
+                     local->hw.conf.power_level);
 DEBUGFS_READONLY_FILE(frequency, "%d",
                      local->hw.conf.channel->center_freq);
 DEBUGFS_READONLY_FILE(total_ps_buffered, "%d",
@@ -391,6 +395,8 @@ void debugfs_hw_add(struct ieee80211_local *local)
        DEBUGFS_ADD(uapsd_queues);
        DEBUGFS_ADD(uapsd_max_sp_len);
        DEBUGFS_ADD(channel_type);
+       DEBUGFS_ADD(user_power);
+       DEBUGFS_ADD(power);
 
        statsd = debugfs_create_dir("statistics", phyd);
 
index 2dabdf7680d0659a45d60e1aafa8536ef8e711bc..dacace6b139301ad5625a738465190e2827af160 100644 (file)
@@ -36,7 +36,7 @@ static ssize_t ieee80211_if_read(
                ret = (*format)(sdata, buf, sizeof(buf));
        read_unlock(&dev_base_lock);
 
-       if (ret != -EINVAL)
+       if (ret >= 0)
                ret = simple_read_from_buffer(userbuf, count, ppos, buf, ret);
 
        return ret;
@@ -81,6 +81,8 @@ static ssize_t ieee80211_if_fmt_##name(                                       \
                IEEE80211_IF_FMT(name, field, "%d\n")
 #define IEEE80211_IF_FMT_HEX(name, field)                              \
                IEEE80211_IF_FMT(name, field, "%#x\n")
+#define IEEE80211_IF_FMT_LHEX(name, field)                             \
+               IEEE80211_IF_FMT(name, field, "%#lx\n")
 #define IEEE80211_IF_FMT_SIZE(name, field)                             \
                IEEE80211_IF_FMT(name, field, "%zd\n")
 
@@ -145,6 +147,9 @@ IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ],
                  HEX);
 IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
                  HEX);
+IEEE80211_IF_FILE(flags, flags, HEX);
+IEEE80211_IF_FILE(state, state, LHEX);
+IEEE80211_IF_FILE(channel_type, vif.bss_conf.channel_type, DEC);
 
 /* STA attributes */
 IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
@@ -216,6 +221,104 @@ static ssize_t ieee80211_if_parse_smps(struct ieee80211_sub_if_data *sdata,
 
 __IEEE80211_IF_FILE_W(smps);
 
+static ssize_t ieee80211_if_fmt_tkip_mic_test(
+       const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
+{
+       return -EOPNOTSUPP;
+}
+
+static int hwaddr_aton(const char *txt, u8 *addr)
+{
+       int i;
+
+       for (i = 0; i < ETH_ALEN; i++) {
+               int a, b;
+
+               a = hex_to_bin(*txt++);
+               if (a < 0)
+                       return -1;
+               b = hex_to_bin(*txt++);
+               if (b < 0)
+                       return -1;
+               *addr++ = (a << 4) | b;
+               if (i < 5 && *txt++ != ':')
+                       return -1;
+       }
+
+       return 0;
+}
+
+static ssize_t ieee80211_if_parse_tkip_mic_test(
+       struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
+{
+       struct ieee80211_local *local = sdata->local;
+       u8 addr[ETH_ALEN];
+       struct sk_buff *skb;
+       struct ieee80211_hdr *hdr;
+       __le16 fc;
+
+       /*
+        * Assume colon-delimited MAC address with possible white space
+        * following.
+        */
+       if (buflen < 3 * ETH_ALEN - 1)
+               return -EINVAL;
+       if (hwaddr_aton(buf, addr) < 0)
+               return -EINVAL;
+
+       if (!ieee80211_sdata_running(sdata))
+               return -ENOTCONN;
+
+       skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 100);
+       if (!skb)
+               return -ENOMEM;
+       skb_reserve(skb, local->hw.extra_tx_headroom);
+
+       hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
+       memset(hdr, 0, 24);
+       fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
+
+       switch (sdata->vif.type) {
+       case NL80211_IFTYPE_AP:
+               fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
+               /* DA BSSID SA */
+               memcpy(hdr->addr1, addr, ETH_ALEN);
+               memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+               memcpy(hdr->addr3, sdata->vif.addr, ETH_ALEN);
+               break;
+       case NL80211_IFTYPE_STATION:
+               fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
+               /* BSSID SA DA */
+               if (sdata->vif.bss_conf.bssid == NULL) {
+                       dev_kfree_skb(skb);
+                       return -ENOTCONN;
+               }
+               memcpy(hdr->addr1, sdata->vif.bss_conf.bssid, ETH_ALEN);
+               memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+               memcpy(hdr->addr3, addr, ETH_ALEN);
+               break;
+       default:
+               dev_kfree_skb(skb);
+               return -EOPNOTSUPP;
+       }
+       hdr->frame_control = fc;
+
+       /*
+        * Add some length to the test frame to make it look a bit more valid.
+        * The exact contents do not matter since the recipient is required
+        * to drop this because of the Michael MIC failure.
+        */
+       memset(skb_put(skb, 50), 0, 50);
+
+       IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_TKIP_MIC_FAILURE;
+
+       ieee80211_tx_skb(sdata, skb);
+
+       return buflen;
+}
+
+__IEEE80211_IF_FILE_W(tkip_mic_test);
+
 /* AP attributes */
 IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
 IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC);
@@ -283,6 +386,9 @@ IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
 static void add_sta_files(struct ieee80211_sub_if_data *sdata)
 {
        DEBUGFS_ADD(drop_unencrypted);
+       DEBUGFS_ADD(flags);
+       DEBUGFS_ADD(state);
+       DEBUGFS_ADD(channel_type);
        DEBUGFS_ADD(rc_rateidx_mask_2ghz);
        DEBUGFS_ADD(rc_rateidx_mask_5ghz);
 
@@ -291,22 +397,30 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
        DEBUGFS_ADD(last_beacon);
        DEBUGFS_ADD(ave_beacon);
        DEBUGFS_ADD_MODE(smps, 0600);
+       DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
 }
 
 static void add_ap_files(struct ieee80211_sub_if_data *sdata)
 {
        DEBUGFS_ADD(drop_unencrypted);
+       DEBUGFS_ADD(flags);
+       DEBUGFS_ADD(state);
+       DEBUGFS_ADD(channel_type);
        DEBUGFS_ADD(rc_rateidx_mask_2ghz);
        DEBUGFS_ADD(rc_rateidx_mask_5ghz);
 
        DEBUGFS_ADD(num_sta_ps);
        DEBUGFS_ADD(dtim_count);
        DEBUGFS_ADD(num_buffered_multicast);
+       DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
 }
 
 static void add_wds_files(struct ieee80211_sub_if_data *sdata)
 {
        DEBUGFS_ADD(drop_unencrypted);
+       DEBUGFS_ADD(flags);
+       DEBUGFS_ADD(state);
+       DEBUGFS_ADD(channel_type);
        DEBUGFS_ADD(rc_rateidx_mask_2ghz);
        DEBUGFS_ADD(rc_rateidx_mask_5ghz);
 
@@ -316,12 +430,18 @@ static void add_wds_files(struct ieee80211_sub_if_data *sdata)
 static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
 {
        DEBUGFS_ADD(drop_unencrypted);
+       DEBUGFS_ADD(flags);
+       DEBUGFS_ADD(state);
+       DEBUGFS_ADD(channel_type);
        DEBUGFS_ADD(rc_rateidx_mask_2ghz);
        DEBUGFS_ADD(rc_rateidx_mask_5ghz);
 }
 
 static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
 {
+       DEBUGFS_ADD(flags);
+       DEBUGFS_ADD(state);
+       DEBUGFS_ADD(channel_type);
 }
 
 #ifdef CONFIG_MAC80211_MESH
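
The tkip_mic_test debugfs file parses a colon-delimited MAC address with hwaddr_aton(), built on hex_to_bin(). The same parser as a standalone program, with the hex-digit helper written out since the kernel's hex_to_bin() is not available in userspace:

#include <stdio.h>

#define ETH_ALEN 6

static int hex_to_bin(char ch)
{
        if (ch >= '0' && ch <= '9')
                return ch - '0';
        if (ch >= 'a' && ch <= 'f')
                return ch - 'a' + 10;
        if (ch >= 'A' && ch <= 'F')
                return ch - 'A' + 10;
        return -1;
}

/* parse "aa:bb:cc:dd:ee:ff" into addr[], returning 0 on success */
static int hwaddr_aton(const char *txt, unsigned char *addr)
{
        int i;

        for (i = 0; i < ETH_ALEN; i++) {
                int a = hex_to_bin(*txt++);
                int b = (a < 0) ? -1 : hex_to_bin(*txt++);

                if (a < 0 || b < 0)
                        return -1;
                *addr++ = (a << 4) | b;
                if (i < ETH_ALEN - 1 && *txt++ != ':')
                        return -1;
        }
        return 0;
}

int main(void)
{
        unsigned char addr[ETH_ALEN];

        if (hwaddr_aton("00:1b:2c:3d:4e:5f", addr) == 0)
                printf("%02x-%02x-%02x-%02x-%02x-%02x\n",
                       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
        return 0;
}
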
index 98d589960a4913c96f3b717f9b6a997215c9484d..3729296f6f9580d97500f023afca38ff3004ff9f 100644 (file)
@@ -5,9 +5,9 @@
 #include "ieee80211_i.h"
 #include "driver-trace.h"
 
-static inline int drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
+static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
 {
-       return local->ops->tx(&local->hw, skb);
+       local->ops->tx(&local->hw, skb);
 }
 
 static inline int drv_start(struct ieee80211_local *local)
@@ -382,17 +382,17 @@ static inline int drv_ampdu_action(struct ieee80211_local *local,
                                   struct ieee80211_sub_if_data *sdata,
                                   enum ieee80211_ampdu_mlme_action action,
                                   struct ieee80211_sta *sta, u16 tid,
-                                  u16 *ssn)
+                                  u16 *ssn, u8 buf_size)
 {
        int ret = -EOPNOTSUPP;
 
        might_sleep();
 
-       trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn);
+       trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, buf_size);
 
        if (local->ops->ampdu_action)
                ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
-                                              sta, tid, ssn);
+                                              sta, tid, ssn, buf_size);
 
        trace_drv_return_int(local, ret);
 
@@ -495,4 +495,35 @@ static inline int drv_cancel_remain_on_channel(struct ieee80211_local *local)
        return ret;
 }
 
+static inline int drv_offchannel_tx(struct ieee80211_local *local,
+                                   struct sk_buff *skb,
+                                   struct ieee80211_channel *chan,
+                                   enum nl80211_channel_type channel_type,
+                                   unsigned int wait)
+{
+       int ret;
+
+       might_sleep();
+
+       trace_drv_offchannel_tx(local, skb, chan, channel_type, wait);
+       ret = local->ops->offchannel_tx(&local->hw, skb, chan,
+                                       channel_type, wait);
+       trace_drv_return_int(local, ret);
+
+       return ret;
+}
+
+static inline int drv_offchannel_tx_cancel_wait(struct ieee80211_local *local)
+{
+       int ret;
+
+       might_sleep();
+
+       trace_drv_offchannel_tx_cancel_wait(local);
+       ret = local->ops->offchannel_tx_cancel_wait(&local->hw);
+       trace_drv_return_int(local, ret);
+
+       return ret;
+}
+
 #endif /* __MAC80211_DRIVER_OPS */
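
Every drv_*() wrapper in driver-ops.h, including the drv_ampdu_action() variant that grows a buf_size argument here, follows one shape: trace the call, dispatch through the optional ops callback, fall back to -EOPNOTSUPP, trace the return. A condensed sketch of that dispatch pattern, with printf standing in for the tracepoints and a stand-in ops structure rather than the real ieee80211_ops:

#include <stdio.h>
#include <errno.h>

struct driver_ops {
        /* optional callback: a driver may leave this NULL */
        int (*ampdu_action)(int action, unsigned int tid,
                            unsigned char buf_size);
};

static int drv_ampdu_action(const struct driver_ops *ops, int action,
                            unsigned int tid, unsigned char buf_size)
{
        int ret = -EOPNOTSUPP;

        printf("trace: ampdu_action action=%d tid=%u buf=%u\n",
               action, tid, buf_size);
        if (ops->ampdu_action)
                ret = ops->ampdu_action(action, tid, buf_size);
        printf("trace: return %d\n", ret);
        return ret;
}

static int fake_ampdu_action(int action, unsigned int tid,
                             unsigned char buf_size)
{
        (void)action; (void)tid; (void)buf_size;
        return 0;
}

int main(void)
{
        struct driver_ops with = { .ampdu_action = fake_ampdu_action };
        struct driver_ops without = { 0 };

        drv_ampdu_action(&with, 1, 0, 64);      /* dispatched to the driver */
        drv_ampdu_action(&without, 1, 0, 64);   /* -EOPNOTSUPP fallback */
        return 0;
}
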
index 49c84218b2f41036bf34a28c6b2c02f5580803ed..520fe24448931d4e320d7712ac13a87d61db29e6 100644 (file)
@@ -9,6 +9,11 @@
 #undef TRACE_EVENT
 #define TRACE_EVENT(name, proto, ...) \
 static inline void trace_ ## name(proto) {}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
 #endif
 
 #undef TRACE_SYSTEM
@@ -38,7 +43,7 @@ static inline void trace_ ## name(proto) {}
  * Tracing for driver callbacks.
  */
 
-TRACE_EVENT(drv_return_void,
+DECLARE_EVENT_CLASS(local_only_evt,
        TP_PROTO(struct ieee80211_local *local),
        TP_ARGS(local),
        TP_STRUCT__entry(
@@ -50,6 +55,11 @@ TRACE_EVENT(drv_return_void,
        TP_printk(LOCAL_PR_FMT, LOCAL_PR_ARG)
 );
 
+DEFINE_EVENT(local_only_evt, drv_return_void,
+       TP_PROTO(struct ieee80211_local *local),
+       TP_ARGS(local)
+);
+
 TRACE_EVENT(drv_return_int,
        TP_PROTO(struct ieee80211_local *local, int ret),
        TP_ARGS(local, ret),
@@ -78,40 +88,14 @@ TRACE_EVENT(drv_return_u64,
        TP_printk(LOCAL_PR_FMT " - %llu", LOCAL_PR_ARG, __entry->ret)
 );
 
-TRACE_EVENT(drv_start,
+DEFINE_EVENT(local_only_evt, drv_start,
        TP_PROTO(struct ieee80211_local *local),
-
-       TP_ARGS(local),
-
-       TP_STRUCT__entry(
-               LOCAL_ENTRY
-       ),
-
-       TP_fast_assign(
-               LOCAL_ASSIGN;
-       ),
-
-       TP_printk(
-               LOCAL_PR_FMT, LOCAL_PR_ARG
-       )
+       TP_ARGS(local)
 );
 
-TRACE_EVENT(drv_stop,
+DEFINE_EVENT(local_only_evt, drv_stop,
        TP_PROTO(struct ieee80211_local *local),
-
-       TP_ARGS(local),
-
-       TP_STRUCT__entry(
-               LOCAL_ENTRY
-       ),
-
-       TP_fast_assign(
-               LOCAL_ASSIGN;
-       ),
-
-       TP_printk(
-               LOCAL_PR_FMT, LOCAL_PR_ARG
-       )
+       TP_ARGS(local)
 );
 
 TRACE_EVENT(drv_add_interface,
@@ -439,40 +423,14 @@ TRACE_EVENT(drv_hw_scan,
        )
 );
 
-TRACE_EVENT(drv_sw_scan_start,
+DEFINE_EVENT(local_only_evt, drv_sw_scan_start,
        TP_PROTO(struct ieee80211_local *local),
-
-       TP_ARGS(local),
-
-       TP_STRUCT__entry(
-               LOCAL_ENTRY
-       ),
-
-       TP_fast_assign(
-               LOCAL_ASSIGN;
-       ),
-
-       TP_printk(
-               LOCAL_PR_FMT, LOCAL_PR_ARG
-       )
+       TP_ARGS(local)
 );
 
-TRACE_EVENT(drv_sw_scan_complete,
+DEFINE_EVENT(local_only_evt, drv_sw_scan_complete,
        TP_PROTO(struct ieee80211_local *local),
-
-       TP_ARGS(local),
-
-       TP_STRUCT__entry(
-               LOCAL_ENTRY
-       ),
-
-       TP_fast_assign(
-               LOCAL_ASSIGN;
-       ),
-
-       TP_printk(
-               LOCAL_PR_FMT, LOCAL_PR_ARG
-       )
+       TP_ARGS(local)
 );
 
 TRACE_EVENT(drv_get_stats,
@@ -702,23 +660,9 @@ TRACE_EVENT(drv_conf_tx,
        )
 );
 
-TRACE_EVENT(drv_get_tsf,
+DEFINE_EVENT(local_only_evt, drv_get_tsf,
        TP_PROTO(struct ieee80211_local *local),
-
-       TP_ARGS(local),
-
-       TP_STRUCT__entry(
-               LOCAL_ENTRY
-       ),
-
-       TP_fast_assign(
-               LOCAL_ASSIGN;
-       ),
-
-       TP_printk(
-               LOCAL_PR_FMT,
-               LOCAL_PR_ARG
-       )
+       TP_ARGS(local)
 );
 
 TRACE_EVENT(drv_set_tsf,
@@ -742,41 +686,14 @@ TRACE_EVENT(drv_set_tsf,
        )
 );
 
-TRACE_EVENT(drv_reset_tsf,
+DEFINE_EVENT(local_only_evt, drv_reset_tsf,
        TP_PROTO(struct ieee80211_local *local),
-
-       TP_ARGS(local),
-
-       TP_STRUCT__entry(
-               LOCAL_ENTRY
-       ),
-
-       TP_fast_assign(
-               LOCAL_ASSIGN;
-       ),
-
-       TP_printk(
-               LOCAL_PR_FMT, LOCAL_PR_ARG
-       )
+       TP_ARGS(local)
 );
 
-TRACE_EVENT(drv_tx_last_beacon,
+DEFINE_EVENT(local_only_evt, drv_tx_last_beacon,
        TP_PROTO(struct ieee80211_local *local),
-
-       TP_ARGS(local),
-
-       TP_STRUCT__entry(
-               LOCAL_ENTRY
-       ),
-
-       TP_fast_assign(
-               LOCAL_ASSIGN;
-       ),
-
-       TP_printk(
-               LOCAL_PR_FMT,
-               LOCAL_PR_ARG
-       )
+       TP_ARGS(local)
 );
 
 TRACE_EVENT(drv_ampdu_action,
@@ -784,9 +701,9 @@ TRACE_EVENT(drv_ampdu_action,
                 struct ieee80211_sub_if_data *sdata,
                 enum ieee80211_ampdu_mlme_action action,
                 struct ieee80211_sta *sta, u16 tid,
-                u16 *ssn),
+                u16 *ssn, u8 buf_size),
 
-       TP_ARGS(local, sdata, action, sta, tid, ssn),
+       TP_ARGS(local, sdata, action, sta, tid, ssn, buf_size),
 
        TP_STRUCT__entry(
                LOCAL_ENTRY
@@ -794,6 +711,7 @@ TRACE_EVENT(drv_ampdu_action,
                __field(u32, action)
                __field(u16, tid)
                __field(u16, ssn)
+               __field(u8, buf_size)
                VIF_ENTRY
        ),
 
@@ -804,11 +722,13 @@ TRACE_EVENT(drv_ampdu_action,
                __entry->action = action;
                __entry->tid = tid;
                __entry->ssn = ssn ? *ssn : 0;
+               __entry->buf_size = buf_size;
        ),
 
        TP_printk(
-               LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d",
-               LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid
+               LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d buf:%d",
+               LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action,
+               __entry->tid, __entry->buf_size
        )
 );
 
@@ -959,24 +879,44 @@ TRACE_EVENT(drv_remain_on_channel,
        )
 );
 
-TRACE_EVENT(drv_cancel_remain_on_channel,
+DEFINE_EVENT(local_only_evt, drv_cancel_remain_on_channel,
        TP_PROTO(struct ieee80211_local *local),
+       TP_ARGS(local)
+);
 
-       TP_ARGS(local),
+TRACE_EVENT(drv_offchannel_tx,
+       TP_PROTO(struct ieee80211_local *local, struct sk_buff *skb,
+                struct ieee80211_channel *chan,
+                enum nl80211_channel_type channel_type,
+                unsigned int wait),
+
+       TP_ARGS(local, skb, chan, channel_type, wait),
 
        TP_STRUCT__entry(
                LOCAL_ENTRY
+               __field(int, center_freq)
+               __field(int, channel_type)
+               __field(unsigned int, wait)
        ),
 
        TP_fast_assign(
                LOCAL_ASSIGN;
+               __entry->center_freq = chan->center_freq;
+               __entry->channel_type = channel_type;
+               __entry->wait = wait;
        ),
 
        TP_printk(
-               LOCAL_PR_FMT, LOCAL_PR_ARG
+               LOCAL_PR_FMT " freq:%dMHz, wait:%dms",
+               LOCAL_PR_ARG, __entry->center_freq, __entry->wait
        )
 );
 
+DEFINE_EVENT(local_only_evt, drv_offchannel_tx_cancel_wait,
+       TP_PROTO(struct ieee80211_local *local),
+       TP_ARGS(local)
+);
+
 /*
  * Tracing for API calls that drivers call.
  */
@@ -1069,23 +1009,9 @@ TRACE_EVENT(api_stop_tx_ba_cb,
        )
 );
 
-TRACE_EVENT(api_restart_hw,
+DEFINE_EVENT(local_only_evt, api_restart_hw,
        TP_PROTO(struct ieee80211_local *local),
-
-       TP_ARGS(local),
-
-       TP_STRUCT__entry(
-               LOCAL_ENTRY
-       ),
-
-       TP_fast_assign(
-               LOCAL_ASSIGN;
-       ),
-
-       TP_printk(
-               LOCAL_PR_FMT,
-               LOCAL_PR_ARG
-       )
+       TP_ARGS(local)
 );
 
 TRACE_EVENT(api_beacon_loss,
@@ -1214,40 +1140,14 @@ TRACE_EVENT(api_chswitch_done,
        )
 );
 
-TRACE_EVENT(api_ready_on_channel,
+DEFINE_EVENT(local_only_evt, api_ready_on_channel,
        TP_PROTO(struct ieee80211_local *local),
-
-       TP_ARGS(local),
-
-       TP_STRUCT__entry(
-               LOCAL_ENTRY
-       ),
-
-       TP_fast_assign(
-               LOCAL_ASSIGN;
-       ),
-
-       TP_printk(
-               LOCAL_PR_FMT, LOCAL_PR_ARG
-       )
+       TP_ARGS(local)
 );
 
-TRACE_EVENT(api_remain_on_channel_expired,
+DEFINE_EVENT(local_only_evt, api_remain_on_channel_expired,
        TP_PROTO(struct ieee80211_local *local),
-
-       TP_ARGS(local),
-
-       TP_STRUCT__entry(
-               LOCAL_ENTRY
-       ),
-
-       TP_fast_assign(
-               LOCAL_ASSIGN;
-       ),
-
-       TP_printk(
-               LOCAL_PR_FMT, LOCAL_PR_ARG
-       )
+       TP_ARGS(local)
 );
 
 /*
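
The driver-trace.h rework collapses a dozen identical TRACE_EVENT() bodies into one DECLARE_EVENT_CLASS(local_only_evt) plus short DEFINE_EVENT() lines, and extends the tracing-disabled stubs so DEFINE_EVENT also expands to an empty inline. A toy userspace illustration of that stub side only, outside the real tracepoint machinery:

#include <stdio.h>

struct local { int id; };

/* when tracing is compiled out, every event collapses to an empty inline */
#define DECLARE_EVENT_CLASS(...)
#define DEFINE_EVENT(evt_class, name, proto, ...) \
        static inline void trace_##name(proto) {}

DECLARE_EVENT_CLASS(local_only_evt)
DEFINE_EVENT(local_only_evt, drv_start, struct local *local, local)
DEFINE_EVENT(local_only_evt, drv_stop, struct local *local, local)

int main(void)
{
        struct local l = { .id = 1 };

        /* these compile to no-ops but keep the call sites unchanged */
        trace_drv_start(&l);
        trace_drv_stop(&l);
        printf("traced nothing for local %d\n", l.id);
        return 0;
}
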
index 75d679d75e63e92143d55a47601a1dfc1377ba03..b9e4b9bd217904cb5333f6a439ad7560803f0030 100644 (file)
@@ -66,6 +66,9 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
        /* own MCS TX capabilities */
        tx_mcs_set_cap = sband->ht_cap.mcs.tx_params;
 
+       /* Copy peer MCS TX capabilities, the driver might need them. */
+       ht_cap->mcs.tx_params = ht_cap_ie->mcs.tx_params;
+
        /* can we TX with MCS rates? */
        if (!(tx_mcs_set_cap & IEEE80211_HT_MCS_TX_DEFINED))
                return;
@@ -79,7 +82,7 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
                max_tx_streams = IEEE80211_HT_MCS_TX_MAX_STREAMS;
 
        /*
-        * 802.11n D5.0 20.3.5 / 20.6 says:
+        * 802.11n-2009 20.3.5 / 20.6 says:
         * - indices 0 to 7 and 32 are single spatial stream
         * - 8 to 31 are multiple spatial streams using equal modulation
         *   [8..15 for two streams, 16..23 for three and 24..31 for four]
index 53c7077ffd4f2ff36e5ddcbdcc3f33bd378cb1c7..3e81af1fce58177da28120b47111283c443fad99 100644 (file)
@@ -31,7 +31,6 @@
 #define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ)
 
 #define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ)
-#define IEEE80211_IBSS_MERGE_DELAY 0x400000
 #define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ)
 
 #define IEEE80211_IBSS_MAX_STA_ENTRIES 128
@@ -270,7 +269,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
        enum ieee80211_band band = rx_status->band;
 
        if (elems->ds_params && elems->ds_params_len == 1)
-               freq = ieee80211_channel_to_frequency(elems->ds_params[0]);
+               freq = ieee80211_channel_to_frequency(elems->ds_params[0],
+                                                     band);
        else
                freq = rx_status->freq;
 
@@ -354,7 +354,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
        if (memcmp(cbss->bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0)
                goto put_bss;
 
-       if (rx_status->flag & RX_FLAG_TSFT) {
+       if (rx_status->flag & RX_FLAG_MACTIME_MPDU) {
                /*
                 * For correct IBSS merging we need mactime; since mactime is
                 * defined as the time the first data symbol of the frame hits
@@ -396,10 +396,6 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
               jiffies);
 #endif
 
-       /* give slow hardware some time to do the TSF sync */
-       if (rx_timestamp < IEEE80211_IBSS_MERGE_DELAY)
-               goto put_bss;
-
        if (beacon_timestamp > rx_timestamp) {
 #ifdef CONFIG_MAC80211_IBSS_DEBUG
                printk(KERN_DEBUG "%s: beacon TSF higher than "
@@ -663,12 +659,13 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
 }
 
 static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
-                                       struct ieee80211_mgmt *mgmt,
-                                       size_t len)
+                                       struct sk_buff *req)
 {
+       struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(req);
+       struct ieee80211_mgmt *mgmt = (void *)req->data;
        struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
        struct ieee80211_local *local = sdata->local;
-       int tx_last_beacon;
+       int tx_last_beacon, len = req->len;
        struct sk_buff *skb;
        struct ieee80211_mgmt *resp;
        u8 *pos, *end;
@@ -688,7 +685,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
               mgmt->bssid, tx_last_beacon);
 #endif /* CONFIG_MAC80211_IBSS_DEBUG */
 
-       if (!tx_last_beacon)
+       if (!tx_last_beacon && !(rx_status->rx_flags & IEEE80211_RX_RA_MATCH))
                return;
 
        if (memcmp(mgmt->bssid, ifibss->bssid, ETH_ALEN) != 0 &&
@@ -785,7 +782,7 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 
        switch (fc & IEEE80211_FCTL_STYPE) {
        case IEEE80211_STYPE_PROBE_REQ:
-               ieee80211_rx_mgmt_probe_req(sdata, mgmt, skb->len);
+               ieee80211_rx_mgmt_probe_req(sdata, skb);
                break;
        case IEEE80211_STYPE_PROBE_RESP:
                ieee80211_rx_mgmt_probe_resp(sdata, mgmt, skb->len,
index 533fd32f49fff23aff77b5faa45b87a07d19d5f9..a404017014246c19fd9c64d6edda717015cbe140 100644 (file)
@@ -225,6 +225,7 @@ struct ieee80211_if_ap {
        struct sk_buff_head ps_bc_buf;
        atomic_t num_sta_ps; /* number of stations in PS mode */
        int dtim_count;
+       bool dtim_bc_mc;
 };
 
 struct ieee80211_if_wds {
@@ -654,8 +655,6 @@ struct tpt_led_trigger {
  *     well be on the operating channel
  * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to
  *     determine if we are on the operating channel or not
- * @SCAN_OFF_CHANNEL: We're off our operating channel for scanning,
- *     gets only set in conjunction with SCAN_SW_SCANNING
  * @SCAN_COMPLETED: Set for our scan work function when the driver reported
  *     that the scan completed.
  * @SCAN_ABORTED: Set for our scan work function when the driver reported
@@ -664,7 +663,6 @@ struct tpt_led_trigger {
 enum {
        SCAN_SW_SCANNING,
        SCAN_HW_SCANNING,
-       SCAN_OFF_CHANNEL,
        SCAN_COMPLETED,
        SCAN_ABORTED,
 };
@@ -959,6 +957,7 @@ struct ieee80211_local {
        unsigned int hw_roc_duration;
        u32 hw_roc_cookie;
        bool hw_roc_for_tx;
+       unsigned long hw_offchan_tx_cookie;
 
        /* dummy netdev for use w/ NAPI */
        struct net_device napi_dev;
@@ -1068,8 +1067,6 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
 void ieee80211_configure_filter(struct ieee80211_local *local);
 u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
 
-extern bool ieee80211_disable_40mhz_24ghz;
-
 /* STA code */
 void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata);
 int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
@@ -1147,10 +1144,14 @@ void ieee80211_rx_bss_put(struct ieee80211_local *local,
                          struct ieee80211_bss *bss);
 
 /* off-channel helpers */
-void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local);
-void ieee80211_offchannel_stop_station(struct ieee80211_local *local);
+bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local);
+void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
+                                       bool tell_ap);
+void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
+                                   bool offchannel_ps_enable);
 void ieee80211_offchannel_return(struct ieee80211_local *local,
-                                bool enable_beaconing);
+                                bool enable_beaconing,
+                                bool offchannel_ps_disable);
 void ieee80211_hw_roc_setup(struct ieee80211_local *local);
 
 /* interface handling */
index 7a10a8d1b2d0db48f1c8f2eca47e6372a1089f4f..4054399be907f8be54d07cf786afc5693e562982 100644 (file)
@@ -382,6 +382,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        struct sk_buff *skb, *tmp;
        u32 hw_reconf_flags = 0;
        int i;
+       enum nl80211_channel_type orig_ct;
 
        if (local->scan_sdata == sdata)
                ieee80211_scan_cancel(local);
@@ -542,8 +543,14 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                hw_reconf_flags = 0;
        }
 
+       /* Re-calculate channel-type, in case there are multiple vifs
+        * on different channel types.
+        */
+       orig_ct = local->_oper_channel_type;
+       ieee80211_set_channel_type(local, NULL, NL80211_CHAN_NO_HT);
+
        /* do after stop to avoid reconfiguring when we stop anyway */
-       if (hw_reconf_flags)
+       if (hw_reconf_flags || (orig_ct != local->_oper_channel_type))
                ieee80211_hw_config(local, hw_reconf_flags);
 
        spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
index a46ff06d7cb8ce9d8e31fd593555925c9aa07500..2543e48bd813b7e32ec069d0246b1b7e887890d7 100644 (file)
@@ -34,7 +34,7 @@
 #include "debugfs.h"
 
 
-bool ieee80211_disable_40mhz_24ghz;
+static bool ieee80211_disable_40mhz_24ghz;
 module_param(ieee80211_disable_40mhz_24ghz, bool, 0644);
 MODULE_PARM_DESC(ieee80211_disable_40mhz_24ghz,
                 "Disable 40MHz support in the 2.4GHz band");
@@ -98,6 +98,47 @@ static void ieee80211_reconfig_filter(struct work_struct *work)
        ieee80211_configure_filter(local);
 }
 
+/*
+ * Returns true if we are logically configured to be on
+ * the operating channel AND the hardware-conf is currently
+ * configured on the operating channel.  Compares channel-type
+ * as well.
+ */
+bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local)
+{
+       struct ieee80211_channel *chan, *scan_chan;
+       enum nl80211_channel_type channel_type;
+
+       /* This logic needs to match logic in ieee80211_hw_config */
+       if (local->scan_channel) {
+               chan = local->scan_channel;
+               /* If scanning on oper channel, use whatever channel-type
+                * is currently in use.
+                */
+               if (chan == local->oper_channel)
+                       channel_type = local->_oper_channel_type;
+               else
+                       channel_type = NL80211_CHAN_NO_HT;
+       } else if (local->tmp_channel) {
+               chan = scan_chan = local->tmp_channel;
+               channel_type = local->tmp_channel_type;
+       } else {
+               chan = local->oper_channel;
+               channel_type = local->_oper_channel_type;
+       }
+
+       if (chan != local->oper_channel ||
+           channel_type != local->_oper_channel_type)
+               return false;
+
+       /* Check current hardware-config against oper_channel. */
+       if ((local->oper_channel != local->hw.conf.channel) ||
+           (local->_oper_channel_type != local->hw.conf.channel_type))
+               return false;
+
+       return true;
+}
+
 int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
 {
        struct ieee80211_channel *chan, *scan_chan;
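
A minimal caller sketch of the helper added above, assuming the usual struct ieee80211_local pointer named local; it simply mirrors the scan.c usage later in this diff and is not new code in the patch:

	/* Skip the hardware channel switch if we are already configured
	 * on the operating channel.
	 */
	if (!ieee80211_cfg_on_oper_channel(local))
		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
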
@@ -110,21 +151,33 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
 
        scan_chan = local->scan_channel;
 
+       /* If this off-channel logic ever changes, ieee80211_cfg_on_oper_channel
+        * may need to change as well.
+        */
        offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
        if (scan_chan) {
                chan = scan_chan;
-               channel_type = NL80211_CHAN_NO_HT;
-               local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
-       } else if (local->tmp_channel &&
-                  local->oper_channel != local->tmp_channel) {
+               /* If scanning on oper channel, use whatever channel-type
+                * is currently in use.
+                */
+               if (chan == local->oper_channel)
+                       channel_type = local->_oper_channel_type;
+               else
+                       channel_type = NL80211_CHAN_NO_HT;
+       } else if (local->tmp_channel) {
                chan = scan_chan = local->tmp_channel;
                channel_type = local->tmp_channel_type;
-               local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
        } else {
                chan = local->oper_channel;
                channel_type = local->_oper_channel_type;
-               local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
        }
+
+       if (chan != local->oper_channel ||
+           channel_type != local->_oper_channel_type)
+               local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
+       else
+               local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
+
        offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
 
        if (offchannel_flag || chan != local->hw.conf.channel ||
@@ -146,7 +199,8 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
                changed |= IEEE80211_CONF_CHANGE_SMPS;
        }
 
-       if (scan_chan)
+       if ((local->scanning & SCAN_SW_SCANNING) ||
+           (local->scanning & SCAN_HW_SCANNING))
                power = chan->max_power;
        else
                power = local->power_constr_level ?
@@ -231,7 +285,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
 
        if (changed & BSS_CHANGED_BEACON_ENABLED) {
                if (local->quiescing || !ieee80211_sdata_running(sdata) ||
-                   test_bit(SCAN_SW_SCANNING, &local->scanning)) {
+                   test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) {
                        sdata->vif.bss_conf.enable_beacon = false;
                } else {
                        /*
@@ -554,6 +608,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
        local->hw.queues = 1;
        local->hw.max_rates = 1;
        local->hw.max_report_rates = 0;
+       local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
        local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
        local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
        local->user_power_level = -1;
@@ -668,6 +723,18 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                }
                channels += sband->n_channels;
 
+               /*
+                * Since ieee80211_disable_40mhz_24ghz is global, we can
+                * modify the sband's ht data even if the driver uses a
+                * global structure for that.
+                */
+               if (ieee80211_disable_40mhz_24ghz &&
+                   band == IEEE80211_BAND_2GHZ &&
+                   sband->ht_cap.ht_supported) {
+                       sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+                       sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40;
+               }
+
                if (max_bitrates < sband->n_bitrates)
                        max_bitrates = sband->n_bitrates;
                supp_ht = supp_ht || sband->ht_cap.ht_supported;
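
For illustration, assuming the standard IEEE80211_HT_CAP_* bit definitions, the masking above strips 40 MHz support from a 2.4 GHz band before registration roughly like this (sketch only, not part of the patch):

	u16 cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		  IEEE80211_HT_CAP_SGI_20 |
		  IEEE80211_HT_CAP_SGI_40;

	cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;	/* no 40 MHz channels */
	cap &= ~IEEE80211_HT_CAP_SGI_40;		/* no short GI at 40 MHz */
	/* cap is now IEEE80211_HT_CAP_SGI_20 only */
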
index ca3af4685b0a6983e05aca4f87d76929ab8b4d23..2a57cc02c6189415387cbba01585778a7ab1674a 100644 (file)
@@ -574,7 +574,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
                               &elems);
 
        if (elems.ds_params && elems.ds_params_len == 1)
-               freq = ieee80211_channel_to_frequency(elems.ds_params[0]);
+               freq = ieee80211_channel_to_frequency(elems.ds_params[0], band);
        else
                freq = rx_status->freq;
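
The extra band argument matters because channel numbers alone are ambiguous across bands; assuming the standard 802.11 channel plan, for example:

	int f24 = ieee80211_channel_to_frequency(1, IEEE80211_BAND_2GHZ);  /* 2412 MHz */
	int f5  = ieee80211_channel_to_frequency(36, IEEE80211_BAND_5GHZ); /* 5180 MHz */
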
 
@@ -645,7 +645,7 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
        if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags))
                mesh_mpath_table_grow();
 
-       if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags))
+       if (test_and_clear_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags))
                mesh_mpp_table_grow();
 
        if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags))
index c9ceb4d57ab0e62ab3c250ad1e0d959ce1de06ef..cc984bd861cfacd7d463a56928b44666e66155db 100644 (file)
 #include "rate.h"
 #include "led.h"
 
-#define IEEE80211_MAX_NULLFUNC_TRIES 2
-#define IEEE80211_MAX_PROBE_TRIES 5
+static int max_nullfunc_tries = 2;
+module_param(max_nullfunc_tries, int, 0644);
+MODULE_PARM_DESC(max_nullfunc_tries,
+                "Maximum nullfunc tx tries before disconnecting (reason 4).");
+
+static int max_probe_tries = 5;
+module_param(max_probe_tries, int, 0644);
+MODULE_PARM_DESC(max_probe_tries,
+                "Maximum probe tries before disconnecting (reason 4).");
 
 /*
  * Beacon loss timeout is calculated as N frames times the
  * a probe request because of beacon loss or for
  * checking the connection still works.
  */
-#define IEEE80211_PROBE_WAIT           (HZ / 2)
+static int probe_wait_ms = 500;
+module_param(probe_wait_ms, int, 0644);
+MODULE_PARM_DESC(probe_wait_ms,
+                "Maximum time(ms) to wait for probe response"
+                " before disconnecting (reason 4).");
 
 /*
  * Weight given to the latest Beacon frame when calculating average signal
@@ -134,6 +145,9 @@ void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
+       if (unlikely(!sdata->u.mgd.associated))
+               return;
+
        if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
                return;
 
@@ -161,6 +175,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_supported_band *sband;
        struct sta_info *sta;
        u32 changed = 0;
+       int hti_cfreq;
        u16 ht_opmode;
        bool enable_ht = true;
        enum nl80211_channel_type prev_chantype;
@@ -174,10 +189,27 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
        if (!sband->ht_cap.ht_supported)
                enable_ht = false;
 
-       /* check that channel matches the right operating channel */
-       if (local->hw.conf.channel->center_freq !=
-           ieee80211_channel_to_frequency(hti->control_chan))
-               enable_ht = false;
+       if (enable_ht) {
+               hti_cfreq = ieee80211_channel_to_frequency(hti->control_chan,
+                                                          sband->band);
+               /* check that channel matches the right operating channel */
+               if (local->hw.conf.channel->center_freq != hti_cfreq) {
+                       /* Some APs mess this up, evidently.
+                        * The Netgear WNDR3700, for instance, sometimes reports
+                        * a control channel 4 higher than the actual channel.
+                        */
+                       printk(KERN_DEBUG
+                              "%s: Wrong control channel in association"
+                              " response: configured center-freq: %d"
+                              " hti-cfreq: %d  hti->control_chan: %d"
+                              " band: %d.  Disabling HT.\n",
+                              sdata->name,
+                              local->hw.conf.channel->center_freq,
+                              hti_cfreq, hti->control_chan,
+                              sband->band);
+                       enable_ht = false;
+               }
+       }
 
        if (enable_ht) {
                channel_type = NL80211_CHAN_HT20;
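
A hedged numeric example of the check above (values chosen for illustration only): associated on channel 6 (2437 MHz) while the AP's HT information element claims control channel 10, the computed frequency no longer matches and HT gets disabled:

	/* local->hw.conf.channel->center_freq == 2437 in this example */
	hti_cfreq = ieee80211_channel_to_frequency(10, IEEE80211_BAND_2GHZ);
	/* hti_cfreq == 2457 != 2437, so enable_ht is cleared and the
	 * debug message above is printed
	 */
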
@@ -429,7 +461,8 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
                container_of((void *)bss, struct cfg80211_bss, priv);
        struct ieee80211_channel *new_ch;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-       int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num);
+       int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num,
+                                                     cbss->channel->band);
 
        ASSERT_MGD_MTX(ifmgd);
 
@@ -600,6 +633,14 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
        list_for_each_entry(sdata, &local->interfaces, list) {
                if (!ieee80211_sdata_running(sdata))
                        continue;
+               if (sdata->vif.type == NL80211_IFTYPE_AP) {
+                       /* If an AP vif is found, disable PS by setting
+                        * the count to zero, which in turn causes
+                        * ps_sdata to be set to NULL below.
+                        */
+                       count = 0;
+                       break;
+               }
                if (sdata->vif.type != NL80211_IFTYPE_STATION)
                        continue;
                found = sdata;
@@ -700,9 +741,19 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
                return;
 
        if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
-           (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)))
+           (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) {
+               netif_tx_stop_all_queues(sdata->dev);
+               /*
+                * Flush all the frames queued in the driver before
+                * going to power save
+                */
+               drv_flush(local, false);
                ieee80211_send_nullfunc(local, sdata, 1);
 
+               /* Flush once again to get the tx status of nullfunc frame */
+               drv_flush(local, false);
+       }
+
        if (!((local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
              (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) ||
            (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
@@ -710,6 +761,8 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
                local->hw.conf.flags |= IEEE80211_CONF_PS;
                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
        }
+
+       netif_tx_start_all_queues(sdata->dev);
 }
 
 void ieee80211_dynamic_ps_timer(unsigned long data)
@@ -1033,12 +1086,6 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
        if (is_multicast_ether_addr(hdr->addr1))
                return;
 
-       /*
-        * In case we receive frames after disassociation.
-        */
-       if (!sdata->u.mgd.associated)
-               return;
-
        ieee80211_sta_reset_conn_monitor(sdata);
 }
 
@@ -1095,7 +1142,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        const u8 *ssid;
        u8 *dst = ifmgd->associated->bssid;
-       u8 unicast_limit = max(1, IEEE80211_MAX_PROBE_TRIES - 3);
+       u8 unicast_limit = max(1, max_probe_tries - 3);
 
        /*
         * Try sending broadcast probe requests for the last three
@@ -1121,7 +1168,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
        }
 
        ifmgd->probe_send_count++;
-       ifmgd->probe_timeout = jiffies + IEEE80211_PROBE_WAIT;
+       ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
        run_again(ifmgd, ifmgd->probe_timeout);
 }
 
@@ -1222,7 +1269,8 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
 
        memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
 
-       printk(KERN_DEBUG "Connection to AP %pM lost.\n", bssid);
+       printk(KERN_DEBUG "%s: Connection to AP %pM lost.\n",
+              sdata->name, bssid);
 
        ieee80211_set_disassoc(sdata, true, true);
        mutex_unlock(&ifmgd->mtx);
@@ -1525,7 +1573,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
        }
 
        if (elems->ds_params && elems->ds_params_len == 1)
-               freq = ieee80211_channel_to_frequency(elems->ds_params[0]);
+               freq = ieee80211_channel_to_frequency(elems->ds_params[0],
+                                                     rx_status->band);
        else
                freq = rx_status->freq;
 
@@ -1966,9 +2015,9 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
                memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
 
                if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
-                       max_tries = IEEE80211_MAX_NULLFUNC_TRIES;
+                       max_tries = max_nullfunc_tries;
                else
-                       max_tries = IEEE80211_MAX_PROBE_TRIES;
+                       max_tries = max_probe_tries;
 
                /* ACK received for nullfunc probing frame */
                if (!ifmgd->probe_send_count)
@@ -1978,9 +2027,9 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
                                wiphy_debug(local->hw.wiphy,
                                            "%s: No ack for nullfunc frame to"
-                                           " AP %pM, try %d\n",
+                                           " AP %pM, try %d/%i\n",
                                            sdata->name, bssid,
-                                           ifmgd->probe_send_count);
+                                           ifmgd->probe_send_count, max_tries);
 #endif
                                ieee80211_mgd_probe_ap_send(sdata);
                        } else {
@@ -2000,17 +2049,17 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
                                    "%s: Failed to send nullfunc to AP %pM"
                                    " after %dms, disconnecting.\n",
                                    sdata->name,
-                                   bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
+                                   bssid, probe_wait_ms);
 #endif
                        ieee80211_sta_connection_lost(sdata, bssid);
                } else if (ifmgd->probe_send_count < max_tries) {
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
                        wiphy_debug(local->hw.wiphy,
                                    "%s: No probe response from AP %pM"
-                                   " after %dms, try %d\n",
+                                   " after %dms, try %d/%i\n",
                                    sdata->name,
-                                   bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ,
-                                   ifmgd->probe_send_count);
+                                   bssid, probe_wait_ms,
+                                   ifmgd->probe_send_count, max_tries);
 #endif
                        ieee80211_mgd_probe_ap_send(sdata);
                } else {
@@ -2022,7 +2071,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
                                    "%s: No probe response from AP %pM"
                                    " after %dms, disconnecting.\n",
                                    sdata->name,
-                                   bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
+                                   bssid, probe_wait_ms);
 
                        ieee80211_sta_connection_lost(sdata, bssid);
                }
@@ -2260,6 +2309,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
        else
                wk->type = IEEE80211_WORK_DIRECT_PROBE;
        wk->chan = req->bss->channel;
+       wk->chan_type = NL80211_CHAN_NO_HT;
        wk->sdata = sdata;
        wk->done = ieee80211_probe_auth_done;
 
@@ -2409,6 +2459,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                memcpy(wk->assoc.prev_bssid, req->prev_bssid, ETH_ALEN);
 
        wk->chan = req->bss->channel;
+       wk->chan_type = NL80211_CHAN_NO_HT;
        wk->sdata = sdata;
        wk->done = ieee80211_assoc_done;
        if (!bss->dtim_period &&
index b4e52676f3fb9d1de2f835d5f0d41d03586d9946..13427b194ced05d961dcf32fabf0e2c02cd073d5 100644 (file)
 #include "driver-trace.h"
 
 /*
- * inform AP that we will go to sleep so that it will buffer the frames
- * while we scan
+ * Tell our hardware to disable PS.
+ * Optionally inform the AP that we will go to sleep so that it will buffer
+ * frames while we are doing off-channel work.  Telling the AP is optional
+ * because we may be doing the work on the operating channel and still want
+ * the hardware unconditionally awake while the AP sends us normal frames.
  */
-static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
+static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
+                                          bool tell_ap)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -41,8 +45,8 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
        }
 
-       if (!(local->offchannel_ps_enabled) ||
-           !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
+       if (tell_ap && (!local->offchannel_ps_enabled ||
+                       !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)))
                /*
                 * If power save was enabled, no need to send a nullfunc
                 * frame because AP knows that we are sleeping. But if the
@@ -77,6 +81,9 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
                 * we are sleeping, let's just enable power save mode in
                 * hardware.
                 */
+               /* TODO:  Only set hardware if CONF_PS changed?
+                * TODO:  Should we set offchannel_ps_enabled to false?
+                */
                local->hw.conf.flags |= IEEE80211_CONF_PS;
                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
        } else if (local->hw.conf.dynamic_ps_timeout > 0) {
@@ -95,63 +102,61 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
        ieee80211_sta_reset_conn_monitor(sdata);
 }
 
-void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
+void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
+                                   bool offchannel_ps_enable)
 {
        struct ieee80211_sub_if_data *sdata;
 
+       /*
+        * Stop all non-monitor interfaces and, where requested, tell
+        * the AP(s) that we are leaving the channel.
+        */
        mutex_lock(&local->iflist_mtx);
        list_for_each_entry(sdata, &local->interfaces, list) {
                if (!ieee80211_sdata_running(sdata))
                        continue;
 
-               /* disable beaconing */
+               if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
+                       set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
+
+               /* Check to see if we should disable beaconing. */
                if (sdata->vif.type == NL80211_IFTYPE_AP ||
                    sdata->vif.type == NL80211_IFTYPE_ADHOC ||
                    sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
                        ieee80211_bss_info_change_notify(
                                sdata, BSS_CHANGED_BEACON_ENABLED);
 
-               /*
-                * only handle non-STA interfaces here, STA interfaces
-                * are handled in ieee80211_offchannel_stop_station(),
-                * e.g., from the background scan state machine.
-                *
-                * In addition, do not stop monitor interface to allow it to be
-                * used from user space controlled off-channel operations.
-                */
-               if (sdata->vif.type != NL80211_IFTYPE_STATION &&
-                   sdata->vif.type != NL80211_IFTYPE_MONITOR) {
-                       set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
+               if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
                        netif_tx_stop_all_queues(sdata->dev);
+                       if (offchannel_ps_enable &&
+                           (sdata->vif.type == NL80211_IFTYPE_STATION) &&
+                           sdata->u.mgd.associated)
+                               ieee80211_offchannel_ps_enable(sdata, true);
                }
        }
        mutex_unlock(&local->iflist_mtx);
 }
 
-void ieee80211_offchannel_stop_station(struct ieee80211_local *local)
+void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
+                                       bool tell_ap)
 {
        struct ieee80211_sub_if_data *sdata;
 
-       /*
-        * notify the AP about us leaving the channel and stop all STA interfaces
-        */
        mutex_lock(&local->iflist_mtx);
        list_for_each_entry(sdata, &local->interfaces, list) {
                if (!ieee80211_sdata_running(sdata))
                        continue;
 
-               if (sdata->vif.type == NL80211_IFTYPE_STATION) {
-                       set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
-                       netif_tx_stop_all_queues(sdata->dev);
-                       if (sdata->u.mgd.associated)
-                               ieee80211_offchannel_ps_enable(sdata);
-               }
+               if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+                   sdata->u.mgd.associated)
+                       ieee80211_offchannel_ps_enable(sdata, tell_ap);
        }
        mutex_unlock(&local->iflist_mtx);
 }
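
Taken together, the reworked helpers are meant to be driven in a fixed order by the scan code; a sketch of that call sequence, matching the scan.c hunks later in this diff (the wrapper function itself is illustrative only):

	static void example_scan_offchannel_flow(struct ieee80211_local *local,
						 bool enable_beacons)
	{
		/* at software-scan start: enable off-channel PS, do not tell the AP */
		ieee80211_offchannel_enable_all_ps(local, false);

		/* when actually leaving the operating channel */
		ieee80211_offchannel_stop_vifs(local, false);

		/* when returning to the operating channel in the middle of a scan */
		ieee80211_offchannel_return(local, true, false);

		/* when the scan completes: also take PS out of off-channel mode */
		ieee80211_offchannel_return(local, enable_beacons, true);
	}
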
 
 void ieee80211_offchannel_return(struct ieee80211_local *local,
-                                bool enable_beaconing)
+                                bool enable_beaconing,
+                                bool offchannel_ps_disable)
 {
        struct ieee80211_sub_if_data *sdata;
 
@@ -161,7 +166,8 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
                        continue;
 
                /* Tell AP we're back */
-               if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+               if (offchannel_ps_disable &&
+                   sdata->vif.type == NL80211_IFTYPE_STATION) {
                        if (sdata->u.mgd.associated)
                                ieee80211_offchannel_ps_disable(sdata);
                }
@@ -181,7 +187,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
                        netif_tx_wake_all_queues(sdata->dev);
                }
 
-               /* re-enable beaconing */
+               /* Check to see if we should re-enable beaconing */
                if (enable_beaconing &&
                    (sdata->vif.type == NL80211_IFTYPE_AP ||
                     sdata->vif.type == NL80211_IFTYPE_ADHOC ||
index a6701ed87f0d1b3fc2524ec122236df726a4f1f4..5c1930ba8ebe98d1727d014a182e9057d00bced4 100644 (file)
@@ -77,7 +77,7 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
        /* always present fields */
        len = sizeof(struct ieee80211_radiotap_header) + 9;
 
-       if (status->flag & RX_FLAG_TSFT)
+       if (status->flag & RX_FLAG_MACTIME_MPDU)
                len += 8;
        if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
                len += 1;
@@ -85,6 +85,9 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
        if (len & 1) /* padding for RX_FLAGS if necessary */
                len++;
 
+       if (status->flag & RX_FLAG_HT) /* HT info */
+               len += 3;
+
        return len;
 }
 
@@ -120,7 +123,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
        /* the order of the following fields is important */
 
        /* IEEE80211_RADIOTAP_TSFT */
-       if (status->flag & RX_FLAG_TSFT) {
+       if (status->flag & RX_FLAG_MACTIME_MPDU) {
                put_unaligned_le64(status->mactime, pos);
                rthdr->it_present |=
                        cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
@@ -139,11 +142,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
        /* IEEE80211_RADIOTAP_RATE */
        if (status->flag & RX_FLAG_HT) {
                /*
-                * TODO: add following information into radiotap header once
-                * suitable fields are defined for it:
-                * - MCS index (status->rate_idx)
-                * - HT40 (status->flag & RX_FLAG_40MHZ)
-                * - short-GI (status->flag & RX_FLAG_SHORT_GI)
+                * MCS information is a separate field in radiotap,
+                * added below.
                 */
                *pos = 0;
        } else {
@@ -193,6 +193,20 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
                rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
        put_unaligned_le16(rx_flags, pos);
        pos += 2;
+
+       if (status->flag & RX_FLAG_HT) {
+               rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
+               *pos++ = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
+                        IEEE80211_RADIOTAP_MCS_HAVE_GI |
+                        IEEE80211_RADIOTAP_MCS_HAVE_BW;
+               *pos = 0;
+               if (status->flag & RX_FLAG_SHORT_GI)
+                       *pos |= IEEE80211_RADIOTAP_MCS_SGI;
+               if (status->flag & RX_FLAG_40MHZ)
+                       *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
+               pos++;
+               *pos++ = status->rate_idx;
+       }
 }
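
The three bytes appended above follow the radiotap MCS field layout (known, flags, MCS index); a hedged sketch of reading them back, where the mcs pointer is hypothetical:

	const u8 *mcs = radiotap_mcs_field;	/* hypothetical pointer to the 3-byte field */
	u8 known = mcs[0];	/* IEEE80211_RADIOTAP_MCS_HAVE_* bits */
	u8 flags = mcs[1];	/* SGI and 40 MHz bandwidth bits */
	u8 index = mcs[2];	/* the MCS index, i.e. status->rate_idx */
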
 
 /*
@@ -392,16 +406,10 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
        if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN)))
                return RX_CONTINUE;
 
-       if (test_bit(SCAN_HW_SCANNING, &local->scanning))
+       if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
+           test_bit(SCAN_SW_SCANNING, &local->scanning))
                return ieee80211_scan_rx(rx->sdata, skb);
 
-       if (test_bit(SCAN_SW_SCANNING, &local->scanning)) {
-               /* drop all the other packets during a software scan anyway */
-               if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
-                       dev_kfree_skb(skb);
-               return RX_QUEUED;
-       }
-
        /* scanning finished during invoking of handlers */
        I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
        return RX_DROP_UNUSABLE;
@@ -798,7 +806,7 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
                                rx->local->dot11FrameDuplicateCount++;
                                rx->sta->num_duplicates++;
                        }
-                       return RX_DROP_MONITOR;
+                       return RX_DROP_UNUSABLE;
                } else
                        rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
        }
@@ -824,18 +832,8 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
                      ieee80211_is_pspoll(hdr->frame_control)) &&
                     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
                     rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
-                    (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
-               if ((!ieee80211_has_fromds(hdr->frame_control) &&
-                    !ieee80211_has_tods(hdr->frame_control) &&
-                    ieee80211_is_data(hdr->frame_control)) ||
-                   !(status->rx_flags & IEEE80211_RX_RA_MATCH)) {
-                       /* Drop IBSS frames and frames for other hosts
-                        * silently. */
-                       return RX_DROP_MONITOR;
-               }
-
+                    (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC))))
                return RX_DROP_MONITOR;
-       }
 
        return RX_CONTINUE;
 }
@@ -1088,7 +1086,8 @@ static void ap_sta_ps_start(struct sta_info *sta)
 
        atomic_inc(&sdata->bss->num_sta_ps);
        set_sta_flags(sta, WLAN_STA_PS_STA);
-       drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
+       if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
+               drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
        printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
               sdata->name, sta->sta.addr, sta->sta.aid);
@@ -1117,6 +1116,27 @@ static void ap_sta_ps_end(struct sta_info *sta)
        ieee80211_sta_ps_deliver_wakeup(sta);
 }
 
+int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
+{
+       struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
+       bool in_ps;
+
+       WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));
+
+       /* Don't let the same PS state be set twice */
+       in_ps = test_sta_flags(sta_inf, WLAN_STA_PS_STA);
+       if ((start && in_ps) || (!start && !in_ps))
+               return -EINVAL;
+
+       if (start)
+               ap_sta_ps_start(sta_inf);
+       else
+               ap_sta_ps_end(sta_inf);
+
+       return 0;
+}
+EXPORT_SYMBOL(ieee80211_sta_ps_transition);
+
 static ieee80211_rx_result debug_noinline
 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
 {
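
ieee80211_sta_ps_transition() is the entry point for drivers that set IEEE80211_HW_AP_LINK_PS and learn about powersave transitions from their own device or firmware; a hypothetical driver-side sketch (the driver name and event source are made up):

	/* called from, e.g., a firmware event handler */
	static void mydrv_report_sta_ps(struct ieee80211_sta *sta, bool sleeping)
	{
		/* -EINVAL only means the stack already had this PS state */
		if (ieee80211_sta_ps_transition(sta, sleeping) == -EINVAL)
			pr_debug("PS state of %pM already %d\n",
				 sta->addr, sleeping);
	}
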
@@ -1136,14 +1156,23 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
        if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
                u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
                                                NL80211_IFTYPE_ADHOC);
-               if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0)
+               if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) {
                        sta->last_rx = jiffies;
+                       if (ieee80211_is_data(hdr->frame_control)) {
+                               sta->last_rx_rate_idx = status->rate_idx;
+                               sta->last_rx_rate_flag = status->flag;
+                       }
+               }
        } else if (!is_multicast_ether_addr(hdr->addr1)) {
                /*
                 * Mesh beacons will update last_rx when if they are found to
                 * match the current local configuration when processed.
                 */
                sta->last_rx = jiffies;
+               if (ieee80211_is_data(hdr->frame_control)) {
+                       sta->last_rx_rate_idx = status->rate_idx;
+                       sta->last_rx_rate_flag = status->flag;
+               }
        }
 
        if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
@@ -1161,7 +1190,8 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
         * Change STA power saving mode only at the end of a frame
         * exchange sequence.
         */
-       if (!ieee80211_has_morefrags(hdr->frame_control) &&
+       if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) &&
+           !ieee80211_has_morefrags(hdr->frame_control) &&
            !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
            (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
             rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
@@ -1556,17 +1586,36 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
 {
        struct ieee80211_sub_if_data *sdata = rx->sdata;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+       bool check_port_control = false;
+       struct ethhdr *ehdr;
+       int ret;
 
        if (ieee80211_has_a4(hdr->frame_control) &&
            sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
                return -1;
 
+       if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+           !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
+
+               if (!sdata->u.mgd.use_4addr)
+                       return -1;
+               else
+                       check_port_control = true;
+       }
+
        if (is_multicast_ether_addr(hdr->addr1) &&
-           ((sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) ||
-            (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr)))
+           sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
                return -1;
 
-       return ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
+       ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
+       if (ret < 0 || !check_port_control)
+               return ret;
+
+       ehdr = (struct ethhdr *) rx->skb->data;
+       if (ehdr->h_proto != rx->sdata->control_port_protocol)
+               return -1;
+
+       return 0;
 }
 
 /*
@@ -1893,7 +1942,10 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
        dev->stats.rx_bytes += rx->skb->len;
 
        if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
-           !is_multicast_ether_addr(((struct ethhdr *)rx->skb->data)->h_dest)) {
+           !is_multicast_ether_addr(
+                   ((struct ethhdr *)rx->skb->data)->h_dest) &&
+           (!local->scanning &&
+            !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) {
                        mod_timer(&local->dynamic_ps_timer, jiffies +
                         msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
        }
@@ -2590,7 +2642,8 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
                        return 0;
                if (!multicast &&
                    compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
-                       if (!(sdata->dev->flags & IFF_PROMISC))
+                       if (!(sdata->dev->flags & IFF_PROMISC) ||
+                           sdata->u.mgd.use_4addr)
                                return 0;
                        status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
                }
@@ -2639,7 +2692,8 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
                                return 0;
                } else if (!ieee80211_bssid_match(bssid,
                                        sdata->vif.addr)) {
-                       if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
+                       if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
+                           !ieee80211_is_beacon(hdr->frame_control))
                                return 0;
                        status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
                }
@@ -2692,7 +2746,7 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
                if (!skb) {
                        if (net_ratelimit())
                                wiphy_debug(local->hw.wiphy,
-                                       "failed to copy multicast frame for %s\n",
+                                       "failed to copy skb for %s\n",
                                        sdata->name);
                        return true;
                }
@@ -2730,7 +2784,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
                local->dot11ReceivedFragmentCount++;
 
        if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
-                    test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
+                    test_bit(SCAN_SW_SCANNING, &local->scanning)))
                status->rx_flags |= IEEE80211_RX_IN_SCAN;
 
        if (ieee80211_is_mgmt(fc))
index fb274db77e3cc73a1b1d9f537966ffc0a8a0cd14..842954509925cbda1476da0155e5008421a564a5 100644 (file)
@@ -196,7 +196,8 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
        ieee802_11_parse_elems(elements, skb->len - baselen, &elems);
 
        if (elems.ds_params && elems.ds_params_len == 1)
-               freq = ieee80211_channel_to_frequency(elems.ds_params[0]);
+               freq = ieee80211_channel_to_frequency(elems.ds_params[0],
+                                                     rx_status->band);
        else
                freq = rx_status->freq;
 
@@ -211,6 +212,14 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
        if (bss)
                ieee80211_rx_bss_put(sdata->local, bss);
 
+       /* If we are on the operating channel and this packet is for the
+        * current channel, pass it up the stack so that the rest of the
+        * stack can make use of it.
+        */
+       if (ieee80211_cfg_on_oper_channel(sdata->local) &&
+           channel == sdata->local->oper_channel)
+               return RX_CONTINUE;
+
        dev_kfree_skb(skb);
        return RX_QUEUED;
 }
@@ -292,15 +301,35 @@ static void __ieee80211_scan_completed_finish(struct ieee80211_hw *hw,
                                              bool was_hw_scan)
 {
        struct ieee80211_local *local = hw_to_local(hw);
+       bool on_oper_chan;
+       bool enable_beacons = false;
+
+       mutex_lock(&local->mtx);
+       on_oper_chan = ieee80211_cfg_on_oper_channel(local);
+
+       WARN_ON(local->scanning & (SCAN_SW_SCANNING | SCAN_HW_SCANNING));
+
+       if (was_hw_scan || !on_oper_chan) {
+               if (WARN_ON(local->scan_channel))
+                       local->scan_channel = NULL;
+               ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+       } else
+               /* Set power back to normal operating levels. */
+               ieee80211_hw_config(local, 0);
 
-       ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
        if (!was_hw_scan) {
+               bool on_oper_chan2;
                ieee80211_configure_filter(local);
                drv_sw_scan_complete(local);
-               ieee80211_offchannel_return(local, true);
+               on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
+               /* We should always be on-channel at this point. */
+               WARN_ON(!on_oper_chan2);
+               if (on_oper_chan2 && (on_oper_chan != on_oper_chan2))
+                       enable_beacons = true;
+
+               ieee80211_offchannel_return(local, enable_beacons, true);
        }
 
-       mutex_lock(&local->mtx);
        ieee80211_recalc_idle(local);
        mutex_unlock(&local->mtx);
 
@@ -340,16 +369,21 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
         */
        drv_sw_scan_start(local);
 
-       ieee80211_offchannel_stop_beaconing(local);
-
        local->leave_oper_channel_time = 0;
        local->next_scan_state = SCAN_DECISION;
        local->scan_channel_idx = 0;
 
-       drv_flush(local, false);
+       /* We always want to use off-channel PS, even if we are not
+        * really leaving the operating channel.  Don't tell the AP,
+        * though, as long as we are still on-channel.
+        */
+       ieee80211_offchannel_enable_all_ps(local, false);
 
        ieee80211_configure_filter(local);
 
+       /* We need to set power level at maximum rate for scanning. */
+       ieee80211_hw_config(local, 0);
+
        ieee80211_queue_delayed_work(&local->hw,
                                     &local->scan_work,
                                     IEEE80211_CHANNEL_TIME);
@@ -486,7 +520,20 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
        }
        mutex_unlock(&local->iflist_mtx);
 
-       if (local->scan_channel) {
+       next_chan = local->scan_req->channels[local->scan_channel_idx];
+
+       if (ieee80211_cfg_on_oper_channel(local)) {
+               /* We're currently on operating channel. */
+               if (next_chan == local->oper_channel)
+                       /* We don't need to move off of operating channel. */
+                       local->next_scan_state = SCAN_SET_CHANNEL;
+               else
+                       /*
+                        * We do need to leave operating channel, as next
+                        * scan is somewhere else.
+                        */
+                       local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
+       } else {
                /*
                 * we're currently scanning a different channel, let's
                 * see if we can scan another channel without interfering
@@ -502,7 +549,6 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
                 *
                 * Otherwise switch back to the operating channel.
                 */
-               next_chan = local->scan_req->channels[local->scan_channel_idx];
 
                bad_latency = time_after(jiffies +
                                ieee80211_scan_get_channel_time(next_chan),
@@ -520,12 +566,6 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
                        local->next_scan_state = SCAN_ENTER_OPER_CHANNEL;
                else
                        local->next_scan_state = SCAN_SET_CHANNEL;
-       } else {
-               /*
-                * we're on the operating channel currently, let's
-                * leave that channel now to scan another one
-                */
-               local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
        }
 
        *next_delay = 0;
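
Roughly, the decision above now reduces to the following shape (a sketch reconstructed from the visible hunks; bad_latency stands in for the existing latency and listen-interval checks, which are unchanged):

	if (ieee80211_cfg_on_oper_channel(local)) {
		if (next_chan == local->oper_channel)
			/* no hardware switch needed */
			local->next_scan_state = SCAN_SET_CHANNEL;
		else
			local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
	} else if (bad_latency) {
		/* give the operating channel some airtime first */
		local->next_scan_state = SCAN_ENTER_OPER_CHANNEL;
	} else {
		local->next_scan_state = SCAN_SET_CHANNEL;
	}
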
@@ -534,9 +574,10 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
 static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
                                                    unsigned long *next_delay)
 {
-       ieee80211_offchannel_stop_station(local);
-
-       __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
+       /* PS is already in off-channel mode; we enabled it
+        * once at the beginning of the scan.
+        */
+       ieee80211_offchannel_stop_vifs(local, false);
 
        /*
         * What if the nullfunc frames didn't arrive?
@@ -559,15 +600,15 @@ static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *loca
 {
        /* switch back to the operating channel */
        local->scan_channel = NULL;
-       ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+       if (!ieee80211_cfg_on_oper_channel(local))
+               ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 
        /*
-        * Only re-enable station mode interface now; beaconing will be
-        * re-enabled once the full scan has been completed.
+        * Re-enable vifs and beaconing.  Leave PS in its
+        * off-channel state; it will be switched back on-channel
+        * at the end of scanning.
         */
-       ieee80211_offchannel_return(local, false);
-
-       __clear_bit(SCAN_OFF_CHANNEL, &local->scanning);
+       ieee80211_offchannel_return(local, true, false);
 
        *next_delay = HZ / 5;
        local->next_scan_state = SCAN_DECISION;
@@ -583,8 +624,11 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
        chan = local->scan_req->channels[local->scan_channel_idx];
 
        local->scan_channel = chan;
-       if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
-               skip = 1;
+
+       /* Only call hw-config if we really need to change channels. */
+       if (chan != local->hw.conf.channel)
+               if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
+                       skip = 1;
 
        /* advance state machine to next channel/band */
        local->scan_channel_idx++;
index c426504ed1cfeecddefb3589871668c02448326e..5a11078827ab25b75133ebc8e1bcd4e8bc13e793 100644 (file)
@@ -899,7 +899,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
        struct ieee80211_local *local = sdata->local;
        int sent, buffered;
 
-       drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
+       if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
+               drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
 
        if (!skb_queue_empty(&sta->ps_tx_buf))
                sta_info_clear_tim_bit(sta);
index bbdd2a86a94b2ad4eb0863f5a1be8a110c9c456e..57681149e37fd1cf45bb4540f9a2a2ae8fe8e68c 100644 (file)
@@ -82,6 +82,7 @@ enum ieee80211_sta_info_flags {
  * @state: session state (see above)
  * @stop_initiator: initiator of a session stop
  * @tx_stop: TX DelBA frame when stopping
+ * @buf_size: reorder buffer size at receiver
  *
  * This structure's lifetime is managed by RCU, assignments to
  * the array holding it must hold the aggregation mutex.
@@ -101,6 +102,7 @@ struct tid_ampdu_tx {
        u8 dialog_token;
        u8 stop_initiator;
        bool tx_stop;
+       u8 buf_size;
 };
 
 /**
@@ -207,6 +209,8 @@ enum plink_state {
  * @rate_ctrl_priv: rate control private per-STA pointer
  * @last_tx_rate: rate used for last transmit, to report to userspace as
  *     "the" transmit rate
+ * @last_rx_rate_idx: rx status rate index of the last data packet
+ * @last_rx_rate_flag: rx status flag of the last data packet
  * @lock: used for locking all fields that require locking, see comments
  *     in the header file.
  * @flaglock: spinlock for flags accesses
@@ -309,6 +313,8 @@ struct sta_info {
        unsigned long tx_bytes;
        unsigned long tx_fragments;
        struct ieee80211_tx_rate last_tx_rate;
+       int last_rx_rate_idx;
+       int last_rx_rate_flag;
        u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
 
        /*
index 071ac95c4aa0faf64a3dcfb1c054d60b2af958fb..b936dd29e92bc7bb2d73f9b75eaec55077d5b55a 100644 (file)
@@ -98,6 +98,10 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
         *  (b) always process RX events before TX status events if ordering
         *      can be unknown, for example with different interrupt status
         *      bits.
+        *  (c) if PS mode transitions are manual (i.e. the flag
+        *      %IEEE80211_HW_AP_LINK_PS is set), always process PS state
+        *      changes before calling TX status events if ordering can be
+        *      unknown.
         */
        if (test_sta_flags(sta, WLAN_STA_PS_STA) &&
            skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
@@ -314,8 +318,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
                if (info->flags & IEEE80211_TX_STAT_ACK) {
                        local->ps_sdata->u.mgd.flags |=
                                        IEEE80211_STA_NULLFUNC_ACKED;
-                       ieee80211_queue_work(&local->hw,
-                                       &local->dynamic_ps_enable_work);
                } else
                        mod_timer(&local->dynamic_ps_timer, jiffies +
                                        msecs_to_jiffies(10));
@@ -339,6 +341,10 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
                        cookie = local->hw_roc_cookie ^ 2;
                        local->hw_roc_skb_for_status = NULL;
                }
+
+               if (cookie == local->hw_offchan_tx_cookie)
+                       local->hw_offchan_tx_cookie = 0;
+
                cfg80211_mgmt_tx_status(
                        skb->dev, cookie, skb->data, skb->len,
                        !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
index b0beaa58246bbfe9019ccde998b8ba2043e38ccd..081dcaf6577be41ff6444101b1ec2ce87ff7f349 100644 (file)
 #include "wme.h"
 #include "rate.h"
 
-#define IEEE80211_TX_OK                0
-#define IEEE80211_TX_AGAIN     1
-#define IEEE80211_TX_PENDING   2
-
 /* misc utils */
 
 static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
@@ -236,6 +232,7 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
        if (local->hw.conf.flags & IEEE80211_CONF_PS) {
                ieee80211_stop_queues_by_reason(&local->hw,
                                                IEEE80211_QUEUE_STOP_REASON_PS);
+               ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
                ieee80211_queue_work(&local->hw,
                                     &local->dynamic_ps_disable_work);
        }
@@ -257,7 +254,8 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
        if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
                return TX_CONTINUE;
 
-       if (unlikely(test_bit(SCAN_OFF_CHANNEL, &tx->local->scanning)) &&
+       if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
+           test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
            !ieee80211_is_probe_req(hdr->frame_control) &&
            !ieee80211_is_nullfunc(hdr->frame_control))
                /*
@@ -1283,16 +1281,17 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
        return TX_CONTINUE;
 }
 
-static int __ieee80211_tx(struct ieee80211_local *local,
-                         struct sk_buff **skbp,
-                         struct sta_info *sta,
-                         bool txpending)
+/*
+ * Returns false if the frame couldn't be transmitted but was queued instead.
+ */
+static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp,
+                          struct sta_info *sta, bool txpending)
 {
        struct sk_buff *skb = *skbp, *next;
        struct ieee80211_tx_info *info;
        struct ieee80211_sub_if_data *sdata;
        unsigned long flags;
-       int ret, len;
+       int len;
        bool fragm = false;
 
        while (skb) {
@@ -1300,13 +1299,37 @@ static int __ieee80211_tx(struct ieee80211_local *local,
                __le16 fc;
 
                spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-               ret = IEEE80211_TX_OK;
                if (local->queue_stop_reasons[q] ||
-                   (!txpending && !skb_queue_empty(&local->pending[q])))
-                       ret = IEEE80211_TX_PENDING;
+                   (!txpending && !skb_queue_empty(&local->pending[q]))) {
+                       /*
+                        * Since queue is stopped, queue up frames for later
+                        * transmission from the tx-pending tasklet when the
+                        * queue is woken again.
+                        */
+
+                       do {
+                               next = skb->next;
+                               skb->next = NULL;
+                               /*
+                                * NB: If txpending is true, next must already
+                                * be NULL since we have already gone through
+                                * this loop; therefore we can just
+                                * queue the frame to the head without worrying
+                                * about reordering of fragments.
+                                */
+                               if (unlikely(txpending))
+                                       __skb_queue_head(&local->pending[q],
+                                                        skb);
+                               else
+                                       __skb_queue_tail(&local->pending[q],
+                                                        skb);
+                       } while ((skb = next));
+
+                       spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+                                              flags);
+                       return false;
+               }
                spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
-               if (ret != IEEE80211_TX_OK)
-                       return ret;
 
                info = IEEE80211_SKB_CB(skb);
 
@@ -1341,15 +1364,7 @@ static int __ieee80211_tx(struct ieee80211_local *local,
                        info->control.sta = NULL;
 
                fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
-               ret = drv_tx(local, skb);
-               if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) {
-                       dev_kfree_skb(skb);
-                       ret = NETDEV_TX_OK;
-               }
-               if (ret != NETDEV_TX_OK) {
-                       info->control.vif = &sdata->vif;
-                       return IEEE80211_TX_AGAIN;
-               }
+               drv_tx(local, skb);
 
                ieee80211_tpt_led_trig_tx(local, fc, len);
                *skbp = skb = next;
@@ -1357,7 +1372,7 @@ static int __ieee80211_tx(struct ieee80211_local *local,
                fragm = true;
        }
 
-       return IEEE80211_TX_OK;
+       return true;
 }
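
With the IEEE80211_TX_* return codes gone, callers of __ieee80211_tx() only need to know whether the frames went out or were parked on the pending queue; a minimal caller sketch:

	bool sent = __ieee80211_tx(local, &skb, sta, txpending);
	if (!sent) {
		/* the frames now sit on local->pending[]; the tx-pending
		 * tasklet will push them out once the queue is woken
		 */
	}
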
 
 /*
@@ -1394,7 +1409,8 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
        /* handlers after fragment must be aware of tx info fragmentation! */
        CALL_TXH(ieee80211_tx_h_stats);
        CALL_TXH(ieee80211_tx_h_encrypt);
-       CALL_TXH(ieee80211_tx_h_calculate_duration);
+       if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
+               CALL_TXH(ieee80211_tx_h_calculate_duration);
 #undef CALL_TXH
 
  txh_done:
@@ -1416,23 +1432,24 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
        return 0;
 }
 
-static void ieee80211_tx(struct ieee80211_sub_if_data *sdata,
+/*
+ * Returns false if the frame couldn't be transmitted but was queued instead.
+ */
+static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
                         struct sk_buff *skb, bool txpending)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_tx_data tx;
        ieee80211_tx_result res_prepare;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct sk_buff *next;
-       unsigned long flags;
-       int ret, retries;
        u16 queue;
+       bool result = true;
 
        queue = skb_get_queue_mapping(skb);
 
        if (unlikely(skb->len < 10)) {
                dev_kfree_skb(skb);
-               return;
+               return true;
        }
 
        rcu_read_lock();
@@ -1442,85 +1459,19 @@ static void ieee80211_tx(struct ieee80211_sub_if_data *sdata,
 
        if (unlikely(res_prepare == TX_DROP)) {
                dev_kfree_skb(skb);
-               rcu_read_unlock();
-               return;
+               goto out;
        } else if (unlikely(res_prepare == TX_QUEUED)) {
-               rcu_read_unlock();
-               return;
+               goto out;
        }
 
        tx.channel = local->hw.conf.channel;
        info->band = tx.channel->band;
 
-       if (invoke_tx_handlers(&tx))
-               goto out;
-
-       retries = 0;
- retry:
-       ret = __ieee80211_tx(local, &tx.skb, tx.sta, txpending);
-       switch (ret) {
-       case IEEE80211_TX_OK:
-               break;
-       case IEEE80211_TX_AGAIN:
-               /*
-                * Since there are no fragmented frames on A-MPDU
-                * queues, there's no reason for a driver to reject
-                * a frame there, warn and drop it.
-                */
-               if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
-                       goto drop;
-               /* fall through */
-       case IEEE80211_TX_PENDING:
-               skb = tx.skb;
-
-               spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-
-               if (local->queue_stop_reasons[queue] ||
-                   !skb_queue_empty(&local->pending[queue])) {
-                       /*
-                        * if queue is stopped, queue up frames for later
-                        * transmission from the tasklet
-                        */
-                       do {
-                               next = skb->next;
-                               skb->next = NULL;
-                               if (unlikely(txpending))
-                                       __skb_queue_head(&local->pending[queue],
-                                                        skb);
-                               else
-                                       __skb_queue_tail(&local->pending[queue],
-                                                        skb);
-                       } while ((skb = next));
-
-                       spin_unlock_irqrestore(&local->queue_stop_reason_lock,
-                                              flags);
-               } else {
-                       /*
-                        * otherwise retry, but this is a race condition or
-                        * a driver bug (which we warn about if it persists)
-                        */
-                       spin_unlock_irqrestore(&local->queue_stop_reason_lock,
-                                              flags);
-
-                       retries++;
-                       if (WARN(retries > 10, "tx refused but queue active\n"))
-                               goto drop;
-                       goto retry;
-               }
-       }
+       if (!invoke_tx_handlers(&tx))
+               result = __ieee80211_tx(local, &tx.skb, tx.sta, txpending);
  out:
        rcu_read_unlock();
-       return;
-
- drop:
-       rcu_read_unlock();
-
-       skb = tx.skb;
-       while (skb) {
-               next = skb->next;
-               dev_kfree_skb(skb);
-               skb = next;
-       }
+       return result;
 }
 
 /* device xmit handlers */
@@ -1750,7 +1701,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
        __le16 fc;
        struct ieee80211_hdr hdr;
        struct ieee80211s_hdr mesh_hdr __maybe_unused;
-       struct mesh_path *mppath = NULL;
+       struct mesh_path __maybe_unused *mppath = NULL;
        const u8 *encaps_data;
        int encaps_len, skip_header_bytes;
        int nh_pos, h_pos;
@@ -1815,19 +1766,19 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                        mppath = mpp_path_lookup(skb->data, sdata);
 
                /*
-                * Do not use address extension, if it is a packet from
-                * the same interface and the destination is not being
-                * proxied by any other mest point.
+                * Use address extension if it is a packet from
+                * another interface or if we know the destination
+                * is being proxied by a portal (i.e. portal address
+                * differs from proxied address)
                 */
                if (compare_ether_addr(sdata->vif.addr,
                                       skb->data + ETH_ALEN) == 0 &&
-                   (!mppath || !compare_ether_addr(mppath->mpp, skb->data))) {
+                   !(mppath && compare_ether_addr(mppath->mpp, skb->data))) {
                        hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
                                        skb->data, skb->data + ETH_ALEN);
                        meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
                                        sdata, NULL, NULL);
                } else {
-                       /* packet from other interface */
                        int is_mesh_mcast = 1;
                        const u8 *mesh_da;
 
@@ -2067,6 +2018,11 @@ void ieee80211_clear_tx_pending(struct ieee80211_local *local)
                skb_queue_purge(&local->pending[i]);
 }
 
+/*
+ * Returns false if the frame couldn't be transmitted but was queued instead,
+ * which in this case means re-queued -- take as an indication to stop sending
+ * more pending frames.
+ */
 static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
                                     struct sk_buff *skb)
 {
@@ -2074,20 +2030,17 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
        struct ieee80211_sub_if_data *sdata;
        struct sta_info *sta;
        struct ieee80211_hdr *hdr;
-       int ret;
-       bool result = true;
+       bool result;
 
        sdata = vif_to_sdata(info->control.vif);
 
        if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
-               ieee80211_tx(sdata, skb, true);
+               result = ieee80211_tx(sdata, skb, true);
        } else {
                hdr = (struct ieee80211_hdr *)skb->data;
                sta = sta_info_get(sdata, hdr->addr1);
 
-               ret = __ieee80211_tx(local, &skb, sta, true);
-               if (ret != IEEE80211_TX_OK)
-                       result = false;
+               result = __ieee80211_tx(local, &skb, sta, true);
        }
 
        return result;
@@ -2129,8 +2082,6 @@ void ieee80211_tx_pending(unsigned long data)
                                                flags);
 
                        txok = ieee80211_tx_pending_skb(local, skb);
-                       if (!txok)
-                               __skb_queue_head(&local->pending[i], skb);
                        spin_lock_irqsave(&local->queue_stop_reason_lock,
                                          flags);
                        if (!txok)
@@ -2178,6 +2129,8 @@ static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss,
        if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf))
                aid0 = 1;
 
+       bss->dtim_bc_mc = aid0 == 1;
+
        if (have_bits) {
                /* Find largest even number N1 so that bits numbered 1 through
                 * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits
@@ -2241,7 +2194,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
        if (sdata->vif.type == NL80211_IFTYPE_AP) {
                ap = &sdata->u.ap;
                beacon = rcu_dereference(ap->beacon);
-               if (ap && beacon) {
+               if (beacon) {
                        /*
                         * headroom, head length,
                         * tail length and maximum TIM length
@@ -2302,6 +2255,11 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
                struct ieee80211_mgmt *mgmt;
                u8 *pos;
 
+#ifdef CONFIG_MAC80211_MESH
+               if (!sdata->u.mesh.mesh_id_len)
+                       goto out;
+#endif
+
                /* headroom, head length, tail length and maximum TIM length */
                skb = dev_alloc_skb(local->tx_headroom + 400 +
                                sdata->u.mesh.vendor_ie_len);
@@ -2543,7 +2501,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
        if (sdata->vif.type != NL80211_IFTYPE_AP || !beacon || !beacon->head)
                goto out;
 
-       if (bss->dtim_count != 0)
+       if (bss->dtim_count != 0 || !bss->dtim_bc_mc)
                goto out; /* send buffered bc/mc only after DTIM beacon */
 
        while (1) {
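
The dtim_bc_mc flag introduced above gates buffered broadcast/multicast delivery: frames held in ps_bc_buf are now released only on a DTIM beacon whose TIM actually advertised buffered group traffic (aid0). A minimal user-space sketch of that check; the struct below is a hypothetical stand-in, not mac80211's own state:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the fields used in the hunks above. */
struct beacon_state {
        int dtim_count;         /* 0 on a DTIM beacon */
        bool dtim_bc_mc;        /* TIM advertised buffered bc/mc (aid0) */
};

/* Buffered bc/mc frames go out only on a DTIM beacon whose TIM set aid0. */
static bool release_buffered_bc_mc(const struct beacon_state *b)
{
        return b->dtim_count == 0 && b->dtim_bc_mc;
}

int main(void)
{
        struct beacon_state b = { .dtim_count = 0, .dtim_bc_mc = true };

        printf("release: %d\n", release_buffered_bc_mc(&b));   /* 1 */
        b.dtim_bc_mc = false;
        printf("release: %d\n", release_buffered_bc_mc(&b));   /* 0 */
        return 0;
}
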
index d036597aabbed9e7658f6ff26b3ee8afbdf9e532..556647a910acc8ea96d7d6d2e1a4a56e063a2c55 100644 (file)
@@ -986,12 +986,6 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
                u16 cap = sband->ht_cap.cap;
                __le16 tmp;
 
-               if (ieee80211_disable_40mhz_24ghz &&
-                   sband->band == IEEE80211_BAND_2GHZ) {
-                       cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-                       cap &= ~IEEE80211_HT_CAP_SGI_40;
-               }
-
                *pos++ = WLAN_EID_HT_CAPABILITY;
                *pos++ = sizeof(struct ieee80211_ht_cap);
                memset(pos, 0, sizeof(struct ieee80211_ht_cap));
index 36305e0d06ef409dd9943c3a9364587d7136aa0f..204f0a4db96911c41df34a209fa7ead03f4aa511 100644 (file)
@@ -126,12 +126,6 @@ static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
 
        /* determine capability flags */
 
-       if (ieee80211_disable_40mhz_24ghz &&
-           sband->band == IEEE80211_BAND_2GHZ) {
-               cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-               cap &= ~IEEE80211_HT_CAP_SGI_40;
-       }
-
        switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
        case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
                if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
@@ -874,6 +868,44 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
        kfree_skb(skb);
 }
 
+static bool ieee80211_work_ct_coexists(enum nl80211_channel_type wk_ct,
+                                      enum nl80211_channel_type oper_ct)
+{
+       switch (wk_ct) {
+       case NL80211_CHAN_NO_HT:
+               return true;
+       case NL80211_CHAN_HT20:
+               if (oper_ct != NL80211_CHAN_NO_HT)
+                       return true;
+               return false;
+       case NL80211_CHAN_HT40MINUS:
+       case NL80211_CHAN_HT40PLUS:
+               return (wk_ct == oper_ct);
+       }
+       WARN_ON(1); /* shouldn't get here */
+       return false;
+}
+
+static enum nl80211_channel_type
+ieee80211_calc_ct(enum nl80211_channel_type wk_ct,
+                 enum nl80211_channel_type oper_ct)
+{
+       switch (wk_ct) {
+       case NL80211_CHAN_NO_HT:
+               return oper_ct;
+       case NL80211_CHAN_HT20:
+               if (oper_ct != NL80211_CHAN_NO_HT)
+                       return oper_ct;
+               return wk_ct;
+       case NL80211_CHAN_HT40MINUS:
+       case NL80211_CHAN_HT40PLUS:
+               return wk_ct;
+       }
+       WARN_ON(1); /* shouldn't get here */
+       return wk_ct;
+}
+
+
 static void ieee80211_work_timer(unsigned long data)
 {
        struct ieee80211_local *local = (void *) data;
@@ -924,18 +956,52 @@ static void ieee80211_work_work(struct work_struct *work)
                }
 
                if (!started && !local->tmp_channel) {
+                       bool on_oper_chan;
+                       bool tmp_chan_changed = false;
+                       bool on_oper_chan2;
+                       enum nl80211_channel_type wk_ct;
+                       on_oper_chan = ieee80211_cfg_on_oper_channel(local);
+
+                       /* Work with existing channel type if possible. */
+                       wk_ct = wk->chan_type;
+                       if (wk->chan == local->hw.conf.channel)
+                               wk_ct = ieee80211_calc_ct(wk->chan_type,
+                                               local->hw.conf.channel_type);
+
+                       if (local->tmp_channel)
+                               if ((local->tmp_channel != wk->chan) ||
+                                   (local->tmp_channel_type != wk_ct))
+                                       tmp_chan_changed = true;
+
+                       local->tmp_channel = wk->chan;
+                       local->tmp_channel_type = wk_ct;
                        /*
-                        * TODO: could optimize this by leaving the
-                        *       station vifs in awake mode if they
-                        *       happen to be on the same channel as
-                        *       the requested channel
+                        * Leave the station vifs in awake mode if they
+                        * happen to be on the same channel as
+                        * the requested channel.
                         */
-                       ieee80211_offchannel_stop_beaconing(local);
-                       ieee80211_offchannel_stop_station(local);
+                       on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
+                       if (on_oper_chan != on_oper_chan2) {
+                               if (on_oper_chan2) {
+                                       /* going off oper channel, PS too */
+                                       ieee80211_offchannel_stop_vifs(local,
+                                                                      true);
+                                       ieee80211_hw_config(local, 0);
+                               } else {
+                                       /* going on channel, but leave PS
+                                        * off-channel. */
+                                       ieee80211_hw_config(local, 0);
+                                       ieee80211_offchannel_return(local,
+                                                                   true,
+                                                                   false);
+                               }
+                       } else if (tmp_chan_changed)
+                               /* Still off-channel, but on some other
+                                * channel, so update hardware.
+                                * PS should already be off-channel.
+                                */
+                               ieee80211_hw_config(local, 0);
 
-                       local->tmp_channel = wk->chan;
-                       local->tmp_channel_type = wk->chan_type;
-                       ieee80211_hw_config(local, 0);
                        started = true;
                        wk->timeout = jiffies;
                }
@@ -1005,15 +1071,34 @@ static void ieee80211_work_work(struct work_struct *work)
                        continue;
                if (wk->chan != local->tmp_channel)
                        continue;
-               if (wk->chan_type != local->tmp_channel_type)
+               if (ieee80211_work_ct_coexists(wk->chan_type,
+                                              local->tmp_channel_type))
                        continue;
                remain_off_channel = true;
        }
 
        if (!remain_off_channel && local->tmp_channel) {
+               bool on_oper_chan = ieee80211_cfg_on_oper_channel(local);
                local->tmp_channel = NULL;
-               ieee80211_hw_config(local, 0);
-               ieee80211_offchannel_return(local, true);
+               /* If tmp_channel wasn't the operating channel, then
+                * we need to go back on-channel.
+                * NOTE:  If we can ever be here while scanning,
+                * or if the hw_config() channel config logic changes,
+                * then we may need to do a more thorough check to see if
+                * we still need to do a hardware config.  Currently,
+                * we cannot be here while scanning, however.
+                */
+               if (ieee80211_cfg_on_oper_channel(local) && !on_oper_chan)
+                       ieee80211_hw_config(local, 0);
+
+               /* At the least, we need to disable offchannel_ps,
+                * so just go ahead and run the entire offchannel
+                * return logic here.  We *could* skip enabling
+                * beaconing if we were already on-oper-channel
+                * as a future optimization.
+                */
+               ieee80211_offchannel_return(local, true, true);
+
                /* give connection some time to breathe */
                run_again(local, jiffies + HZ/2);
        }
index bee230d8fd1168849c14c6dfaee26b5b696ed220..f1765de2f4bf7a42e06794b8155a79d6bb52b694 100644 (file)
 ieee80211_tx_result
 ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
 {
-       u8 *data, *key, *mic, key_offset;
+       u8 *data, *key, *mic;
        size_t data_len;
        unsigned int hdrlen;
        struct ieee80211_hdr *hdr;
        struct sk_buff *skb = tx->skb;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       int authenticator;
        int tail;
 
        hdr = (struct ieee80211_hdr *)skb->data;
@@ -47,6 +46,11 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
        data = skb->data + hdrlen;
        data_len = skb->len - hdrlen;
 
+       if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE)) {
+               /* Need to use software crypto for the test */
+               info->control.hw_key = NULL;
+       }
+
        if (info->control.hw_key &&
            !(tx->flags & IEEE80211_TX_FRAGMENTED) &&
            !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
@@ -62,17 +66,11 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
                    skb_headroom(skb) < TKIP_IV_LEN))
                return TX_DROP;
 
-#if 0
-       authenticator = fc & IEEE80211_FCTL_FROMDS; /* FIX */
-#else
-       authenticator = 1;
-#endif
-       key_offset = authenticator ?
-               NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY :
-               NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
-       key = &tx->key->conf.key[key_offset];
+       key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY];
        mic = skb_put(skb, MICHAEL_MIC_LEN);
        michael_mic(key, hdr, data, data_len, mic);
+       if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE))
+               mic[0]++;
 
        return TX_CONTINUE;
 }
@@ -81,14 +79,13 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
 ieee80211_rx_result
 ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
 {
-       u8 *data, *key = NULL, key_offset;
+       u8 *data, *key = NULL;
        size_t data_len;
        unsigned int hdrlen;
        u8 mic[MICHAEL_MIC_LEN];
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       int authenticator = 1, wpa_test = 0;
 
        /* No way to verify the MIC if the hardware stripped it */
        if (status->flag & RX_FLAG_MMIC_STRIPPED)
@@ -106,17 +103,9 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
        data = skb->data + hdrlen;
        data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
 
-#if 0
-       authenticator = fc & IEEE80211_FCTL_TODS; /* FIX */
-#else
-       authenticator = 1;
-#endif
-       key_offset = authenticator ?
-               NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY :
-               NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
-       key = &rx->key->conf.key[key_offset];
+       key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
        michael_mic(key, hdr, data, data_len, mic);
-       if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0 || wpa_test) {
+       if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0) {
                if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
                        return RX_DROP_UNUSABLE;
 
@@ -208,7 +197,7 @@ ieee80211_rx_result
 ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
-       int hdrlen, res, hwaccel = 0, wpa_test = 0;
+       int hdrlen, res, hwaccel = 0;
        struct ieee80211_key *key = rx->key;
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
@@ -235,7 +224,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
                                          hdr->addr1, hwaccel, rx->queue,
                                          &rx->tkip_iv32,
                                          &rx->tkip_iv16);
-       if (res != TKIP_DECRYPT_OK || wpa_test)
+       if (res != TKIP_DECRYPT_OK)
                return RX_DROP_UNUSABLE;
 
        /* Trim ICV */
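
The wpa.c changes above add a test hook: when IEEE80211_TX_INTFL_TKIP_MIC_FAILURE is set, the frame is forced through software crypto and its freshly computed Michael MIC is deliberately corrupted (mic[0]++), so the receiver's verification fails and TKIP countermeasures can be exercised. A toy corrupt-then-verify illustration; compute_mic() below is a placeholder checksum, not the Michael algorithm:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MIC_LEN 8

/* Placeholder MIC: an XOR fold, standing in for michael_mic(). */
static void compute_mic(const uint8_t *data, size_t len, uint8_t mic[MIC_LEN])
{
        memset(mic, 0, MIC_LEN);
        for (size_t i = 0; i < len; i++)
                mic[i % MIC_LEN] ^= data[i];
}

int main(void)
{
        const uint8_t frame[] = "payload";
        uint8_t good[MIC_LEN], sent[MIC_LEN];
        int inject_failure = 1;         /* analogous to the new TX flag */

        compute_mic(frame, sizeof(frame), good);
        memcpy(sent, good, MIC_LEN);
        if (inject_failure)
                sent[0]++;              /* deliberately break the MIC */

        printf("verify: %s\n",
               memcmp(sent, good, MIC_LEN) ? "MIC failure" : "ok");
        return 0;
}
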
index 1534f2b44cafbf8d465d55229f3ee807cf5f6fb2..82a6e0d80f058dcbdc25863c97fb2d878836024b 100644 (file)
@@ -85,6 +85,17 @@ config NF_CONNTRACK_EVENTS
 
          If unsure, say `N'.
 
+config NF_CONNTRACK_TIMESTAMP
+       bool  'Connection tracking timestamping'
+       depends on NETFILTER_ADVANCED
+       help
+         This option enables support for connection tracking timestamping.
+         This allows you to store the flow start time and to obtain
+         the flow stop time (once the flow has been destroyed) via
+         connection tracking events.
+
+         If unsure, say `N'.
+
 config NF_CT_PROTO_DCCP
        tristate 'DCCP protocol connection tracking support (EXPERIMENTAL)'
        depends on EXPERIMENTAL
@@ -185,9 +196,13 @@ config NF_CONNTRACK_IRC
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config NF_CONNTRACK_BROADCAST
+       tristate
+
 config NF_CONNTRACK_NETBIOS_NS
        tristate "NetBIOS name service protocol support"
        depends on NETFILTER_ADVANCED
+       select NF_CONNTRACK_BROADCAST
        help
          NetBIOS name service requests are sent as broadcast messages from an
          unprivileged port and responded to with unicast messages to the
@@ -204,6 +219,21 @@ config NF_CONNTRACK_NETBIOS_NS
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config NF_CONNTRACK_SNMP
+       tristate "SNMP service protocol support"
+       depends on NETFILTER_ADVANCED
+       select NF_CONNTRACK_BROADCAST
+       help
+         SNMP service requests are sent as broadcast messages from an
+         unprivileged port and responded to with unicast messages to the
+         same port. This makes them hard to firewall properly because connection
+         tracking doesn't deal with broadcasts. This helper tracks locally
+         originating SNMP service requests and the corresponding
+         responses. It relies on correct IP address configuration, specifically
+         netmask and broadcast address.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
 config NF_CONNTRACK_PPTP
        tristate "PPtP protocol support"
        depends on NETFILTER_ADVANCED
@@ -322,10 +352,32 @@ config NETFILTER_XT_CONNMARK
        ctmark), similarly to the packet mark (nfmark). Using this
        target and match, you can set and match on this mark.
 
+config NETFILTER_XT_SET
+       tristate 'set target and match support'
+       depends on IP_SET
+       depends on NETFILTER_ADVANCED
+       help
+         This option adds the "SET" target and "set" match.
+
+         Using this target and match, you can add/delete and match
+         elements in the sets created by ipset(8).
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
 # alphabetically ordered list of targets
 
 comment "Xtables targets"
 
+config NETFILTER_XT_TARGET_AUDIT
+       tristate "AUDIT target support"
+       depends on AUDIT
+       depends on NETFILTER_ADVANCED
+       ---help---
+         This option adds an 'AUDIT' target, which can be used to create
+         audit records for packets dropped/accepted.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
 config NETFILTER_XT_TARGET_CHECKSUM
        tristate "CHECKSUM target support"
        depends on IP_NF_MANGLE || IP6_NF_MANGLE
@@ -477,6 +529,7 @@ config NETFILTER_XT_TARGET_NFLOG
 config NETFILTER_XT_TARGET_NFQUEUE
        tristate '"NFQUEUE" target Support'
        depends on NETFILTER_ADVANCED
+       select NETFILTER_NETLINK_QUEUE
        help
          This target replaced the old obsolete QUEUE target.
 
@@ -685,6 +738,15 @@ config NETFILTER_XT_MATCH_DCCP
          If you want to compile it as a module, say M here and read
          <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
 
+config NETFILTER_XT_MATCH_DEVGROUP
+       tristate '"devgroup" match support'
+       depends on NETFILTER_ADVANCED
+       help
+         This option adds a `devgroup' match, which allows you to match on the
+         device group a network device is assigned to.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
 config NETFILTER_XT_MATCH_DSCP
        tristate '"dscp" and "tos" match support'
        depends on NETFILTER_ADVANCED
@@ -886,7 +948,7 @@ config NETFILTER_XT_MATCH_RATEEST
 config NETFILTER_XT_MATCH_REALM
        tristate  '"realm" match support'
        depends on NETFILTER_ADVANCED
-       select NET_CLS_ROUTE
+       select IP_ROUTE_CLASSID
        help
          This option adds a `realm' match, which allows you to use the realm
          key from the routing subsystem inside iptables.
@@ -1011,4 +1073,6 @@ endif # NETFILTER_XTABLES
 
 endmenu
 
+source "net/netfilter/ipset/Kconfig"
+
 source "net/netfilter/ipvs/Kconfig"
index 441050f31111ae92ddda5db747d99afe8dad6293..d57a890eaee51d036cc59efa16dc7621704501e7 100644 (file)
@@ -1,6 +1,7 @@
 netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
 
 nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o
+nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o
 nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
 
 obj-$(CONFIG_NETFILTER) = netfilter.o
@@ -28,7 +29,9 @@ obj-$(CONFIG_NF_CONNTRACK_AMANDA) += nf_conntrack_amanda.o
 obj-$(CONFIG_NF_CONNTRACK_FTP) += nf_conntrack_ftp.o
 obj-$(CONFIG_NF_CONNTRACK_H323) += nf_conntrack_h323.o
 obj-$(CONFIG_NF_CONNTRACK_IRC) += nf_conntrack_irc.o
+obj-$(CONFIG_NF_CONNTRACK_BROADCAST) += nf_conntrack_broadcast.o
 obj-$(CONFIG_NF_CONNTRACK_NETBIOS_NS) += nf_conntrack_netbios_ns.o
+obj-$(CONFIG_NF_CONNTRACK_SNMP) += nf_conntrack_snmp.o
 obj-$(CONFIG_NF_CONNTRACK_PPTP) += nf_conntrack_pptp.o
 obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o
 obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o
@@ -43,8 +46,10 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
 # combos
 obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o
 obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o
+obj-$(CONFIG_NETFILTER_XT_SET) += xt_set.o
 
 # targets
+obj-$(CONFIG_NETFILTER_XT_TARGET_AUDIT) += xt_AUDIT.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_CHECKSUM) += xt_CHECKSUM.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
@@ -72,6 +77,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNLIMIT) += xt_connlimit.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_CPU) += xt_cpu.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
@@ -101,5 +107,8 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_TIME) += xt_time.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_U32) += xt_u32.o
 
+# ipset
+obj-$(CONFIG_IP_SET) += ipset/
+
 # IPVS
 obj-$(CONFIG_IP_VS) += ipvs/
index 4aa614b8a96aeab391938aa18047156fcb03037d..899b71c0ff5d0a4c8004e5c8a3cef27aa3a173cc 100644 (file)
@@ -176,13 +176,21 @@ next_hook:
                ret = 1;
        } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
                kfree_skb(skb);
-               ret = -(verdict >> NF_VERDICT_BITS);
+               ret = NF_DROP_GETERR(verdict);
                if (ret == 0)
                        ret = -EPERM;
        } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
-               if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
-                             verdict >> NF_VERDICT_BITS))
-                       goto next_hook;
+               ret = nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
+                              verdict >> NF_VERDICT_QBITS);
+               if (ret < 0) {
+                       if (ret == -ECANCELED)
+                               goto next_hook;
+                       if (ret == -ESRCH &&
+                          (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
+                               goto next_hook;
+                       kfree_skb(skb);
+               }
+               ret = 0;
        }
        rcu_read_unlock();
        return ret;
@@ -215,7 +223,7 @@ EXPORT_SYMBOL(skb_make_writable);
 /* This does not belong here, but locally generated errors need it if connection
    tracking in use: without this, connection may not be in hash table, and hence
    manufactured ICMP or RST packets will not be associated with it. */
-void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
+void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu __read_mostly;
 EXPORT_SYMBOL(ip_ct_attach);
 
 void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
@@ -232,7 +240,7 @@ void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(nf_ct_attach);
 
-void (*nf_ct_destroy)(struct nf_conntrack *);
+void (*nf_ct_destroy)(struct nf_conntrack *) __rcu __read_mostly;
 EXPORT_SYMBOL(nf_ct_destroy);
 
 void nf_conntrack_destroy(struct nf_conntrack *nfct)
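
The nf_hook_slow() rework above packs extra information into the verdict word: a negative errno for NF_DROP (read back with NF_DROP_GETERR) and a queue number plus an optional bypass flag for NF_QUEUE (read back with a 16-bit shift, NF_VERDICT_QBITS). A small illustration of that style of packing; the macro names and bit positions below are invented for this example and are not the kernel's definitions:

#include <stdio.h>

/* Illustrative layout: low bits = verdict code, upper 16 bits = payload. */
#define V_DROP                  0u
#define V_QUEUE                 3u
#define V_MASK                  0xffu
#define V_QBITS                 16
#define V_FLAG_QUEUE_BYPASS     0x8000u

#define V_DROP_ERR(err)         ((((unsigned)-(err)) << V_QBITS) | V_DROP)
#define V_DROP_GETERR(v)        (-(int)((v) >> V_QBITS))
#define V_QUEUE_NR(q)           ((((unsigned)(q)) << V_QBITS) | V_QUEUE)

int main(void)
{
        unsigned v = V_DROP_ERR(-13);             /* drop with errno -13 */
        printf("verdict %u, errno %d\n", v & V_MASK, V_DROP_GETERR(v));

        v = V_QUEUE_NR(5) | V_FLAG_QUEUE_BYPASS;  /* queue 5, bypass allowed */
        printf("verdict %u, queue %u, bypass %d\n",
               v & V_MASK, v >> V_QBITS, !!(v & V_FLAG_QUEUE_BYPASS));
        return 0;
}
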
diff --git a/net/netfilter/ipset/Kconfig b/net/netfilter/ipset/Kconfig
new file mode 100644 (file)
index 0000000..2c5b348
--- /dev/null
@@ -0,0 +1,122 @@
+menuconfig IP_SET
+       tristate "IP set support"
+       depends on INET && NETFILTER
+       depends on NETFILTER_NETLINK
+       help
+         This option adds IP set support to the kernel.
+         In order to define and use the sets, you need the userspace utility
+         ipset(8). You can use the sets in netfilter via the "set" match
+         and "SET" target.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+if IP_SET
+
+config IP_SET_MAX
+       int "Maximum number of IP sets"
+       default 256
+       range 2 65534
+       depends on IP_SET
+       help
+         You can define here the default value of the maximum number
+         of IP sets for the kernel.
+
+         The value can be overridden by the 'max_sets' module
+         parameter of the 'ip_set' module.
+
+config IP_SET_BITMAP_IP
+       tristate "bitmap:ip set support"
+       depends on IP_SET
+       help
+         This option adds the bitmap:ip set type support, by which one
+         can store IPv4 addresses (or network addresses) from a range.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_BITMAP_IPMAC
+       tristate "bitmap:ip,mac set support"
+       depends on IP_SET
+       help
+         This option adds the bitmap:ip,mac set type support, by which one
+         can store IPv4 address and (source) MAC address pairs from a range.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_BITMAP_PORT
+       tristate "bitmap:port set support"
+       depends on IP_SET
+       help
+         This option adds the bitmap:port set type support, by which one
+         can store TCP/UDP port numbers from a range.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_HASH_IP
+       tristate "hash:ip set support"
+       depends on IP_SET
+       help
+         This option adds the hash:ip set type support, by which one
+         can store arbitrary IPv4 or IPv6 addresses (or network addresses)
+         in a set.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_HASH_IPPORT
+       tristate "hash:ip,port set support"
+       depends on IP_SET
+       help
+         This option adds the hash:ip,port set type support, by which one
+         can store IPv4/IPv6 address and protocol/port pairs.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_HASH_IPPORTIP
+       tristate "hash:ip,port,ip set support"
+       depends on IP_SET
+       help
+         This option adds the hash:ip,port,ip set type support, by which
+         one can store IPv4/IPv6 address, protocol/port, and IPv4/IPv6
+         address triples in a set.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_HASH_IPPORTNET
+       tristate "hash:ip,port,net set support"
+       depends on IP_SET
+       help
+         This option adds the hash:ip,port,net set type support, by which
+         one can store IPv4/IPv6 address, protocol/port, and IPv4/IPv6
+         network address/prefix triples in a set.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_HASH_NET
+       tristate "hash:net set support"
+       depends on IP_SET
+       help
+         This option adds the hash:net set type support, by which
+         one can store IPv4/IPv6 network address/prefix elements in a set.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_HASH_NETPORT
+       tristate "hash:net,port set support"
+       depends on IP_SET
+       help
+         This option adds the hash:net,port set type support, by which
+         one can store IPv4/IPv6 network address/prefix and
+         protocol/port pairs as elements in a set.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_LIST_SET
+       tristate "list:set set support"
+       depends on IP_SET
+       help
+         This option adds the list:set set type support. In this
+         kind of set one can store the names of other sets and it forms
+         an ordered union of the member sets.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+endif # IP_SET
diff --git a/net/netfilter/ipset/Makefile b/net/netfilter/ipset/Makefile
new file mode 100644 (file)
index 0000000..5adbdab
--- /dev/null
@@ -0,0 +1,24 @@
+#
+# Makefile for the ipset modules
+#
+
+ip_set-y := ip_set_core.o ip_set_getport.o pfxlen.o
+
+# ipset core
+obj-$(CONFIG_IP_SET) += ip_set.o
+
+# bitmap types
+obj-$(CONFIG_IP_SET_BITMAP_IP) += ip_set_bitmap_ip.o
+obj-$(CONFIG_IP_SET_BITMAP_IPMAC) += ip_set_bitmap_ipmac.o
+obj-$(CONFIG_IP_SET_BITMAP_PORT) += ip_set_bitmap_port.o
+
+# hash types
+obj-$(CONFIG_IP_SET_HASH_IP) += ip_set_hash_ip.o
+obj-$(CONFIG_IP_SET_HASH_IPPORT) += ip_set_hash_ipport.o
+obj-$(CONFIG_IP_SET_HASH_IPPORTIP) += ip_set_hash_ipportip.o
+obj-$(CONFIG_IP_SET_HASH_IPPORTNET) += ip_set_hash_ipportnet.o
+obj-$(CONFIG_IP_SET_HASH_NET) += ip_set_hash_net.o
+obj-$(CONFIG_IP_SET_HASH_NETPORT) += ip_set_hash_netport.o
+
+# list types
+obj-$(CONFIG_IP_SET_LIST_SET) += ip_set_list_set.o
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
new file mode 100644 (file)
index 0000000..bca9699
--- /dev/null
@@ -0,0 +1,587 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ *                         Patrick Schaaf <bof@bof.de>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the bitmap:ip type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <linux/spinlock.h>
+#include <linux/netlink.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_bitmap.h>
+#define IP_SET_BITMAP_TIMEOUT
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("bitmap:ip type of IP sets");
+MODULE_ALIAS("ip_set_bitmap:ip");
+
+/* Type structure */
+struct bitmap_ip {
+       void *members;          /* the set members */
+       u32 first_ip;           /* host byte order, included in range */
+       u32 last_ip;            /* host byte order, included in range */
+       u32 elements;           /* number of max elements in the set */
+       u32 hosts;              /* number of hosts in a subnet */
+       size_t memsize;         /* members size */
+       u8 netmask;             /* subnet netmask */
+       u32 timeout;            /* timeout parameter */
+       struct timer_list gc;   /* garbage collection */
+};
+
+/* Base variant */
+
+static inline u32
+ip_to_id(const struct bitmap_ip *m, u32 ip)
+{
+       return ((ip & ip_set_hostmask(m->netmask)) - m->first_ip)/m->hosts;
+}
+
+static int
+bitmap_ip_test(struct ip_set *set, void *value, u32 timeout)
+{
+       const struct bitmap_ip *map = set->data;
+       u16 id = *(u16 *)value;
+
+       return !!test_bit(id, map->members);
+}
+
+static int
+bitmap_ip_add(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_ip *map = set->data;
+       u16 id = *(u16 *)value;
+
+       if (test_and_set_bit(id, map->members))
+               return -IPSET_ERR_EXIST;
+
+       return 0;
+}
+
+static int
+bitmap_ip_del(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_ip *map = set->data;
+       u16 id = *(u16 *)value;
+
+       if (!test_and_clear_bit(id, map->members))
+               return -IPSET_ERR_EXIST;
+
+       return 0;
+}
+
+static int
+bitmap_ip_list(const struct ip_set *set,
+              struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct bitmap_ip *map = set->data;
+       struct nlattr *atd, *nested;
+       u32 id, first = cb->args[2];
+
+       atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+       if (!atd)
+               return -EMSGSIZE;
+       for (; cb->args[2] < map->elements; cb->args[2]++) {
+               id = cb->args[2];
+               if (!test_bit(id, map->members))
+                       continue;
+               nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+               if (!nested) {
+                       if (id == first) {
+                               nla_nest_cancel(skb, atd);
+                               return -EMSGSIZE;
+                       } else
+                               goto nla_put_failure;
+               }
+               NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+                               htonl(map->first_ip + id * map->hosts));
+               ipset_nest_end(skb, nested);
+       }
+       ipset_nest_end(skb, atd);
+       /* Set listing finished */
+       cb->args[2] = 0;
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, nested);
+       ipset_nest_end(skb, atd);
+       if (unlikely(id == first)) {
+               cb->args[2] = 0;
+               return -EMSGSIZE;
+       }
+       return 0;
+}
+
+/* Timeout variant */
+
+static int
+bitmap_ip_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+       const struct bitmap_ip *map = set->data;
+       const unsigned long *members = map->members;
+       u16 id = *(u16 *)value;
+
+       return ip_set_timeout_test(members[id]);
+}
+
+static int
+bitmap_ip_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_ip *map = set->data;
+       unsigned long *members = map->members;
+       u16 id = *(u16 *)value;
+
+       if (ip_set_timeout_test(members[id]))
+               return -IPSET_ERR_EXIST;
+
+       members[id] = ip_set_timeout_set(timeout);
+
+       return 0;
+}
+
+static int
+bitmap_ip_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_ip *map = set->data;
+       unsigned long *members = map->members;
+       u16 id = *(u16 *)value;
+       int ret = -IPSET_ERR_EXIST;
+
+       if (ip_set_timeout_test(members[id]))
+               ret = 0;
+
+       members[id] = IPSET_ELEM_UNSET;
+       return ret;
+}
+
+static int
+bitmap_ip_tlist(const struct ip_set *set,
+               struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct bitmap_ip *map = set->data;
+       struct nlattr *adt, *nested;
+       u32 id, first = cb->args[2];
+       const unsigned long *members = map->members;
+
+       adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
+       if (!adt)
+               return -EMSGSIZE;
+       for (; cb->args[2] < map->elements; cb->args[2]++) {
+               id = cb->args[2];
+               if (!ip_set_timeout_test(members[id]))
+                       continue;
+               nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+               if (!nested) {
+                       if (id == first) {
+                               nla_nest_cancel(skb, adt);
+                               return -EMSGSIZE;
+                       } else
+                               goto nla_put_failure;
+               }
+               NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+                               htonl(map->first_ip + id * map->hosts));
+               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                             htonl(ip_set_timeout_get(members[id])));
+               ipset_nest_end(skb, nested);
+       }
+       ipset_nest_end(skb, adt);
+
+       /* Set listing finished */
+       cb->args[2] = 0;
+
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, nested);
+       ipset_nest_end(skb, adt);
+       if (unlikely(id == first)) {
+               cb->args[2] = 0;
+               return -EMSGSIZE;
+       }
+       return 0;
+}
+
+static int
+bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb,
+              enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       struct bitmap_ip *map = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       u32 ip;
+
+       ip = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
+       if (ip < map->first_ip || ip > map->last_ip)
+               return -IPSET_ERR_BITMAP_RANGE;
+
+       ip = ip_to_id(map, ip);
+
+       return adtfn(set, &ip, map->timeout);
+}
+
+static int
+bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
+              enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       struct bitmap_ip *map = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       u32 timeout = map->timeout;
+       u32 ip, ip_to, id;
+       int ret = 0;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+       if (ret)
+               return ret;
+
+       if (ip < map->first_ip || ip > map->last_ip)
+               return -IPSET_ERR_BITMAP_RANGE;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(map->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST) {
+               id = ip_to_id(map, ip);
+               return adtfn(set, &id, timeout);
+       }
+
+       if (tb[IPSET_ATTR_IP_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+               if (ret)
+                       return ret;
+               if (ip > ip_to) {
+                       swap(ip, ip_to);
+                       if (ip < map->first_ip)
+                               return -IPSET_ERR_BITMAP_RANGE;
+               }
+       } else if (tb[IPSET_ATTR_CIDR]) {
+               u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+               if (cidr > 32)
+                       return -IPSET_ERR_INVALID_CIDR;
+               ip &= ip_set_hostmask(cidr);
+               ip_to = ip | ~ip_set_hostmask(cidr);
+       } else
+               ip_to = ip;
+
+       if (ip_to > map->last_ip)
+               return -IPSET_ERR_BITMAP_RANGE;
+
+       for (; !before(ip_to, ip); ip += map->hosts) {
+               id = ip_to_id(map, ip);
+               ret = adtfn(set, &id, timeout);
+
+               if (ret && !ip_set_eexist(ret, flags))
+                       return ret;
+               else
+                       ret = 0;
+       }
+       return ret;
+}
+
+static void
+bitmap_ip_destroy(struct ip_set *set)
+{
+       struct bitmap_ip *map = set->data;
+
+       if (with_timeout(map->timeout))
+               del_timer_sync(&map->gc);
+
+       ip_set_free(map->members);
+       kfree(map);
+
+       set->data = NULL;
+}
+
+static void
+bitmap_ip_flush(struct ip_set *set)
+{
+       struct bitmap_ip *map = set->data;
+
+       memset(map->members, 0, map->memsize);
+}
+
+static int
+bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
+{
+       const struct bitmap_ip *map = set->data;
+       struct nlattr *nested;
+
+       nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+       if (!nested)
+               goto nla_put_failure;
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
+       if (map->netmask != 32)
+               NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask);
+       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+                     htonl(atomic_read(&set->ref) - 1));
+       NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+                     htonl(sizeof(*map) + map->memsize));
+       if (with_timeout(map->timeout))
+               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+       ipset_nest_end(skb, nested);
+
+       return 0;
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static bool
+bitmap_ip_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+       const struct bitmap_ip *x = a->data;
+       const struct bitmap_ip *y = b->data;
+
+       return x->first_ip == y->first_ip &&
+              x->last_ip == y->last_ip &&
+              x->netmask == y->netmask &&
+              x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant bitmap_ip = {
+       .kadt   = bitmap_ip_kadt,
+       .uadt   = bitmap_ip_uadt,
+       .adt    = {
+               [IPSET_ADD] = bitmap_ip_add,
+               [IPSET_DEL] = bitmap_ip_del,
+               [IPSET_TEST] = bitmap_ip_test,
+       },
+       .destroy = bitmap_ip_destroy,
+       .flush  = bitmap_ip_flush,
+       .head   = bitmap_ip_head,
+       .list   = bitmap_ip_list,
+       .same_set = bitmap_ip_same_set,
+};
+
+static const struct ip_set_type_variant bitmap_tip = {
+       .kadt   = bitmap_ip_kadt,
+       .uadt   = bitmap_ip_uadt,
+       .adt    = {
+               [IPSET_ADD] = bitmap_ip_tadd,
+               [IPSET_DEL] = bitmap_ip_tdel,
+               [IPSET_TEST] = bitmap_ip_ttest,
+       },
+       .destroy = bitmap_ip_destroy,
+       .flush  = bitmap_ip_flush,
+       .head   = bitmap_ip_head,
+       .list   = bitmap_ip_tlist,
+       .same_set = bitmap_ip_same_set,
+};
+
+static void
+bitmap_ip_gc(unsigned long ul_set)
+{
+       struct ip_set *set = (struct ip_set *) ul_set;
+       struct bitmap_ip *map = set->data;
+       unsigned long *table = map->members;
+       u32 id;
+
+       /* We run parallel with other readers (test element)
+        * but adding/deleting new entries is locked out */
+       read_lock_bh(&set->lock);
+       for (id = 0; id < map->elements; id++)
+               if (ip_set_timeout_expired(table[id]))
+                       table[id] = IPSET_ELEM_UNSET;
+       read_unlock_bh(&set->lock);
+
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       add_timer(&map->gc);
+}
+
+static void
+bitmap_ip_gc_init(struct ip_set *set)
+{
+       struct bitmap_ip *map = set->data;
+
+       init_timer(&map->gc);
+       map->gc.data = (unsigned long) set;
+       map->gc.function = bitmap_ip_gc;
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       add_timer(&map->gc);
+}
+
+/* Create bitmap:ip type of sets */
+
+static bool
+init_map_ip(struct ip_set *set, struct bitmap_ip *map,
+           u32 first_ip, u32 last_ip,
+           u32 elements, u32 hosts, u8 netmask)
+{
+       map->members = ip_set_alloc(map->memsize);
+       if (!map->members)
+               return false;
+       map->first_ip = first_ip;
+       map->last_ip = last_ip;
+       map->elements = elements;
+       map->hosts = hosts;
+       map->netmask = netmask;
+       map->timeout = IPSET_NO_TIMEOUT;
+
+       set->data = map;
+       set->family = AF_INET;
+
+       return true;
+}
+
+static int
+bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+       struct bitmap_ip *map;
+       u32 first_ip, last_ip, hosts, elements;
+       u8 netmask = 32;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_IP_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
+               if (ret)
+                       return ret;
+               if (first_ip > last_ip) {
+                       u32 tmp = first_ip;
+
+                       first_ip = last_ip;
+                       last_ip = tmp;
+               }
+       } else if (tb[IPSET_ATTR_CIDR]) {
+               u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+               if (cidr >= 32)
+                       return -IPSET_ERR_INVALID_CIDR;
+               last_ip = first_ip | ~ip_set_hostmask(cidr);
+       } else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_NETMASK]) {
+               netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
+
+               if (netmask > 32)
+                       return -IPSET_ERR_INVALID_NETMASK;
+
+               first_ip &= ip_set_hostmask(netmask);
+               last_ip |= ~ip_set_hostmask(netmask);
+       }
+
+       if (netmask == 32) {
+               hosts = 1;
+               elements = last_ip - first_ip + 1;
+       } else {
+               u8 mask_bits;
+               u32 mask;
+
+               mask = range_to_mask(first_ip, last_ip, &mask_bits);
+
+               if ((!mask && (first_ip || last_ip != 0xFFFFFFFF)) ||
+                   netmask <= mask_bits)
+                       return -IPSET_ERR_BITMAP_RANGE;
+
+               pr_debug("mask_bits %u, netmask %u\n", mask_bits, netmask);
+               hosts = 2 << (32 - netmask - 1);
+               elements = 2 << (netmask - mask_bits - 1);
+       }
+       if (elements > IPSET_BITMAP_MAX_RANGE + 1)
+               return -IPSET_ERR_BITMAP_RANGE_SIZE;
+
+       pr_debug("hosts %u, elements %u\n", hosts, elements);
+
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
+       if (!map)
+               return -ENOMEM;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               map->memsize = elements * sizeof(unsigned long);
+
+               if (!init_map_ip(set, map, first_ip, last_ip,
+                                elements, hosts, netmask)) {
+                       kfree(map);
+                       return -ENOMEM;
+               }
+
+               map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+               set->variant = &bitmap_tip;
+
+               bitmap_ip_gc_init(set);
+       } else {
+               map->memsize = bitmap_bytes(0, elements - 1);
+
+               if (!init_map_ip(set, map, first_ip, last_ip,
+                                elements, hosts, netmask)) {
+                       kfree(map);
+                       return -ENOMEM;
+               }
+
+               set->variant = &bitmap_ip;
+       }
+       return 0;
+}
+
+static struct ip_set_type bitmap_ip_type __read_mostly = {
+       .name           = "bitmap:ip",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_IP,
+       .dimension      = IPSET_DIM_ONE,
+       .family         = AF_INET,
+       .revision       = 0,
+       .create         = bitmap_ip_create,
+       .create_policy  = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP_TO]      = { .type = NLA_NESTED },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_NETMASK]    = { .type = NLA_U8  },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP_TO]      = { .type = NLA_NESTED },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+bitmap_ip_init(void)
+{
+       return ip_set_type_register(&bitmap_ip_type);
+}
+
+static void __exit
+bitmap_ip_fini(void)
+{
+       ip_set_type_unregister(&bitmap_ip_type);
+}
+
+module_init(bitmap_ip_init);
+module_exit(bitmap_ip_fini);
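
To make the arithmetic of the new bitmap:ip type concrete: ip_to_id() maps an address to its slot by masking with the set's netmask, subtracting first_ip and dividing by the per-element host count, and member storage is either one bit per element (plain variant) or one unsigned long per element (timeout variant). A worked user-space example under assumed values (range 192.168.0.0-192.168.3.255 with netmask 24, purely illustrative):

#include <stdint.h>
#include <stdio.h>

/* Same idea as ip_set_hostmask(): an all-ones prefix of 'cidr' bits. */
static uint32_t hostmask(uint8_t cidr)
{
        return cidr == 0 ? 0 : 0xFFFFFFFFu << (32 - cidr);
}

int main(void)
{
        uint32_t first_ip = 0xC0A80000;           /* 192.168.0.0             */
        uint8_t  netmask  = 24;
        uint32_t hosts    = 1u << (32 - netmask); /* 256 addresses per slot  */
        uint32_t elements = 4;                    /* the /22 split into /24s */

        uint32_t ip = 0xC0A80205;                 /* 192.168.2.5 */
        uint32_t id = ((ip & hostmask(netmask)) - first_ip) / hosts;

        printf("id = %u\n", (unsigned)id);        /* 2 */
        /* Plain variant: ~1 bit per element (the kernel rounds to longs). */
        printf("bitmap:  %u bytes\n", (unsigned)((elements + 7) / 8));
        /* Timeout variant: one unsigned long (timeout stamp) per element. */
        printf("timeout: %zu bytes\n", (size_t)elements * sizeof(unsigned long));
        return 0;
}
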
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
new file mode 100644 (file)
index 0000000..5e79017
--- /dev/null
@@ -0,0 +1,652 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ *                         Patrick Schaaf <bof@bof.de>
+ *                        Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the bitmap:ip,mac type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+#include <linux/netlink.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_bitmap.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("bitmap:ip,mac type of IP sets");
+MODULE_ALIAS("ip_set_bitmap:ip,mac");
+
+enum {
+       MAC_EMPTY,              /* element is not set */
+       MAC_FILLED,             /* element is set with MAC */
+       MAC_UNSET,              /* element is set, without MAC */
+};
+
+/* Type structure */
+struct bitmap_ipmac {
+       void *members;          /* the set members */
+       u32 first_ip;           /* host byte order, included in range */
+       u32 last_ip;            /* host byte order, included in range */
+       u32 timeout;            /* timeout value */
+       struct timer_list gc;   /* garbage collector */
+       size_t dsize;           /* size of element */
+};
+
+/* ADT structure for generic function args */
+struct ipmac {
+       u32 id;                 /* id in array */
+       unsigned char *ether;   /* ethernet address */
+};
+
+/* Member element without and with timeout */
+
+struct ipmac_elem {
+       unsigned char ether[ETH_ALEN];
+       unsigned char match;
+} __attribute__ ((aligned));
+
+struct ipmac_telem {
+       unsigned char ether[ETH_ALEN];
+       unsigned char match;
+       unsigned long timeout;
+} __attribute__ ((aligned));
+
+static inline void *
+bitmap_ipmac_elem(const struct bitmap_ipmac *map, u32 id)
+{
+       return (void *)((char *)map->members + id * map->dsize);
+}
+
+static inline bool
+bitmap_timeout(const struct bitmap_ipmac *map, u32 id)
+{
+       const struct ipmac_telem *elem = bitmap_ipmac_elem(map, id);
+
+       return ip_set_timeout_test(elem->timeout);
+}
+
+static inline bool
+bitmap_expired(const struct bitmap_ipmac *map, u32 id)
+{
+       const struct ipmac_telem *elem = bitmap_ipmac_elem(map, id);
+
+       return ip_set_timeout_expired(elem->timeout);
+}
+
+static inline int
+bitmap_ipmac_exist(const struct ipmac_telem *elem)
+{
+       return elem->match == MAC_UNSET ||
+              (elem->match == MAC_FILLED &&
+               !ip_set_timeout_expired(elem->timeout));
+}
+
+/* Base variant */
+
+static int
+bitmap_ipmac_test(struct ip_set *set, void *value, u32 timeout)
+{
+       const struct bitmap_ipmac *map = set->data;
+       const struct ipmac *data = value;
+       const struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+       switch (elem->match) {
+       case MAC_UNSET:
+               /* Trigger kernel to fill out the ethernet address */
+               return -EAGAIN;
+       case MAC_FILLED:
+               return data->ether == NULL ||
+                      compare_ether_addr(data->ether, elem->ether) == 0;
+       }
+       return 0;
+}
+
+static int
+bitmap_ipmac_add(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_ipmac *map = set->data;
+       const struct ipmac *data = value;
+       struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+       switch (elem->match) {
+       case MAC_UNSET:
+               if (!data->ether)
+                       /* Already added without ethernet address */
+                       return -IPSET_ERR_EXIST;
+               /* Fill the MAC address */
+               memcpy(elem->ether, data->ether, ETH_ALEN);
+               elem->match = MAC_FILLED;
+               break;
+       case MAC_FILLED:
+               return -IPSET_ERR_EXIST;
+       case MAC_EMPTY:
+               if (data->ether) {
+                       memcpy(elem->ether, data->ether, ETH_ALEN);
+                       elem->match = MAC_FILLED;
+               } else
+                       elem->match = MAC_UNSET;
+       }
+
+       return 0;
+}
+
+static int
+bitmap_ipmac_del(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_ipmac *map = set->data;
+       const struct ipmac *data = value;
+       struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+       if (elem->match == MAC_EMPTY)
+               return -IPSET_ERR_EXIST;
+
+       elem->match = MAC_EMPTY;
+
+       return 0;
+}
+
+static int
+bitmap_ipmac_list(const struct ip_set *set,
+                 struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct bitmap_ipmac *map = set->data;
+       const struct ipmac_elem *elem;
+       struct nlattr *atd, *nested;
+       u32 id, first = cb->args[2];
+       u32 last = map->last_ip - map->first_ip;
+
+       atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+       if (!atd)
+               return -EMSGSIZE;
+       for (; cb->args[2] <= last; cb->args[2]++) {
+               id = cb->args[2];
+               elem = bitmap_ipmac_elem(map, id);
+               if (elem->match == MAC_EMPTY)
+                       continue;
+               nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+               if (!nested) {
+                       if (id == first) {
+                               nla_nest_cancel(skb, atd);
+                               return -EMSGSIZE;
+                       } else
+                               goto nla_put_failure;
+               }
+               NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+                               htonl(map->first_ip + id));
+               if (elem->match == MAC_FILLED)
+                       NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
+                               elem->ether);
+               ipset_nest_end(skb, nested);
+       }
+       ipset_nest_end(skb, atd);
+       /* Set listing finished */
+       cb->args[2] = 0;
+
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, nested);
+       ipset_nest_end(skb, atd);
+       if (unlikely(id == first)) {
+               cb->args[2] = 0;
+               return -EMSGSIZE;
+       }
+       return 0;
+}
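+
+/* Listing uses the netlink dump machinery: cb->args[2] holds the id to
+ * resume from. If even the first element of a batch does not fit into
+ * the skb, the whole batch is cancelled and -EMSGSIZE is returned so
+ * the dump is retried; otherwise the partial batch is closed and sent,
+ * and the next call continues from cb->args[2]. A zeroed cb->args[2]
+ * marks the end of the listing.
+ */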
+
+/* Timeout variant */
+
+static int
+bitmap_ipmac_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+       const struct bitmap_ipmac *map = set->data;
+       const struct ipmac *data = value;
+       const struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+       switch (elem->match) {
+       case MAC_UNSET:
+               /* Trigger kernel to fill out the ethernet address */
+               return -EAGAIN;
+       case MAC_FILLED:
+               return (data->ether == NULL ||
+                       compare_ether_addr(data->ether, elem->ether) == 0) &&
+                      !bitmap_expired(map, data->id);
+       }
+       return 0;
+}
+
+static int
+bitmap_ipmac_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_ipmac *map = set->data;
+       const struct ipmac *data = value;
+       struct ipmac_telem *elem = bitmap_ipmac_elem(map, data->id);
+
+       switch (elem->match) {
+       case MAC_UNSET:
+               if (!data->ether)
+                       /* Already added without ethernet address */
+                       return -IPSET_ERR_EXIST;
+               /* Fill the MAC address and activate the timer */
+               memcpy(elem->ether, data->ether, ETH_ALEN);
+               elem->match = MAC_FILLED;
+               if (timeout == map->timeout)
+                       /* Timeout was not specified, use the stored one */
+                       timeout = elem->timeout;
+               elem->timeout = ip_set_timeout_set(timeout);
+               break;
+       case MAC_FILLED:
+               if (!bitmap_expired(map, data->id))
+                       return -IPSET_ERR_EXIST;
+               /* Fall through */
+       case MAC_EMPTY:
+               if (data->ether) {
+                       memcpy(elem->ether, data->ether, ETH_ALEN);
+                       elem->match = MAC_FILLED;
+               } else
+                       elem->match = MAC_UNSET;
+               /* If MAC is unset yet, we store plain timeout value
+                * because the timer is not activated yet
+                * and we can reuse it later when MAC is filled out,
+                * possibly by the kernel */
+               elem->timeout = data->ether ? ip_set_timeout_set(timeout)
+                                           : timeout;
+               break;
+       }
+
+       return 0;
+}
+
+static int
+bitmap_ipmac_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_ipmac *map = set->data;
+       const struct ipmac *data = value;
+       struct ipmac_telem *elem = bitmap_ipmac_elem(map, data->id);
+
+       if (elem->match == MAC_EMPTY || bitmap_expired(map, data->id))
+               return -IPSET_ERR_EXIST;
+
+       elem->match = MAC_EMPTY;
+
+       return 0;
+}
+
+static int
+bitmap_ipmac_tlist(const struct ip_set *set,
+                  struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct bitmap_ipmac *map = set->data;
+       const struct ipmac_telem *elem;
+       struct nlattr *atd, *nested;
+       u32 id, first = cb->args[2];
+       u32 timeout, last = map->last_ip - map->first_ip;
+
+       atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+       if (!atd)
+               return -EMSGSIZE;
+       for (; cb->args[2] <= last; cb->args[2]++) {
+               id = cb->args[2];
+               elem = bitmap_ipmac_elem(map, id);
+               if (!bitmap_ipmac_exist(elem))
+                       continue;
+               nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+               if (!nested) {
+                       if (id == first) {
+                               nla_nest_cancel(skb, atd);
+                               return -EMSGSIZE;
+                       } else
+                               goto nla_put_failure;
+               }
+               NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+                               htonl(map->first_ip + id));
+               if (elem->match == MAC_FILLED)
+                       NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
+                               elem->ether);
+               timeout = elem->match == MAC_UNSET ? elem->timeout
+                               : ip_set_timeout_get(elem->timeout);
+               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout));
+               ipset_nest_end(skb, nested);
+       }
+       ipset_nest_end(skb, atd);
+       /* Set listing finished */
+       cb->args[2] = 0;
+
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, nested);
+       ipset_nest_end(skb, atd);
+       return -EMSGSIZE;
+}
+
+static int
+bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
+                 enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       struct bitmap_ipmac *map = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct ipmac data;
+
+       data.id = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
+       if (data.id < map->first_ip || data.id > map->last_ip)
+               return -IPSET_ERR_BITMAP_RANGE;
+
+       /* Backward compatibility: we don't check the second flag */
+       if (skb_mac_header(skb) < skb->head ||
+           (skb_mac_header(skb) + ETH_HLEN) > skb->data)
+               return -EINVAL;
+
+       data.id -= map->first_ip;
+       data.ether = eth_hdr(skb)->h_source;
+
+       return adtfn(set, &data, map->timeout);
+}
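+
+/* Packet path (kadt): the IPv4 address is converted to host byte
+ * order, range-checked against [first_ip, last_ip] and then turned
+ * into an array offset by subtracting first_ip; the MAC is taken from
+ * the source address of the Ethernet header, provided the mac header
+ * is present in the skb.
+ */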
+
+static int
+bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
+                 enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct bitmap_ipmac *map = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct ipmac data;
+       u32 timeout = map->timeout;
+       int ret = 0;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &data.id);
+       if (ret)
+               return ret;
+
+       if (data.id < map->first_ip || data.id > map->last_ip)
+               return -IPSET_ERR_BITMAP_RANGE;
+
+       if (tb[IPSET_ATTR_ETHER])
+               data.ether = nla_data(tb[IPSET_ATTR_ETHER]);
+       else
+               data.ether = NULL;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(map->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       data.id -= map->first_ip;
+
+       ret = adtfn(set, &data, timeout);
+
+       return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+static void
+bitmap_ipmac_destroy(struct ip_set *set)
+{
+       struct bitmap_ipmac *map = set->data;
+
+       if (with_timeout(map->timeout))
+               del_timer_sync(&map->gc);
+
+       ip_set_free(map->members);
+       kfree(map);
+
+       set->data = NULL;
+}
+
+static void
+bitmap_ipmac_flush(struct ip_set *set)
+{
+       struct bitmap_ipmac *map = set->data;
+
+       memset(map->members, 0,
+              (map->last_ip - map->first_ip + 1) * map->dsize);
+}
+
+static int
+bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb)
+{
+       const struct bitmap_ipmac *map = set->data;
+       struct nlattr *nested;
+
+       nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+       if (!nested)
+               goto nla_put_failure;
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
+       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+                     htonl(atomic_read(&set->ref) - 1));
+       NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+                     htonl(sizeof(*map)
+                           + (map->last_ip - map->first_ip + 1) * map->dsize));
+       if (with_timeout(map->timeout))
+               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+       ipset_nest_end(skb, nested);
+
+       return 0;
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static bool
+bitmap_ipmac_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+       const struct bitmap_ipmac *x = a->data;
+       const struct bitmap_ipmac *y = b->data;
+
+       return x->first_ip == y->first_ip &&
+              x->last_ip == y->last_ip &&
+              x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant bitmap_ipmac = {
+       .kadt   = bitmap_ipmac_kadt,
+       .uadt   = bitmap_ipmac_uadt,
+       .adt    = {
+               [IPSET_ADD] = bitmap_ipmac_add,
+               [IPSET_DEL] = bitmap_ipmac_del,
+               [IPSET_TEST] = bitmap_ipmac_test,
+       },
+       .destroy = bitmap_ipmac_destroy,
+       .flush  = bitmap_ipmac_flush,
+       .head   = bitmap_ipmac_head,
+       .list   = bitmap_ipmac_list,
+       .same_set = bitmap_ipmac_same_set,
+};
+
+static const struct ip_set_type_variant bitmap_tipmac = {
+       .kadt   = bitmap_ipmac_kadt,
+       .uadt   = bitmap_ipmac_uadt,
+       .adt    = {
+               [IPSET_ADD] = bitmap_ipmac_tadd,
+               [IPSET_DEL] = bitmap_ipmac_tdel,
+               [IPSET_TEST] = bitmap_ipmac_ttest,
+       },
+       .destroy = bitmap_ipmac_destroy,
+       .flush  = bitmap_ipmac_flush,
+       .head   = bitmap_ipmac_head,
+       .list   = bitmap_ipmac_tlist,
+       .same_set = bitmap_ipmac_same_set,
+};
+
+static void
+bitmap_ipmac_gc(unsigned long ul_set)
+{
+       struct ip_set *set = (struct ip_set *) ul_set;
+       struct bitmap_ipmac *map = set->data;
+       struct ipmac_telem *elem;
+       u32 id, last = map->last_ip - map->first_ip;
+
+       /* We run in parallel with other readers (element tests),
+        * but adding/deleting entries is locked out */
+       read_lock_bh(&set->lock);
+       for (id = 0; id <= last; id++) {
+               elem = bitmap_ipmac_elem(map, id);
+               if (elem->match == MAC_FILLED &&
+                   ip_set_timeout_expired(elem->timeout))
+                       elem->match = MAC_EMPTY;
+       }
+       read_unlock_bh(&set->lock);
+
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       add_timer(&map->gc);
+}
+
+static void
+bitmap_ipmac_gc_init(struct ip_set *set)
+{
+       struct bitmap_ipmac *map = set->data;
+
+       init_timer(&map->gc);
+       map->gc.data = (unsigned long) set;
+       map->gc.function = bitmap_ipmac_gc;
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       add_timer(&map->gc);
+}
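+
+/* The garbage collector is armed only for the timeout variant (see
+ * bitmap_ipmac_create below): the timer re-arms itself every
+ * IPSET_GC_PERIOD(timeout) seconds and marks expired MAC_FILLED
+ * entries as MAC_EMPTY under the read lock, in parallel with tests.
+ */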
+
+/* Create bitmap:ip,mac type of sets */
+
+static bool
+init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
+              u32 first_ip, u32 last_ip)
+{
+       map->members = ip_set_alloc((last_ip - first_ip + 1) * map->dsize);
+       if (!map->members)
+               return false;
+       map->first_ip = first_ip;
+       map->last_ip = last_ip;
+       map->timeout = IPSET_NO_TIMEOUT;
+
+       set->data = map;
+       set->family = AF_INET;
+
+       return true;
+}
+
+static int
+bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
+                   u32 flags)
+{
+       u32 first_ip, last_ip, elements;
+       struct bitmap_ipmac *map;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_IP_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
+               if (ret)
+                       return ret;
+               if (first_ip > last_ip) {
+                       u32 tmp = first_ip;
+
+                       first_ip = last_ip;
+                       last_ip = tmp;
+               }
+       } else if (tb[IPSET_ATTR_CIDR]) {
+               u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+               if (cidr >= 32)
+                       return -IPSET_ERR_INVALID_CIDR;
+               last_ip = first_ip | ~ip_set_hostmask(cidr);
+       } else
+               return -IPSET_ERR_PROTOCOL;
+
+       elements = last_ip - first_ip + 1;
+
+       if (elements > IPSET_BITMAP_MAX_RANGE + 1)
+               return -IPSET_ERR_BITMAP_RANGE_SIZE;
+
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
+       if (!map)
+               return -ENOMEM;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               map->dsize = sizeof(struct ipmac_telem);
+
+               if (!init_map_ipmac(set, map, first_ip, last_ip)) {
+                       kfree(map);
+                       return -ENOMEM;
+               }
+
+               map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+               set->variant = &bitmap_tipmac;
+
+               bitmap_ipmac_gc_init(set);
+       } else {
+               map->dsize = sizeof(struct ipmac_elem);
+
+               if (!init_map_ipmac(set, map, first_ip, last_ip)) {
+                       kfree(map);
+                       return -ENOMEM;
+               }
+               set->variant = &bitmap_ipmac;
+
+       }
+       return 0;
+}
+
+static struct ip_set_type bitmap_ipmac_type = {
+       .name           = "bitmap:ip,mac",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_MAC,
+       .dimension      = IPSET_DIM_TWO,
+       .family         = AF_INET,
+       .revision       = 0,
+       .create         = bitmap_ipmac_create,
+       .create_policy  = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP_TO]      = { .type = NLA_NESTED },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_ETHER]      = { .type = NLA_BINARY, .len  = ETH_ALEN },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+bitmap_ipmac_init(void)
+{
+       return ip_set_type_register(&bitmap_ipmac_type);
+}
+
+static void __exit
+bitmap_ipmac_fini(void)
+{
+       ip_set_type_unregister(&bitmap_ipmac_type);
+}
+
+module_init(bitmap_ipmac_init);
+module_exit(bitmap_ipmac_fini);
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
new file mode 100644 (file)
index 0000000..165f09b
--- /dev/null
@@ -0,0 +1,515 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the bitmap:port type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/netlink.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_bitmap.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#define IP_SET_BITMAP_TIMEOUT
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("bitmap:port type of IP sets");
+MODULE_ALIAS("ip_set_bitmap:port");
+
+/* Type structure */
+struct bitmap_port {
+       void *members;          /* the set members */
+       u16 first_port;         /* host byte order, included in range */
+       u16 last_port;          /* host byte order, included in range */
+       size_t memsize;         /* members size */
+       u32 timeout;            /* timeout parameter */
+       struct timer_list gc;   /* garbage collection */
+};
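+
+/* In the plain variant, members is a bitmap with one bit per port in
+ * the [first_port, last_port] range; in the timeout variant it is an
+ * array of unsigned long timeout values, one slot per port.
+ */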
+
+/* Base variant */
+
+static int
+bitmap_port_test(struct ip_set *set, void *value, u32 timeout)
+{
+       const struct bitmap_port *map = set->data;
+       u16 id = *(u16 *)value;
+
+       return !!test_bit(id, map->members);
+}
+
+static int
+bitmap_port_add(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_port *map = set->data;
+       u16 id = *(u16 *)value;
+
+       if (test_and_set_bit(id, map->members))
+               return -IPSET_ERR_EXIST;
+
+       return 0;
+}
+
+static int
+bitmap_port_del(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_port *map = set->data;
+       u16 id = *(u16 *)value;
+
+       if (!test_and_clear_bit(id, map->members))
+               return -IPSET_ERR_EXIST;
+
+       return 0;
+}
+
+static int
+bitmap_port_list(const struct ip_set *set,
+                struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct bitmap_port *map = set->data;
+       struct nlattr *atd, *nested;
+       u16 id, first = cb->args[2];
+       u16 last = map->last_port - map->first_port;
+
+       atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+       if (!atd)
+               return -EMSGSIZE;
+       for (; cb->args[2] <= last; cb->args[2]++) {
+               id = cb->args[2];
+               if (!test_bit(id, map->members))
+                       continue;
+               nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+               if (!nested) {
+                       if (id == first) {
+                               nla_nest_cancel(skb, atd);
+                               return -EMSGSIZE;
+                       } else
+                               goto nla_put_failure;
+               }
+               NLA_PUT_NET16(skb, IPSET_ATTR_PORT,
+                             htons(map->first_port + id));
+               ipset_nest_end(skb, nested);
+       }
+       ipset_nest_end(skb, atd);
+       /* Set listing finished */
+       cb->args[2] = 0;
+
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, nested);
+       ipset_nest_end(skb, atd);
+       if (unlikely(id == first)) {
+               cb->args[2] = 0;
+               return -EMSGSIZE;
+       }
+       return 0;
+}
+
+/* Timeout variant */
+
+static int
+bitmap_port_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+       const struct bitmap_port *map = set->data;
+       const unsigned long *members = map->members;
+       u16 id = *(u16 *)value;
+
+       return ip_set_timeout_test(members[id]);
+}
+
+static int
+bitmap_port_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_port *map = set->data;
+       unsigned long *members = map->members;
+       u16 id = *(u16 *)value;
+
+       if (ip_set_timeout_test(members[id]))
+               return -IPSET_ERR_EXIST;
+
+       members[id] = ip_set_timeout_set(timeout);
+
+       return 0;
+}
+
+static int
+bitmap_port_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+       struct bitmap_port *map = set->data;
+       unsigned long *members = map->members;
+       u16 id = *(u16 *)value;
+       int ret = -IPSET_ERR_EXIST;
+
+       if (ip_set_timeout_test(members[id]))
+               ret = 0;
+
+       members[id] = IPSET_ELEM_UNSET;
+       return ret;
+}
+
+static int
+bitmap_port_tlist(const struct ip_set *set,
+                 struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct bitmap_port *map = set->data;
+       struct nlattr *adt, *nested;
+       u16 id, first = cb->args[2];
+       u16 last = map->last_port - map->first_port;
+       const unsigned long *members = map->members;
+
+       adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
+       if (!adt)
+               return -EMSGSIZE;
+       for (; cb->args[2] <= last; cb->args[2]++) {
+               id = cb->args[2];
+               if (!ip_set_timeout_test(members[id]))
+                       continue;
+               nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+               if (!nested) {
+                       if (id == first) {
+                               nla_nest_cancel(skb, adt);
+                               return -EMSGSIZE;
+                       } else
+                               goto nla_put_failure;
+               }
+               NLA_PUT_NET16(skb, IPSET_ATTR_PORT,
+                             htons(map->first_port + id));
+               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                             htonl(ip_set_timeout_get(members[id])));
+               ipset_nest_end(skb, nested);
+       }
+       ipset_nest_end(skb, adt);
+
+       /* Set listing finished */
+       cb->args[2] = 0;
+
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, nested);
+       ipset_nest_end(skb, adt);
+       if (unlikely(id == first)) {
+               cb->args[2] = 0;
+               return -EMSGSIZE;
+       }
+       return 0;
+}
+
+static int
+bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb,
+                enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       struct bitmap_port *map = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       __be16 __port;
+       u16 port = 0;
+
+       if (!ip_set_get_ip_port(skb, pf, flags & IPSET_DIM_ONE_SRC, &__port))
+               return -EINVAL;
+
+       port = ntohs(__port);
+
+       if (port < map->first_port || port > map->last_port)
+               return -IPSET_ERR_BITMAP_RANGE;
+
+       port -= map->first_port;
+
+       return adtfn(set, &port, map->timeout);
+}
+
+static int
+bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
+                enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       struct bitmap_port *map = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       u32 timeout = map->timeout;
+       u32 port;       /* wraparound */
+       u16 id, port_to;
+       int ret = 0;
+
+       if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
+       if (port < map->first_port || port > map->last_port)
+               return -IPSET_ERR_BITMAP_RANGE;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(map->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST) {
+               id = port - map->first_port;
+               return adtfn(set, &id, timeout);
+       }
+
+       if (tb[IPSET_ATTR_PORT_TO]) {
+               port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+               if (port > port_to) {
+                       swap(port, port_to);
+                       if (port < map->first_port)
+                               return -IPSET_ERR_BITMAP_RANGE;
+               }
+       } else
+               port_to = port;
+
+       if (port_to > map->last_port)
+               return -IPSET_ERR_BITMAP_RANGE;
+
+       for (; port <= port_to; port++) {
+               id = port - map->first_port;
+               ret = adtfn(set, &id, timeout);
+
+               if (ret && !ip_set_eexist(ret, flags))
+                       return ret;
+               else
+                       ret = 0;
+       }
+       return ret;
+}
+
+static void
+bitmap_port_destroy(struct ip_set *set)
+{
+       struct bitmap_port *map = set->data;
+
+       if (with_timeout(map->timeout))
+               del_timer_sync(&map->gc);
+
+       ip_set_free(map->members);
+       kfree(map);
+
+       set->data = NULL;
+}
+
+static void
+bitmap_port_flush(struct ip_set *set)
+{
+       struct bitmap_port *map = set->data;
+
+       memset(map->members, 0, map->memsize);
+}
+
+static int
+bitmap_port_head(struct ip_set *set, struct sk_buff *skb)
+{
+       const struct bitmap_port *map = set->data;
+       struct nlattr *nested;
+
+       nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+       if (!nested)
+               goto nla_put_failure;
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port));
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
+       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+                     htonl(atomic_read(&set->ref) - 1));
+       NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+                     htonl(sizeof(*map) + map->memsize));
+       if (with_timeout(map->timeout))
+               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+       ipset_nest_end(skb, nested);
+
+       return 0;
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static bool
+bitmap_port_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+       const struct bitmap_port *x = a->data;
+       const struct bitmap_port *y = b->data;
+
+       return x->first_port == y->first_port &&
+              x->last_port == y->last_port &&
+              x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant bitmap_port = {
+       .kadt   = bitmap_port_kadt,
+       .uadt   = bitmap_port_uadt,
+       .adt    = {
+               [IPSET_ADD] = bitmap_port_add,
+               [IPSET_DEL] = bitmap_port_del,
+               [IPSET_TEST] = bitmap_port_test,
+       },
+       .destroy = bitmap_port_destroy,
+       .flush  = bitmap_port_flush,
+       .head   = bitmap_port_head,
+       .list   = bitmap_port_list,
+       .same_set = bitmap_port_same_set,
+};
+
+static const struct ip_set_type_variant bitmap_tport = {
+       .kadt   = bitmap_port_kadt,
+       .uadt   = bitmap_port_uadt,
+       .adt    = {
+               [IPSET_ADD] = bitmap_port_tadd,
+               [IPSET_DEL] = bitmap_port_tdel,
+               [IPSET_TEST] = bitmap_port_ttest,
+       },
+       .destroy = bitmap_port_destroy,
+       .flush  = bitmap_port_flush,
+       .head   = bitmap_port_head,
+       .list   = bitmap_port_tlist,
+       .same_set = bitmap_port_same_set,
+};
+
+static void
+bitmap_port_gc(unsigned long ul_set)
+{
+       struct ip_set *set = (struct ip_set *) ul_set;
+       struct bitmap_port *map = set->data;
+       unsigned long *table = map->members;
+       u32 id; /* wraparound */
+       u16 last = map->last_port - map->first_port;
+
+       /* We run in parallel with other readers (element tests),
+        * but adding/deleting entries is locked out */
+       read_lock_bh(&set->lock);
+       for (id = 0; id <= last; id++)
+               if (ip_set_timeout_expired(table[id]))
+                       table[id] = IPSET_ELEM_UNSET;
+       read_unlock_bh(&set->lock);
+
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       add_timer(&map->gc);
+}
+
+static void
+bitmap_port_gc_init(struct ip_set *set)
+{
+       struct bitmap_port *map = set->data;
+
+       init_timer(&map->gc);
+       map->gc.data = (unsigned long) set;
+       map->gc.function = bitmap_port_gc;
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       add_timer(&map->gc);
+}
+
+/* Create bitmap:port type of sets */
+
+static bool
+init_map_port(struct ip_set *set, struct bitmap_port *map,
+             u16 first_port, u16 last_port)
+{
+       map->members = ip_set_alloc(map->memsize);
+       if (!map->members)
+               return false;
+       map->first_port = first_port;
+       map->last_port = last_port;
+       map->timeout = IPSET_NO_TIMEOUT;
+
+       set->data = map;
+       set->family = AF_UNSPEC;
+
+       return true;
+}
+
+static int
+bitmap_port_create(struct ip_set *set, struct nlattr *tb[],
+                u32 flags)
+{
+       struct bitmap_port *map;
+       u16 first_port, last_port;
+
+       if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       first_port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
+       last_port = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+       if (first_port > last_port) {
+               u16 tmp = first_port;
+
+               first_port = last_port;
+               last_port = tmp;
+       }
+
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
+       if (!map)
+               return -ENOMEM;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               map->memsize = (last_port - first_port + 1)
+                              * sizeof(unsigned long);
+
+               if (!init_map_port(set, map, first_port, last_port)) {
+                       kfree(map);
+                       return -ENOMEM;
+               }
+
+               map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+               set->variant = &bitmap_tport;
+
+               bitmap_port_gc_init(set);
+       } else {
+               map->memsize = bitmap_bytes(0, last_port - first_port);
+               pr_debug("memsize: %zu\n", map->memsize);
+               if (!init_map_port(set, map, first_port, last_port)) {
+                       kfree(map);
+                       return -ENOMEM;
+               }
+
+               set->variant = &bitmap_port;
+       }
+       return 0;
+}
+
+static struct ip_set_type bitmap_port_type = {
+       .name           = "bitmap:port",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_PORT,
+       .dimension      = IPSET_DIM_ONE,
+       .family         = AF_UNSPEC,
+       .revision       = 0,
+       .create         = bitmap_port_create,
+       .create_policy  = {
+               [IPSET_ATTR_PORT]       = { .type = NLA_U16 },
+               [IPSET_ATTR_PORT_TO]    = { .type = NLA_U16 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_PORT]       = { .type = NLA_U16 },
+               [IPSET_ATTR_PORT_TO]    = { .type = NLA_U16 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+bitmap_port_init(void)
+{
+       return ip_set_type_register(&bitmap_port_type);
+}
+
+static void __exit
+bitmap_port_fini(void)
+{
+       ip_set_type_unregister(&bitmap_port_type);
+}
+
+module_init(bitmap_port_init);
+module_exit(bitmap_port_fini);
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
new file mode 100644 (file)
index 0000000..8b1a54c
--- /dev/null
@@ -0,0 +1,1671 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ *                         Patrick Schaaf <bof@bof.de>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module for IP set management */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/netlink.h>
+#include <linux/rculist.h>
+#include <linux/version.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/ipset/ip_set.h>
+
+static LIST_HEAD(ip_set_type_list);            /* all registered set types */
+static DEFINE_MUTEX(ip_set_type_mutex);                /* protects ip_set_type_list */
+
+static struct ip_set **ip_set_list;            /* all individual sets */
+static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */
+
+#define STREQ(a, b)    (strncmp(a, b, IPSET_MAXNAMELEN) == 0)
+
+static unsigned int max_sets;
+
+module_param(max_sets, int, 0600);
+MODULE_PARM_DESC(max_sets, "maximal number of sets");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("core IP set support");
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
+
+/*
+ * The set types are implemented in modules and registered set types
+ * can be found in ip_set_type_list. Adding/deleting types is
+ * serialized by ip_set_type_mutex.
+ */
+
+static inline void
+ip_set_type_lock(void)
+{
+       mutex_lock(&ip_set_type_mutex);
+}
+
+static inline void
+ip_set_type_unlock(void)
+{
+       mutex_unlock(&ip_set_type_mutex);
+}
+
+/* Register and deregister settype */
+
+static struct ip_set_type *
+find_set_type(const char *name, u8 family, u8 revision)
+{
+       struct ip_set_type *type;
+
+       list_for_each_entry_rcu(type, &ip_set_type_list, list)
+               if (STREQ(type->name, name) &&
+                   (type->family == family || type->family == AF_UNSPEC) &&
+                   type->revision == revision)
+                       return type;
+       return NULL;
+}
+
+/* Unlock, try to load a set type module and lock again */
+static int
+try_to_load_type(const char *name)
+{
+       nfnl_unlock();
+       pr_debug("try to load ip_set_%s\n", name);
+       if (request_module("ip_set_%s", name) < 0) {
+               pr_warning("Can't find ip_set type %s\n", name);
+               nfnl_lock();
+               return -IPSET_ERR_FIND_TYPE;
+       }
+       nfnl_lock();
+       return -EAGAIN;
+}
+
+/* Find a set type and reference it */
+static int
+find_set_type_get(const char *name, u8 family, u8 revision,
+                 struct ip_set_type **found)
+{
+       rcu_read_lock();
+       *found = find_set_type(name, family, revision);
+       if (*found) {
+               int err = !try_module_get((*found)->me);
+               rcu_read_unlock();
+               return err ? -EFAULT : 0;
+       }
+       rcu_read_unlock();
+
+       return try_to_load_type(name);
+}
+
+/* Find a given set type by name and family.
+ * If found, the supported minimum and maximum revisions are
+ * filled in.
+ */
+static int
+find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max)
+{
+       struct ip_set_type *type;
+       bool found = false;
+
+       *min = 255; *max = 0;
+       rcu_read_lock();
+       list_for_each_entry_rcu(type, &ip_set_type_list, list)
+               if (STREQ(type->name, name) &&
+                   (type->family == family || type->family == AF_UNSPEC)) {
+                       found = true;
+                       if (type->revision < *min)
+                               *min = type->revision;
+                       if (type->revision > *max)
+                               *max = type->revision;
+               }
+       rcu_read_unlock();
+       if (found)
+               return 0;
+
+       return try_to_load_type(name);
+}
+
+#define family_name(f) ((f) == AF_INET ? "inet" : \
+                        (f) == AF_INET6 ? "inet6" : "any")
+
+/* Register a set type structure. The type is identified by
+ * the unique triple of name, family and revision.
+ */
+int
+ip_set_type_register(struct ip_set_type *type)
+{
+       int ret = 0;
+
+       if (type->protocol != IPSET_PROTOCOL) {
+               pr_warning("ip_set type %s, family %s, revision %u uses "
+                          "wrong protocol version %u (want %u)\n",
+                          type->name, family_name(type->family),
+                          type->revision, type->protocol, IPSET_PROTOCOL);
+               return -EINVAL;
+       }
+
+       ip_set_type_lock();
+       if (find_set_type(type->name, type->family, type->revision)) {
+               /* Duplicate! */
+               pr_warning("ip_set type %s, family %s, revision %u "
+                          "already registered!\n", type->name,
+                          family_name(type->family), type->revision);
+               ret = -EINVAL;
+               goto unlock;
+       }
+       list_add_rcu(&type->list, &ip_set_type_list);
+       pr_debug("type %s, family %s, revision %u registered.\n",
+                type->name, family_name(type->family), type->revision);
+unlock:
+       ip_set_type_unlock();
+       return ret;
+}
+EXPORT_SYMBOL_GPL(ip_set_type_register);
+
+/* Unregister a set type. There's a small race with ip_set_create */
+void
+ip_set_type_unregister(struct ip_set_type *type)
+{
+       ip_set_type_lock();
+       if (!find_set_type(type->name, type->family, type->revision)) {
+               pr_warning("ip_set type %s, family %s, revision %u "
+                          "not registered\n", type->name,
+                          family_name(type->family), type->revision);
+               goto unlock;
+       }
+       list_del_rcu(&type->list);
+       pr_debug("type %s, family %s, revision %u unregistered.\n",
+                type->name, family_name(type->family), type->revision);
+unlock:
+       ip_set_type_unlock();
+
+       synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(ip_set_type_unregister);
+
+/* Utility functions */
+void *
+ip_set_alloc(size_t size)
+{
+       void *members = NULL;
+
+       if (size < KMALLOC_MAX_SIZE)
+               members = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+
+       if (members) {
+               pr_debug("%p: allocated with kmalloc\n", members);
+               return members;
+       }
+
+       members = vzalloc(size);
+       if (!members)
+               return NULL;
+       pr_debug("%p: allocated with vmalloc\n", members);
+
+       return members;
+}
+EXPORT_SYMBOL_GPL(ip_set_alloc);
+
+void
+ip_set_free(void *members)
+{
+       pr_debug("%p: free with %s\n", members,
+                is_vmalloc_addr(members) ? "vfree" : "kfree");
+       if (is_vmalloc_addr(members))
+               vfree(members);
+       else
+               kfree(members);
+}
+EXPORT_SYMBOL_GPL(ip_set_free);
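+
+/* ip_set_alloc() tries kzalloc() first for requests below
+ * KMALLOC_MAX_SIZE and transparently falls back to vzalloc();
+ * ip_set_free() inspects the address with is_vmalloc_addr() to pick
+ * the matching release path.
+ */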
+
+static inline bool
+flag_nested(const struct nlattr *nla)
+{
+       return nla->nla_type & NLA_F_NESTED;
+}
+
+static const struct nla_policy ipaddr_policy[IPSET_ATTR_IPADDR_MAX + 1] = {
+       [IPSET_ATTR_IPADDR_IPV4]        = { .type = NLA_U32 },
+       [IPSET_ATTR_IPADDR_IPV6]        = { .type = NLA_BINARY,
+                                           .len = sizeof(struct in6_addr) },
+};
+
+int
+ip_set_get_ipaddr4(struct nlattr *nla,  __be32 *ipaddr)
+{
+       struct nlattr *tb[IPSET_ATTR_IPADDR_MAX+1];
+
+       if (unlikely(!flag_nested(nla)))
+               return -IPSET_ERR_PROTOCOL;
+       if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy))
+               return -IPSET_ERR_PROTOCOL;
+       if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4)))
+               return -IPSET_ERR_PROTOCOL;
+
+       *ipaddr = nla_get_be32(tb[IPSET_ATTR_IPADDR_IPV4]);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ipaddr4);
+
+int
+ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
+{
+       struct nlattr *tb[IPSET_ATTR_IPADDR_MAX+1];
+
+       if (unlikely(!flag_nested(nla)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy))
+               return -IPSET_ERR_PROTOCOL;
+       if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6)))
+               return -IPSET_ERR_PROTOCOL;
+
+       memcpy(ipaddr, nla_data(tb[IPSET_ATTR_IPADDR_IPV6]),
+               sizeof(struct in6_addr));
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
+
+/*
+ * Creating/destroying/renaming/swapping affect the existence and
+ * the properties of a set. All of these can be executed from userspace
+ * only and are serialized by the nfnl mutex, indirectly via nfnetlink.
+ *
+ * Sets are identified by their index in ip_set_list and the index
+ * is used by the external references (set/SET netfilter modules).
+ *
+ * The set behind an index may change only by swapping, done from userspace.
+ */
+
+static inline void
+__ip_set_get(ip_set_id_t index)
+{
+       atomic_inc(&ip_set_list[index]->ref);
+}
+
+static inline void
+__ip_set_put(ip_set_id_t index)
+{
+       atomic_dec(&ip_set_list[index]->ref);
+}
+
+/*
+ * Add, del and test set entries from the kernel.
+ *
+ * The set behind the index must exist and must be referenced
+ * so it can't be destroyed (or changed) under our feet.
+ */
+
+int
+ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
+           u8 family, u8 dim, u8 flags)
+{
+       struct ip_set *set = ip_set_list[index];
+       int ret = 0;
+
+       BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+       pr_debug("set %s, index %u\n", set->name, index);
+
+       if (dim < set->type->dimension ||
+           !(family == set->family || set->family == AF_UNSPEC))
+               return 0;
+
+       read_lock_bh(&set->lock);
+       ret = set->variant->kadt(set, skb, IPSET_TEST, family, dim, flags);
+       read_unlock_bh(&set->lock);
+
+       if (ret == -EAGAIN) {
+               /* Type requests element to be completed */
+               pr_debug("element must be completed, ADD is triggered\n");
+               write_lock_bh(&set->lock);
+               set->variant->kadt(set, skb, IPSET_ADD, family, dim, flags);
+               write_unlock_bh(&set->lock);
+               ret = 1;
+       }
+
+       /* Convert error codes to nomatch */
+       return (ret < 0 ? 0 : ret);
+}
+EXPORT_SYMBOL_GPL(ip_set_test);
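+
+/* A minimal usage sketch for a kernel-side caller (the set name below
+ * is only illustrative); the index is typically resolved once at
+ * configuration time, not per packet, since ip_set_nfnl_get() takes
+ * the nfnl mutex:
+ *
+ *     ip_set_id_t idx = ip_set_nfnl_get("example-set");
+ *
+ *     if (idx != IPSET_INVALID_ID) {
+ *             int match = ip_set_test(idx, skb, AF_INET,
+ *                                     IPSET_DIM_ONE, IPSET_DIM_ONE_SRC);
+ *             ...
+ *             ip_set_nfnl_put(idx);
+ *     }
+ */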
+
+int
+ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
+          u8 family, u8 dim, u8 flags)
+{
+       struct ip_set *set = ip_set_list[index];
+       int ret;
+
+       BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+       pr_debug("set %s, index %u\n", set->name, index);
+
+       if (dim < set->type->dimension ||
+           !(family == set->family || set->family == AF_UNSPEC))
+               return 0;
+
+       write_lock_bh(&set->lock);
+       ret = set->variant->kadt(set, skb, IPSET_ADD, family, dim, flags);
+       write_unlock_bh(&set->lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(ip_set_add);
+
+int
+ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
+          u8 family, u8 dim, u8 flags)
+{
+       struct ip_set *set = ip_set_list[index];
+       int ret = 0;
+
+       BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+       pr_debug("set %s, index %u\n", set->name, index);
+
+       if (dim < set->type->dimension ||
+           !(family == set->family || set->family == AF_UNSPEC))
+               return 0;
+
+       write_lock_bh(&set->lock);
+       ret = set->variant->kadt(set, skb, IPSET_DEL, family, dim, flags);
+       write_unlock_bh(&set->lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(ip_set_del);
+
+/*
+ * Find a set by name and reference it once. The reference makes sure
+ * the thing pointed to does not go away under our feet.
+ *
+ * The nfnl mutex must already be held.
+ */
+ip_set_id_t
+ip_set_get_byname(const char *name, struct ip_set **set)
+{
+       ip_set_id_t i, index = IPSET_INVALID_ID;
+       struct ip_set *s;
+
+       for (i = 0; i < ip_set_max; i++) {
+               s = ip_set_list[i];
+               if (s != NULL && STREQ(s->name, name)) {
+                       __ip_set_get(i);
+                       index = i;
+                       *set = s;
+               }
+       }
+
+       return index;
+}
+EXPORT_SYMBOL_GPL(ip_set_get_byname);
+
+/*
+ * If the given index points to a valid set, decrement its
+ * reference count by 1. The caller shall not assume the index
+ * to be valid after calling this function.
+ *
+ * The nfnl mutex must already be held.
+ */
+void
+ip_set_put_byindex(ip_set_id_t index)
+{
+       if (ip_set_list[index] != NULL) {
+               BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
+               __ip_set_put(index);
+       }
+}
+EXPORT_SYMBOL_GPL(ip_set_put_byindex);
+
+/*
+ * Get the name of a set behind a set index.
+ * We assume the set is referenced, so it exists and
+ * can't be destroyed; nor can it be renamed while it is
+ * referenced.
+ *
+ * The nfnl mutex must already be held.
+ */
+const char *
+ip_set_name_byindex(ip_set_id_t index)
+{
+       const struct ip_set *set = ip_set_list[index];
+
+       BUG_ON(set == NULL);
+       BUG_ON(atomic_read(&set->ref) == 0);
+
+       /* Referenced, so it's safe */
+       return set->name;
+}
+EXPORT_SYMBOL_GPL(ip_set_name_byindex);
+
+/*
+ * Routines to be called by external subsystems, which do not
+ * call nfnl_lock for us.
+ */
+
+/*
+ * Find a set by name and reference it once. The reference makes sure
+ * the thing pointed to does not go away under our feet.
+ *
+ * The nfnl mutex is taken inside this function.
+ */
+ip_set_id_t
+ip_set_nfnl_get(const char *name)
+{
+       struct ip_set *s;
+       ip_set_id_t index;
+
+       nfnl_lock();
+       index = ip_set_get_byname(name, &s);
+       nfnl_unlock();
+
+       return index;
+}
+EXPORT_SYMBOL_GPL(ip_set_nfnl_get);
+
+/*
+ * Find a set by index and reference it once. The reference makes sure
+ * the thing pointed to does not go away under our feet.
+ *
+ * The nfnl mutex is taken inside this function.
+ */
+ip_set_id_t
+ip_set_nfnl_get_byindex(ip_set_id_t index)
+{
+       if (index >= ip_set_max)
+               return IPSET_INVALID_ID;
+
+       nfnl_lock();
+       if (ip_set_list[index])
+               __ip_set_get(index);
+       else
+               index = IPSET_INVALID_ID;
+       nfnl_unlock();
+
+       return index;
+}
+EXPORT_SYMBOL_GPL(ip_set_nfnl_get_byindex);
+
+/*
+ * If the given index points to a valid set, decrement its
+ * reference count by 1. The caller shall not assume the index
+ * to be valid after calling this function.
+ *
+ * The nfnl mutex is taken inside this function.
+ */
+void
+ip_set_nfnl_put(ip_set_id_t index)
+{
+       nfnl_lock();
+       if (ip_set_list[index] != NULL) {
+               BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
+               __ip_set_put(index);
+       }
+       nfnl_unlock();
+}
+EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
+
+/*
+ * Communication protocol with userspace over netlink.
+ *
+ * We are already serialized by the nfnl mutex (nfnl_lock).
+ */
+
+static inline bool
+protocol_failed(const struct nlattr * const tb[])
+{
+       return !tb[IPSET_ATTR_PROTOCOL] ||
+              nla_get_u8(tb[IPSET_ATTR_PROTOCOL]) != IPSET_PROTOCOL;
+}
+
+static inline u32
+flag_exist(const struct nlmsghdr *nlh)
+{
+       return nlh->nlmsg_flags & NLM_F_EXCL ? 0 : IPSET_FLAG_EXIST;
+}
+
+static struct nlmsghdr *
+start_msg(struct sk_buff *skb, u32 pid, u32 seq, unsigned int flags,
+         enum ipset_cmd cmd)
+{
+       struct nlmsghdr *nlh;
+       struct nfgenmsg *nfmsg;
+
+       nlh = nlmsg_put(skb, pid, seq, cmd | (NFNL_SUBSYS_IPSET << 8),
+                       sizeof(*nfmsg), flags);
+       if (nlh == NULL)
+               return NULL;
+
+       nfmsg = nlmsg_data(nlh);
+       nfmsg->nfgen_family = AF_INET;
+       nfmsg->version = NFNETLINK_V0;
+       nfmsg->res_id = 0;
+
+       return nlh;
+}
+
+/* Create a set */
+
+static const struct nla_policy ip_set_create_policy[IPSET_ATTR_CMD_MAX + 1] = {
+       [IPSET_ATTR_PROTOCOL]   = { .type = NLA_U8 },
+       [IPSET_ATTR_SETNAME]    = { .type = NLA_NUL_STRING,
+                                   .len = IPSET_MAXNAMELEN - 1 },
+       [IPSET_ATTR_TYPENAME]   = { .type = NLA_NUL_STRING,
+                                   .len = IPSET_MAXNAMELEN - 1},
+       [IPSET_ATTR_REVISION]   = { .type = NLA_U8 },
+       [IPSET_ATTR_FAMILY]     = { .type = NLA_U8 },
+       [IPSET_ATTR_DATA]       = { .type = NLA_NESTED },
+};
+
+static ip_set_id_t
+find_set_id(const char *name)
+{
+       ip_set_id_t i, index = IPSET_INVALID_ID;
+       const struct ip_set *set;
+
+       for (i = 0; index == IPSET_INVALID_ID && i < ip_set_max; i++) {
+               set = ip_set_list[i];
+               if (set != NULL && STREQ(set->name, name))
+                       index = i;
+       }
+       return index;
+}
+
+static inline struct ip_set *
+find_set(const char *name)
+{
+       ip_set_id_t index = find_set_id(name);
+
+       return index == IPSET_INVALID_ID ? NULL : ip_set_list[index];
+}
+
+static int
+find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set)
+{
+       ip_set_id_t i;
+
+       *index = IPSET_INVALID_ID;
+       for (i = 0;  i < ip_set_max; i++) {
+               if (ip_set_list[i] == NULL) {
+                       if (*index == IPSET_INVALID_ID)
+                               *index = i;
+               } else if (STREQ(name, ip_set_list[i]->name)) {
+                       /* Name clash */
+                       *set = ip_set_list[i];
+                       return -EEXIST;
+               }
+       }
+       if (*index == IPSET_INVALID_ID)
+               /* No free slot remained */
+               return -IPSET_ERR_MAX_SETS;
+       return 0;
+}
+
+static int
+ip_set_create(struct sock *ctnl, struct sk_buff *skb,
+             const struct nlmsghdr *nlh,
+             const struct nlattr * const attr[])
+{
+       struct ip_set *set, *clash;
+       ip_set_id_t index = IPSET_INVALID_ID;
+       struct nlattr *tb[IPSET_ATTR_CREATE_MAX+1] = {};
+       const char *name, *typename;
+       u8 family, revision;
+       u32 flags = flag_exist(nlh);
+       int ret = 0;
+
+       if (unlikely(protocol_failed(attr) ||
+                    attr[IPSET_ATTR_SETNAME] == NULL ||
+                    attr[IPSET_ATTR_TYPENAME] == NULL ||
+                    attr[IPSET_ATTR_REVISION] == NULL ||
+                    attr[IPSET_ATTR_FAMILY] == NULL ||
+                    (attr[IPSET_ATTR_DATA] != NULL &&
+                     !flag_nested(attr[IPSET_ATTR_DATA]))))
+               return -IPSET_ERR_PROTOCOL;
+
+       name = nla_data(attr[IPSET_ATTR_SETNAME]);
+       typename = nla_data(attr[IPSET_ATTR_TYPENAME]);
+       family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
+       revision = nla_get_u8(attr[IPSET_ATTR_REVISION]);
+       pr_debug("setname: %s, typename: %s, family: %s, revision: %u\n",
+                name, typename, family_name(family), revision);
+
+       /*
+        * First, and without any locks, allocate and initialize
+        * a normal base set structure.
+        */
+       set = kzalloc(sizeof(struct ip_set), GFP_KERNEL);
+       if (!set)
+               return -ENOMEM;
+       rwlock_init(&set->lock);
+       strlcpy(set->name, name, IPSET_MAXNAMELEN);
+       atomic_set(&set->ref, 0);
+       set->family = family;
+
+       /*
+        * Next, check that we know the type, and take
+        * a reference on the type, to make sure it stays available
+        * while constructing our new set.
+        *
+        * After referencing the type, we try to create the type
+        * specific part of the set without holding any locks.
+        */
+       ret = find_set_type_get(typename, family, revision, &(set->type));
+       if (ret)
+               goto out;
+
+       /*
+        * Without holding any locks, create the type-specific private part.
+        */
+       if (attr[IPSET_ATTR_DATA] &&
+           nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA],
+                            set->type->create_policy)) {
+               ret = -IPSET_ERR_PROTOCOL;
+               goto put_out;
+       }
+
+       ret = set->type->create(set, tb, flags);
+       if (ret != 0)
+               goto put_out;
+
+       /* BTW, ret==0 here. */
+
+       /*
+        * Here, we have a valid, constructed set and we are protected
+        * by nfnl_lock. Find the first free index in ip_set_list and
+        * check for a name clash.
+        */
+       if ((ret = find_free_id(set->name, &index, &clash)) != 0) {
+               /* If this is the same set and requested, ignore error */
+               if (ret == -EEXIST &&
+                   (flags & IPSET_FLAG_EXIST) &&
+                   STREQ(set->type->name, clash->type->name) &&
+                   set->type->family == clash->type->family &&
+                   set->type->revision == clash->type->revision &&
+                   set->variant->same_set(set, clash))
+                       ret = 0;
+               goto cleanup;
+       }
+
+       /*
+        * Finally! Add our shiny new set to the list, and be done.
+        */
+       pr_debug("create: '%s' created with index %u!\n", set->name, index);
+       ip_set_list[index] = set;
+
+       return ret;
+
+cleanup:
+       set->variant->destroy(set);
+put_out:
+       module_put(set->type->me);
+out:
+       kfree(set);
+       return ret;
+}
+
+/* Destroy sets */
+
+static const struct nla_policy
+ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
+       [IPSET_ATTR_PROTOCOL]   = { .type = NLA_U8 },
+       [IPSET_ATTR_SETNAME]    = { .type = NLA_NUL_STRING,
+                                   .len = IPSET_MAXNAMELEN - 1 },
+};
+
+static void
+ip_set_destroy_set(ip_set_id_t index)
+{
+       struct ip_set *set = ip_set_list[index];
+
+       pr_debug("set: %s\n",  set->name);
+       ip_set_list[index] = NULL;
+
+       /* Must call it without holding any lock */
+       set->variant->destroy(set);
+       module_put(set->type->me);
+       kfree(set);
+}
+
+static int
+ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
+              const struct nlmsghdr *nlh,
+              const struct nlattr * const attr[])
+{
+       ip_set_id_t i;
+
+       if (unlikely(protocol_failed(attr)))
+               return -IPSET_ERR_PROTOCOL;
+
+       /* References are protected by the nfnl mutex */
+       if (!attr[IPSET_ATTR_SETNAME]) {
+               for (i = 0; i < ip_set_max; i++) {
+                       if (ip_set_list[i] != NULL &&
+                           (atomic_read(&ip_set_list[i]->ref)))
+                               return -IPSET_ERR_BUSY;
+               }
+               for (i = 0; i < ip_set_max; i++) {
+                       if (ip_set_list[i] != NULL)
+                               ip_set_destroy_set(i);
+               }
+       } else {
+               i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+               if (i == IPSET_INVALID_ID)
+                       return -ENOENT;
+               else if (atomic_read(&ip_set_list[i]->ref))
+                       return -IPSET_ERR_BUSY;
+
+               ip_set_destroy_set(i);
+       }
+       return 0;
+}
+
+/* Flush sets */
+
+static void
+ip_set_flush_set(struct ip_set *set)
+{
+       pr_debug("set: %s\n",  set->name);
+
+       write_lock_bh(&set->lock);
+       set->variant->flush(set);
+       write_unlock_bh(&set->lock);
+}
+
+static int
+ip_set_flush(struct sock *ctnl, struct sk_buff *skb,
+            const struct nlmsghdr *nlh,
+            const struct nlattr * const attr[])
+{
+       ip_set_id_t i;
+
+       if (unlikely(protocol_failed(attr)))
+               return -EPROTO;
+
+       if (!attr[IPSET_ATTR_SETNAME]) {
+               for (i = 0; i < ip_set_max; i++)
+                       if (ip_set_list[i] != NULL)
+                               ip_set_flush_set(ip_set_list[i]);
+       } else {
+               i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+               if (i == IPSET_INVALID_ID)
+                       return -ENOENT;
+
+               ip_set_flush_set(ip_set_list[i]);
+       }
+
+       return 0;
+}
+
+/* Rename a set */
+
+static const struct nla_policy
+ip_set_setname2_policy[IPSET_ATTR_CMD_MAX + 1] = {
+       [IPSET_ATTR_PROTOCOL]   = { .type = NLA_U8 },
+       [IPSET_ATTR_SETNAME]    = { .type = NLA_NUL_STRING,
+                                   .len = IPSET_MAXNAMELEN - 1 },
+       [IPSET_ATTR_SETNAME2]   = { .type = NLA_NUL_STRING,
+                                   .len = IPSET_MAXNAMELEN - 1 },
+};
+
+static int
+ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
+             const struct nlmsghdr *nlh,
+             const struct nlattr * const attr[])
+{
+       struct ip_set *set;
+       const char *name2;
+       ip_set_id_t i;
+
+       if (unlikely(protocol_failed(attr) ||
+                    attr[IPSET_ATTR_SETNAME] == NULL ||
+                    attr[IPSET_ATTR_SETNAME2] == NULL))
+               return -IPSET_ERR_PROTOCOL;
+
+       set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+       if (set == NULL)
+               return -ENOENT;
+       if (atomic_read(&set->ref) != 0)
+               return -IPSET_ERR_REFERENCED;
+
+       name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
+       for (i = 0; i < ip_set_max; i++) {
+               if (ip_set_list[i] != NULL &&
+                   STREQ(ip_set_list[i]->name, name2))
+                       return -IPSET_ERR_EXIST_SETNAME2;
+       }
+       strncpy(set->name, name2, IPSET_MAXNAMELEN);
+
+       return 0;
+}
+
+/* Swap two sets so that name/index points to the other.
+ * References and set names are also swapped.
+ *
+ * We are protected by the nfnl mutex and references are
+ * manipulated only by holding the mutex. The kernel interfaces
+ * do not hold the mutex but the pointer settings are atomic
+ * so the ip_set_list always contains valid pointers to the sets.
+ */
+
+static int
+ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
+           const struct nlmsghdr *nlh,
+           const struct nlattr * const attr[])
+{
+       struct ip_set *from, *to;
+       ip_set_id_t from_id, to_id;
+       char from_name[IPSET_MAXNAMELEN];
+       u32 from_ref;
+
+       if (unlikely(protocol_failed(attr) ||
+                    attr[IPSET_ATTR_SETNAME] == NULL ||
+                    attr[IPSET_ATTR_SETNAME2] == NULL))
+               return -IPSET_ERR_PROTOCOL;
+
+       from_id = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+       if (from_id == IPSET_INVALID_ID)
+               return -ENOENT;
+
+       to_id = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME2]));
+       if (to_id == IPSET_INVALID_ID)
+               return -IPSET_ERR_EXIST_SETNAME2;
+
+       from = ip_set_list[from_id];
+       to = ip_set_list[to_id];
+
+       /* Features must not change.
+        * Not an artificial restriction anymore, as we must prevent
+        * possible loops created by swapping in setlist type of sets. */
+       if (!(from->type->features == to->type->features &&
+             from->type->family == to->type->family))
+               return -IPSET_ERR_TYPE_MISMATCH;
+
+       /* No magic here: ref munging protected by the nfnl_lock */
+       strncpy(from_name, from->name, IPSET_MAXNAMELEN);
+       from_ref = atomic_read(&from->ref);
+
+       strncpy(from->name, to->name, IPSET_MAXNAMELEN);
+       atomic_set(&from->ref, atomic_read(&to->ref));
+       strncpy(to->name, from_name, IPSET_MAXNAMELEN);
+       atomic_set(&to->ref, from_ref);
+
+       ip_set_list[from_id] = to;
+       ip_set_list[to_id] = from;
+
+       return 0;
+}
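+
+/* Illustrative sketch: swapping lets a set be rebuilt off-line and then
+ * substituted in one step under the rules that reference it by name
+ * (both sets must share type features, as checked above), e.g.
+ *
+ *      ipset create foo-new hash:ip
+ *      ipset add foo-new 192.168.1.1
+ *      ipset swap foo foo-new
+ *      ipset destroy foo-new
+ *
+ * The set names "foo"/"foo-new" are example values only.
+ */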
+
+/* List/save set data */
+
+#define DUMP_INIT      0L
+#define DUMP_ALL       1L
+#define DUMP_ONE       2L
+#define DUMP_LAST      3L
+
+static int
+ip_set_dump_done(struct netlink_callback *cb)
+{
+       if (cb->args[2]) {
+               pr_debug("release set %s\n", ip_set_list[cb->args[1]]->name);
+               __ip_set_put((ip_set_id_t) cb->args[1]);
+       }
+       return 0;
+}
+
+static inline void
+dump_attrs(struct nlmsghdr *nlh)
+{
+       const struct nlattr *attr;
+       int rem;
+
+       pr_debug("dump nlmsg\n");
+       nlmsg_for_each_attr(attr, nlh, sizeof(struct nfgenmsg), rem) {
+               pr_debug("type: %u, len %u\n", nla_type(attr), attr->nla_len);
+       }
+}
+
+static int
+dump_init(struct netlink_callback *cb)
+{
+       struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
+       int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg));
+       struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
+       struct nlattr *attr = (void *)nlh + min_len;
+       ip_set_id_t index;
+
+       /* Second pass, so parser can't fail */
+       nla_parse(cda, IPSET_ATTR_CMD_MAX,
+                 attr, nlh->nlmsg_len - min_len, ip_set_setname_policy);
+
+       /* cb->args[0] : dump single set/all sets
+        *         [1] : set index
+        *         [..]: type specific
+        */
+
+       if (!cda[IPSET_ATTR_SETNAME]) {
+               cb->args[0] = DUMP_ALL;
+               return 0;
+       }
+
+       index = find_set_id(nla_data(cda[IPSET_ATTR_SETNAME]));
+       if (index == IPSET_INVALID_ID)
+               return -ENOENT;
+
+       cb->args[0] = DUMP_ONE;
+       cb->args[1] = index;
+       return 0;
+}
+
+static int
+ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       ip_set_id_t index = IPSET_INVALID_ID, max;
+       struct ip_set *set = NULL;
+       struct nlmsghdr *nlh = NULL;
+       unsigned int flags = NETLINK_CB(cb->skb).pid ? NLM_F_MULTI : 0;
+       int ret = 0;
+
+       if (cb->args[0] == DUMP_INIT) {
+               ret = dump_init(cb);
+               if (ret < 0) {
+                       nlh = nlmsg_hdr(cb->skb);
+                       /* We have to create and send the error message
+                        * manually :-( */
+                       if (nlh->nlmsg_flags & NLM_F_ACK)
+                               netlink_ack(cb->skb, nlh, ret);
+                       return ret;
+               }
+       }
+
+       if (cb->args[1] >= ip_set_max)
+               goto out;
+
+       pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
+       max = cb->args[0] == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
+       for (; cb->args[1] < max; cb->args[1]++) {
+               index = (ip_set_id_t) cb->args[1];
+               set = ip_set_list[index];
+               if (set == NULL) {
+                       if (cb->args[0] == DUMP_ONE) {
+                               ret = -ENOENT;
+                               goto out;
+                       }
+                       continue;
+               }
+               /* When dumping all sets, we must dump "sorted"
+                * so that lists (unions of sets) are dumped last.
+                */
+               if (cb->args[0] != DUMP_ONE &&
+                   !((cb->args[0] == DUMP_ALL) ^
+                     (set->type->features & IPSET_DUMP_LAST)))
+                       continue;
+               pr_debug("List set: %s\n", set->name);
+               if (!cb->args[2]) {
+                       /* Start listing: make sure set won't be destroyed */
+                       pr_debug("reference set\n");
+                       __ip_set_get(index);
+               }
+               nlh = start_msg(skb, NETLINK_CB(cb->skb).pid,
+                               cb->nlh->nlmsg_seq, flags,
+                               IPSET_CMD_LIST);
+               if (!nlh) {
+                       ret = -EMSGSIZE;
+                       goto release_refcount;
+               }
+               NLA_PUT_U8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+               NLA_PUT_STRING(skb, IPSET_ATTR_SETNAME, set->name);
+               switch (cb->args[2]) {
+               case 0:
+                       /* Core header data */
+                       NLA_PUT_STRING(skb, IPSET_ATTR_TYPENAME,
+                                      set->type->name);
+                       NLA_PUT_U8(skb, IPSET_ATTR_FAMILY,
+                                  set->family);
+                       NLA_PUT_U8(skb, IPSET_ATTR_REVISION,
+                                  set->type->revision);
+                       ret = set->variant->head(set, skb);
+                       if (ret < 0)
+                               goto release_refcount;
+                       /* Fall through and add elements */
+               default:
+                       read_lock_bh(&set->lock);
+                       ret = set->variant->list(set, skb, cb);
+                       read_unlock_bh(&set->lock);
+                       if (!cb->args[2]) {
+                               /* Set is done, proceed with next one */
+                               if (cb->args[0] == DUMP_ONE)
+                                       cb->args[1] = IPSET_INVALID_ID;
+                               else
+                                       cb->args[1]++;
+                       }
+                       goto release_refcount;
+               }
+       }
+       goto out;
+
+nla_put_failure:
+       ret = -EFAULT;
+release_refcount:
+       /* If there was an error or set is done, release set */
+       if (ret || !cb->args[2]) {
+               pr_debug("release set %s\n", ip_set_list[index]->name);
+               __ip_set_put(index);
+       }
+
+       /* If we dump all sets, continue with dumping last ones */
+       if (cb->args[0] == DUMP_ALL && cb->args[1] >= max && !cb->args[2])
+               cb->args[0] = DUMP_LAST;
+
+out:
+       if (nlh) {
+               nlmsg_end(skb, nlh);
+               pr_debug("nlmsg_len: %u\n", nlh->nlmsg_len);
+               dump_attrs(nlh);
+       }
+
+       return ret < 0 ? ret : skb->len;
+}
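+
+/* Sketch of the dump state machine driven by cb->args[] above:
+ *
+ *      DUMP_INIT -> DUMP_ONE   a set name was given: dump that set only
+ *      DUMP_INIT -> DUMP_ALL   no name: dump the ordinary sets first
+ *      DUMP_ALL  -> DUMP_LAST  then the IPSET_DUMP_LAST (list) types
+ *
+ * args[1] holds the index of the set being dumped, args[2] and up carry
+ * type specific continuation data.
+ */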
+
+static int
+ip_set_dump(struct sock *ctnl, struct sk_buff *skb,
+           const struct nlmsghdr *nlh,
+           const struct nlattr * const attr[])
+{
+       if (unlikely(protocol_failed(attr)))
+               return -IPSET_ERR_PROTOCOL;
+
+       return netlink_dump_start(ctnl, skb, nlh,
+                                 ip_set_dump_start,
+                                 ip_set_dump_done);
+}
+
+/* Add, del and test */
+
+static const struct nla_policy ip_set_adt_policy[IPSET_ATTR_CMD_MAX + 1] = {
+       [IPSET_ATTR_PROTOCOL]   = { .type = NLA_U8 },
+       [IPSET_ATTR_SETNAME]    = { .type = NLA_NUL_STRING,
+                                   .len = IPSET_MAXNAMELEN - 1 },
+       [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+       [IPSET_ATTR_DATA]       = { .type = NLA_NESTED },
+       [IPSET_ATTR_ADT]        = { .type = NLA_NESTED },
+};
+
+static int
+call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
+       struct nlattr *tb[], enum ipset_adt adt,
+       u32 flags, bool use_lineno)
+{
+       int ret, retried = 0;
+       u32 lineno = 0;
+       bool eexist = flags & IPSET_FLAG_EXIST;
+
+       do {
+               write_lock_bh(&set->lock);
+               ret = set->variant->uadt(set, tb, adt, &lineno, flags);
+               write_unlock_bh(&set->lock);
+       } while (ret == -EAGAIN &&
+                set->variant->resize &&
+                (ret = set->variant->resize(set, retried++)) == 0);
+
+       if (!ret || (ret == -IPSET_ERR_EXIST && eexist))
+               return 0;
+       if (lineno && use_lineno) {
+               /* Error in restore/batch mode: send back lineno */
+               struct nlmsghdr *rep, *nlh = nlmsg_hdr(skb);
+               struct sk_buff *skb2;
+               struct nlmsgerr *errmsg;
+               size_t payload = sizeof(*errmsg) + nlmsg_len(nlh);
+               int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg));
+               struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
+               struct nlattr *cmdattr;
+               u32 *errline;
+
+               skb2 = nlmsg_new(payload, GFP_KERNEL);
+               if (skb2 == NULL)
+                       return -ENOMEM;
+               rep = __nlmsg_put(skb2, NETLINK_CB(skb).pid,
+                                 nlh->nlmsg_seq, NLMSG_ERROR, payload, 0);
+               errmsg = nlmsg_data(rep);
+               errmsg->error = ret;
+               memcpy(&errmsg->msg, nlh, nlh->nlmsg_len);
+               cmdattr = (void *)&errmsg->msg + min_len;
+
+               nla_parse(cda, IPSET_ATTR_CMD_MAX,
+                         cmdattr, nlh->nlmsg_len - min_len,
+                         ip_set_adt_policy);
+
+               errline = nla_data(cda[IPSET_ATTR_LINENO]);
+
+               *errline = lineno;
+
+               netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+               /* Signal netlink not to send its ACK/errmsg.  */
+               return -EINTR;
+       }
+
+       return ret;
+}
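+
+/* Illustrative sketch: in batch/restore mode the userspace tool attaches
+ * IPSET_ATTR_LINENO to every element, so a failure during e.g.
+ *
+ *      ipset restore < backup.ipset
+ *
+ * can be reported precisely; the code above copies the offending line
+ * number into the echoed error message instead of the normal netlink ACK.
+ */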
+
+static int
+ip_set_uadd(struct sock *ctnl, struct sk_buff *skb,
+           const struct nlmsghdr *nlh,
+           const struct nlattr * const attr[])
+{
+       struct ip_set *set;
+       struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
+       const struct nlattr *nla;
+       u32 flags = flag_exist(nlh);
+       bool use_lineno;
+       int ret = 0;
+
+       if (unlikely(protocol_failed(attr) ||
+                    attr[IPSET_ATTR_SETNAME] == NULL ||
+                    !((attr[IPSET_ATTR_DATA] != NULL) ^
+                      (attr[IPSET_ATTR_ADT] != NULL)) ||
+                    (attr[IPSET_ATTR_DATA] != NULL &&
+                     !flag_nested(attr[IPSET_ATTR_DATA])) ||
+                    (attr[IPSET_ATTR_ADT] != NULL &&
+                     (!flag_nested(attr[IPSET_ATTR_ADT]) ||
+                      attr[IPSET_ATTR_LINENO] == NULL))))
+               return -IPSET_ERR_PROTOCOL;
+
+       set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+       if (set == NULL)
+               return -ENOENT;
+
+       use_lineno = !!attr[IPSET_ATTR_LINENO];
+       if (attr[IPSET_ATTR_DATA]) {
+               if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX,
+                                    attr[IPSET_ATTR_DATA],
+                                    set->type->adt_policy))
+                       return -IPSET_ERR_PROTOCOL;
+               ret = call_ad(ctnl, skb, set, tb, IPSET_ADD, flags,
+                             use_lineno);
+       } else {
+               int nla_rem;
+
+               nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
+                       memset(tb, 0, sizeof(tb));
+                       if (nla_type(nla) != IPSET_ATTR_DATA ||
+                           !flag_nested(nla) ||
+                           nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
+                                            set->type->adt_policy))
+                               return -IPSET_ERR_PROTOCOL;
+                       ret = call_ad(ctnl, skb, set, tb, IPSET_ADD,
+                                     flags, use_lineno);
+                       if (ret < 0)
+                               return ret;
+               }
+       }
+       return ret;
+}
+
+static int
+ip_set_udel(struct sock *ctnl, struct sk_buff *skb,
+           const struct nlmsghdr *nlh,
+           const struct nlattr * const attr[])
+{
+       struct ip_set *set;
+       struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
+       const struct nlattr *nla;
+       u32 flags = flag_exist(nlh);
+       bool use_lineno;
+       int ret = 0;
+
+       if (unlikely(protocol_failed(attr) ||
+                    attr[IPSET_ATTR_SETNAME] == NULL ||
+                    !((attr[IPSET_ATTR_DATA] != NULL) ^
+                      (attr[IPSET_ATTR_ADT] != NULL)) ||
+                    (attr[IPSET_ATTR_DATA] != NULL &&
+                     !flag_nested(attr[IPSET_ATTR_DATA])) ||
+                    (attr[IPSET_ATTR_ADT] != NULL &&
+                     (!flag_nested(attr[IPSET_ATTR_ADT]) ||
+                      attr[IPSET_ATTR_LINENO] == NULL))))
+               return -IPSET_ERR_PROTOCOL;
+
+       set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+       if (set == NULL)
+               return -ENOENT;
+
+       use_lineno = !!attr[IPSET_ATTR_LINENO];
+       if (attr[IPSET_ATTR_DATA]) {
+               if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX,
+                                    attr[IPSET_ATTR_DATA],
+                                    set->type->adt_policy))
+                       return -IPSET_ERR_PROTOCOL;
+               ret = call_ad(ctnl, skb, set, tb, IPSET_DEL, flags,
+                             use_lineno);
+       } else {
+               int nla_rem;
+
+               nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
+                       memset(tb, 0, sizeof(tb));
+                       if (nla_type(nla) != IPSET_ATTR_DATA ||
+                           !flag_nested(nla) ||
+                           nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
+                                            set->type->adt_policy))
+                               return -IPSET_ERR_PROTOCOL;
+                       ret = call_ad(ctnl, skb, set, tb, IPSET_DEL,
+                                     flags, use_lineno);
+                       if (ret < 0)
+                               return ret;
+               }
+       }
+       return ret;
+}
+
+static int
+ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
+            const struct nlmsghdr *nlh,
+            const struct nlattr * const attr[])
+{
+       struct ip_set *set;
+       struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
+       int ret = 0;
+
+       if (unlikely(protocol_failed(attr) ||
+                    attr[IPSET_ATTR_SETNAME] == NULL ||
+                    attr[IPSET_ATTR_DATA] == NULL ||
+                    !flag_nested(attr[IPSET_ATTR_DATA])))
+               return -IPSET_ERR_PROTOCOL;
+
+       set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+       if (set == NULL)
+               return -ENOENT;
+
+       if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA],
+                            set->type->adt_policy))
+               return -IPSET_ERR_PROTOCOL;
+
+       read_lock_bh(&set->lock);
+       ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0);
+       read_unlock_bh(&set->lock);
+       /* Userspace can't trigger element to be re-added */
+       if (ret == -EAGAIN)
+               ret = 1;
+
+       return ret < 0 ? ret : ret > 0 ? 0 : -IPSET_ERR_EXIST;
+}
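+
+/* Illustrative sketch of the TEST result mapping above:
+ *
+ *      ipset test foo 192.168.1.1
+ *
+ * succeeds (0) when the element is in the set and fails with
+ * IPSET_ERR_EXIST when it is not; set name and address are example data.
+ */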
+
+/* Get header data of a set */
+
+static int
+ip_set_header(struct sock *ctnl, struct sk_buff *skb,
+             const struct nlmsghdr *nlh,
+             const struct nlattr * const attr[])
+{
+       const struct ip_set *set;
+       struct sk_buff *skb2;
+       struct nlmsghdr *nlh2;
+       ip_set_id_t index;
+       int ret = 0;
+
+       if (unlikely(protocol_failed(attr) ||
+                    attr[IPSET_ATTR_SETNAME] == NULL))
+               return -IPSET_ERR_PROTOCOL;
+
+       index = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+       if (index == IPSET_INVALID_ID)
+               return -ENOENT;
+       set = ip_set_list[index];
+
+       skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (skb2 == NULL)
+               return -ENOMEM;
+
+       nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+                        IPSET_CMD_HEADER);
+       if (!nlh2)
+               goto nlmsg_failure;
+       NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+       NLA_PUT_STRING(skb2, IPSET_ATTR_SETNAME, set->name);
+       NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, set->type->name);
+       NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, set->family);
+       NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, set->type->revision);
+       nlmsg_end(skb2, nlh2);
+
+       ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+
+nla_put_failure:
+       nlmsg_cancel(skb2, nlh2);
+nlmsg_failure:
+       kfree_skb(skb2);
+       return -EMSGSIZE;
+}
+
+/* Get type data */
+
+static const struct nla_policy ip_set_type_policy[IPSET_ATTR_CMD_MAX + 1] = {
+       [IPSET_ATTR_PROTOCOL]   = { .type = NLA_U8 },
+       [IPSET_ATTR_TYPENAME]   = { .type = NLA_NUL_STRING,
+                                   .len = IPSET_MAXNAMELEN - 1 },
+       [IPSET_ATTR_FAMILY]     = { .type = NLA_U8 },
+};
+
+static int
+ip_set_type(struct sock *ctnl, struct sk_buff *skb,
+           const struct nlmsghdr *nlh,
+           const struct nlattr * const attr[])
+{
+       struct sk_buff *skb2;
+       struct nlmsghdr *nlh2;
+       u8 family, min, max;
+       const char *typename;
+       int ret = 0;
+
+       if (unlikely(protocol_failed(attr) ||
+                    attr[IPSET_ATTR_TYPENAME] == NULL ||
+                    attr[IPSET_ATTR_FAMILY] == NULL))
+               return -IPSET_ERR_PROTOCOL;
+
+       family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
+       typename = nla_data(attr[IPSET_ATTR_TYPENAME]);
+       ret = find_set_type_minmax(typename, family, &min, &max);
+       if (ret)
+               return ret;
+
+       skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (skb2 == NULL)
+               return -ENOMEM;
+
+       nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+                        IPSET_CMD_TYPE);
+       if (!nlh2)
+               goto nlmsg_failure;
+       NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+       NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, typename);
+       NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, family);
+       NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, max);
+       NLA_PUT_U8(skb2, IPSET_ATTR_REVISION_MIN, min);
+       nlmsg_end(skb2, nlh2);
+
+       pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len);
+       ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+
+nla_put_failure:
+       nlmsg_cancel(skb2, nlh2);
+nlmsg_failure:
+       kfree_skb(skb2);
+       return -EMSGSIZE;
+}
+
+/* Get protocol version */
+
+static const struct nla_policy
+ip_set_protocol_policy[IPSET_ATTR_CMD_MAX + 1] = {
+       [IPSET_ATTR_PROTOCOL]   = { .type = NLA_U8 },
+};
+
+static int
+ip_set_protocol(struct sock *ctnl, struct sk_buff *skb,
+               const struct nlmsghdr *nlh,
+               const struct nlattr * const attr[])
+{
+       struct sk_buff *skb2;
+       struct nlmsghdr *nlh2;
+       int ret = 0;
+
+       if (unlikely(attr[IPSET_ATTR_PROTOCOL] == NULL))
+               return -IPSET_ERR_PROTOCOL;
+
+       skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (skb2 == NULL)
+               return -ENOMEM;
+
+       nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+                        IPSET_CMD_PROTOCOL);
+       if (!nlh2)
+               goto nlmsg_failure;
+       NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+       nlmsg_end(skb2, nlh2);
+
+       ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+
+nla_put_failure:
+       nlmsg_cancel(skb2, nlh2);
+nlmsg_failure:
+       kfree_skb(skb2);
+       return -EMSGSIZE;
+}
+
+static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
+       [IPSET_CMD_CREATE]      = {
+               .call           = ip_set_create,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_create_policy,
+       },
+       [IPSET_CMD_DESTROY]     = {
+               .call           = ip_set_destroy,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_setname_policy,
+       },
+       [IPSET_CMD_FLUSH]       = {
+               .call           = ip_set_flush,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_setname_policy,
+       },
+       [IPSET_CMD_RENAME]      = {
+               .call           = ip_set_rename,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_setname2_policy,
+       },
+       [IPSET_CMD_SWAP]        = {
+               .call           = ip_set_swap,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_setname2_policy,
+       },
+       [IPSET_CMD_LIST]        = {
+               .call           = ip_set_dump,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_setname_policy,
+       },
+       [IPSET_CMD_SAVE]        = {
+               .call           = ip_set_dump,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_setname_policy,
+       },
+       [IPSET_CMD_ADD] = {
+               .call           = ip_set_uadd,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_adt_policy,
+       },
+       [IPSET_CMD_DEL] = {
+               .call           = ip_set_udel,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_adt_policy,
+       },
+       [IPSET_CMD_TEST]        = {
+               .call           = ip_set_utest,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_adt_policy,
+       },
+       [IPSET_CMD_HEADER]      = {
+               .call           = ip_set_header,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_setname_policy,
+       },
+       [IPSET_CMD_TYPE]        = {
+               .call           = ip_set_type,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_type_policy,
+       },
+       [IPSET_CMD_PROTOCOL]    = {
+               .call           = ip_set_protocol,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+               .policy         = ip_set_protocol_policy,
+       },
+};
+
+static struct nfnetlink_subsystem ip_set_netlink_subsys __read_mostly = {
+       .name           = "ip_set",
+       .subsys_id      = NFNL_SUBSYS_IPSET,
+       .cb_count       = IPSET_MSG_MAX,
+       .cb             = ip_set_netlink_subsys_cb,
+};
+
+/* Interface to iptables/ip6tables */
+
+static int
+ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
+{
+       unsigned *op;
+       void *data;
+       int copylen = *len, ret = 0;
+
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+       if (optval != SO_IP_SET)
+               return -EBADF;
+       if (*len < sizeof(unsigned))
+               return -EINVAL;
+
+       data = vmalloc(*len);
+       if (!data)
+               return -ENOMEM;
+       if (copy_from_user(data, user, *len) != 0) {
+               ret = -EFAULT;
+               goto done;
+       }
+       op = (unsigned *) data;
+
+       if (*op < IP_SET_OP_VERSION) {
+               /* Check the version at the beginning of operations */
+               struct ip_set_req_version *req_version = data;
+               if (req_version->version != IPSET_PROTOCOL) {
+                       ret = -EPROTO;
+                       goto done;
+               }
+       }
+
+       switch (*op) {
+       case IP_SET_OP_VERSION: {
+               struct ip_set_req_version *req_version = data;
+
+               if (*len != sizeof(struct ip_set_req_version)) {
+                       ret = -EINVAL;
+                       goto done;
+               }
+
+               req_version->version = IPSET_PROTOCOL;
+               ret = copy_to_user(user, req_version,
+                                  sizeof(struct ip_set_req_version));
+               goto done;
+       }
+       case IP_SET_OP_GET_BYNAME: {
+               struct ip_set_req_get_set *req_get = data;
+
+               if (*len != sizeof(struct ip_set_req_get_set)) {
+                       ret = -EINVAL;
+                       goto done;
+               }
+               req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
+               nfnl_lock();
+               req_get->set.index = find_set_id(req_get->set.name);
+               nfnl_unlock();
+               goto copy;
+       }
+       case IP_SET_OP_GET_BYINDEX: {
+               struct ip_set_req_get_set *req_get = data;
+
+               if (*len != sizeof(struct ip_set_req_get_set) ||
+                   req_get->set.index >= ip_set_max) {
+                       ret = -EINVAL;
+                       goto done;
+               }
+               nfnl_lock();
+               strncpy(req_get->set.name,
+                       ip_set_list[req_get->set.index]
+                               ? ip_set_list[req_get->set.index]->name : "",
+                       IPSET_MAXNAMELEN);
+               nfnl_unlock();
+               goto copy;
+       }
+       default:
+               ret = -EBADMSG;
+               goto done;
+       }       /* end of switch(op) */
+
+copy:
+       ret = copy_to_user(user, data, copylen);
+
+done:
+       vfree(data);
+       if (ret > 0)
+               ret = 0;
+       return ret;
+}
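+
+/* Minimal sketch of the legacy sockopt interface registered below, as a
+ * userspace caller might use it (socket level and error handling assumed):
+ *
+ *      struct ip_set_req_version req = { .op = IP_SET_OP_VERSION };
+ *      socklen_t len = sizeof(req);
+ *      int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
+ *
+ *      getsockopt(fd, SOL_IP, SO_IP_SET, &req, &len);
+ *      // on success req.version holds IPSET_PROTOCOL
+ */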
+
+static struct nf_sockopt_ops so_set __read_mostly = {
+       .pf             = PF_INET,
+       .get_optmin     = SO_IP_SET,
+       .get_optmax     = SO_IP_SET + 1,
+       .get            = &ip_set_sockfn_get,
+       .owner          = THIS_MODULE,
+};
+
+static int __init
+ip_set_init(void)
+{
+       int ret;
+
+       if (max_sets)
+               ip_set_max = max_sets;
+       if (ip_set_max >= IPSET_INVALID_ID)
+               ip_set_max = IPSET_INVALID_ID - 1;
+
+       ip_set_list = kzalloc(sizeof(struct ip_set *) * ip_set_max,
+                             GFP_KERNEL);
+       if (!ip_set_list) {
+               pr_err("ip_set: Unable to create ip_set_list\n");
+               return -ENOMEM;
+       }
+
+       ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
+       if (ret != 0) {
+               pr_err("ip_set: cannot register with nfnetlink.\n");
+               kfree(ip_set_list);
+               return ret;
+       }
+       ret = nf_register_sockopt(&so_set);
+       if (ret != 0) {
+               pr_err("SO_SET registry failed: %d\n", ret);
+               nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+               kfree(ip_set_list);
+               return ret;
+       }
+
+       pr_notice("ip_set: protocol %u\n", IPSET_PROTOCOL);
+       return 0;
+}
+
+static void __exit
+ip_set_fini(void)
+{
+       /* There can't be any existing set */
+       nf_unregister_sockopt(&so_set);
+       nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+       kfree(ip_set_list);
+       pr_debug("these are the famous last words\n");
+}
+
+module_init(ip_set_init);
+module_exit(ip_set_fini);
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
new file mode 100644 (file)
index 0000000..8d52272
--- /dev/null
@@ -0,0 +1,141 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Get Layer-4 data from the packets */
+
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include <linux/netfilter/ipset/ip_set_getport.h>
+
+/* We must handle non-linear skbs */
+static bool
+get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
+        bool src, __be16 *port, u8 *proto)
+{
+       switch (protocol) {
+       case IPPROTO_TCP: {
+               struct tcphdr _tcph;
+               const struct tcphdr *th;
+
+               th = skb_header_pointer(skb, protooff, sizeof(_tcph), &_tcph);
+               if (th == NULL)
+                       /* No choice either */
+                       return false;
+
+               *port = src ? th->source : th->dest;
+               break;
+       }
+       case IPPROTO_UDP: {
+               struct udphdr _udph;
+               const struct udphdr *uh;
+
+               uh = skb_header_pointer(skb, protooff, sizeof(_udph), &_udph);
+               if (uh == NULL)
+                       /* No choice either */
+                       return false;
+
+               *port = src ? uh->source : uh->dest;
+               break;
+       }
+       case IPPROTO_ICMP: {
+               struct icmphdr _ich;
+               const struct icmphdr *ic;
+
+               ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
+               if (ic == NULL)
+                       return false;
+
+               *port = (__force __be16)htons((ic->type << 8) | ic->code);
+               break;
+       }
+       case IPPROTO_ICMPV6: {
+               struct icmp6hdr _ich;
+               const struct icmp6hdr *ic;
+
+               ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
+               if (ic == NULL)
+                       return false;
+
+               *port = (__force __be16)
+                       htons((ic->icmp6_type << 8) | ic->icmp6_code);
+               break;
+       }
+       default:
+               break;
+       }
+       *proto = protocol;
+
+       return true;
+}
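+
+/* Example: for ICMP/ICMPv6 the type and code are folded into the port
+ * slot, e.g. an ICMP echo request (type 8, code 0) is stored as
+ * htons((8 << 8) | 0) == htons(0x0800).
+ */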
+
+bool
+ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
+                   __be16 *port, u8 *proto)
+{
+       const struct iphdr *iph = ip_hdr(skb);
+       unsigned int protooff = ip_hdrlen(skb);
+       int protocol = iph->protocol;
+
+       /* See comments at tcp_match in ip_tables.c */
+       if (protocol <= 0 || (ntohs(iph->frag_off) & IP_OFFSET))
+               return false;
+
+       return get_port(skb, protocol, protooff, src, port, proto);
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ip4_port);
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+bool
+ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
+                   __be16 *port, u8 *proto)
+{
+       int protoff;
+       u8 nexthdr;
+
+       nexthdr = ipv6_hdr(skb)->nexthdr;
+       protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
+       if (protoff < 0)
+               return false;
+
+       return get_port(skb, nexthdr, protoff, src, port, proto);
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ip6_port);
+#endif
+
+bool
+ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, __be16 *port)
+{
+       bool ret;
+       u8 proto;
+
+       switch (pf) {
+       case AF_INET:
+               ret = ip_set_get_ip4_port(skb, src, port, &proto);
+               break;
+       case AF_INET6:
+               ret = ip_set_get_ip6_port(skb, src, port, &proto);
+               break;
+       default:
+               return false;
+       }
+       if (!ret)
+               return ret;
+       switch (proto) {
+       case IPPROTO_TCP:
+       case IPPROTO_UDP:
+               return true;
+       default:
+               return false;
+       }
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ip_port);
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
new file mode 100644 (file)
index 0000000..43bcce2
--- /dev/null
@@ -0,0 +1,464 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip");
+
+/* Type specific function prefix */
+#define TYPE           hash_ip
+
+static bool
+hash_ip_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ip4_same_set      hash_ip_same_set
+#define hash_ip6_same_set      hash_ip_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ip4_elem {
+       __be32 ip;
+};
+
+/* Member elements with timeout support */
+struct hash_ip4_telem {
+       __be32 ip;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_ip4_data_equal(const struct hash_ip4_elem *ip1,
+                   const struct hash_ip4_elem *ip2)
+{
+       return ip1->ip == ip2->ip;
+}
+
+static inline bool
+hash_ip4_data_isnull(const struct hash_ip4_elem *elem)
+{
+       return elem->ip == 0;
+}
+
+static inline void
+hash_ip4_data_copy(struct hash_ip4_elem *dst, const struct hash_ip4_elem *src)
+{
+       dst->ip = src->ip;
+}
+
+/* Zero valued IP addresses cannot be stored */
+static inline void
+hash_ip4_data_zero_out(struct hash_ip4_elem *elem)
+{
+       elem->ip = 0;
+}
+
+static inline bool
+hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data)
+{
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_ip4_data_tlist(struct sk_buff *skb, const struct hash_ip4_elem *data)
+{
+       const struct hash_ip4_telem *tdata =
+               (const struct hash_ip4_telem *)data;
+
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(tdata->timeout)));
+
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#define IP_SET_HASH_WITH_NETMASK
+#define PF             4
+#define HOST_MASK      32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb,
+             enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       __be32 ip;
+
+       ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &ip);
+       ip &= ip_set_netmask(h->netmask);
+       if (ip == 0)
+               return -EINVAL;
+
+       return adtfn(set, &ip, h->timeout);
+}
+
+static int
+hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
+             enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       u32 ip, ip_to, hosts, timeout = h->timeout;
+       __be32 nip;
+       int ret = 0;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+       if (ret)
+               return ret;
+
+       ip &= ip_set_hostmask(h->netmask);
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST) {
+               nip = htonl(ip);
+               if (nip == 0)
+                       return -IPSET_ERR_HASH_ELEM;
+               return adtfn(set, &nip, timeout);
+       }
+
+       if (tb[IPSET_ATTR_IP_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+               if (ret)
+                       return ret;
+               if (ip > ip_to)
+                       swap(ip, ip_to);
+       } else if (tb[IPSET_ATTR_CIDR]) {
+               u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+               if (cidr > 32)
+                       return -IPSET_ERR_INVALID_CIDR;
+               ip &= ip_set_hostmask(cidr);
+               ip_to = ip | ~ip_set_hostmask(cidr);
+       } else
+               ip_to = ip;
+
+       hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
+
+       for (; !before(ip_to, ip); ip += hosts) {
+               nip = htonl(ip);
+               if (nip == 0)
+                       return -IPSET_ERR_HASH_ELEM;
+               ret = adtfn(set, &nip, timeout);
+
+               if (ret && !ip_set_eexist(ret, flags))
+                       return ret;
+               else
+                       ret = 0;
+       }
+       return ret;
+}
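+
+/* Illustrative sketch: a range or CIDR given from userspace is expanded
+ * into individual entries, stepping by 2^(32 - netmask) hosts, e.g. with
+ * the default netmask of 32
+ *
+ *      ipset add foo 192.168.0.0/30
+ *
+ * adds the four addresses 192.168.0.0 .. 192.168.0.3 one by one
+ * (0.0.0.0 itself can never be stored).
+ */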
+
+static bool
+hash_ip_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+       const struct ip_set_hash *x = a->data;
+       const struct ip_set_hash *y = b->data;
+
+       /* Resizing changes htable_bits, so we ignore it */
+       return x->maxelem == y->maxelem &&
+              x->timeout == y->timeout &&
+              x->netmask == y->netmask;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ip6_elem {
+       union nf_inet_addr ip;
+};
+
+struct hash_ip6_telem {
+       union nf_inet_addr ip;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_ip6_data_equal(const struct hash_ip6_elem *ip1,
+                   const struct hash_ip6_elem *ip2)
+{
+       return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0;
+}
+
+static inline bool
+hash_ip6_data_isnull(const struct hash_ip6_elem *elem)
+{
+       return ipv6_addr_any(&elem->ip.in6);
+}
+
+static inline void
+hash_ip6_data_copy(struct hash_ip6_elem *dst, const struct hash_ip6_elem *src)
+{
+       ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+}
+
+static inline void
+hash_ip6_data_zero_out(struct hash_ip6_elem *elem)
+{
+       ipv6_addr_set(&elem->ip.in6, 0, 0, 0, 0);
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+       ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+       ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+       ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+       ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static bool
+hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data)
+{
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_ip6_data_tlist(struct sk_buff *skb, const struct hash_ip6_elem *data)
+{
+       const struct hash_ip6_telem *e =
+               (const struct hash_ip6_telem *)data;
+
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(e->timeout)));
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF             6
+#define HOST_MASK      128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb,
+             enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       union nf_inet_addr ip;
+
+       ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &ip.in6);
+       ip6_netmask(&ip, h->netmask);
+       if (ipv6_addr_any(&ip.in6))
+               return -EINVAL;
+
+       return adtfn(set, &ip, h->timeout);
+}
+
+static const struct nla_policy hash_ip6_adt_policy[IPSET_ATTR_ADT_MAX + 1] = {
+       [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+       [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+};
+
+static int
+hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
+             enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       union nf_inet_addr ip;
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+                    tb[IPSET_ATTR_IP_TO] ||
+                    tb[IPSET_ATTR_CIDR]))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &ip);
+       if (ret)
+               return ret;
+
+       ip6_netmask(&ip, h->netmask);
+       if (ipv6_addr_any(&ip.in6))
+               return -IPSET_ERR_HASH_ELEM;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       ret = adtfn(set, &ip, timeout);
+
+       return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+/* Create hash:ip type of sets */
+
+static int
+hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+       u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+       u8 netmask, hbits;
+       struct ip_set_hash *h;
+
+       if (!(set->family == AF_INET || set->family == AF_INET6))
+               return -IPSET_ERR_INVALID_FAMILY;
+       netmask = set->family == AF_INET ? 32 : 128;
+       pr_debug("Create set %s with family %s\n",
+                set->name, set->family == AF_INET ? "inet" : "inet6");
+
+       if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_HASHSIZE]) {
+               hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+               if (hashsize < IPSET_MIMINAL_HASHSIZE)
+                       hashsize = IPSET_MIMINAL_HASHSIZE;
+       }
+
+       if (tb[IPSET_ATTR_MAXELEM])
+               maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+       if (tb[IPSET_ATTR_NETMASK]) {
+               netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
+
+               if ((set->family == AF_INET && netmask > 32) ||
+                   (set->family == AF_INET6 && netmask > 128) ||
+                   netmask == 0)
+                       return -IPSET_ERR_INVALID_NETMASK;
+       }
+
+       h = kzalloc(sizeof(*h), GFP_KERNEL);
+       if (!h)
+               return -ENOMEM;
+
+       h->maxelem = maxelem;
+       h->netmask = netmask;
+       get_random_bytes(&h->initval, sizeof(h->initval));
+       h->timeout = IPSET_NO_TIMEOUT;
+
+       hbits = htable_bits(hashsize);
+       h->table = ip_set_alloc(
+                       sizeof(struct htable)
+                       + jhash_size(hbits) * sizeof(struct hbucket));
+       if (!h->table) {
+               kfree(h);
+               return -ENOMEM;
+       }
+       h->table->htable_bits = hbits;
+
+       set->data = h;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+               set->variant = set->family == AF_INET
+                       ? &hash_ip4_tvariant : &hash_ip6_tvariant;
+
+               if (set->family == AF_INET)
+                       hash_ip4_gc_init(set);
+               else
+                       hash_ip6_gc_init(set);
+       } else {
+               set->variant = set->family == AF_INET
+                       ? &hash_ip4_variant : &hash_ip6_variant;
+       }
+
+       pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+                set->name, jhash_size(h->table->htable_bits),
+                h->table->htable_bits, h->maxelem, set->data, h->table);
+
+       return 0;
+}
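+
+/* Illustrative sketch of the create parameters handled above:
+ *
+ *      ipset create foo hash:ip hashsize 1024 maxelem 65536 \
+ *              netmask 24 timeout 600
+ *
+ * hashsize is converted to a power-of-two bucket count via htable_bits(),
+ * netmask folds stored addresses to /24 networks, and a timeout selects
+ * the *_tvariant together with garbage collection.
+ */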
+
+static struct ip_set_type hash_ip_type __read_mostly = {
+       .name           = "hash:ip",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_IP,
+       .dimension      = IPSET_DIM_ONE,
+       .family         = AF_UNSPEC,
+       .revision       = 0,
+       .create         = hash_ip_create,
+       .create_policy  = {
+               [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
+               [IPSET_ATTR_MAXELEM]    = { .type = NLA_U32 },
+               [IPSET_ATTR_PROBES]     = { .type = NLA_U8 },
+               [IPSET_ATTR_RESIZE]     = { .type = NLA_U8  },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_NETMASK]    = { .type = NLA_U8  },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP_TO]      = { .type = NLA_NESTED },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+hash_ip_init(void)
+{
+       return ip_set_type_register(&hash_ip_type);
+}
+
+static void __exit
+hash_ip_fini(void)
+{
+       ip_set_type_unregister(&hash_ip_type);
+}
+
+module_init(hash_ip_init);
+module_exit(hash_ip_fini);
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
new file mode 100644 (file)
index 0000000..adbe787
--- /dev/null
@@ -0,0 +1,544 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip,port type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip,port type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip,port");
+
+/* Type specific function prefix */
+#define TYPE           hash_ipport
+
+static bool
+hash_ipport_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ipport4_same_set  hash_ipport_same_set
+#define hash_ipport6_same_set  hash_ipport_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ipport4_elem {
+       __be32 ip;
+       __be16 port;
+       u8 proto;
+       u8 padding;
+};
+
+/* Member elements with timeout support */
+struct hash_ipport4_telem {
+       __be32 ip;
+       __be16 port;
+       u8 proto;
+       u8 padding;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_ipport4_data_equal(const struct hash_ipport4_elem *ip1,
+                       const struct hash_ipport4_elem *ip2)
+{
+       return ip1->ip == ip2->ip &&
+              ip1->port == ip2->port &&
+              ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipport4_data_isnull(const struct hash_ipport4_elem *elem)
+{
+       return elem->proto == 0;
+}
+
+static inline void
+hash_ipport4_data_copy(struct hash_ipport4_elem *dst,
+                      const struct hash_ipport4_elem *src)
+{
+       dst->ip = src->ip;
+       dst->port = src->port;
+       dst->proto = src->proto;
+}
+
+static inline void
+hash_ipport4_data_zero_out(struct hash_ipport4_elem *elem)
+{
+       elem->proto = 0;
+}
+
+static bool
+hash_ipport4_data_list(struct sk_buff *skb,
+                      const struct hash_ipport4_elem *data)
+{
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_ipport4_data_tlist(struct sk_buff *skb,
+                       const struct hash_ipport4_elem *data)
+{
+       const struct hash_ipport4_telem *tdata =
+               (const struct hash_ipport4_telem *)data;
+
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(tdata->timeout)));
+
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#define PF             4
+#define HOST_MASK      32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipport4_kadt(struct ip_set *set, const struct sk_buff *skb,
+                 enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipport4_elem data = { };
+
+       if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+                                &data.port, &data.proto))
+               return -EINVAL;
+
+       ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+
+       return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
+                 enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipport4_elem data = { };
+       u32 ip, ip_to, p, port, port_to;
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_PORT])
+               data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+       else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_PROTO]) {
+               data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+               if (data.proto == 0)
+                       return -IPSET_ERR_INVALID_PROTO;
+       } else
+               return -IPSET_ERR_MISSING_PROTO;
+
+       switch (data.proto) {
+       case IPPROTO_UDP:
+       case IPPROTO_TCP:
+       case IPPROTO_ICMP:
+               break;
+       default:
+               data.port = 0;
+               break;
+       }
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST ||
+           !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+           !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
+             tb[IPSET_ATTR_PORT_TO])) {
+               ret = adtfn(set, &data, timeout);
+               return ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       ip = ntohl(data.ip);
+       if (tb[IPSET_ATTR_IP_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+               if (ret)
+                       return ret;
+               if (ip > ip_to)
+                       swap(ip, ip_to);
+       } else if (tb[IPSET_ATTR_CIDR]) {
+               u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+               if (cidr > 32)
+                       return -IPSET_ERR_INVALID_CIDR;
+               ip &= ip_set_hostmask(cidr);
+               ip_to = ip | ~ip_set_hostmask(cidr);
+       } else
+               ip_to = ip;
+
+       port = ntohs(data.port);
+       if (tb[IPSET_ATTR_PORT_TO]) {
+               port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+               if (port > port_to)
+                       swap(port, port_to);
+       } else
+               port_to = port;
+
+       for (; !before(ip_to, ip); ip++)
+               for (p = port; p <= port_to; p++) {
+                       data.ip = htonl(ip);
+                       data.port = htons(p);
+                       ret = adtfn(set, &data, timeout);
+
+                       if (ret && !ip_set_eexist(ret, flags))
+                               return ret;
+                       else
+                               ret = 0;
+               }
+       return ret;
+}
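+
+/* Illustrative sketch: address and port ranges are both expanded, e.g.
+ *
+ *      ipset add bar 192.168.0.1-192.168.0.2,tcp:80-81
+ *
+ * creates the four (ip, proto, port) entries covered by the two ranges;
+ * the set name and values are example data.
+ */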
+
+static bool
+hash_ipport_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+       const struct ip_set_hash *x = a->data;
+       const struct ip_set_hash *y = b->data;
+
+       /* Resizing changes htable_bits, so we ignore it */
+       return x->maxelem == y->maxelem &&
+              x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ipport6_elem {
+       union nf_inet_addr ip;
+       __be16 port;
+       u8 proto;
+       u8 padding;
+};
+
+struct hash_ipport6_telem {
+       union nf_inet_addr ip;
+       __be16 port;
+       u8 proto;
+       u8 padding;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_ipport6_data_equal(const struct hash_ipport6_elem *ip1,
+                       const struct hash_ipport6_elem *ip2)
+{
+       return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+              ip1->port == ip2->port &&
+              ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipport6_data_isnull(const struct hash_ipport6_elem *elem)
+{
+       return elem->proto == 0;
+}
+
+static inline void
+hash_ipport6_data_copy(struct hash_ipport6_elem *dst,
+                      const struct hash_ipport6_elem *src)
+{
+       memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipport6_data_zero_out(struct hash_ipport6_elem *elem)
+{
+       elem->proto = 0;
+}
+
+static bool
+hash_ipport6_data_list(struct sk_buff *skb,
+                      const struct hash_ipport6_elem *data)
+{
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_ipport6_data_tlist(struct sk_buff *skb,
+                       const struct hash_ipport6_elem *data)
+{
+       const struct hash_ipport6_telem *e =
+               (const struct hash_ipport6_telem *)data;
+
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(e->timeout)));
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF             6
+#define HOST_MASK      128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipport6_kadt(struct ip_set *set, const struct sk_buff *skb,
+                 enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipport6_elem data = { };
+
+       if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+                                &data.port, &data.proto))
+               return -EINVAL;
+
+       ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+
+       return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
+                 enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipport6_elem data = { };
+       u32 port, port_to;
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+                    tb[IPSET_ATTR_IP_TO] ||
+                    tb[IPSET_ATTR_CIDR]))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_PORT])
+               data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+       else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_PROTO]) {
+               data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+               if (data.proto == 0)
+                       return -IPSET_ERR_INVALID_PROTO;
+       } else
+               return -IPSET_ERR_MISSING_PROTO;
+
+       switch (data.proto) {
+       case IPPROTO_UDP:
+       case IPPROTO_TCP:
+       case IPPROTO_ICMPV6:
+               break;
+       default:
+               data.port = 0;
+               break;
+       }
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST ||
+           !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+           !tb[IPSET_ATTR_PORT_TO]) {
+               ret = adtfn(set, &data, timeout);
+               return ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       port = ntohs(data.port);
+       port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+       if (port > port_to)
+               swap(port, port_to);
+
+       for (; port <= port_to; port++) {
+               data.port = htons(port);
+               ret = adtfn(set, &data, timeout);
+
+               if (ret && !ip_set_eexist(ret, flags))
+                       return ret;
+               else
+                       ret = 0;
+       }
+       return ret;
+}
+
+/* Create hash:ip,port type of sets */
+
+static int
+hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+       struct ip_set_hash *h;
+       u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+       u8 hbits;
+
+       if (!(set->family == AF_INET || set->family == AF_INET6))
+               return -IPSET_ERR_INVALID_FAMILY;
+
+       if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_HASHSIZE]) {
+               hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+               if (hashsize < IPSET_MIMINAL_HASHSIZE)
+                       hashsize = IPSET_MIMINAL_HASHSIZE;
+       }
+
+       if (tb[IPSET_ATTR_MAXELEM])
+               maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+       h = kzalloc(sizeof(*h), GFP_KERNEL);
+       if (!h)
+               return -ENOMEM;
+
+       h->maxelem = maxelem;
+       get_random_bytes(&h->initval, sizeof(h->initval));
+       h->timeout = IPSET_NO_TIMEOUT;
+
+       hbits = htable_bits(hashsize);
+       h->table = ip_set_alloc(
+                       sizeof(struct htable)
+                       + jhash_size(hbits) * sizeof(struct hbucket));
+       if (!h->table) {
+               kfree(h);
+               return -ENOMEM;
+       }
+       h->table->htable_bits = hbits;
+
+       set->data = h;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+               set->variant = set->family == AF_INET
+                       ? &hash_ipport4_tvariant : &hash_ipport6_tvariant;
+
+               if (set->family == AF_INET)
+                       hash_ipport4_gc_init(set);
+               else
+                       hash_ipport6_gc_init(set);
+       } else {
+               set->variant = set->family == AF_INET
+                       ? &hash_ipport4_variant : &hash_ipport6_variant;
+       }
+
+       pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+                set->name, jhash_size(h->table->htable_bits),
+                h->table->htable_bits, h->maxelem, set->data, h->table);
+
+       return 0;
+}
+
+static struct ip_set_type hash_ipport_type __read_mostly = {
+       .name           = "hash:ip,port",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT,
+       .dimension      = IPSET_DIM_TWO,
+       .family         = AF_UNSPEC,
+       .revision       = 0,
+       .create         = hash_ipport_create,
+       .create_policy  = {
+               [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
+               [IPSET_ATTR_MAXELEM]    = { .type = NLA_U32 },
+               [IPSET_ATTR_PROBES]     = { .type = NLA_U8 },
+               [IPSET_ATTR_RESIZE]     = { .type = NLA_U8  },
+               [IPSET_ATTR_PROTO]      = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP_TO]      = { .type = NLA_NESTED },
+               [IPSET_ATTR_PORT]       = { .type = NLA_U16 },
+               [IPSET_ATTR_PORT_TO]    = { .type = NLA_U16 },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_PROTO]      = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+hash_ipport_init(void)
+{
+       return ip_set_type_register(&hash_ipport_type);
+}
+
+static void __exit
+hash_ipport_fini(void)
+{
+       ip_set_type_unregister(&hash_ipport_type);
+}
+
+module_init(hash_ipport_init);
+module_exit(hash_ipport_fini);
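
A minimal user-space sketch of the range expansion performed by hash_ipport4_uadt() above: an inclusive IPv4 range and port range are walked in host byte order, and each element is converted back to network order before it is stored (the printf stands in for the adtfn() call; the helper name and output format are illustrative only, not part of the patch).

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>

/* Walk the same inclusive ranges as the nested for loops in
 * hash_ipport4_uadt() (the kernel uses before() for a wrap-safe
 * IP comparison), printing what would be added. */
static void expand_ip_port_range(uint32_t ip_from, uint32_t ip_to,
				 uint16_t port_from, uint16_t port_to)
{
	uint32_t ip;
	uint32_t port;	/* u32 so that port_to == 65535 cannot wrap */

	for (ip = ip_from; ip <= ip_to; ip++)
		for (port = port_from; port <= port_to; port++) {
			struct in_addr a = { .s_addr = htonl(ip) };

			/* the kernel stores htonl(ip) and htons(port) here */
			printf("add %s,%u\n", inet_ntoa(a), (unsigned)port);
		}
}

For IPSET_TEST requests, or when the protocol is neither TCP nor UDP, the uadt functions skip this expansion and handle the single element directly.
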
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
new file mode 100644 (file)
index 0000000..22e23ab
--- /dev/null
@@ -0,0 +1,562 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip,port,ip type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip,port,ip type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip,port,ip");
+
+/* Type specific function prefix */
+#define TYPE           hash_ipportip
+
+static bool
+hash_ipportip_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ipportip4_same_set        hash_ipportip_same_set
+#define hash_ipportip6_same_set        hash_ipportip_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ipportip4_elem {
+       __be32 ip;
+       __be32 ip2;
+       __be16 port;
+       u8 proto;
+       u8 padding;
+};
+
+/* Member elements with timeout support */
+struct hash_ipportip4_telem {
+       __be32 ip;
+       __be32 ip2;
+       __be16 port;
+       u8 proto;
+       u8 padding;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_ipportip4_data_equal(const struct hash_ipportip4_elem *ip1,
+                         const struct hash_ipportip4_elem *ip2)
+{
+       return ip1->ip == ip2->ip &&
+              ip1->ip2 == ip2->ip2 &&
+              ip1->port == ip2->port &&
+              ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportip4_data_isnull(const struct hash_ipportip4_elem *elem)
+{
+       return elem->proto == 0;
+}
+
+static inline void
+hash_ipportip4_data_copy(struct hash_ipportip4_elem *dst,
+                        const struct hash_ipportip4_elem *src)
+{
+       memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportip4_data_zero_out(struct hash_ipportip4_elem *elem)
+{
+       elem->proto = 0;
+}
+
+static bool
+hash_ipportip4_data_list(struct sk_buff *skb,
+                      const struct hash_ipportip4_elem *data)
+{
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_ipportip4_data_tlist(struct sk_buff *skb,
+                       const struct hash_ipportip4_elem *data)
+{
+       const struct hash_ipportip4_telem *tdata =
+               (const struct hash_ipportip4_telem *)data;
+
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(tdata->timeout)));
+
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#define PF             4
+#define HOST_MASK      32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb,
+                   enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipportip4_elem data = { };
+
+       if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+                                &data.port, &data.proto))
+               return -EINVAL;
+
+       ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+       ip4addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2);
+
+       return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
+                   enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipportip4_elem data = { };
+       u32 ip, ip_to, p, port, port_to;
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &data.ip2);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_PORT])
+               data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+       else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_PROTO]) {
+               data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+               if (data.proto == 0)
+                       return -IPSET_ERR_INVALID_PROTO;
+       } else
+               return -IPSET_ERR_MISSING_PROTO;
+
+       switch (data.proto) {
+       case IPPROTO_UDP:
+       case IPPROTO_TCP:
+       case IPPROTO_ICMP:
+               break;
+       default:
+               data.port = 0;
+               break;
+       }
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST ||
+           !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+           !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
+             tb[IPSET_ATTR_PORT_TO])) {
+               ret = adtfn(set, &data, timeout);
+               return ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       ip = ntohl(data.ip);
+       if (tb[IPSET_ATTR_IP_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+               if (ret)
+                       return ret;
+               if (ip > ip_to)
+                       swap(ip, ip_to);
+       } else if (tb[IPSET_ATTR_CIDR]) {
+               u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+               if (cidr > 32)
+                       return -IPSET_ERR_INVALID_CIDR;
+               ip &= ip_set_hostmask(cidr);
+               ip_to = ip | ~ip_set_hostmask(cidr);
+       } else
+               ip_to = ip;
+
+       port = ntohs(data.port);
+       if (tb[IPSET_ATTR_PORT_TO]) {
+               port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+               if (port > port_to)
+                       swap(port, port_to);
+       } else
+               port_to = port;
+
+       for (; !before(ip_to, ip); ip++)
+               for (p = port; p <= port_to; p++) {
+                       data.ip = htonl(ip);
+                       data.port = htons(p);
+                       ret = adtfn(set, &data, timeout);
+
+                       if (ret && !ip_set_eexist(ret, flags))
+                               return ret;
+                       else
+                               ret = 0;
+               }
+       return ret;
+}
+
+static bool
+hash_ipportip_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+       const struct ip_set_hash *x = a->data;
+       const struct ip_set_hash *y = b->data;
+
+       /* Resizing changes htable_bits, so we ignore it */
+       return x->maxelem == y->maxelem &&
+              x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ipportip6_elem {
+       union nf_inet_addr ip;
+       union nf_inet_addr ip2;
+       __be16 port;
+       u8 proto;
+       u8 padding;
+};
+
+struct hash_ipportip6_telem {
+       union nf_inet_addr ip;
+       union nf_inet_addr ip2;
+       __be16 port;
+       u8 proto;
+       u8 padding;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_ipportip6_data_equal(const struct hash_ipportip6_elem *ip1,
+                         const struct hash_ipportip6_elem *ip2)
+{
+       return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+              ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 &&
+              ip1->port == ip2->port &&
+              ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportip6_data_isnull(const struct hash_ipportip6_elem *elem)
+{
+       return elem->proto == 0;
+}
+
+static inline void
+hash_ipportip6_data_copy(struct hash_ipportip6_elem *dst,
+                        const struct hash_ipportip6_elem *src)
+{
+       memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportip6_data_zero_out(struct hash_ipportip6_elem *elem)
+{
+       elem->proto = 0;
+}
+
+static bool
+hash_ipportip6_data_list(struct sk_buff *skb,
+                        const struct hash_ipportip6_elem *data)
+{
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_ipportip6_data_tlist(struct sk_buff *skb,
+                         const struct hash_ipportip6_elem *data)
+{
+       const struct hash_ipportip6_telem *e =
+               (const struct hash_ipportip6_telem *)data;
+
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(e->timeout)));
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF             6
+#define HOST_MASK      128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb,
+                   enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipportip6_elem data = { };
+
+       if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+                                &data.port, &data.proto))
+               return -EINVAL;
+
+       ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+       ip6addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
+
+       return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
+                   enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipportip6_elem data = { };
+       u32 port, port_to;
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+                    tb[IPSET_ATTR_IP_TO] ||
+                    tb[IPSET_ATTR_CIDR]))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &data.ip2);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_PORT])
+               data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+       else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_PROTO]) {
+               data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+               if (data.proto == 0)
+                       return -IPSET_ERR_INVALID_PROTO;
+       } else
+               return -IPSET_ERR_MISSING_PROTO;
+
+       switch (data.proto) {
+       case IPPROTO_UDP:
+       case IPPROTO_TCP:
+       case IPPROTO_ICMPV6:
+               break;
+       default:
+               data.port = 0;
+               break;
+       }
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST ||
+           !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+           !tb[IPSET_ATTR_PORT_TO]) {
+               ret = adtfn(set, &data, timeout);
+               return ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       port = ntohs(data.port);
+       port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+       if (port > port_to)
+               swap(port, port_to);
+
+       for (; port <= port_to; port++) {
+               data.port = htons(port);
+               ret = adtfn(set, &data, timeout);
+
+               if (ret && !ip_set_eexist(ret, flags))
+                       return ret;
+               else
+                       ret = 0;
+       }
+       return ret;
+}
+
+/* Create hash:ip,port,ip type of sets */
+
+static int
+hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+       struct ip_set_hash *h;
+       u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+       u8 hbits;
+
+       if (!(set->family == AF_INET || set->family == AF_INET6))
+               return -IPSET_ERR_INVALID_FAMILY;
+
+       if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_HASHSIZE]) {
+               hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+               if (hashsize < IPSET_MIMINAL_HASHSIZE)
+                       hashsize = IPSET_MIMINAL_HASHSIZE;
+       }
+
+       if (tb[IPSET_ATTR_MAXELEM])
+               maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+       h = kzalloc(sizeof(*h), GFP_KERNEL);
+       if (!h)
+               return -ENOMEM;
+
+       h->maxelem = maxelem;
+       get_random_bytes(&h->initval, sizeof(h->initval));
+       h->timeout = IPSET_NO_TIMEOUT;
+
+       hbits = htable_bits(hashsize);
+       h->table = ip_set_alloc(
+                       sizeof(struct htable)
+                       + jhash_size(hbits) * sizeof(struct hbucket));
+       if (!h->table) {
+               kfree(h);
+               return -ENOMEM;
+       }
+       h->table->htable_bits = hbits;
+
+       set->data = h;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+               set->variant = set->family == AF_INET
+                       ? &hash_ipportip4_tvariant : &hash_ipportip6_tvariant;
+
+               if (set->family == AF_INET)
+                       hash_ipportip4_gc_init(set);
+               else
+                       hash_ipportip6_gc_init(set);
+       } else {
+               set->variant = set->family == AF_INET
+                       ? &hash_ipportip4_variant : &hash_ipportip6_variant;
+       }
+
+       pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+                set->name, jhash_size(h->table->htable_bits),
+                h->table->htable_bits, h->maxelem, set->data, h->table);
+
+       return 0;
+}
+
+static struct ip_set_type hash_ipportip_type __read_mostly = {
+       .name           = "hash:ip,port,ip",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
+       .dimension      = IPSET_DIM_THREE,
+       .family         = AF_UNSPEC,
+       .revision       = 0,
+       .create         = hash_ipportip_create,
+       .create_policy  = {
+               [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
+               [IPSET_ATTR_MAXELEM]    = { .type = NLA_U32 },
+               [IPSET_ATTR_PROBES]     = { .type = NLA_U8 },
+               [IPSET_ATTR_RESIZE]     = { .type = NLA_U8  },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP_TO]      = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP2]        = { .type = NLA_NESTED },
+               [IPSET_ATTR_PORT]       = { .type = NLA_U16 },
+               [IPSET_ATTR_PORT_TO]    = { .type = NLA_U16 },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_PROTO]      = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+hash_ipportip_init(void)
+{
+       return ip_set_type_register(&hash_ipportip_type);
+}
+
+static void __exit
+hash_ipportip_fini(void)
+{
+       ip_set_type_unregister(&hash_ipportip_type);
+}
+
+module_init(hash_ipportip_init);
+module_exit(hash_ipportip_fini);
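
Both the hash:ip,port and hash:ip,port,ip handlers gate the port on the protocol: only TCP, UDP and ICMP (ICMPv6 for the IPv6 variants) keep the supplied port, any other protocol is stored with port 0 and therefore matches on addresses alone. A standalone sketch of that rule (hypothetical helper, not part of the patch):

#include <netinet/in.h>
#include <stdbool.h>
#include <stdint.h>

/* Mirror the switch (data.proto) blocks above: return the port value
 * that would actually be stored for a given protocol. */
static uint16_t stored_port(uint8_t proto, uint16_t port, bool ipv6_set)
{
	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		return port;			/* real layer-4 port */
	case IPPROTO_ICMP:
		return ipv6_set ? 0 : port;	/* kept only by IPv4 sets */
	case IPPROTO_ICMPV6:
		return ipv6_set ? port : 0;	/* kept only by IPv6 sets */
	default:
		return 0;			/* no port: address match only */
	}
}
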
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
new file mode 100644 (file)
index 0000000..6033e8b
--- /dev/null
@@ -0,0 +1,628 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip,port,net type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip,port,net type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip,port,net");
+
+/* Type specific function prefix */
+#define TYPE           hash_ipportnet
+
+static bool
+hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ipportnet4_same_set       hash_ipportnet_same_set
+#define hash_ipportnet6_same_set       hash_ipportnet_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ipportnet4_elem {
+       __be32 ip;
+       __be32 ip2;
+       __be16 port;
+       u8 cidr;
+       u8 proto;
+};
+
+/* Member elements with timeout support */
+struct hash_ipportnet4_telem {
+       __be32 ip;
+       __be32 ip2;
+       __be16 port;
+       u8 cidr;
+       u8 proto;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_ipportnet4_data_equal(const struct hash_ipportnet4_elem *ip1,
+                          const struct hash_ipportnet4_elem *ip2)
+{
+       return ip1->ip == ip2->ip &&
+              ip1->ip2 == ip2->ip2 &&
+              ip1->cidr == ip2->cidr &&
+              ip1->port == ip2->port &&
+              ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportnet4_data_isnull(const struct hash_ipportnet4_elem *elem)
+{
+       return elem->proto == 0;
+}
+
+static inline void
+hash_ipportnet4_data_copy(struct hash_ipportnet4_elem *dst,
+                         const struct hash_ipportnet4_elem *src)
+{
+       memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr)
+{
+       elem->ip2 &= ip_set_netmask(cidr);
+       elem->cidr = cidr;
+}
+
+static inline void
+hash_ipportnet4_data_zero_out(struct hash_ipportnet4_elem *elem)
+{
+       elem->proto = 0;
+}
+
+static bool
+hash_ipportnet4_data_list(struct sk_buff *skb,
+                         const struct hash_ipportnet4_elem *data)
+{
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_ipportnet4_data_tlist(struct sk_buff *skb,
+                          const struct hash_ipportnet4_elem *data)
+{
+       const struct hash_ipportnet4_telem *tdata =
+               (const struct hash_ipportnet4_telem *)data;
+
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(tdata->timeout)));
+
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#define IP_SET_HASH_WITH_PROTO
+#define IP_SET_HASH_WITH_NETS
+
+#define PF             4
+#define HOST_MASK      32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
+                    enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipportnet4_elem data =
+               { .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+       if (data.cidr == 0)
+               return -EINVAL;
+       if (adt == IPSET_TEST)
+               data.cidr = HOST_MASK;
+
+       if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+                                &data.port, &data.proto))
+               return -EINVAL;
+
+       ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+       ip4addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2);
+       data.ip2 &= ip_set_netmask(data.cidr);
+
+       return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+                    enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
+       u32 ip, ip_to, p, port, port_to;
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &data.ip2);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_CIDR2])
+               data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+
+       if (!data.cidr)
+               return -IPSET_ERR_INVALID_CIDR;
+
+       data.ip2 &= ip_set_netmask(data.cidr);
+
+       if (tb[IPSET_ATTR_PORT])
+               data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+       else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_PROTO]) {
+               data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+               if (data.proto == 0)
+                       return -IPSET_ERR_INVALID_PROTO;
+       } else
+               return -IPSET_ERR_MISSING_PROTO;
+
+       switch (data.proto) {
+       case IPPROTO_UDP:
+       case IPPROTO_TCP:
+       case IPPROTO_ICMP:
+               break;
+       default:
+               data.port = 0;
+               break;
+       }
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST ||
+           !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+           !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
+             tb[IPSET_ATTR_PORT_TO])) {
+               ret = adtfn(set, &data, timeout);
+               return ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       ip = ntohl(data.ip);
+       if (tb[IPSET_ATTR_IP_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+               if (ret)
+                       return ret;
+               if (ip > ip_to)
+                       swap(ip, ip_to);
+       } else if (tb[IPSET_ATTR_CIDR]) {
+               u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+               if (cidr > 32)
+                       return -IPSET_ERR_INVALID_CIDR;
+               ip &= ip_set_hostmask(cidr);
+               ip_to = ip | ~ip_set_hostmask(cidr);
+       } else
+               ip_to = ip;
+
+       port = ntohs(data.port);
+       if (tb[IPSET_ATTR_PORT_TO]) {
+               port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+               if (port > port_to)
+                       swap(port, port_to);
+       } else
+               port_to = port;
+
+       for (; !before(ip_to, ip); ip++)
+               for (p = port; p <= port_to; p++) {
+                       data.ip = htonl(ip);
+                       data.port = htons(p);
+                       ret = adtfn(set, &data, timeout);
+
+                       if (ret && !ip_set_eexist(ret, flags))
+                               return ret;
+                       else
+                               ret = 0;
+               }
+       return ret;
+}
+
+static bool
+hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+       const struct ip_set_hash *x = a->data;
+       const struct ip_set_hash *y = b->data;
+
+       /* Resizing changes htable_bits, so we ignore it */
+       return x->maxelem == y->maxelem &&
+              x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ipportnet6_elem {
+       union nf_inet_addr ip;
+       union nf_inet_addr ip2;
+       __be16 port;
+       u8 cidr;
+       u8 proto;
+};
+
+struct hash_ipportnet6_telem {
+       union nf_inet_addr ip;
+       union nf_inet_addr ip2;
+       __be16 port;
+       u8 cidr;
+       u8 proto;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_ipportnet6_data_equal(const struct hash_ipportnet6_elem *ip1,
+                          const struct hash_ipportnet6_elem *ip2)
+{
+       return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+              ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 &&
+              ip1->cidr == ip2->cidr &&
+              ip1->port == ip2->port &&
+              ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportnet6_data_isnull(const struct hash_ipportnet6_elem *elem)
+{
+       return elem->proto == 0;
+}
+
+static inline void
+hash_ipportnet6_data_copy(struct hash_ipportnet6_elem *dst,
+                         const struct hash_ipportnet6_elem *src)
+{
+       memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportnet6_data_zero_out(struct hash_ipportnet6_elem *elem)
+{
+       elem->proto = 0;
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+       ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+       ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+       ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+       ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static inline void
+hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr)
+{
+       ip6_netmask(&elem->ip2, cidr);
+       elem->cidr = cidr;
+}
+
+static bool
+hash_ipportnet6_data_list(struct sk_buff *skb,
+                         const struct hash_ipportnet6_elem *data)
+{
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_ipportnet6_data_tlist(struct sk_buff *skb,
+                          const struct hash_ipportnet6_elem *data)
+{
+       const struct hash_ipportnet6_telem *e =
+               (const struct hash_ipportnet6_telem *)data;
+
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(e->timeout)));
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF             6
+#define HOST_MASK      128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
+                    enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipportnet6_elem data =
+               { .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+       if (data.cidr == 0)
+               return -EINVAL;
+       if (adt == IPSET_TEST)
+               data.cidr = HOST_MASK;
+
+       if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+                                &data.port, &data.proto))
+               return -EINVAL;
+
+       ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+       ip6addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
+       ip6_netmask(&data.ip2, data.cidr);
+
+       return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
+                    enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipportnet6_elem data = { .cidr = HOST_MASK };
+       u32 port, port_to;
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+                    tb[IPSET_ATTR_IP_TO] ||
+                    tb[IPSET_ATTR_CIDR]))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &data.ip2);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_CIDR2])
+               data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+
+       if (!data.cidr)
+               return -IPSET_ERR_INVALID_CIDR;
+
+       ip6_netmask(&data.ip2, data.cidr);
+
+       if (tb[IPSET_ATTR_PORT])
+               data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+       else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_PROTO]) {
+               data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+               if (data.proto == 0)
+                       return -IPSET_ERR_INVALID_PROTO;
+       } else
+               return -IPSET_ERR_MISSING_PROTO;
+
+       switch (data.proto) {
+       case IPPROTO_UDP:
+       case IPPROTO_TCP:
+       case IPPROTO_ICMPV6:
+               break;
+       default:
+               data.port = 0;
+               break;
+       }
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST ||
+           !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+           !tb[IPSET_ATTR_PORT_TO]) {
+               ret = adtfn(set, &data, timeout);
+               return ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       port = ntohs(data.port);
+       port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+       if (port > port_to)
+               swap(port, port_to);
+
+       for (; port <= port_to; port++) {
+               data.port = htons(port);
+               ret = adtfn(set, &data, timeout);
+
+               if (ret && !ip_set_eexist(ret, flags))
+                       return ret;
+               else
+                       ret = 0;
+       }
+       return ret;
+}
+
+/* Create hash:ip,port,net type of sets */
+
+static int
+hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+       struct ip_set_hash *h;
+       u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+       u8 hbits;
+
+       if (!(set->family == AF_INET || set->family == AF_INET6))
+               return -IPSET_ERR_INVALID_FAMILY;
+
+       if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_HASHSIZE]) {
+               hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+               if (hashsize < IPSET_MIMINAL_HASHSIZE)
+                       hashsize = IPSET_MIMINAL_HASHSIZE;
+       }
+
+       if (tb[IPSET_ATTR_MAXELEM])
+               maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+       h = kzalloc(sizeof(*h)
+                   + sizeof(struct ip_set_hash_nets)
+                     * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+       if (!h)
+               return -ENOMEM;
+
+       h->maxelem = maxelem;
+       get_random_bytes(&h->initval, sizeof(h->initval));
+       h->timeout = IPSET_NO_TIMEOUT;
+
+       hbits = htable_bits(hashsize);
+       h->table = ip_set_alloc(
+                       sizeof(struct htable)
+                       + jhash_size(hbits) * sizeof(struct hbucket));
+       if (!h->table) {
+               kfree(h);
+               return -ENOMEM;
+       }
+       h->table->htable_bits = hbits;
+
+       set->data = h;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+               set->variant = set->family == AF_INET
+                       ? &hash_ipportnet4_tvariant
+                       : &hash_ipportnet6_tvariant;
+
+               if (set->family == AF_INET)
+                       hash_ipportnet4_gc_init(set);
+               else
+                       hash_ipportnet6_gc_init(set);
+       } else {
+               set->variant = set->family == AF_INET
+                       ? &hash_ipportnet4_variant : &hash_ipportnet6_variant;
+       }
+
+       pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+                set->name, jhash_size(h->table->htable_bits),
+                h->table->htable_bits, h->maxelem, set->data, h->table);
+
+       return 0;
+}
+
+static struct ip_set_type hash_ipportnet_type __read_mostly = {
+       .name           = "hash:ip,port,net",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
+       .dimension      = IPSET_DIM_THREE,
+       .family         = AF_UNSPEC,
+       .revision       = 0,
+       .create         = hash_ipportnet_create,
+       .create_policy  = {
+               [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
+               [IPSET_ATTR_MAXELEM]    = { .type = NLA_U32 },
+               [IPSET_ATTR_PROBES]     = { .type = NLA_U8 },
+               [IPSET_ATTR_RESIZE]     = { .type = NLA_U8  },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP_TO]      = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP2]        = { .type = NLA_NESTED },
+               [IPSET_ATTR_PORT]       = { .type = NLA_U16 },
+               [IPSET_ATTR_PORT_TO]    = { .type = NLA_U16 },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_CIDR2]      = { .type = NLA_U8 },
+               [IPSET_ATTR_PROTO]      = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+hash_ipportnet_init(void)
+{
+       return ip_set_type_register(&hash_ipportnet_type);
+}
+
+static void __exit
+hash_ipportnet_fini(void)
+{
+       ip_set_type_unregister(&hash_ipportnet_type);
+}
+
+module_init(hash_ipportnet_init);
+module_exit(hash_ipportnet_fini);
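
When IPSET_ATTR_CIDR is given instead of IPSET_ATTR_IP_TO, the IPv4 uadt functions above derive the inclusive address range from the prefix with ip_set_hostmask(). A self-contained sketch of that arithmetic, assuming host byte order and a hypothetical netmask32() helper:

#include <stdint.h>

/* Network mask for a prefix length 0..32 (0 yields an empty mask,
 * i.e. the full address range, in this sketch). */
static uint32_t netmask32(uint8_t cidr)
{
	return cidr ? ~(uint32_t)0 << (32 - cidr) : 0;
}

/* Equivalent of: ip &= ip_set_hostmask(cidr);
 *                ip_to = ip | ~ip_set_hostmask(cidr); */
static void cidr_to_range(uint32_t ip, uint8_t cidr,
			  uint32_t *from, uint32_t *to)
{
	*from = ip & netmask32(cidr);		/* first address of the net */
	*to = *from | ~netmask32(cidr);		/* last address of the net */
}

For example, cidr_to_range(0xc0a80a2a /* 192.168.10.42 */, 24, &from, &to) yields 192.168.10.0 through 192.168.10.255, which the address/port loop then walks one element at a time.
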
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
new file mode 100644 (file)
index 0000000..c4db202
--- /dev/null
@@ -0,0 +1,458 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:net type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:net type of IP sets");
+MODULE_ALIAS("ip_set_hash:net");
+
+/* Type specific function prefix */
+#define TYPE           hash_net
+
+static bool
+hash_net_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_net4_same_set     hash_net_same_set
+#define hash_net6_same_set     hash_net_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_net4_elem {
+       __be32 ip;
+       u16 padding0;
+       u8 padding1;
+       u8 cidr;
+};
+
+/* Member elements with timeout support */
+struct hash_net4_telem {
+       __be32 ip;
+       u16 padding0;
+       u8 padding1;
+       u8 cidr;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_net4_data_equal(const struct hash_net4_elem *ip1,
+                   const struct hash_net4_elem *ip2)
+{
+       return ip1->ip == ip2->ip && ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_net4_data_isnull(const struct hash_net4_elem *elem)
+{
+       return elem->cidr == 0;
+}
+
+static inline void
+hash_net4_data_copy(struct hash_net4_elem *dst,
+                   const struct hash_net4_elem *src)
+{
+       dst->ip = src->ip;
+       dst->cidr = src->cidr;
+}
+
+static inline void
+hash_net4_data_netmask(struct hash_net4_elem *elem, u8 cidr)
+{
+       elem->ip &= ip_set_netmask(cidr);
+       elem->cidr = cidr;
+}
+
+/* Zero CIDR values cannot be stored */
+static inline void
+hash_net4_data_zero_out(struct hash_net4_elem *elem)
+{
+       elem->cidr = 0;
+}
+
+static bool
+hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data)
+{
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_net4_data_tlist(struct sk_buff *skb, const struct hash_net4_elem *data)
+{
+       const struct hash_net4_telem *tdata =
+               (const struct hash_net4_telem *)data;
+
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(tdata->timeout)));
+
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#define IP_SET_HASH_WITH_NETS
+
+#define PF             4
+#define HOST_MASK      32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
+              enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_net4_elem data = {
+               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+       if (data.cidr == 0)
+               return -EINVAL;
+       if (adt == IPSET_TEST)
+               data.cidr = HOST_MASK;
+
+       ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+       data.ip &= ip_set_netmask(data.cidr);
+
+       return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
+              enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_net4_elem data = { .cidr = HOST_MASK };
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_CIDR])
+               data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+       if (!data.cidr)
+               return -IPSET_ERR_INVALID_CIDR;
+
+       data.ip &= ip_set_netmask(data.cidr);
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       ret = adtfn(set, &data, timeout);
+
+       return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+static bool
+hash_net_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+       const struct ip_set_hash *x = a->data;
+       const struct ip_set_hash *y = b->data;
+
+       /* Resizing changes htable_bits, so we ignore it */
+       return x->maxelem == y->maxelem &&
+              x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_net6_elem {
+       union nf_inet_addr ip;
+       u16 padding0;
+       u8 padding1;
+       u8 cidr;
+};
+
+struct hash_net6_telem {
+       union nf_inet_addr ip;
+       u16 padding0;
+       u8 padding1;
+       u8 cidr;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_net6_data_equal(const struct hash_net6_elem *ip1,
+                    const struct hash_net6_elem *ip2)
+{
+       return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+              ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_net6_data_isnull(const struct hash_net6_elem *elem)
+{
+       return elem->cidr == 0;
+}
+
+static inline void
+hash_net6_data_copy(struct hash_net6_elem *dst,
+                   const struct hash_net6_elem *src)
+{
+       ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+       dst->cidr = src->cidr;
+}
+
+static inline void
+hash_net6_data_zero_out(struct hash_net6_elem *elem)
+{
+       elem->cidr = 0;
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+       ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+       ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+       ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+       ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static inline void
+hash_net6_data_netmask(struct hash_net6_elem *elem, u8 cidr)
+{
+       ip6_netmask(&elem->ip, cidr);
+       elem->cidr = cidr;
+}
+
+static bool
+hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data)
+{
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_net6_data_tlist(struct sk_buff *skb, const struct hash_net6_elem *data)
+{
+       const struct hash_net6_telem *e =
+               (const struct hash_net6_telem *)data;
+
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(e->timeout)));
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF             6
+#define HOST_MASK      128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
+              enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_net6_elem data = {
+               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+       if (data.cidr == 0)
+               return -EINVAL;
+       if (adt == IPSET_TEST)
+               data.cidr = HOST_MASK;
+
+       ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+       ip6_netmask(&data.ip, data.cidr);
+
+       return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
+              enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_net6_elem data = { .cidr = HOST_MASK };
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_CIDR])
+               data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+       if (!data.cidr)
+               return -IPSET_ERR_INVALID_CIDR;
+
+       ip6_netmask(&data.ip, data.cidr);
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       ret = adtfn(set, &data, timeout);
+
+       return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+/* Create hash:net type of sets */
+
+static int
+hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+       u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+       struct ip_set_hash *h;
+       u8 hbits;
+
+       if (!(set->family == AF_INET || set->family == AF_INET6))
+               return -IPSET_ERR_INVALID_FAMILY;
+
+       if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_HASHSIZE]) {
+               hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+               if (hashsize < IPSET_MIMINAL_HASHSIZE)
+                       hashsize = IPSET_MIMINAL_HASHSIZE;
+       }
+
+       if (tb[IPSET_ATTR_MAXELEM])
+               maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+       h = kzalloc(sizeof(*h)
+                   + sizeof(struct ip_set_hash_nets)
+                     * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+       if (!h)
+               return -ENOMEM;
+
+       h->maxelem = maxelem;
+       get_random_bytes(&h->initval, sizeof(h->initval));
+       h->timeout = IPSET_NO_TIMEOUT;
+
+       hbits = htable_bits(hashsize);
+       h->table = ip_set_alloc(
+                       sizeof(struct htable)
+                       + jhash_size(hbits) * sizeof(struct hbucket));
+       if (!h->table) {
+               kfree(h);
+               return -ENOMEM;
+       }
+       h->table->htable_bits = hbits;
+
+       set->data = h;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+               set->variant = set->family == AF_INET
+                       ? &hash_net4_tvariant : &hash_net6_tvariant;
+
+               if (set->family == AF_INET)
+                       hash_net4_gc_init(set);
+               else
+                       hash_net6_gc_init(set);
+       } else {
+               set->variant = set->family == AF_INET
+                       ? &hash_net4_variant : &hash_net6_variant;
+       }
+
+       pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+                set->name, jhash_size(h->table->htable_bits),
+                h->table->htable_bits, h->maxelem, set->data, h->table);
+
+       return 0;
+}
+
+static struct ip_set_type hash_net_type __read_mostly = {
+       .name           = "hash:net",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_IP,
+       .dimension      = IPSET_DIM_ONE,
+       .family         = AF_UNSPEC,
+       .revision       = 0,
+       .create         = hash_net_create,
+       .create_policy  = {
+               [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
+               [IPSET_ATTR_MAXELEM]    = { .type = NLA_U32 },
+               [IPSET_ATTR_PROBES]     = { .type = NLA_U8 },
+               [IPSET_ATTR_RESIZE]     = { .type = NLA_U8  },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+hash_net_init(void)
+{
+       return ip_set_type_register(&hash_net_type);
+}
+
+static void __exit
+hash_net_fini(void)
+{
+       ip_set_type_unregister(&hash_net_type);
+}
+
+module_init(hash_net_init);
+module_exit(hash_net_fini);
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
new file mode 100644 (file)
index 0000000..34a1656
--- /dev/null
@@ -0,0 +1,578 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:net,port type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:net,port type of IP sets");
+MODULE_ALIAS("ip_set_hash:net,port");
+
+/* Type specific function prefix */
+#define TYPE           hash_netport
+
+static bool
+hash_netport_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_netport4_same_set hash_netport_same_set
+#define hash_netport6_same_set hash_netport_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_netport4_elem {
+       __be32 ip;
+       __be16 port;
+       u8 proto;
+       u8 cidr;
+};
+
+/* Member elements with timeout support */
+struct hash_netport4_telem {
+       __be32 ip;
+       __be16 port;
+       u8 proto;
+       u8 cidr;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_netport4_data_equal(const struct hash_netport4_elem *ip1,
+                        const struct hash_netport4_elem *ip2)
+{
+       return ip1->ip == ip2->ip &&
+              ip1->port == ip2->port &&
+              ip1->proto == ip2->proto &&
+              ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_netport4_data_isnull(const struct hash_netport4_elem *elem)
+{
+       return elem->proto == 0;
+}
+
+static inline void
+hash_netport4_data_copy(struct hash_netport4_elem *dst,
+                       const struct hash_netport4_elem *src)
+{
+       dst->ip = src->ip;
+       dst->port = src->port;
+       dst->proto = src->proto;
+       dst->cidr = src->cidr;
+}
+
+static inline void
+hash_netport4_data_netmask(struct hash_netport4_elem *elem, u8 cidr)
+{
+       elem->ip &= ip_set_netmask(cidr);
+       elem->cidr = cidr;
+}
+
+static inline void
+hash_netport4_data_zero_out(struct hash_netport4_elem *elem)
+{
+       elem->proto = 0;
+}
+
+static bool
+hash_netport4_data_list(struct sk_buff *skb,
+                       const struct hash_netport4_elem *data)
+{
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_netport4_data_tlist(struct sk_buff *skb,
+                        const struct hash_netport4_elem *data)
+{
+       const struct hash_netport4_telem *tdata =
+               (const struct hash_netport4_telem *)data;
+
+       NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(tdata->timeout)));
+
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#define IP_SET_HASH_WITH_PROTO
+#define IP_SET_HASH_WITH_NETS
+
+#define PF             4
+#define HOST_MASK      32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
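+/*
+ * The two macros above are assumed to enable optional features of the
+ * ip_set_ahash.h template: IP_SET_HASH_WITH_NETS turns on the per-cidr
+ * book-keeping (h->nets[]) consulted by the kadt functions, while
+ * IP_SET_HASH_WITH_PROTO marks element types that carry a protocol byte.
+ */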
+
+static int
+hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
+                  enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netport4_elem data = {
+               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+       if (data.cidr == 0)
+               return -EINVAL;
+       if (adt == IPSET_TEST)
+               data.cidr = HOST_MASK;
+
+       if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+                                &data.port, &data.proto))
+               return -EINVAL;
+
+       ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+       data.ip &= ip_set_netmask(data.cidr);
+
+       return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
+                  enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netport4_elem data = { .cidr = HOST_MASK };
+       u32 port, port_to;
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_CIDR])
+               data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+       if (!data.cidr)
+               return -IPSET_ERR_INVALID_CIDR;
+       data.ip &= ip_set_netmask(data.cidr);
+
+       if (tb[IPSET_ATTR_PORT])
+               data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+       else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_PROTO]) {
+               data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+               if (data.proto == 0)
+                       return -IPSET_ERR_INVALID_PROTO;
+       } else
+               return -IPSET_ERR_MISSING_PROTO;
+
+       switch (data.proto) {
+       case IPPROTO_UDP:
+       case IPPROTO_TCP:
+       case IPPROTO_ICMP:
+               break;
+       default:
+               data.port = 0;
+               break;
+       }
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST ||
+           !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+           !tb[IPSET_ATTR_PORT_TO]) {
+               ret = adtfn(set, &data, timeout);
+               return ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       port = ntohs(data.port);
+       port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+       if (port > port_to)
+               swap(port, port_to);
+
+       for (; port <= port_to; port++) {
+               data.port = htons(port);
+               ret = adtfn(set, &data, timeout);
+
+               if (ret && !ip_set_eexist(ret, flags))
+                       return ret;
+               else
+                       ret = 0;
+       }
+       return ret;
+}
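+
+/*
+ * Note on the range handling above: for TCP/UDP with IPSET_ATTR_PORT_TO,
+ * every port in the range is added or deleted as its own (net,port) hash
+ * entry, so wide ranges translate into correspondingly many elements.
+ */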
+
+static bool
+hash_netport_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+       const struct ip_set_hash *x = a->data;
+       const struct ip_set_hash *y = b->data;
+
+       /* Resizing changes htable_bits, so we ignore it */
+       return x->maxelem == y->maxelem &&
+              x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_netport6_elem {
+       union nf_inet_addr ip;
+       __be16 port;
+       u8 proto;
+       u8 cidr;
+};
+
+struct hash_netport6_telem {
+       union nf_inet_addr ip;
+       __be16 port;
+       u8 proto;
+       u8 cidr;
+       unsigned long timeout;
+};
+
+static inline bool
+hash_netport6_data_equal(const struct hash_netport6_elem *ip1,
+                        const struct hash_netport6_elem *ip2)
+{
+       return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+              ip1->port == ip2->port &&
+              ip1->proto == ip2->proto &&
+              ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_netport6_data_isnull(const struct hash_netport6_elem *elem)
+{
+       return elem->proto == 0;
+}
+
+static inline void
+hash_netport6_data_copy(struct hash_netport6_elem *dst,
+                       const struct hash_netport6_elem *src)
+{
+       memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_netport6_data_zero_out(struct hash_netport6_elem *elem)
+{
+       elem->proto = 0;
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+       ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+       ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+       ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+       ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static inline void
+hash_netport6_data_netmask(struct hash_netport6_elem *elem, u8 cidr)
+{
+       ip6_netmask(&elem->ip, cidr);
+       elem->cidr = cidr;
+}
+
+static bool
+hash_netport6_data_list(struct sk_buff *skb,
+                       const struct hash_netport6_elem *data)
+{
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static bool
+hash_netport6_data_tlist(struct sk_buff *skb,
+                        const struct hash_netport6_elem *data)
+{
+       const struct hash_netport6_telem *e =
+               (const struct hash_netport6_telem *)data;
+
+       NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+       NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+       NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+       NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                     htonl(ip_set_timeout_get(e->timeout)));
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF             6
+#define HOST_MASK      128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
+                  enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netport6_elem data = {
+               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+       if (data.cidr == 0)
+               return -EINVAL;
+       if (adt == IPSET_TEST)
+               data.cidr = HOST_MASK;
+
+       if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+                                &data.port, &data.proto))
+               return -EINVAL;
+
+       ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+       ip6_netmask(&data.ip, data.cidr);
+
+       return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
+                  enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       const struct ip_set_hash *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netport6_elem data = { .cidr = HOST_MASK };
+       u32 port, port_to;
+       u32 timeout = h->timeout;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_CIDR])
+               data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+       if (!data.cidr)
+               return -IPSET_ERR_INVALID_CIDR;
+       ip6_netmask(&data.ip, data.cidr);
+
+       if (tb[IPSET_ATTR_PORT])
+               data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+       else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_PROTO]) {
+               data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+               if (data.proto == 0)
+                       return -IPSET_ERR_INVALID_PROTO;
+       } else
+               return -IPSET_ERR_MISSING_PROTO;
+
+       switch (data.proto) {
+       case IPPROTO_UDP:
+       case IPPROTO_TCP:
+       case IPPROTO_ICMPV6:
+               break;
+       default:
+               data.port = 0;
+               break;
+       }
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout(h->timeout))
+                       return -IPSET_ERR_TIMEOUT;
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       if (adt == IPSET_TEST ||
+           !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+           !tb[IPSET_ATTR_PORT_TO]) {
+               ret = adtfn(set, &data, timeout);
+               return ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       port = ntohs(data.port);
+       port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+       if (port > port_to)
+               swap(port, port_to);
+
+       for (; port <= port_to; port++) {
+               data.port = htons(port);
+               ret = adtfn(set, &data, timeout);
+
+               if (ret && !ip_set_eexist(ret, flags))
+                       return ret;
+               else
+                       ret = 0;
+       }
+       return ret;
+}
+
+/* Create hash:net,port type of sets */
+
+static int
+hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+       struct ip_set_hash *h;
+       u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+       u8 hbits;
+
+       if (!(set->family == AF_INET || set->family == AF_INET6))
+               return -IPSET_ERR_INVALID_FAMILY;
+
+       if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_HASHSIZE]) {
+               hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+               if (hashsize < IPSET_MIMINAL_HASHSIZE)
+                       hashsize = IPSET_MIMINAL_HASHSIZE;
+       }
+
+       if (tb[IPSET_ATTR_MAXELEM])
+               maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+       h = kzalloc(sizeof(*h)
+                   + sizeof(struct ip_set_hash_nets)
+                     * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+       if (!h)
+               return -ENOMEM;
+
+       h->maxelem = maxelem;
+       get_random_bytes(&h->initval, sizeof(h->initval));
+       h->timeout = IPSET_NO_TIMEOUT;
+
+       hbits = htable_bits(hashsize);
+       h->table = ip_set_alloc(
+                       sizeof(struct htable)
+                       + jhash_size(hbits) * sizeof(struct hbucket));
+       if (!h->table) {
+               kfree(h);
+               return -ENOMEM;
+       }
+       h->table->htable_bits = hbits;
+
+       set->data = h;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+               set->variant = set->family == AF_INET
+                       ? &hash_netport4_tvariant : &hash_netport6_tvariant;
+
+               if (set->family == AF_INET)
+                       hash_netport4_gc_init(set);
+               else
+                       hash_netport6_gc_init(set);
+       } else {
+               set->variant = set->family == AF_INET
+                       ? &hash_netport4_variant : &hash_netport6_variant;
+       }
+
+       pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+                set->name, jhash_size(h->table->htable_bits),
+                h->table->htable_bits, h->maxelem, set->data, h->table);
+
+       return 0;
+}
+
+static struct ip_set_type hash_netport_type __read_mostly = {
+       .name           = "hash:net,port",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT,
+       .dimension      = IPSET_DIM_TWO,
+       .family         = AF_UNSPEC,
+       .revision       = 0,
+       .create         = hash_netport_create,
+       .create_policy  = {
+               [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
+               [IPSET_ATTR_MAXELEM]    = { .type = NLA_U32 },
+               [IPSET_ATTR_PROBES]     = { .type = NLA_U8 },
+               [IPSET_ATTR_RESIZE]     = { .type = NLA_U8  },
+               [IPSET_ATTR_PROTO]      = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_PORT]       = { .type = NLA_U16 },
+               [IPSET_ATTR_PORT_TO]    = { .type = NLA_U16 },
+               [IPSET_ATTR_PROTO]      = { .type = NLA_U8 },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+hash_netport_init(void)
+{
+       return ip_set_type_register(&hash_netport_type);
+}
+
+static void __exit
+hash_netport_fini(void)
+{
+       ip_set_type_unregister(&hash_netport_type);
+}
+
+module_init(hash_netport_init);
+module_exit(hash_netport_fini);
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
new file mode 100644 (file)
index 0000000..a47c329
--- /dev/null
@@ -0,0 +1,584 @@
+/* Copyright (C) 2008-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the list:set type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_list.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("list:set type of IP sets");
+MODULE_ALIAS("ip_set_list:set");
+
+/* Member elements without and with timeout */
+struct set_elem {
+       ip_set_id_t id;
+};
+
+struct set_telem {
+       ip_set_id_t id;
+       unsigned long timeout;
+};
+
+/* Type structure */
+struct list_set {
+       size_t dsize;           /* element size */
+       u32 size;               /* size of set list array */
+       u32 timeout;            /* timeout value */
+       struct timer_list gc;   /* garbage collection */
+       struct set_elem members[0]; /* the set members */
+};
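+
+/*
+ * The members[] area is a single flat allocation of size * dsize bytes
+ * appended to the struct: dsize is sizeof(struct set_elem) for plain sets
+ * and sizeof(struct set_telem) when timeouts are enabled, and
+ * list_set_elem() below resolves slot i by byte offset into it.
+ */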
+
+static inline struct set_elem *
+list_set_elem(const struct list_set *map, u32 id)
+{
+       return (struct set_elem *)((char *)map->members + id * map->dsize);
+}
+
+static inline bool
+list_set_timeout(const struct list_set *map, u32 id)
+{
+       const struct set_telem *elem =
+               (const struct set_telem *) list_set_elem(map, id);
+
+       return ip_set_timeout_test(elem->timeout);
+}
+
+static inline bool
+list_set_expired(const struct list_set *map, u32 id)
+{
+       const struct set_telem *elem =
+               (const struct set_telem *) list_set_elem(map, id);
+
+       return ip_set_timeout_expired(elem->timeout);
+}
+
+static inline int
+list_set_exist(const struct set_telem *elem)
+{
+       return elem->id != IPSET_INVALID_ID &&
+              !ip_set_timeout_expired(elem->timeout);
+}
+
+/* Set list without and with timeout */
+
+static int
+list_set_kadt(struct ip_set *set, const struct sk_buff *skb,
+             enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+       struct list_set *map = set->data;
+       struct set_elem *elem;
+       u32 i;
+       int ret;
+
+       for (i = 0; i < map->size; i++) {
+               elem = list_set_elem(map, i);
+               if (elem->id == IPSET_INVALID_ID)
+                       return 0;
+               if (with_timeout(map->timeout) && list_set_expired(map, i))
+                       continue;
+               switch (adt) {
+               case IPSET_TEST:
+                       ret = ip_set_test(elem->id, skb, pf, dim, flags);
+                       if (ret > 0)
+                               return ret;
+                       break;
+               case IPSET_ADD:
+                       ret = ip_set_add(elem->id, skb, pf, dim, flags);
+                       if (ret == 0)
+                               return ret;
+                       break;
+               case IPSET_DEL:
+                       ret = ip_set_del(elem->id, skb, pf, dim, flags);
+                       if (ret == 0)
+                               return ret;
+                       break;
+               default:
+                       break;
+               }
+       }
+       return -EINVAL;
+}
+
+static bool
+next_id_eq(const struct list_set *map, u32 i, ip_set_id_t id)
+{
+       const struct set_elem *elem;
+
+       if (i + 1 < map->size) {
+               elem = list_set_elem(map, i + 1);
+               return !!(elem->id == id &&
+                         !(with_timeout(map->timeout) &&
+                           list_set_expired(map, i + 1)));
+       }
+
+       return 0;
+}
+
+static void
+list_elem_add(struct list_set *map, u32 i, ip_set_id_t id)
+{
+       struct set_elem *e;
+
+       for (; i < map->size; i++) {
+               e = list_set_elem(map, i);
+               swap(e->id, id);
+               if (e->id == IPSET_INVALID_ID)
+                       break;
+       }
+}
+
+static void
+list_elem_tadd(struct list_set *map, u32 i, ip_set_id_t id,
+              unsigned long timeout)
+{
+       struct set_telem *e;
+
+       for (; i < map->size; i++) {
+               e = (struct set_telem *)list_set_elem(map, i);
+               swap(e->id, id);
+               if (e->id == IPSET_INVALID_ID)
+                       break;
+               swap(e->timeout, timeout);
+       }
+}
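+
+/*
+ * Both helpers above implement insert-with-shift on the fixed size array:
+ * the new id is swapped into slot i and the displaced entries ripple
+ * toward the first free (IPSET_INVALID_ID) slot, preserving list order.
+ */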
+
+static int
+list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
+            unsigned long timeout)
+{
+       const struct set_elem *e = list_set_elem(map, i);
+
+       if (i == map->size - 1 && e->id != IPSET_INVALID_ID)
+               /* Last element replaced: e.g. add new,before,last */
+               ip_set_put_byindex(e->id);
+       if (with_timeout(map->timeout))
+               list_elem_tadd(map, i, id, timeout);
+       else
+               list_elem_add(map, i, id);
+
+       return 0;
+}
+
+static int
+list_set_del(struct list_set *map, ip_set_id_t id, u32 i)
+{
+       struct set_elem *a = list_set_elem(map, i), *b;
+
+       ip_set_put_byindex(id);
+
+       for (; i < map->size - 1; i++) {
+               b = list_set_elem(map, i + 1);
+               a->id = b->id;
+               if (with_timeout(map->timeout))
+                       ((struct set_telem *)a)->timeout =
+                               ((struct set_telem *)b)->timeout;
+               a = b;
+               if (a->id == IPSET_INVALID_ID)
+                       break;
+       }
+       /* Last element */
+       a->id = IPSET_INVALID_ID;
+       return 0;
+}
+
+static int
+list_set_uadt(struct ip_set *set, struct nlattr *tb[],
+             enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+       struct list_set *map = set->data;
+       bool with_timeout = with_timeout(map->timeout);
+       int before = 0;
+       u32 timeout = map->timeout;
+       ip_set_id_t id, refid = IPSET_INVALID_ID;
+       const struct set_elem *elem;
+       struct ip_set *s;
+       u32 i;
+       int ret = 0;
+
+       if (unlikely(!tb[IPSET_ATTR_NAME] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       id = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAME]), &s);
+       if (id == IPSET_INVALID_ID)
+               return -IPSET_ERR_NAME;
+       /* "Loop detection" */
+       if (s->type->features & IPSET_TYPE_NAME) {
+               ret = -IPSET_ERR_LOOP;
+               goto finish;
+       }
+
+       if (tb[IPSET_ATTR_CADT_FLAGS]) {
+               u32 f = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+               before = f & IPSET_FLAG_BEFORE;
+       }
+
+       if (before && !tb[IPSET_ATTR_NAMEREF]) {
+               ret = -IPSET_ERR_BEFORE;
+               goto finish;
+       }
+
+       if (tb[IPSET_ATTR_NAMEREF]) {
+               refid = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAMEREF]),
+                                         &s);
+               if (refid == IPSET_INVALID_ID) {
+                       ret = -IPSET_ERR_NAMEREF;
+                       goto finish;
+               }
+               if (!before)
+                       before = -1;
+       }
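+       /*
+        * "before" now encodes the requested position: 0 means no
+        * reference set was given, > 0 means the element goes before
+        * refid (BEFORE flag set), and -1 means refid was given without
+        * the flag, i.e. the element goes after refid.
+        */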
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!with_timeout) {
+                       ret = -IPSET_ERR_TIMEOUT;
+                       goto finish;
+               }
+               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+       }
+
+       switch (adt) {
+       case IPSET_TEST:
+               for (i = 0; i < map->size && !ret; i++) {
+                       elem = list_set_elem(map, i);
+                       if (elem->id == IPSET_INVALID_ID ||
+                           (before != 0 && i + 1 >= map->size))
+                               break;
+                       else if (with_timeout && list_set_expired(map, i))
+                               continue;
+                       else if (before > 0 && elem->id == id)
+                               ret = next_id_eq(map, i, refid);
+                       else if (before < 0 && elem->id == refid)
+                               ret = next_id_eq(map, i, id);
+                       else if (before == 0 && elem->id == id)
+                               ret = 1;
+               }
+               break;
+       case IPSET_ADD:
+               for (i = 0; i < map->size && !ret; i++) {
+                       elem = list_set_elem(map, i);
+                       if (elem->id == id &&
+                           !(with_timeout && list_set_expired(map, i)))
+                               ret = -IPSET_ERR_EXIST;
+               }
+               if (ret == -IPSET_ERR_EXIST)
+                       break;
+               ret = -IPSET_ERR_LIST_FULL;
+               for (i = 0; i < map->size && ret == -IPSET_ERR_LIST_FULL; i++) {
+                       elem = list_set_elem(map, i);
+                       if (elem->id == IPSET_INVALID_ID)
+                               ret = before != 0 ? -IPSET_ERR_REF_EXIST
+                                       : list_set_add(map, i, id, timeout);
+                       else if (elem->id != refid)
+                               continue;
+                       else if (with_timeout && list_set_expired(map, i))
+                               ret = -IPSET_ERR_REF_EXIST;
+                       else if (before)
+                               ret = list_set_add(map, i, id, timeout);
+                       else if (i + 1 < map->size)
+                               ret = list_set_add(map, i + 1, id, timeout);
+               }
+               break;
+       case IPSET_DEL:
+               ret = -IPSET_ERR_EXIST;
+               for (i = 0; i < map->size && ret == -IPSET_ERR_EXIST; i++) {
+                       elem = list_set_elem(map, i);
+                       if (elem->id == IPSET_INVALID_ID) {
+                               ret = before != 0 ? -IPSET_ERR_REF_EXIST
+                                                 : -IPSET_ERR_EXIST;
+                               break;
+                       } else if (with_timeout && list_set_expired(map, i))
+                               continue;
+                       else if (elem->id == id &&
+                                (before == 0 ||
+                                 (before > 0 &&
+                                  next_id_eq(map, i, refid))))
+                               ret = list_set_del(map, id, i);
+                       else if (before < 0 &&
+                                elem->id == refid &&
+                                next_id_eq(map, i, id))
+                               ret = list_set_del(map, id, i + 1);
+               }
+               break;
+       default:
+               break;
+       }
+
+finish:
+       if (refid != IPSET_INVALID_ID)
+               ip_set_put_byindex(refid);
+       if (adt != IPSET_ADD || ret)
+               ip_set_put_byindex(id);
+
+       return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+static void
+list_set_flush(struct ip_set *set)
+{
+       struct list_set *map = set->data;
+       struct set_elem *elem;
+       u32 i;
+
+       for (i = 0; i < map->size; i++) {
+               elem = list_set_elem(map, i);
+               if (elem->id != IPSET_INVALID_ID) {
+                       ip_set_put_byindex(elem->id);
+                       elem->id = IPSET_INVALID_ID;
+               }
+       }
+}
+
+static void
+list_set_destroy(struct ip_set *set)
+{
+       struct list_set *map = set->data;
+
+       if (with_timeout(map->timeout))
+               del_timer_sync(&map->gc);
+       list_set_flush(set);
+       kfree(map);
+
+       set->data = NULL;
+}
+
+static int
+list_set_head(struct ip_set *set, struct sk_buff *skb)
+{
+       const struct list_set *map = set->data;
+       struct nlattr *nested;
+
+       nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+       if (!nested)
+               goto nla_put_failure;
+       NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size));
+       if (with_timeout(map->timeout))
+               NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+       NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+                     htonl(atomic_read(&set->ref) - 1));
+       NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+                     htonl(sizeof(*map) + map->size * map->dsize));
+       ipset_nest_end(skb, nested);
+
+       return 0;
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static int
+list_set_list(const struct ip_set *set,
+             struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct list_set *map = set->data;
+       struct nlattr *atd, *nested;
+       u32 i, first = cb->args[2];
+       const struct set_elem *e;
+
+       atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+       if (!atd)
+               return -EMSGSIZE;
+       for (; cb->args[2] < map->size; cb->args[2]++) {
+               i = cb->args[2];
+               e = list_set_elem(map, i);
+               if (e->id == IPSET_INVALID_ID)
+                       goto finish;
+               if (with_timeout(map->timeout) && list_set_expired(map, i))
+                       continue;
+               nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+               if (!nested) {
+                       if (i == first) {
+                               nla_nest_cancel(skb, atd);
+                               return -EMSGSIZE;
+                       } else
+                               goto nla_put_failure;
+               }
+               NLA_PUT_STRING(skb, IPSET_ATTR_NAME,
+                              ip_set_name_byindex(e->id));
+               if (with_timeout(map->timeout)) {
+                       const struct set_telem *te =
+                               (const struct set_telem *) e;
+                       NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+                                     htonl(ip_set_timeout_get(te->timeout)));
+               }
+               ipset_nest_end(skb, nested);
+       }
+finish:
+       ipset_nest_end(skb, atd);
+       /* Set listing finished */
+       cb->args[2] = 0;
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, nested);
+       ipset_nest_end(skb, atd);
+       if (unlikely(i == first)) {
+               cb->args[2] = 0;
+               return -EMSGSIZE;
+       }
+       return 0;
+}
+
+static bool
+list_set_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+       const struct list_set *x = a->data;
+       const struct list_set *y = b->data;
+
+       return x->size == y->size &&
+              x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant list_set = {
+       .kadt   = list_set_kadt,
+       .uadt   = list_set_uadt,
+       .destroy = list_set_destroy,
+       .flush  = list_set_flush,
+       .head   = list_set_head,
+       .list   = list_set_list,
+       .same_set = list_set_same_set,
+};
+
+static void
+list_set_gc(unsigned long ul_set)
+{
+       struct ip_set *set = (struct ip_set *) ul_set;
+       struct list_set *map = set->data;
+       struct set_telem *e;
+       u32 i;
+
+       /* We run parallel with other readers (test element)
+        * but adding/deleting new entries is locked out */
+       read_lock_bh(&set->lock);
+       for (i = map->size; i-- > 0; ) {
+               e = (struct set_telem *) list_set_elem(map, i);
+               if (e->id != IPSET_INVALID_ID &&
+                   list_set_expired(map, i))
+                       list_set_del(map, e->id, i);
+       }
+       read_unlock_bh(&set->lock);
+
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       add_timer(&map->gc);
+}
+
+static void
+list_set_gc_init(struct ip_set *set)
+{
+       struct list_set *map = set->data;
+
+       init_timer(&map->gc);
+       map->gc.data = (unsigned long) set;
+       map->gc.function = list_set_gc;
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       add_timer(&map->gc);
+}
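+
+/*
+ * The timer above re-arms itself from list_set_gc(), so expired entries
+ * are pruned roughly every IPSET_GC_PERIOD(timeout) seconds under the
+ * set's read lock, concurrently with lookups but not with add/del.
+ */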
+
+/* Create list:set type of sets */
+
+static bool
+init_list_set(struct ip_set *set, u32 size, size_t dsize,
+             unsigned long timeout)
+{
+       struct list_set *map;
+       struct set_elem *e;
+       u32 i;
+
+       map = kzalloc(sizeof(*map) + size * dsize, GFP_KERNEL);
+       if (!map)
+               return false;
+
+       map->size = size;
+       map->dsize = dsize;
+       map->timeout = timeout;
+       set->data = map;
+
+       for (i = 0; i < size; i++) {
+               e = list_set_elem(map, i);
+               e->id = IPSET_INVALID_ID;
+       }
+
+       return true;
+}
+
+static int
+list_set_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+       u32 size = IP_SET_LIST_DEFAULT_SIZE;
+
+       if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_SIZE) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_SIZE])
+               size = ip_set_get_h32(tb[IPSET_ATTR_SIZE]);
+       if (size < IP_SET_LIST_MIN_SIZE)
+               size = IP_SET_LIST_MIN_SIZE;
+
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               if (!init_list_set(set, size, sizeof(struct set_telem),
+                                  ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT])))
+                       return -ENOMEM;
+
+               list_set_gc_init(set);
+       } else {
+               if (!init_list_set(set, size, sizeof(struct set_elem),
+                                  IPSET_NO_TIMEOUT))
+                       return -ENOMEM;
+       }
+       set->variant = &list_set;
+       return 0;
+}
+
+static struct ip_set_type list_set_type __read_mostly = {
+       .name           = "list:set",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_NAME | IPSET_DUMP_LAST,
+       .dimension      = IPSET_DIM_ONE,
+       .family         = AF_UNSPEC,
+       .revision       = 0,
+       .create         = list_set_create,
+       .create_policy  = {
+               [IPSET_ATTR_SIZE]       = { .type = NLA_U32 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_NAME]       = { .type = NLA_STRING,
+                                           .len = IPSET_MAXNAMELEN },
+               [IPSET_ATTR_NAMEREF]    = { .type = NLA_STRING,
+                                           .len = IPSET_MAXNAMELEN },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+               [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+list_set_init(void)
+{
+       return ip_set_type_register(&list_set_type);
+}
+
+static void __exit
+list_set_fini(void)
+{
+       ip_set_type_unregister(&list_set_type);
+}
+
+module_init(list_set_init);
+module_exit(list_set_fini);
diff --git a/net/netfilter/ipset/pfxlen.c b/net/netfilter/ipset/pfxlen.c
new file mode 100644 (file)
index 0000000..23f8c81
--- /dev/null
@@ -0,0 +1,291 @@
+#include <linux/netfilter/ipset/pfxlen.h>
+
+/*
+ * Prefixlen maps for fast conversions, by Jan Engelhardt.
+ */
+
+#define E(a, b, c, d) \
+       {.ip6 = { \
+               __constant_htonl(a), __constant_htonl(b), \
+               __constant_htonl(c), __constant_htonl(d), \
+       } }
+
+/*
+ * This table works for both IPv4 and IPv6;
+ * just use ip_set_netmask_map[prefixlength].ip.
+ */
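+/*
+ * The hash types are assumed to reach this table through thin wrappers in
+ * pfxlen.h, along the lines of:
+ *
+ *     #define ip_set_netmask(pfxlen)  ip_set_netmask_map[pfxlen].ip
+ *     #define ip_set_netmask6(pfxlen) ip_set_netmask_map[pfxlen].ip6
+ */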
+const union nf_inet_addr ip_set_netmask_map[] = {
+       E(0x00000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0x80000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xC0000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xE0000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xF0000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xF8000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFC000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFE000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFF000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFF800000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFC00000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFE00000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFF00000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFF80000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFC0000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFE0000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFF8000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFC000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFE000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFF000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFF800, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFC00, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFE00, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFF00, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFF80, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFC0, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFE0, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFF0, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFF8, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFC, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFE, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0x80000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xC0000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xE0000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xF0000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xF8000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFC000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFE000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFF000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFF800000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFC00000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFE00000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFF00000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFF80000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFC0000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFE0000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFF0000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFF8000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFC000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFE000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFF000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFF800, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFC00, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFE00, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFF00, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFF80, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFC0, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFE0, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFF0, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFF8, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFC, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFE, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0x80000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x80000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF),
+};
+EXPORT_SYMBOL_GPL(ip_set_netmask_map);
+
+#undef  E
+#define E(a, b, c, d)                                          \
+       {.ip6 = { (__force __be32) a, (__force __be32) b,       \
+                 (__force __be32) c, (__force __be32) d,       \
+       } }
+
+/*
+ * This table works for both IPv4 and IPv6;
+ * just use ip_set_hostmask_map[prefixlength].ip.
+ */
+const union nf_inet_addr ip_set_hostmask_map[] = {
+       E(0x00000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0x80000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xC0000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xE0000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xF0000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xF8000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFC000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFE000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFF000000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFF800000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFC00000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFE00000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFF00000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFF80000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFC0000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFE0000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFF8000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFC000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFE000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFF000, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFF800, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFC00, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFE00, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFF00, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFF80, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFC0, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFE0, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFF0, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFF8, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFC, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFE, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0x80000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xC0000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xE0000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xF0000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xF8000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFC000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFE000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFF000000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFF800000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFC00000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFE00000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFF00000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFF80000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFC0000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFE0000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFF0000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFF8000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFC000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFE000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFF000, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFF800, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFC00, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFE00, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFF00, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFF80, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFC0, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFE0, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFF0, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFF8, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFC, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFE, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0x80000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x80000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE),
+       E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF),
+};
+EXPORT_SYMBOL_GPL(ip_set_hostmask_map);
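
The rows above are the tail of ip_set_hostmask_map: apparently one entry per CIDR prefix length, each holding the mask as four 32-bit words, running from /0 up to /128 (the visible rows start at /49). For reference, a small userspace sketch that computes the same four words from a prefix length; prefix_to_mask() and the main() harness are illustrative names, not kernel code, and byte order handling (whatever the E() macro does) is left out.

        #include <stdio.h>
        #include <stdint.h>

        /* Build the four 32-bit words of a /cidr mask (0..128), most
         * significant word first -- the values the table above lists
         * explicitly for every prefix length. */
        static void prefix_to_mask(unsigned int cidr, uint32_t mask[4])
        {
                int i;

                for (i = 0; i < 4; i++) {
                        if (cidr >= 32) {
                                mask[i] = 0xFFFFFFFFu;
                                cidr -= 32;
                        } else if (cidr > 0) {
                                mask[i] = ~0u << (32 - cidr);
                                cidr = 0;
                        } else {
                                mask[i] = 0;
                        }
                }
        }

        int main(void)
        {
                uint32_t m[4];

                prefix_to_mask(49, m); /* expect FFFFFFFF FFFF8000 00000000 00000000 */
                printf("%08X %08X %08X %08X\n",
                       (unsigned)m[0], (unsigned)m[1], (unsigned)m[2], (unsigned)m[3]);
                return 0;
        }
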
index a475edee0912e8fef842aa1632ecc2d19599f59d..5c48ffb60c2839a5a50560457f8ffc617819399e 100644
@@ -43,11 +43,6 @@ EXPORT_SYMBOL(register_ip_vs_app);
 EXPORT_SYMBOL(unregister_ip_vs_app);
 EXPORT_SYMBOL(register_ip_vs_app_inc);
 
-/* ipvs application list head */
-static LIST_HEAD(ip_vs_app_list);
-static DEFINE_MUTEX(__ip_vs_app_mutex);
-
-
 /*
  *     Get an ip_vs_app object
  */
@@ -67,7 +62,8 @@ static inline void ip_vs_app_put(struct ip_vs_app *app)
  *     Allocate/initialize app incarnation and register it in proto apps.
  */
 static int
-ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
+ip_vs_app_inc_new(struct net *net, struct ip_vs_app *app, __u16 proto,
+                 __u16 port)
 {
        struct ip_vs_protocol *pp;
        struct ip_vs_app *inc;
@@ -98,7 +94,7 @@ ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
                }
        }
 
-       ret = pp->register_app(inc);
+       ret = pp->register_app(net, inc);
        if (ret)
                goto out;
 
@@ -119,7 +115,7 @@ ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
  *     Release app incarnation
  */
 static void
-ip_vs_app_inc_release(struct ip_vs_app *inc)
+ip_vs_app_inc_release(struct net *net, struct ip_vs_app *inc)
 {
        struct ip_vs_protocol *pp;
 
@@ -127,7 +123,7 @@ ip_vs_app_inc_release(struct ip_vs_app *inc)
                return;
 
        if (pp->unregister_app)
-               pp->unregister_app(inc);
+               pp->unregister_app(net, inc);
 
        IP_VS_DBG(9, "%s App %s:%u unregistered\n",
                  pp->name, inc->name, ntohs(inc->port));
@@ -168,15 +164,17 @@ void ip_vs_app_inc_put(struct ip_vs_app *inc)
  *     Register an application incarnation in protocol applications
  */
 int
-register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port)
+register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
+                      __u16 port)
 {
+       struct netns_ipvs *ipvs = net_ipvs(net);
        int result;
 
-       mutex_lock(&__ip_vs_app_mutex);
+       mutex_lock(&ipvs->app_mutex);
 
-       result = ip_vs_app_inc_new(app, proto, port);
+       result = ip_vs_app_inc_new(net, app, proto, port);
 
-       mutex_unlock(&__ip_vs_app_mutex);
+       mutex_unlock(&ipvs->app_mutex);
 
        return result;
 }
@@ -185,16 +183,17 @@ register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port)
 /*
  *     ip_vs_app registration routine
  */
-int register_ip_vs_app(struct ip_vs_app *app)
+int register_ip_vs_app(struct net *net, struct ip_vs_app *app)
 {
+       struct netns_ipvs *ipvs = net_ipvs(net);
        /* increase the module use count */
        ip_vs_use_count_inc();
 
-       mutex_lock(&__ip_vs_app_mutex);
+       mutex_lock(&ipvs->app_mutex);
 
-       list_add(&app->a_list, &ip_vs_app_list);
+       list_add(&app->a_list, &ipvs->app_list);
 
-       mutex_unlock(&__ip_vs_app_mutex);
+       mutex_unlock(&ipvs->app_mutex);
 
        return 0;
 }
@@ -204,19 +203,20 @@ int register_ip_vs_app(struct ip_vs_app *app)
  *     ip_vs_app unregistration routine
  *     We are sure there are no app incarnations attached to services
  */
-void unregister_ip_vs_app(struct ip_vs_app *app)
+void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app)
 {
+       struct netns_ipvs *ipvs = net_ipvs(net);
        struct ip_vs_app *inc, *nxt;
 
-       mutex_lock(&__ip_vs_app_mutex);
+       mutex_lock(&ipvs->app_mutex);
 
        list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) {
-               ip_vs_app_inc_release(inc);
+               ip_vs_app_inc_release(net, inc);
        }
 
        list_del(&app->a_list);
 
-       mutex_unlock(&__ip_vs_app_mutex);
+       mutex_unlock(&ipvs->app_mutex);
 
        /* decrease the module use count */
        ip_vs_use_count_dec();
@@ -226,7 +226,8 @@ void unregister_ip_vs_app(struct ip_vs_app *app)
 /*
  *     Bind ip_vs_conn to its ip_vs_app (called by cp constructor)
  */
-int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp)
+int ip_vs_bind_app(struct ip_vs_conn *cp,
+                  struct ip_vs_protocol *pp)
 {
        return pp->app_conn_bind(cp);
 }
@@ -481,11 +482,11 @@ int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb)
  *     /proc/net/ip_vs_app entry function
  */
 
-static struct ip_vs_app *ip_vs_app_idx(loff_t pos)
+static struct ip_vs_app *ip_vs_app_idx(struct netns_ipvs *ipvs, loff_t pos)
 {
        struct ip_vs_app *app, *inc;
 
-       list_for_each_entry(app, &ip_vs_app_list, a_list) {
+       list_for_each_entry(app, &ipvs->app_list, a_list) {
                list_for_each_entry(inc, &app->incs_list, a_list) {
                        if (pos-- == 0)
                                return inc;
@@ -497,19 +498,24 @@ static struct ip_vs_app *ip_vs_app_idx(loff_t pos)
 
 static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
 {
-       mutex_lock(&__ip_vs_app_mutex);
+       struct net *net = seq_file_net(seq);
+       struct netns_ipvs *ipvs = net_ipvs(net);
 
-       return *pos ? ip_vs_app_idx(*pos - 1) : SEQ_START_TOKEN;
+       mutex_lock(&ipvs->app_mutex);
+
+       return *pos ? ip_vs_app_idx(ipvs, *pos - 1) : SEQ_START_TOKEN;
 }
 
 static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        struct ip_vs_app *inc, *app;
        struct list_head *e;
+       struct net *net = seq_file_net(seq);
+       struct netns_ipvs *ipvs = net_ipvs(net);
 
        ++*pos;
        if (v == SEQ_START_TOKEN)
-               return ip_vs_app_idx(0);
+               return ip_vs_app_idx(ipvs, 0);
 
        inc = v;
        app = inc->app;
@@ -518,7 +524,7 @@ static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                return list_entry(e, struct ip_vs_app, a_list);
 
        /* go on to next application */
-       for (e = app->a_list.next; e != &ip_vs_app_list; e = e->next) {
+       for (e = app->a_list.next; e != &ipvs->app_list; e = e->next) {
                app = list_entry(e, struct ip_vs_app, a_list);
                list_for_each_entry(inc, &app->incs_list, a_list) {
                        return inc;
@@ -529,7 +535,9 @@ static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 
 static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
 {
-       mutex_unlock(&__ip_vs_app_mutex);
+       struct netns_ipvs *ipvs = net_ipvs(seq_file_net(seq));
+
+       mutex_unlock(&ipvs->app_mutex);
 }
 
 static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
@@ -557,7 +565,8 @@ static const struct seq_operations ip_vs_app_seq_ops = {
 
 static int ip_vs_app_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &ip_vs_app_seq_ops);
+       return seq_open_net(inode, file, &ip_vs_app_seq_ops,
+                           sizeof(struct seq_net_private));
 }
 
 static const struct file_operations ip_vs_app_fops = {
@@ -569,15 +578,36 @@ static const struct file_operations ip_vs_app_fops = {
 };
 #endif
 
-int __init ip_vs_app_init(void)
+static int __net_init __ip_vs_app_init(struct net *net)
 {
-       /* we will replace it with proc_net_ipvs_create() soon */
-       proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops);
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       INIT_LIST_HEAD(&ipvs->app_list);
+       __mutex_init(&ipvs->app_mutex, "ipvs->app_mutex", &ipvs->app_key);
+       proc_net_fops_create(net, "ip_vs_app", 0, &ip_vs_app_fops);
        return 0;
 }
 
+static void __net_exit __ip_vs_app_cleanup(struct net *net)
+{
+       proc_net_remove(net, "ip_vs_app");
+}
+
+static struct pernet_operations ip_vs_app_ops = {
+       .init = __ip_vs_app_init,
+       .exit = __ip_vs_app_cleanup,
+};
+
+int __init ip_vs_app_init(void)
+{
+       int rv;
+
+       rv = register_pernet_subsys(&ip_vs_app_ops);
+       return rv;
+}
+
 
 void ip_vs_app_cleanup(void)
 {
-       proc_net_remove(&init_net, "ip_vs_app");
+       unregister_pernet_subsys(&ip_vs_app_ops);
 }
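
The ip_vs_app changes above replace the file-scope ip_vs_app_list/__ip_vs_app_mutex globals and the single init_net proc entry with per-namespace state plus a struct pernet_operations whose init/exit callbacks run once for every network namespace. A rough sketch of that registration pattern follows, using a hypothetical "foo" subsystem and net_generic() storage; the real IPVS code keeps its state in struct netns_ipvs via net_ipvs(net) instead, so treat this only as an outline of the mechanism.

        #include <linux/module.h>
        #include <linux/list.h>
        #include <linux/mutex.h>
        #include <net/net_namespace.h>
        #include <net/netns/generic.h>

        /* hypothetical per-namespace state; IPVS keeps the equivalent
         * fields (app_list, app_mutex) in struct netns_ipvs */
        struct foo_net {
                struct list_head objects;
                struct mutex     lock;
        };

        static int foo_net_id __read_mostly;

        static int __net_init foo_net_init(struct net *net)
        {
                struct foo_net *fn = net_generic(net, foo_net_id);

                INIT_LIST_HEAD(&fn->objects);
                mutex_init(&fn->lock);
                /* per-net /proc entries would be created here, as the
                 * real code does with proc_net_fops_create(net, ...) */
                return 0;
        }

        static void __net_exit foo_net_exit(struct net *net)
        {
                /* tear down whatever foo_net_init() set up for this netns */
        }

        static struct pernet_operations foo_net_ops = {
                .init = foo_net_init,
                .exit = foo_net_exit,
                .id   = &foo_net_id,
                .size = sizeof(struct foo_net),
        };

        static int __init foo_init(void)
        {
                return register_pernet_subsys(&foo_net_ops);
        }

        static void __exit foo_exit(void)
        {
                unregister_pernet_subsys(&foo_net_ops);
        }

        module_init(foo_init);
        module_exit(foo_exit);
        MODULE_LICENSE("GPL");

With .id/.size set, the pernet core allocates and frees the per-net block itself, so init/exit only have to initialize and drain it; that is why the converted ip_vs_app_init() above collapses to a single register_pernet_subsys() call.
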
index e9adecdc8ca4779468c494c1a2418049ca2384eb..9c2a517b69c8683b6933c2079b2c1199c5978a07 100644
 /*
  * Connection hash size. Default is what was selected at compile time.
 */
-int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
+static int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
 module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444);
 MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size");
 
 /* size and mask values */
-int ip_vs_conn_tab_size;
-int ip_vs_conn_tab_mask;
+int ip_vs_conn_tab_size __read_mostly;
+static int ip_vs_conn_tab_mask __read_mostly;
 
 /*
  *  Connection hash table: for input and output packets lookups of IPVS
  */
-static struct list_head *ip_vs_conn_tab;
+static struct hlist_head *ip_vs_conn_tab __read_mostly;
 
 /*  SLAB cache for IPVS connections */
 static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
 
-/*  counter for current IPVS connections */
-static atomic_t ip_vs_conn_count = ATOMIC_INIT(0);
-
 /*  counter for no client port connections */
 static atomic_t ip_vs_conn_no_cport_cnt = ATOMIC_INIT(0);
 
 /* random value for IPVS connection hash */
-static unsigned int ip_vs_conn_rnd;
+static unsigned int ip_vs_conn_rnd __read_mostly;
 
 /*
  *  Fine locking granularity for big connection hash table
  */
-#define CT_LOCKARRAY_BITS  4
+#define CT_LOCKARRAY_BITS  5
 #define CT_LOCKARRAY_SIZE  (1<<CT_LOCKARRAY_BITS)
 #define CT_LOCKARRAY_MASK  (CT_LOCKARRAY_SIZE-1)
 
@@ -133,19 +130,19 @@ static inline void ct_write_unlock_bh(unsigned key)
 /*
  *     Returns hash value for IPVS connection entry
  */
-static unsigned int ip_vs_conn_hashkey(int af, unsigned proto,
+static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned proto,
                                       const union nf_inet_addr *addr,
                                       __be16 port)
 {
 #ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6)
-               return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
-                                   (__force u32)port, proto, ip_vs_conn_rnd)
-                       & ip_vs_conn_tab_mask;
+               return (jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
+                                   (__force u32)port, proto, ip_vs_conn_rnd) ^
+                       ((size_t)net>>8)) & ip_vs_conn_tab_mask;
 #endif
-       return jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
-                           ip_vs_conn_rnd)
-               & ip_vs_conn_tab_mask;
+       return (jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
+                           ip_vs_conn_rnd) ^
+               ((size_t)net>>8)) & ip_vs_conn_tab_mask;
 }
 
 static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
@@ -166,18 +163,18 @@ static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
                port = p->vport;
        }
 
-       return ip_vs_conn_hashkey(p->af, p->protocol, addr, port);
+       return ip_vs_conn_hashkey(p->net, p->af, p->protocol, addr, port);
 }
 
 static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
 {
        struct ip_vs_conn_param p;
 
-       ip_vs_conn_fill_param(cp->af, cp->protocol, &cp->caddr, cp->cport,
-                             NULL, 0, &p);
+       ip_vs_conn_fill_param(ip_vs_conn_net(cp), cp->af, cp->protocol,
+                             &cp->caddr, cp->cport, NULL, 0, &p);
 
-       if (cp->dest && cp->dest->svc->pe) {
-               p.pe = cp->dest->svc->pe;
+       if (cp->pe) {
+               p.pe = cp->pe;
                p.pe_data = cp->pe_data;
                p.pe_data_len = cp->pe_data_len;
        }
@@ -186,7 +183,7 @@ static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
 }
 
 /*
- *     Hashes ip_vs_conn in ip_vs_conn_tab by proto,addr,port.
+ *     Hashes ip_vs_conn in ip_vs_conn_tab by netns,proto,addr,port.
  *     returns bool success.
  */
 static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
@@ -204,7 +201,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
        spin_lock(&cp->lock);
 
        if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
-               list_add(&cp->c_list, &ip_vs_conn_tab[hash]);
+               hlist_add_head(&cp->c_list, &ip_vs_conn_tab[hash]);
                cp->flags |= IP_VS_CONN_F_HASHED;
                atomic_inc(&cp->refcnt);
                ret = 1;
@@ -237,7 +234,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
        spin_lock(&cp->lock);
 
        if (cp->flags & IP_VS_CONN_F_HASHED) {
-               list_del(&cp->c_list);
+               hlist_del(&cp->c_list);
                cp->flags &= ~IP_VS_CONN_F_HASHED;
                atomic_dec(&cp->refcnt);
                ret = 1;
@@ -262,18 +259,20 @@ __ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
 {
        unsigned hash;
        struct ip_vs_conn *cp;
+       struct hlist_node *n;
 
        hash = ip_vs_conn_hashkey_param(p, false);
 
        ct_read_lock(hash);
 
-       list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+       hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
                if (cp->af == p->af &&
+                   p->cport == cp->cport && p->vport == cp->vport &&
                    ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
                    ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) &&
-                   p->cport == cp->cport && p->vport == cp->vport &&
                    ((!p->cport) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
-                   p->protocol == cp->protocol) {
+                   p->protocol == cp->protocol &&
+                   ip_vs_conn_net_eq(cp, p->net)) {
                        /* HIT */
                        atomic_inc(&cp->refcnt);
                        ct_read_unlock(hash);
@@ -313,23 +312,23 @@ ip_vs_conn_fill_param_proto(int af, const struct sk_buff *skb,
                            struct ip_vs_conn_param *p)
 {
        __be16 _ports[2], *pptr;
+       struct net *net = skb_net(skb);
 
        pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
        if (pptr == NULL)
                return 1;
 
        if (likely(!inverse))
-               ip_vs_conn_fill_param(af, iph->protocol, &iph->saddr, pptr[0],
-                                     &iph->daddr, pptr[1], p);
+               ip_vs_conn_fill_param(net, af, iph->protocol, &iph->saddr,
+                                     pptr[0], &iph->daddr, pptr[1], p);
        else
-               ip_vs_conn_fill_param(af, iph->protocol, &iph->daddr, pptr[1],
-                                     &iph->saddr, pptr[0], p);
+               ip_vs_conn_fill_param(net, af, iph->protocol, &iph->daddr,
+                                     pptr[1], &iph->saddr, pptr[0], p);
        return 0;
 }
 
 struct ip_vs_conn *
 ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
-                       struct ip_vs_protocol *pp,
                        const struct ip_vs_iphdr *iph,
                        unsigned int proto_off, int inverse)
 {
@@ -347,14 +346,17 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
 {
        unsigned hash;
        struct ip_vs_conn *cp;
+       struct hlist_node *n;
 
        hash = ip_vs_conn_hashkey_param(p, false);
 
        ct_read_lock(hash);
 
-       list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+       hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
+               if (!ip_vs_conn_net_eq(cp, p->net))
+                       continue;
                if (p->pe_data && p->pe->ct_match) {
-                       if (p->pe->ct_match(p, cp))
+                       if (p->pe == cp->pe && p->pe->ct_match(p, cp))
                                goto out;
                        continue;
                }
@@ -394,6 +396,7 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
 {
        unsigned hash;
        struct ip_vs_conn *cp, *ret=NULL;
+       struct hlist_node *n;
 
        /*
         *      Check for "full" addressed entries
@@ -402,12 +405,13 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
 
        ct_read_lock(hash);
 
-       list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+       hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
                if (cp->af == p->af &&
+                   p->vport == cp->cport && p->cport == cp->dport &&
                    ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
                    ip_vs_addr_equal(p->af, p->caddr, &cp->daddr) &&
-                   p->vport == cp->cport && p->cport == cp->dport &&
-                   p->protocol == cp->protocol) {
+                   p->protocol == cp->protocol &&
+                   ip_vs_conn_net_eq(cp, p->net)) {
                        /* HIT */
                        atomic_inc(&cp->refcnt);
                        ret = cp;
@@ -428,7 +432,6 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
 
 struct ip_vs_conn *
 ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
-                        struct ip_vs_protocol *pp,
                         const struct ip_vs_iphdr *iph,
                         unsigned int proto_off, int inverse)
 {
@@ -611,9 +614,9 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
        struct ip_vs_dest *dest;
 
        if ((cp) && (!cp->dest)) {
-               dest = ip_vs_find_dest(cp->af, &cp->daddr, cp->dport,
-                                      &cp->vaddr, cp->vport,
-                                      cp->protocol);
+               dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr,
+                                      cp->dport, &cp->vaddr, cp->vport,
+                                      cp->protocol, cp->fwmark);
                ip_vs_bind_dest(cp, dest);
                return dest;
        } else
@@ -686,13 +689,14 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
 int ip_vs_check_template(struct ip_vs_conn *ct)
 {
        struct ip_vs_dest *dest = ct->dest;
+       struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(ct));
 
        /*
         * Checking the dest server status.
         */
        if ((dest == NULL) ||
            !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
-           (sysctl_ip_vs_expire_quiescent_template &&
+           (ipvs->sysctl_expire_quiescent_template &&
             (atomic_read(&dest->weight) == 0))) {
                IP_VS_DBG_BUF(9, "check_template: dest not available for "
                              "protocol %s s:%s:%d v:%s:%d "
@@ -730,6 +734,7 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
 static void ip_vs_conn_expire(unsigned long data)
 {
        struct ip_vs_conn *cp = (struct ip_vs_conn *)data;
+       struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
 
        cp->timeout = 60*HZ;
 
@@ -765,13 +770,14 @@ static void ip_vs_conn_expire(unsigned long data)
                if (cp->flags & IP_VS_CONN_F_NFCT)
                        ip_vs_conn_drop_conntrack(cp);
 
+               ip_vs_pe_put(cp->pe);
                kfree(cp->pe_data);
                if (unlikely(cp->app != NULL))
                        ip_vs_unbind_app(cp);
                ip_vs_unbind_dest(cp);
                if (cp->flags & IP_VS_CONN_F_NO_CPORT)
                        atomic_dec(&ip_vs_conn_no_cport_cnt);
-               atomic_dec(&ip_vs_conn_count);
+               atomic_dec(&ipvs->conn_count);
 
                kmem_cache_free(ip_vs_conn_cachep, cp);
                return;
@@ -802,10 +808,12 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
 struct ip_vs_conn *
 ip_vs_conn_new(const struct ip_vs_conn_param *p,
               const union nf_inet_addr *daddr, __be16 dport, unsigned flags,
-              struct ip_vs_dest *dest)
+              struct ip_vs_dest *dest, __u32 fwmark)
 {
        struct ip_vs_conn *cp;
-       struct ip_vs_protocol *pp = ip_vs_proto_get(p->protocol);
+       struct netns_ipvs *ipvs = net_ipvs(p->net);
+       struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->net,
+                                                          p->protocol);
 
        cp = kmem_cache_zalloc(ip_vs_conn_cachep, GFP_ATOMIC);
        if (cp == NULL) {
@@ -813,8 +821,9 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
                return NULL;
        }
 
-       INIT_LIST_HEAD(&cp->c_list);
+       INIT_HLIST_NODE(&cp->c_list);
        setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp);
+       ip_vs_conn_net_set(cp, p->net);
        cp->af             = p->af;
        cp->protocol       = p->protocol;
        ip_vs_addr_copy(p->af, &cp->caddr, p->caddr);
@@ -826,7 +835,10 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
                        &cp->daddr, daddr);
        cp->dport          = dport;
        cp->flags          = flags;
-       if (flags & IP_VS_CONN_F_TEMPLATE && p->pe_data) {
+       cp->fwmark         = fwmark;
+       if (flags & IP_VS_CONN_F_TEMPLATE && p->pe) {
+               ip_vs_pe_get(p->pe);
+               cp->pe = p->pe;
                cp->pe_data = p->pe_data;
                cp->pe_data_len = p->pe_data_len;
        }
@@ -842,7 +854,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
        atomic_set(&cp->n_control, 0);
        atomic_set(&cp->in_pkts, 0);
 
-       atomic_inc(&ip_vs_conn_count);
+       atomic_inc(&ipvs->conn_count);
        if (flags & IP_VS_CONN_F_NO_CPORT)
                atomic_inc(&ip_vs_conn_no_cport_cnt);
 
@@ -861,8 +873,8 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
 #endif
                ip_vs_bind_xmit(cp);
 
-       if (unlikely(pp && atomic_read(&pp->appcnt)))
-               ip_vs_bind_app(cp, pp);
+       if (unlikely(pd && atomic_read(&pd->appcnt)))
+               ip_vs_bind_app(cp, pd->pp);
 
        /*
         * Allow conntrack to be preserved. By default, conntrack
@@ -871,7 +883,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
         * IP_VS_CONN_F_ONE_PACKET too.
         */
 
-       if (ip_vs_conntrack_enabled())
+       if (ip_vs_conntrack_enabled(ipvs))
                cp->flags |= IP_VS_CONN_F_NFCT;
 
        /* Hash it in the ip_vs_conn_tab finally */
@@ -884,18 +896,24 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
  *     /proc/net/ip_vs_conn entries
  */
 #ifdef CONFIG_PROC_FS
+struct ip_vs_iter_state {
+       struct seq_net_private  p;
+       struct hlist_head       *l;
+};
 
 static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
 {
        int idx;
        struct ip_vs_conn *cp;
+       struct ip_vs_iter_state *iter = seq->private;
+       struct hlist_node *n;
 
        for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
                ct_read_lock_bh(idx);
-               list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
+               hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
                        if (pos-- == 0) {
-                               seq->private = &ip_vs_conn_tab[idx];
-                       return cp;
+                               iter->l = &ip_vs_conn_tab[idx];
+                               return cp;
                        }
                }
                ct_read_unlock_bh(idx);
@@ -906,14 +924,18 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
 
 static void *ip_vs_conn_seq_start(struct seq_file *seq, loff_t *pos)
 {
-       seq->private = NULL;
+       struct ip_vs_iter_state *iter = seq->private;
+
+       iter->l = NULL;
        return *pos ? ip_vs_conn_array(seq, *pos - 1) :SEQ_START_TOKEN;
 }
 
 static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        struct ip_vs_conn *cp = v;
-       struct list_head *e, *l = seq->private;
+       struct ip_vs_iter_state *iter = seq->private;
+       struct hlist_node *e;
+       struct hlist_head *l = iter->l;
        int idx;
 
        ++*pos;
@@ -921,27 +943,28 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                return ip_vs_conn_array(seq, 0);
 
        /* more on same hash chain? */
-       if ((e = cp->c_list.next) != l)
-               return list_entry(e, struct ip_vs_conn, c_list);
+       if ((e = cp->c_list.next))
+               return hlist_entry(e, struct ip_vs_conn, c_list);
 
        idx = l - ip_vs_conn_tab;
        ct_read_unlock_bh(idx);
 
        while (++idx < ip_vs_conn_tab_size) {
                ct_read_lock_bh(idx);
-               list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
-                       seq->private = &ip_vs_conn_tab[idx];
+               hlist_for_each_entry(cp, e, &ip_vs_conn_tab[idx], c_list) {
+                       iter->l = &ip_vs_conn_tab[idx];
                        return cp;
                }
                ct_read_unlock_bh(idx);
        }
-       seq->private = NULL;
+       iter->l = NULL;
        return NULL;
 }
 
 static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v)
 {
-       struct list_head *l = seq->private;
+       struct ip_vs_iter_state *iter = seq->private;
+       struct hlist_head *l = iter->l;
 
        if (l)
                ct_read_unlock_bh(l - ip_vs_conn_tab);
@@ -955,18 +978,19 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
    "Pro FromIP   FPrt ToIP     TPrt DestIP   DPrt State       Expires PEName PEData\n");
        else {
                const struct ip_vs_conn *cp = v;
+               struct net *net = seq_file_net(seq);
                char pe_data[IP_VS_PENAME_MAXLEN + IP_VS_PEDATA_MAXLEN + 3];
                size_t len = 0;
 
-               if (cp->dest && cp->pe_data &&
-                   cp->dest->svc->pe->show_pe_data) {
+               if (!ip_vs_conn_net_eq(cp, net))
+                       return 0;
+               if (cp->pe_data) {
                        pe_data[0] = ' ';
-                       len = strlen(cp->dest->svc->pe->name);
-                       memcpy(pe_data + 1, cp->dest->svc->pe->name, len);
+                       len = strlen(cp->pe->name);
+                       memcpy(pe_data + 1, cp->pe->name, len);
                        pe_data[len + 1] = ' ';
                        len += 2;
-                       len += cp->dest->svc->pe->show_pe_data(cp,
-                                                              pe_data + len);
+                       len += cp->pe->show_pe_data(cp, pe_data + len);
                }
                pe_data[len] = '\0';
 
@@ -1004,7 +1028,8 @@ static const struct seq_operations ip_vs_conn_seq_ops = {
 
 static int ip_vs_conn_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &ip_vs_conn_seq_ops);
+       return seq_open_net(inode, file, &ip_vs_conn_seq_ops,
+                           sizeof(struct ip_vs_iter_state));
 }
 
 static const struct file_operations ip_vs_conn_fops = {
@@ -1031,6 +1056,10 @@ static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
    "Pro FromIP   FPrt ToIP     TPrt DestIP   DPrt State       Origin Expires\n");
        else {
                const struct ip_vs_conn *cp = v;
+               struct net *net = seq_file_net(seq);
+
+               if (!ip_vs_conn_net_eq(cp, net))
+                       return 0;
 
 #ifdef CONFIG_IP_VS_IPV6
                if (cp->af == AF_INET6)
@@ -1067,7 +1096,8 @@ static const struct seq_operations ip_vs_conn_sync_seq_ops = {
 
 static int ip_vs_conn_sync_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &ip_vs_conn_sync_seq_ops);
+       return seq_open_net(inode, file, &ip_vs_conn_sync_seq_ops,
+                           sizeof(struct ip_vs_iter_state));
 }
 
 static const struct file_operations ip_vs_conn_sync_fops = {
@@ -1113,7 +1143,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
 }
 
 /* Called from keventd and must protect itself from softirqs */
-void ip_vs_random_dropentry(void)
+void ip_vs_random_dropentry(struct net *net)
 {
        int idx;
        struct ip_vs_conn *cp;
@@ -1123,17 +1153,19 @@ void ip_vs_random_dropentry(void)
         */
        for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
                unsigned hash = net_random() & ip_vs_conn_tab_mask;
+               struct hlist_node *n;
 
                /*
                 *  Lock is actually needed in this loop.
                 */
                ct_write_lock_bh(hash);
 
-               list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+               hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
                        if (cp->flags & IP_VS_CONN_F_TEMPLATE)
                                /* connection template */
                                continue;
-
+                       if (!ip_vs_conn_net_eq(cp, net))
+                               continue;
                        if (cp->protocol == IPPROTO_TCP) {
                                switch(cp->state) {
                                case IP_VS_TCP_S_SYN_RECV:
@@ -1168,20 +1200,24 @@ void ip_vs_random_dropentry(void)
 /*
  *      Flush all the connection entries in the ip_vs_conn_tab
  */
-static void ip_vs_conn_flush(void)
+static void ip_vs_conn_flush(struct net *net)
 {
        int idx;
        struct ip_vs_conn *cp;
+       struct netns_ipvs *ipvs = net_ipvs(net);
 
-  flush_again:
+flush_again:
        for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
+               struct hlist_node *n;
+
                /*
                 *  Lock is actually needed in this loop.
                 */
                ct_write_lock_bh(idx);
 
-               list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
-
+               hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
+                       if (!ip_vs_conn_net_eq(cp, net))
+                               continue;
                        IP_VS_DBG(4, "del connection\n");
                        ip_vs_conn_expire_now(cp);
                        if (cp->control) {
@@ -1194,16 +1230,41 @@ static void ip_vs_conn_flush(void)
 
        /* the counter may be not NULL, because maybe some conn entries
           are run by slow timer handler or unhashed but still referred */
-       if (atomic_read(&ip_vs_conn_count) != 0) {
+       if (atomic_read(&ipvs->conn_count) != 0) {
                schedule();
                goto flush_again;
        }
 }
+/*
+ * per netns init and exit
+ */
+int __net_init __ip_vs_conn_init(struct net *net)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       atomic_set(&ipvs->conn_count, 0);
+
+       proc_net_fops_create(net, "ip_vs_conn", 0, &ip_vs_conn_fops);
+       proc_net_fops_create(net, "ip_vs_conn_sync", 0, &ip_vs_conn_sync_fops);
+       return 0;
+}
 
+static void __net_exit __ip_vs_conn_cleanup(struct net *net)
+{
+       /* flush all the connection entries first */
+       ip_vs_conn_flush(net);
+       proc_net_remove(net, "ip_vs_conn");
+       proc_net_remove(net, "ip_vs_conn_sync");
+}
+static struct pernet_operations ipvs_conn_ops = {
+       .init = __ip_vs_conn_init,
+       .exit = __ip_vs_conn_cleanup,
+};
 
 int __init ip_vs_conn_init(void)
 {
        int idx;
+       int retc;
 
        /* Compute size and mask */
        ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
@@ -1212,8 +1273,7 @@ int __init ip_vs_conn_init(void)
        /*
         * Allocate the connection hash table and initialize its list heads
         */
-       ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size *
-                                sizeof(struct list_head));
+       ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size * sizeof(*ip_vs_conn_tab));
        if (!ip_vs_conn_tab)
                return -ENOMEM;
 
@@ -1233,32 +1293,25 @@ int __init ip_vs_conn_init(void)
        IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n",
                  sizeof(struct ip_vs_conn));
 
-       for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
-               INIT_LIST_HEAD(&ip_vs_conn_tab[idx]);
-       }
+       for (idx = 0; idx < ip_vs_conn_tab_size; idx++)
+               INIT_HLIST_HEAD(&ip_vs_conn_tab[idx]);
 
        for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++)  {
                rwlock_init(&__ip_vs_conntbl_lock_array[idx].l);
        }
 
-       proc_net_fops_create(&init_net, "ip_vs_conn", 0, &ip_vs_conn_fops);
-       proc_net_fops_create(&init_net, "ip_vs_conn_sync", 0, &ip_vs_conn_sync_fops);
+       retc = register_pernet_subsys(&ipvs_conn_ops);
 
        /* calculate the random value for connection hash */
        get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd));
 
-       return 0;
+       return retc;
 }
 
-
 void ip_vs_conn_cleanup(void)
 {
-       /* flush all the connection entries first */
-       ip_vs_conn_flush();
-
+       unregister_pernet_subsys(&ipvs_conn_ops);
        /* Release the empty cache */
        kmem_cache_destroy(ip_vs_conn_cachep);
-       proc_net_remove(&init_net, "ip_vs_conn");
-       proc_net_remove(&init_net, "ip_vs_conn_sync");
        vfree(ip_vs_conn_tab);
 }
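
The ip_vs_conn changes above convert the connection table from list_head to hlist_head buckets (one pointer per bucket instead of two) and fold the owning struct net pointer into the hash key, so connections from all namespaces can share the one global table. A rough sketch of that bucket scheme, with hypothetical names (my_conn, conn_tab) and all locking omitted; note the 2.6.38-era hlist_for_each_entry(), which still takes an explicit hlist_node cursor, exactly as in the hunks above.

        #include <linux/types.h>
        #include <linux/list.h>
        #include <linux/jhash.h>
        #include <net/net_namespace.h>

        #define CONN_TAB_BITS 4
        #define CONN_TAB_SIZE (1 << CONN_TAB_BITS)
        #define CONN_TAB_MASK (CONN_TAB_SIZE - 1)

        struct my_conn {
                struct hlist_node c_list;       /* bucket linkage */
                struct net *net;
                __be16 cport, vport;
                /* ... */
        };

        static struct hlist_head conn_tab[CONN_TAB_SIZE];
        static u32 conn_rnd;

        /* mix the netns pointer into the key, as the hunks above do with
         * ((size_t)net >> 8), so namespaces spread across buckets */
        static unsigned int conn_hashkey(struct net *net, __be16 cport, __be16 vport)
        {
                return (jhash_2words((__force u32)cport, (__force u32)vport, conn_rnd) ^
                        ((size_t)net >> 8)) & CONN_TAB_MASK;
        }

        static void conn_hash(struct my_conn *cp)
        {
                unsigned int hash = conn_hashkey(cp->net, cp->cport, cp->vport);

                hlist_add_head(&cp->c_list, &conn_tab[hash]);
        }

        static struct my_conn *conn_find(struct net *net, __be16 cport, __be16 vport)
        {
                unsigned int hash = conn_hashkey(net, cport, vport);
                struct hlist_node *n;
                struct my_conn *cp;

                hlist_for_each_entry(cp, n, &conn_tab[hash], c_list) {
                        if (net_eq(cp->net, net) &&
                            cp->cport == cport && cp->vport == vport)
                                return cp;      /* hit */
                }
                return NULL;
        }

Putting the cheap port and netns comparisons ahead of the address comparisons is the same ordering the diff introduces in __ip_vs_conn_in_get() and ip_vs_conn_out_get().
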
index b4e51e9c5a04ad4e1314a338529cc0284407feb3..2d1f932add46566efbaa2f9ef5d2fd2ec8e18fc4 100644
@@ -41,6 +41,7 @@
 #include <net/icmp.h>                   /* for icmp_send */
 #include <net/route.h>
 #include <net/ip6_checksum.h>
+#include <net/netns/generic.h>         /* net_generic() */
 
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv4.h>
@@ -68,6 +69,12 @@ EXPORT_SYMBOL(ip_vs_conn_put);
 EXPORT_SYMBOL(ip_vs_get_debug_level);
 #endif
 
+int ip_vs_net_id __read_mostly;
+#ifdef IP_VS_GENERIC_NETNS
+EXPORT_SYMBOL(ip_vs_net_id);
+#endif
+/* netns cnt used for uniqueness */
+static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
 
 /* ID used in ICMP lookups */
 #define icmp_id(icmph)          (((icmph)->un).echo.id)
@@ -108,21 +115,28 @@ static inline void
 ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 {
        struct ip_vs_dest *dest = cp->dest;
+       struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+
        if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
-               spin_lock(&dest->stats.lock);
-               dest->stats.ustats.inpkts++;
-               dest->stats.ustats.inbytes += skb->len;
-               spin_unlock(&dest->stats.lock);
-
-               spin_lock(&dest->svc->stats.lock);
-               dest->svc->stats.ustats.inpkts++;
-               dest->svc->stats.ustats.inbytes += skb->len;
-               spin_unlock(&dest->svc->stats.lock);
-
-               spin_lock(&ip_vs_stats.lock);
-               ip_vs_stats.ustats.inpkts++;
-               ip_vs_stats.ustats.inbytes += skb->len;
-               spin_unlock(&ip_vs_stats.lock);
+               struct ip_vs_cpu_stats *s;
+
+               s = this_cpu_ptr(dest->stats.cpustats);
+               s->ustats.inpkts++;
+               u64_stats_update_begin(&s->syncp);
+               s->ustats.inbytes += skb->len;
+               u64_stats_update_end(&s->syncp);
+
+               s = this_cpu_ptr(dest->svc->stats.cpustats);
+               s->ustats.inpkts++;
+               u64_stats_update_begin(&s->syncp);
+               s->ustats.inbytes += skb->len;
+               u64_stats_update_end(&s->syncp);
+
+               s = this_cpu_ptr(ipvs->cpustats);
+               s->ustats.inpkts++;
+               u64_stats_update_begin(&s->syncp);
+               s->ustats.inbytes += skb->len;
+               u64_stats_update_end(&s->syncp);
        }
 }
 
@@ -131,21 +145,28 @@ static inline void
 ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 {
        struct ip_vs_dest *dest = cp->dest;
+       struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+
        if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
-               spin_lock(&dest->stats.lock);
-               dest->stats.ustats.outpkts++;
-               dest->stats.ustats.outbytes += skb->len;
-               spin_unlock(&dest->stats.lock);
-
-               spin_lock(&dest->svc->stats.lock);
-               dest->svc->stats.ustats.outpkts++;
-               dest->svc->stats.ustats.outbytes += skb->len;
-               spin_unlock(&dest->svc->stats.lock);
-
-               spin_lock(&ip_vs_stats.lock);
-               ip_vs_stats.ustats.outpkts++;
-               ip_vs_stats.ustats.outbytes += skb->len;
-               spin_unlock(&ip_vs_stats.lock);
+               struct ip_vs_cpu_stats *s;
+
+               s = this_cpu_ptr(dest->stats.cpustats);
+               s->ustats.outpkts++;
+               u64_stats_update_begin(&s->syncp);
+               s->ustats.outbytes += skb->len;
+               u64_stats_update_end(&s->syncp);
+
+               s = this_cpu_ptr(dest->svc->stats.cpustats);
+               s->ustats.outpkts++;
+               u64_stats_update_begin(&s->syncp);
+               s->ustats.outbytes += skb->len;
+               u64_stats_update_end(&s->syncp);
+
+               s = this_cpu_ptr(ipvs->cpustats);
+               s->ustats.outpkts++;
+               u64_stats_update_begin(&s->syncp);
+               s->ustats.outbytes += skb->len;
+               u64_stats_update_end(&s->syncp);
        }
 }
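
The ip_vs_in_stats()/ip_vs_out_stats() hunks above drop the global and per-object spinlocks in favour of per-CPU counters protected by u64_stats_sync, so the packet path only touches CPU-local data. A minimal sketch of that pattern with a hypothetical my_cpu_stats structure; as in the hunks, only the 64-bit byte counter sits inside the begin/end pair (it is what needs the sequence counter on 32-bit hosts), and the writer is assumed to run in softirq context where CPU migration is not a concern.

        #include <linux/percpu.h>
        #include <linux/u64_stats_sync.h>

        struct my_cpu_stats {
                u64 inpkts;
                u64 inbytes;
                struct u64_stats_sync syncp;
        };

        /* writer side: runs in the packet path, no locks taken */
        static void count_in(struct my_cpu_stats __percpu *stats, unsigned int len)
        {
                struct my_cpu_stats *s = this_cpu_ptr(stats);

                s->inpkts++;
                u64_stats_update_begin(&s->syncp);
                s->inbytes += len;
                u64_stats_update_end(&s->syncp);
        }

        /* reader side: sum the per-CPU copies, retrying if a writer was active */
        static u64 total_inbytes(struct my_cpu_stats __percpu *stats)
        {
                u64 sum = 0;
                int cpu;

                for_each_possible_cpu(cpu) {
                        struct my_cpu_stats *s = per_cpu_ptr(stats, cpu);
                        unsigned int start;
                        u64 bytes;

                        do {
                                start = u64_stats_fetch_begin(&s->syncp);
                                bytes = s->inbytes;
                        } while (u64_stats_fetch_retry(&s->syncp, start));
                        sum += bytes;
                }
                return sum;
        }
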
 
@@ -153,41 +174,44 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 static inline void
 ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
 {
-       spin_lock(&cp->dest->stats.lock);
-       cp->dest->stats.ustats.conns++;
-       spin_unlock(&cp->dest->stats.lock);
+       struct netns_ipvs *ipvs = net_ipvs(svc->net);
+       struct ip_vs_cpu_stats *s;
+
+       s = this_cpu_ptr(cp->dest->stats.cpustats);
+       s->ustats.conns++;
 
-       spin_lock(&svc->stats.lock);
-       svc->stats.ustats.conns++;
-       spin_unlock(&svc->stats.lock);
+       s = this_cpu_ptr(svc->stats.cpustats);
+       s->ustats.conns++;
 
-       spin_lock(&ip_vs_stats.lock);
-       ip_vs_stats.ustats.conns++;
-       spin_unlock(&ip_vs_stats.lock);
+       s = this_cpu_ptr(ipvs->cpustats);
+       s->ustats.conns++;
 }
 
 
 static inline int
 ip_vs_set_state(struct ip_vs_conn *cp, int direction,
                const struct sk_buff *skb,
-               struct ip_vs_protocol *pp)
+               struct ip_vs_proto_data *pd)
 {
-       if (unlikely(!pp->state_transition))
+       if (unlikely(!pd->pp->state_transition))
                return 0;
-       return pp->state_transition(cp, direction, skb, pp);
+       return pd->pp->state_transition(cp, direction, skb, pd);
 }
 
-static inline void
+static inline int
 ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
                              struct sk_buff *skb, int protocol,
                              const union nf_inet_addr *caddr, __be16 cport,
                              const union nf_inet_addr *vaddr, __be16 vport,
                              struct ip_vs_conn_param *p)
 {
-       ip_vs_conn_fill_param(svc->af, protocol, caddr, cport, vaddr, vport, p);
+       ip_vs_conn_fill_param(svc->net, svc->af, protocol, caddr, cport, vaddr,
+                             vport, p);
        p->pe = svc->pe;
        if (p->pe && p->pe->fill_param)
-               p->pe->fill_param(p, skb);
+               return p->pe->fill_param(p, skb);
+
+       return 0;
 }
 
 /*
@@ -200,7 +224,7 @@ ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
 static struct ip_vs_conn *
 ip_vs_sched_persist(struct ip_vs_service *svc,
                    struct sk_buff *skb,
-                   __be16 ports[2])
+                   __be16 src_port, __be16 dst_port, int *ignored)
 {
        struct ip_vs_conn *cp = NULL;
        struct ip_vs_iphdr iph;
@@ -224,8 +248,8 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
 
        IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
                      "mnet %s\n",
-                     IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(ports[0]),
-                     IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(ports[1]),
+                     IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(src_port),
+                     IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(dst_port),
                      IP_VS_DBG_ADDR(svc->af, &snet));
 
        /*
@@ -247,14 +271,14 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
                const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
                __be16 vport = 0;
 
-               if (ports[1] == svc->port) {
+               if (dst_port == svc->port) {
                        /* non-FTP template:
                         * <protocol, caddr, 0, vaddr, vport, daddr, dport>
                         * FTP template:
                         * <protocol, caddr, 0, vaddr, 0, daddr, 0>
                         */
                        if (svc->port != FTPPORT)
-                               vport = ports[1];
+                               vport = dst_port;
                } else {
                        /* Note: persistent fwmark-based services and
                         * persistent port zero service are handled here.
@@ -268,24 +292,31 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
                                vaddr = &fwmark;
                        }
                }
-               ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
-                                             vaddr, vport, &param);
+               /* return *ignored = -1 so NF_DROP can be used */
+               if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
+                                                 vaddr, vport, &param) < 0) {
+                       *ignored = -1;
+                       return NULL;
+               }
        }
 
        /* Check if a template already exists */
        ct = ip_vs_ct_in_get(&param);
        if (!ct || !ip_vs_check_template(ct)) {
-               /* No template found or the dest of the connection
+               /*
+                * No template found or the dest of the connection
                 * template is not available.
+                * return *ignored=0 i.e. ICMP and NF_DROP
                 */
                dest = svc->scheduler->schedule(svc, skb);
                if (!dest) {
                        IP_VS_DBG(1, "p-schedule: no dest found.\n");
                        kfree(param.pe_data);
+                       *ignored = 0;
                        return NULL;
                }
 
-               if (ports[1] == svc->port && svc->port != FTPPORT)
+               if (dst_port == svc->port && svc->port != FTPPORT)
                        dport = dest->port;
 
                /* Create a template
@@ -293,9 +324,10 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
                 * and thus param.pe_data will be destroyed
                 * when the template expires */
                ct = ip_vs_conn_new(&param, &dest->addr, dport,
-                                   IP_VS_CONN_F_TEMPLATE, dest);
+                                   IP_VS_CONN_F_TEMPLATE, dest, skb->mark);
                if (ct == NULL) {
                        kfree(param.pe_data);
+                       *ignored = -1;
                        return NULL;
                }
 
@@ -306,7 +338,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
                kfree(param.pe_data);
        }
 
-       dport = ports[1];
+       dport = dst_port;
        if (dport == svc->port && dest->port)
                dport = dest->port;
 
@@ -317,11 +349,13 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
        /*
         *    Create a new connection according to the template
         */
-       ip_vs_conn_fill_param(svc->af, iph.protocol, &iph.saddr, ports[0],
-                             &iph.daddr, ports[1], &param);
-       cp = ip_vs_conn_new(&param, &dest->addr, dport, flags, dest);
+       ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, &iph.saddr,
+                             src_port, &iph.daddr, dst_port, &param);
+
+       cp = ip_vs_conn_new(&param, &dest->addr, dport, flags, dest, skb->mark);
        if (cp == NULL) {
                ip_vs_conn_put(ct);
+               *ignored = -1;
                return NULL;
        }
 
@@ -341,11 +375,27 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
  *  It selects a server according to the virtual service, and
  *  creates a connection entry.
  *  Protocols supported: TCP, UDP
+ *
+ *  Usage of *ignored
+ *
+ * 1 :   protocol tried to schedule (eg. on SYN), found svc but the
+ *       svc/scheduler decides that this packet should be accepted with
+ *       NF_ACCEPT because it must not be scheduled.
+ *
+ * 0 :   scheduler can not find destination, so try bypass or
+ *       return ICMP and then NF_DROP (ip_vs_leave).
+ *
+ * -1 :  scheduler tried to schedule but fatal error occurred, eg.
+ *       ip_vs_conn_new failure (ENOMEM) or ip_vs_sip_fill_param
+ *       failure such as missing Call-ID, ENOMEM on skb_linearize
+ *       or pe_data. In this case we should return NF_DROP without
+ *       any attempts to send ICMP with ip_vs_leave.
  */
 struct ip_vs_conn *
 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
-              struct ip_vs_protocol *pp, int *ignored)
+              struct ip_vs_proto_data *pd, int *ignored)
 {
+       struct ip_vs_protocol *pp = pd->pp;
        struct ip_vs_conn *cp = NULL;
        struct ip_vs_iphdr iph;
        struct ip_vs_dest *dest;
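
The *ignored convention documented in the hunk above gives ip_vs_schedule() three distinct failure outcomes instead of a bare NULL. Based only on that comment (the real decision logic lives in ip_vs_in(), which is not part of this excerpt), a caller can be expected to map the values to netfilter verdicts roughly like this; schedule_and_verdict() is an illustrative name and svc reference counting is omitted.

        #include <linux/netfilter.h>
        #include <net/ip_vs.h>

        /* sketch only: how the tri-state *ignored maps to verdicts,
         * per the comment above */
        static unsigned int schedule_and_verdict(struct ip_vs_service *svc,
                                                 struct sk_buff *skb,
                                                 struct ip_vs_proto_data *pd)
        {
                struct ip_vs_conn *cp;
                int ignored;

                cp = ip_vs_schedule(svc, skb, pd, &ignored);
                if (!cp) {
                        if (ignored < 0)        /* fatal: drop, no ICMP */
                                return NF_DROP;
                        if (ignored == 0)       /* no dest: bypass or ICMP+drop */
                                return ip_vs_leave(svc, skb, pd);
                        return NF_ACCEPT;       /* ignored == 1: pass untouched */
                }
                /* ... otherwise transmit via cp as usual ... */
                return NF_ACCEPT;
        }
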
@@ -371,12 +421,10 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
        }
 
        /*
-        * Do not schedule replies from local real server. It is risky
-        * for fwmark services but mostly for persistent services.
+        *    Do not schedule replies from local real server.
         */
        if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
-           (svc->flags & IP_VS_SVC_F_PERSISTENT || svc->fwmark) &&
-           (cp = pp->conn_in_get(svc->af, skb, pp, &iph, iph.len, 1))) {
+           (cp = pp->conn_in_get(svc->af, skb, &iph, iph.len, 1))) {
                IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
                              "Not scheduling reply for existing connection");
                __ip_vs_conn_put(cp);
@@ -386,10 +434,10 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
        /*
         *    Persistent service
         */
-       if (svc->flags & IP_VS_SVC_F_PERSISTENT) {
-               *ignored = 0;
-               return ip_vs_sched_persist(svc, skb, pptr);
-       }
+       if (svc->flags & IP_VS_SVC_F_PERSISTENT)
+               return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored);
+
+       *ignored = 0;
 
        /*
         *    Non-persistent service
@@ -402,8 +450,6 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
                return NULL;
        }
 
-       *ignored = 0;
-
        dest = svc->scheduler->schedule(svc, skb);
        if (dest == NULL) {
                IP_VS_DBG(1, "Schedule: no dest found.\n");
@@ -419,13 +465,17 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
         */
        {
                struct ip_vs_conn_param p;
-               ip_vs_conn_fill_param(svc->af, iph.protocol, &iph.saddr,
-                                     pptr[0], &iph.daddr, pptr[1], &p);
+
+               ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol,
+                                     &iph.saddr, pptr[0], &iph.daddr, pptr[1],
+                                     &p);
                cp = ip_vs_conn_new(&p, &dest->addr,
                                    dest->port ? dest->port : pptr[1],
-                                   flags, dest);
-               if (!cp)
+                                   flags, dest, skb->mark);
+               if (!cp) {
+                       *ignored = -1;
                        return NULL;
+               }
        }
 
        IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
@@ -447,11 +497,14 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
  *  no destination is available for a new connection.
  */
 int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
-               struct ip_vs_protocol *pp)
+               struct ip_vs_proto_data *pd)
 {
+       struct net *net;
+       struct netns_ipvs *ipvs;
        __be16 _ports[2], *pptr;
        struct ip_vs_iphdr iph;
        int unicast;
+
        ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
 
        pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
@@ -459,18 +512,20 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
                ip_vs_service_put(svc);
                return NF_DROP;
        }
+       net = skb_net(skb);
 
 #ifdef CONFIG_IP_VS_IPV6
        if (svc->af == AF_INET6)
                unicast = ipv6_addr_type(&iph.daddr.in6) & IPV6_ADDR_UNICAST;
        else
 #endif
-               unicast = (inet_addr_type(&init_net, iph.daddr.ip) == RTN_UNICAST);
+               unicast = (inet_addr_type(net, iph.daddr.ip) == RTN_UNICAST);
 
        /* if it is fwmark-based service, the cache_bypass sysctl is up
           and the destination is a non-local unicast, then create
           a cache_bypass connection entry */
-       if (sysctl_ip_vs_cache_bypass && svc->fwmark && unicast) {
+       ipvs = net_ipvs(net);
+       if (ipvs->sysctl_cache_bypass && svc->fwmark && unicast) {
                int ret, cs;
                struct ip_vs_conn *cp;
                unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
@@ -484,12 +539,12 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
                IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
                {
                        struct ip_vs_conn_param p;
-                       ip_vs_conn_fill_param(svc->af, iph.protocol,
+                       ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol,
                                              &iph.saddr, pptr[0],
                                              &iph.daddr, pptr[1], &p);
                        cp = ip_vs_conn_new(&p, &daddr, 0,
                                            IP_VS_CONN_F_BYPASS | flags,
-                                           NULL);
+                                           NULL, skb->mark);
                        if (!cp)
                                return NF_DROP;
                }
@@ -498,10 +553,10 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
                ip_vs_in_stats(cp, skb);
 
                /* set state */
-               cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp);
+               cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
 
                /* transmit the first SYN packet */
-               ret = cp->packet_xmit(skb, cp, pp);
+               ret = cp->packet_xmit(skb, cp, pd->pp);
                /* do not touch skb anymore */
 
                atomic_inc(&cp->in_pkts);
@@ -674,7 +729,7 @@ void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
 #endif
 
 /* Handle relevant response ICMP messages - forward to the right
- * destination host. Used for NAT and local client.
+ * destination host.
  */
 static int handle_response_icmp(int af, struct sk_buff *skb,
                                union nf_inet_addr *snet,
@@ -682,6 +737,7 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
                                struct ip_vs_protocol *pp,
                                unsigned int offset, unsigned int ihl)
 {
+       struct netns_ipvs *ipvs;
        unsigned int verdict = NF_DROP;
 
        if (IP_VS_FWD_METHOD(cp) != 0) {
@@ -703,6 +759,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
        if (!skb_make_writable(skb, offset))
                goto out;
 
+       ipvs = net_ipvs(skb_net(skb));
+
 #ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6)
                ip_vs_nat_icmp_v6(skb, pp, cp, 1);
@@ -712,11 +770,11 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
 
 #ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6) {
-               if (sysctl_ip_vs_snat_reroute && ip6_route_me_harder(skb) != 0)
+               if (ipvs->sysctl_snat_reroute && ip6_route_me_harder(skb) != 0)
                        goto out;
        } else
 #endif
-               if ((sysctl_ip_vs_snat_reroute ||
+               if ((ipvs->sysctl_snat_reroute ||
                     skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
                    ip_route_me_harder(skb, RTN_LOCAL) != 0)
                        goto out;
@@ -808,7 +866,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
 
        ip_vs_fill_iphdr(AF_INET, cih, &ciph);
        /* The embedded headers contain source and dest in reverse order */
-       cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1);
+       cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1);
        if (!cp)
                return NF_ACCEPT;
 
@@ -885,7 +943,7 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
 
        ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
        /* The embedded headers contain source and dest in reverse order */
-       cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1);
+       cp = pp->conn_out_get(AF_INET6, skb, &ciph, offset, 1);
        if (!cp)
                return NF_ACCEPT;
 
@@ -921,12 +979,14 @@ static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
 }
 
 /* Handle response packets: rewrite addresses and send away...
- * Used for NAT and local client.
  */
 static unsigned int
-handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
                struct ip_vs_conn *cp, int ihl)
 {
+       struct ip_vs_protocol *pp = pd->pp;
+       struct netns_ipvs *ipvs;
+
        IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet");
 
        if (!skb_make_writable(skb, ihl))
@@ -961,13 +1021,15 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
         * if it came from this machine itself.  So re-compute
         * the routing information.
         */
+       ipvs = net_ipvs(skb_net(skb));
+
 #ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6) {
-               if (sysctl_ip_vs_snat_reroute && ip6_route_me_harder(skb) != 0)
+               if (ipvs->sysctl_snat_reroute && ip6_route_me_harder(skb) != 0)
                        goto drop;
        } else
 #endif
-               if ((sysctl_ip_vs_snat_reroute ||
+               if ((ipvs->sysctl_snat_reroute ||
                     skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
                    ip_route_me_harder(skb, RTN_LOCAL) != 0)
                        goto drop;
@@ -975,7 +1037,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
        IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
 
        ip_vs_out_stats(cp, skb);
-       ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp);
+       ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd);
        skb->ipvs_property = 1;
        if (!(cp->flags & IP_VS_CONN_F_NFCT))
                ip_vs_notrack(skb);
@@ -999,9 +1061,12 @@ drop:
 static unsigned int
 ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
 {
+       struct net *net = NULL;
        struct ip_vs_iphdr iph;
        struct ip_vs_protocol *pp;
+       struct ip_vs_proto_data *pd;
        struct ip_vs_conn *cp;
+       struct netns_ipvs *ipvs;
 
        EnterFunction(11);
 
@@ -1022,6 +1087,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
        if (unlikely(!skb_dst(skb)))
                return NF_ACCEPT;
 
+       net = skb_net(skb);
        ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
 #ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6) {
@@ -1045,9 +1111,10 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
                        ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
                }
 
-       pp = ip_vs_proto_get(iph.protocol);
-       if (unlikely(!pp))
+       pd = ip_vs_proto_data_get(net, iph.protocol);
+       if (unlikely(!pd))
                return NF_ACCEPT;
+       pp = pd->pp;
 
        /* reassemble IP fragments */
 #ifdef CONFIG_IP_VS_IPV6
@@ -1073,11 +1140,12 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
        /*
         * Check if the packet belongs to an existing entry
         */
-       cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0);
+       cp = pp->conn_out_get(af, skb, &iph, iph.len, 0);
+       ipvs = net_ipvs(net);
 
        if (likely(cp))
-               return handle_response(af, skb, pp, cp, iph.len);
-       if (sysctl_ip_vs_nat_icmp_send &&
+               return handle_response(af, skb, pd, cp, iph.len);
+       if (ipvs->sysctl_nat_icmp_send &&
            (pp->protocol == IPPROTO_TCP ||
             pp->protocol == IPPROTO_UDP ||
             pp->protocol == IPPROTO_SCTP)) {
@@ -1087,7 +1155,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
                                          sizeof(_ports), _ports);
                if (pptr == NULL)
                        return NF_ACCEPT;       /* Not for me */
-               if (ip_vs_lookup_real_service(af, iph.protocol,
+               if (ip_vs_lookup_real_service(net, af, iph.protocol,
                                              &iph.saddr,
                                              pptr[0])) {
                        /*
@@ -1202,14 +1270,15 @@ ip_vs_local_reply6(unsigned int hooknum, struct sk_buff *skb,
 static int
 ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 {
+       struct net *net = NULL;
        struct iphdr *iph;
        struct icmphdr  _icmph, *ic;
        struct iphdr    _ciph, *cih;    /* The ip header contained within the ICMP */
        struct ip_vs_iphdr ciph;
        struct ip_vs_conn *cp;
        struct ip_vs_protocol *pp;
+       struct ip_vs_proto_data *pd;
        unsigned int offset, ihl, verdict;
-       union nf_inet_addr snet;
 
        *related = 1;
 
@@ -1249,9 +1318,11 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
        if (cih == NULL)
                return NF_ACCEPT; /* The packet looks wrong, ignore */
 
-       pp = ip_vs_proto_get(cih->protocol);
-       if (!pp)
+       net = skb_net(skb);
+       pd = ip_vs_proto_data_get(net, cih->protocol);
+       if (!pd)
                return NF_ACCEPT;
+       pp = pd->pp;
 
        /* Is the embedded protocol header present? */
        if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
@@ -1265,18 +1336,9 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 
        ip_vs_fill_iphdr(AF_INET, cih, &ciph);
        /* The embedded headers contain source and dest in reverse order */
-       cp = pp->conn_in_get(AF_INET, skb, pp, &ciph, offset, 1);
-       if (!cp) {
-               /* The packet could also belong to a local client */
-               cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1);
-               if (cp) {
-                       snet.ip = iph->saddr;
-                       return handle_response_icmp(AF_INET, skb, &snet,
-                                                   cih->protocol, cp, pp,
-                                                   offset, ihl);
-               }
+       cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, 1);
+       if (!cp)
                return NF_ACCEPT;
-       }
 
        verdict = NF_DROP;
 
@@ -1312,6 +1374,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 static int
 ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
 {
+       struct net *net = NULL;
        struct ipv6hdr *iph;
        struct icmp6hdr _icmph, *ic;
        struct ipv6hdr  _ciph, *cih;    /* The ip header contained
@@ -1319,8 +1382,8 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
        struct ip_vs_iphdr ciph;
        struct ip_vs_conn *cp;
        struct ip_vs_protocol *pp;
+       struct ip_vs_proto_data *pd;
        unsigned int offset, verdict;
-       union nf_inet_addr snet;
        struct rt6_info *rt;
 
        *related = 1;
@@ -1361,9 +1424,11 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
        if (cih == NULL)
                return NF_ACCEPT; /* The packet looks wrong, ignore */
 
-       pp = ip_vs_proto_get(cih->nexthdr);
-       if (!pp)
+       net = skb_net(skb);
+       pd = ip_vs_proto_data_get(net, cih->nexthdr);
+       if (!pd)
                return NF_ACCEPT;
+       pp = pd->pp;
 
        /* Is the embedded protocol header present? */
        /* TODO: we don't support fragmentation at the moment anyways */
@@ -1377,19 +1442,9 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
 
        ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
        /* The embedded headers contain source and dest in reverse order */
-       cp = pp->conn_in_get(AF_INET6, skb, pp, &ciph, offset, 1);
-       if (!cp) {
-               /* The packet could also belong to a local client */
-               cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1);
-               if (cp) {
-                       ipv6_addr_copy(&snet.in6, &iph->saddr);
-                       return handle_response_icmp(AF_INET6, skb, &snet,
-                                                   cih->nexthdr,
-                                                   cp, pp, offset,
-                                                   sizeof(struct ipv6hdr));
-               }
+       cp = pp->conn_in_get(AF_INET6, skb, &ciph, offset, 1);
+       if (!cp)
                return NF_ACCEPT;
-       }
 
        verdict = NF_DROP;
 
@@ -1423,10 +1478,13 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
 static unsigned int
 ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
 {
+       struct net *net;
        struct ip_vs_iphdr iph;
        struct ip_vs_protocol *pp;
+       struct ip_vs_proto_data *pd;
        struct ip_vs_conn *cp;
        int ret, restart, pkts;
+       struct netns_ipvs *ipvs;
 
        /* Already marked as IPVS request or reply? */
        if (skb->ipvs_property)
@@ -1480,20 +1538,21 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
                        ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
                }
 
+       net = skb_net(skb);
        /* Protocol supported? */
-       pp = ip_vs_proto_get(iph.protocol);
-       if (unlikely(!pp))
+       pd = ip_vs_proto_data_get(net, iph.protocol);
+       if (unlikely(!pd))
                return NF_ACCEPT;
-
+       pp = pd->pp;
        /*
         * Check if the packet belongs to an existing connection entry
         */
-       cp = pp->conn_in_get(af, skb, pp, &iph, iph.len, 0);
+       cp = pp->conn_in_get(af, skb, &iph, iph.len, 0);
 
        if (unlikely(!cp)) {
                int v;
 
-               if (!pp->conn_schedule(af, skb, pp, &v, &cp))
+               if (!pp->conn_schedule(af, skb, pd, &v, &cp))
                        return v;
        }
 
@@ -1505,12 +1564,13 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
        }
 
        IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
-
+       net = skb_net(skb);
+       ipvs = net_ipvs(net);
        /* Check the server status */
        if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
                /* the destination server is not available */
 
-               if (sysctl_ip_vs_expire_nodest_conn) {
+               if (ipvs->sysctl_expire_nodest_conn) {
                        /* try to expire the connection immediately */
                        ip_vs_conn_expire_now(cp);
                }
@@ -1521,7 +1581,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
        }
 
        ip_vs_in_stats(cp, skb);
-       restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp);
+       restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
        if (cp->packet_xmit)
                ret = cp->packet_xmit(skb, cp, pp);
                /* do not touch skb anymore */
@@ -1535,35 +1595,41 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
         *
         * Sync connection if it is about to close to
        * encourage the standby servers to update the connections timeout
+        *
+        * For ONE_PKT let ip_vs_sync_conn() do the filter work.
         */
-       pkts = atomic_add_return(1, &cp->in_pkts);
-       if (af == AF_INET && (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
+
+       if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+               pkts = ipvs->sysctl_sync_threshold[0];
+       else
+               pkts = atomic_add_return(1, &cp->in_pkts);
+
+       if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
            cp->protocol == IPPROTO_SCTP) {
                if ((cp->state == IP_VS_SCTP_S_ESTABLISHED &&
-                       (pkts % sysctl_ip_vs_sync_threshold[1]
-                        == sysctl_ip_vs_sync_threshold[0])) ||
+                       (pkts % ipvs->sysctl_sync_threshold[1]
+                        == ipvs->sysctl_sync_threshold[0])) ||
                                (cp->old_state != cp->state &&
                                 ((cp->state == IP_VS_SCTP_S_CLOSED) ||
                                  (cp->state == IP_VS_SCTP_S_SHUT_ACK_CLI) ||
                                  (cp->state == IP_VS_SCTP_S_SHUT_ACK_SER)))) {
-                       ip_vs_sync_conn(cp);
+                       ip_vs_sync_conn(net, cp);
                        goto out;
                }
        }
 
        /* Keep this block last: TCP and others with pp->num_states <= 1 */
-       else if (af == AF_INET &&
-           (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
+       else if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
            (((cp->protocol != IPPROTO_TCP ||
               cp->state == IP_VS_TCP_S_ESTABLISHED) &&
-             (pkts % sysctl_ip_vs_sync_threshold[1]
-              == sysctl_ip_vs_sync_threshold[0])) ||
+             (pkts % ipvs->sysctl_sync_threshold[1]
+              == ipvs->sysctl_sync_threshold[0])) ||
             ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
              ((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
               (cp->state == IP_VS_TCP_S_CLOSE) ||
               (cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
               (cp->state == IP_VS_TCP_S_TIME_WAIT)))))
-               ip_vs_sync_conn(cp);
+               ip_vs_sync_conn(net, cp);
 out:
        cp->old_state = cp->state;
 
@@ -1782,7 +1848,39 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
        },
 #endif
 };
+/*
+ *     Initialize IP Virtual Server netns mem.
+ */
+static int __net_init __ip_vs_init(struct net *net)
+{
+       struct netns_ipvs *ipvs;
 
+       ipvs = net_generic(net, ip_vs_net_id);
+       if (ipvs == NULL) {
+               pr_err("%s(): no memory.\n", __func__);
+               return -ENOMEM;
+       }
+       ipvs->net = net;
+       /* Counters used for creating unique names */
+       ipvs->gen = atomic_read(&ipvs_netns_cnt);
+       atomic_inc(&ipvs_netns_cnt);
+       net->ipvs = ipvs;
+       printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
+                        sizeof(struct netns_ipvs), ipvs->gen);
+       return 0;
+}
+
+static void __net_exit __ip_vs_cleanup(struct net *net)
+{
+       IP_VS_DBG(10, "ipvs netns %d released\n", net_ipvs(net)->gen);
+}
+
+static struct pernet_operations ipvs_core_ops = {
+       .init = __ip_vs_init,
+       .exit = __ip_vs_cleanup,
+       .id   = &ip_vs_net_id,
+       .size = sizeof(struct netns_ipvs),
+};
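
For readers unfamiliar with the pernet machinery registered above: because ipvs_core_ops sets both .id and .size, register_pernet_subsys() arranges for a zeroed struct netns_ipvs to be allocated for every namespace before __ip_vs_init() runs, and net_generic(net, ip_vs_net_id) hands back that same per-namespace block afterwards. A minimal illustrative accessor follows; the helper name is made up, the patch itself goes through net_ipvs().

    /* Illustrative only: fetch the per-namespace IPVS state installed by
     * ipvs_core_ops; equivalent in spirit to the net_ipvs() helper. */
    static inline struct netns_ipvs *example_ipvs_state(struct net *net)
    {
            return net_generic(net, ip_vs_net_id);
    }
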
 
 /*
  *     Initialize IP Virtual Server
@@ -1791,8 +1889,11 @@ static int __init ip_vs_init(void)
 {
        int ret;
 
-       ip_vs_estimator_init();
+       ret = register_pernet_subsys(&ipvs_core_ops);   /* Alloc ip_vs struct */
+       if (ret < 0)
+               return ret;
 
+       ip_vs_estimator_init();
        ret = ip_vs_control_init();
        if (ret < 0) {
                pr_err("can't setup control.\n");
@@ -1813,15 +1914,23 @@ static int __init ip_vs_init(void)
                goto cleanup_app;
        }
 
+       ret = ip_vs_sync_init();
+       if (ret < 0) {
+               pr_err("can't setup sync data.\n");
+               goto cleanup_conn;
+       }
+
        ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
        if (ret < 0) {
                pr_err("can't register hooks.\n");
-               goto cleanup_conn;
+               goto cleanup_sync;
        }
 
        pr_info("ipvs loaded.\n");
        return ret;
 
+cleanup_sync:
+       ip_vs_sync_cleanup();
   cleanup_conn:
        ip_vs_conn_cleanup();
   cleanup_app:
@@ -1831,17 +1940,20 @@ static int __init ip_vs_init(void)
        ip_vs_control_cleanup();
   cleanup_estimator:
        ip_vs_estimator_cleanup();
+       unregister_pernet_subsys(&ipvs_core_ops);       /* free ip_vs struct */
        return ret;
 }
 
 static void __exit ip_vs_cleanup(void)
 {
        nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
+       ip_vs_sync_cleanup();
        ip_vs_conn_cleanup();
        ip_vs_app_cleanup();
        ip_vs_protocol_cleanup();
        ip_vs_control_cleanup();
        ip_vs_estimator_cleanup();
+       unregister_pernet_subsys(&ipvs_core_ops);       /* free ip_vs struct */
        pr_info("ipvs unloaded.\n");
 }
 
index ba98e1308f3ced1a19414259285d2bdba2f37fd3..d69ec26b6bd4741a4fa92a07a407b4b2ffe1f650 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/mutex.h>
 
 #include <net/net_namespace.h>
+#include <linux/nsproxy.h>
 #include <net/ip.h>
 #ifdef CONFIG_IP_VS_IPV6
 #include <net/ipv6.h>
@@ -57,42 +58,7 @@ static DEFINE_MUTEX(__ip_vs_mutex);
 /* lock for service table */
 static DEFINE_RWLOCK(__ip_vs_svc_lock);
 
-/* lock for table with the real services */
-static DEFINE_RWLOCK(__ip_vs_rs_lock);
-
-/* lock for state and timeout tables */
-static DEFINE_SPINLOCK(ip_vs_securetcp_lock);
-
-/* lock for drop entry handling */
-static DEFINE_SPINLOCK(__ip_vs_dropentry_lock);
-
-/* lock for drop packet handling */
-static DEFINE_SPINLOCK(__ip_vs_droppacket_lock);
-
-/* 1/rate drop and drop-entry variables */
-int ip_vs_drop_rate = 0;
-int ip_vs_drop_counter = 0;
-static atomic_t ip_vs_dropentry = ATOMIC_INIT(0);
-
-/* number of virtual services */
-static int ip_vs_num_services = 0;
-
 /* sysctl variables */
-static int sysctl_ip_vs_drop_entry = 0;
-static int sysctl_ip_vs_drop_packet = 0;
-static int sysctl_ip_vs_secure_tcp = 0;
-static int sysctl_ip_vs_amemthresh = 1024;
-static int sysctl_ip_vs_am_droprate = 10;
-int sysctl_ip_vs_cache_bypass = 0;
-int sysctl_ip_vs_expire_nodest_conn = 0;
-int sysctl_ip_vs_expire_quiescent_template = 0;
-int sysctl_ip_vs_sync_threshold[2] = { 3, 50 };
-int sysctl_ip_vs_nat_icmp_send = 0;
-#ifdef CONFIG_IP_VS_NFCT
-int sysctl_ip_vs_conntrack;
-#endif
-int sysctl_ip_vs_snat_reroute = 1;
-
 
 #ifdef CONFIG_IP_VS_DEBUG
 static int sysctl_ip_vs_debug_level = 0;
@@ -105,7 +71,8 @@ int ip_vs_get_debug_level(void)
 
 #ifdef CONFIG_IP_VS_IPV6
 /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
-static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
+static int __ip_vs_addr_is_local_v6(struct net *net,
+                                   const struct in6_addr *addr)
 {
        struct rt6_info *rt;
        struct flowi fl = {
@@ -114,7 +81,7 @@ static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
                .fl6_src = { .s6_addr32 = {0, 0, 0, 0} },
        };
 
-       rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
+       rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl);
        if (rt && rt->rt6i_dev && (rt->rt6i_dev->flags & IFF_LOOPBACK))
                        return 1;
 
@@ -125,7 +92,7 @@ static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
  *     update_defense_level is called from keventd and from sysctl,
  *     so it needs to protect itself from softirqs
  */
-static void update_defense_level(void)
+static void update_defense_level(struct netns_ipvs *ipvs)
 {
        struct sysinfo i;
        static int old_secure_tcp = 0;
@@ -141,73 +108,73 @@ static void update_defense_level(void)
        /* si_swapinfo(&i); */
        /* availmem = availmem - (i.totalswap - i.freeswap); */
 
-       nomem = (availmem < sysctl_ip_vs_amemthresh);
+       nomem = (availmem < ipvs->sysctl_amemthresh);
 
        local_bh_disable();
 
        /* drop_entry */
-       spin_lock(&__ip_vs_dropentry_lock);
-       switch (sysctl_ip_vs_drop_entry) {
+       spin_lock(&ipvs->dropentry_lock);
+       switch (ipvs->sysctl_drop_entry) {
        case 0:
-               atomic_set(&ip_vs_dropentry, 0);
+               atomic_set(&ipvs->dropentry, 0);
                break;
        case 1:
                if (nomem) {
-                       atomic_set(&ip_vs_dropentry, 1);
-                       sysctl_ip_vs_drop_entry = 2;
+                       atomic_set(&ipvs->dropentry, 1);
+                       ipvs->sysctl_drop_entry = 2;
                } else {
-                       atomic_set(&ip_vs_dropentry, 0);
+                       atomic_set(&ipvs->dropentry, 0);
                }
                break;
        case 2:
                if (nomem) {
-                       atomic_set(&ip_vs_dropentry, 1);
+                       atomic_set(&ipvs->dropentry, 1);
                } else {
-                       atomic_set(&ip_vs_dropentry, 0);
-                       sysctl_ip_vs_drop_entry = 1;
+                       atomic_set(&ipvs->dropentry, 0);
+                       ipvs->sysctl_drop_entry = 1;
                };
                break;
        case 3:
-               atomic_set(&ip_vs_dropentry, 1);
+               atomic_set(&ipvs->dropentry, 1);
                break;
        }
-       spin_unlock(&__ip_vs_dropentry_lock);
+       spin_unlock(&ipvs->dropentry_lock);
 
        /* drop_packet */
-       spin_lock(&__ip_vs_droppacket_lock);
-       switch (sysctl_ip_vs_drop_packet) {
+       spin_lock(&ipvs->droppacket_lock);
+       switch (ipvs->sysctl_drop_packet) {
        case 0:
-               ip_vs_drop_rate = 0;
+               ipvs->drop_rate = 0;
                break;
        case 1:
                if (nomem) {
-                       ip_vs_drop_rate = ip_vs_drop_counter
-                               = sysctl_ip_vs_amemthresh /
-                               (sysctl_ip_vs_amemthresh-availmem);
-                       sysctl_ip_vs_drop_packet = 2;
+                       ipvs->drop_rate = ipvs->drop_counter
+                               = ipvs->sysctl_amemthresh /
+                               (ipvs->sysctl_amemthresh-availmem);
+                       ipvs->sysctl_drop_packet = 2;
                } else {
-                       ip_vs_drop_rate = 0;
+                       ipvs->drop_rate = 0;
                }
                break;
        case 2:
                if (nomem) {
-                       ip_vs_drop_rate = ip_vs_drop_counter
-                               = sysctl_ip_vs_amemthresh /
-                               (sysctl_ip_vs_amemthresh-availmem);
+                       ipvs->drop_rate = ipvs->drop_counter
+                               = ipvs->sysctl_amemthresh /
+                               (ipvs->sysctl_amemthresh-availmem);
                } else {
-                       ip_vs_drop_rate = 0;
-                       sysctl_ip_vs_drop_packet = 1;
+                       ipvs->drop_rate = 0;
+                       ipvs->sysctl_drop_packet = 1;
                }
                break;
        case 3:
-               ip_vs_drop_rate = sysctl_ip_vs_am_droprate;
+               ipvs->drop_rate = ipvs->sysctl_am_droprate;
                break;
        }
-       spin_unlock(&__ip_vs_droppacket_lock);
+       spin_unlock(&ipvs->droppacket_lock);
 
        /* secure_tcp */
-       spin_lock(&ip_vs_securetcp_lock);
-       switch (sysctl_ip_vs_secure_tcp) {
+       spin_lock(&ipvs->securetcp_lock);
+       switch (ipvs->sysctl_secure_tcp) {
        case 0:
                if (old_secure_tcp >= 2)
                        to_change = 0;
@@ -216,7 +183,7 @@ static void update_defense_level(void)
                if (nomem) {
                        if (old_secure_tcp < 2)
                                to_change = 1;
-                       sysctl_ip_vs_secure_tcp = 2;
+                       ipvs->sysctl_secure_tcp = 2;
                } else {
                        if (old_secure_tcp >= 2)
                                to_change = 0;
@@ -229,7 +196,7 @@ static void update_defense_level(void)
                } else {
                        if (old_secure_tcp >= 2)
                                to_change = 0;
-                       sysctl_ip_vs_secure_tcp = 1;
+                       ipvs->sysctl_secure_tcp = 1;
                }
                break;
        case 3:
@@ -237,10 +204,11 @@ static void update_defense_level(void)
                        to_change = 1;
                break;
        }
-       old_secure_tcp = sysctl_ip_vs_secure_tcp;
+       old_secure_tcp = ipvs->sysctl_secure_tcp;
        if (to_change >= 0)
-               ip_vs_protocol_timeout_change(sysctl_ip_vs_secure_tcp>1);
-       spin_unlock(&ip_vs_securetcp_lock);
+               ip_vs_protocol_timeout_change(ipvs,
+                                             ipvs->sysctl_secure_tcp > 1);
+       spin_unlock(&ipvs->securetcp_lock);
 
        local_bh_enable();
 }
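
As a rough worked example of the drop_packet rate computed above (numbers hypothetical): with sysctl_amemthresh = 1024 and availmem = 768, drop_rate becomes 1024 / (1024 - 768) = 4, i.e. roughly one packet in four is dropped while memory pressure persists.

    /* Sketch of the drop-rate arithmetic; example_drop_rate(1024, 768) == 4.
     * The real code only reaches this when availmem < amemthresh, so the
     * divisor is always >= 1. */
    static int example_drop_rate(int amemthresh, int availmem)
    {
            return amemthresh / (amemthresh - availmem);
    }
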
@@ -250,16 +218,16 @@ static void update_defense_level(void)
  *     Timer for checking the defense
  */
 #define DEFENSE_TIMER_PERIOD   1*HZ
-static void defense_work_handler(struct work_struct *work);
-static DECLARE_DELAYED_WORK(defense_work, defense_work_handler);
 
 static void defense_work_handler(struct work_struct *work)
 {
-       update_defense_level();
-       if (atomic_read(&ip_vs_dropentry))
-               ip_vs_random_dropentry();
+       struct netns_ipvs *ipvs =
+               container_of(work, struct netns_ipvs, defense_work.work);
 
-       schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD);
+       update_defense_level(ipvs);
+       if (atomic_read(&ipvs->dropentry))
+               ip_vs_random_dropentry(ipvs->net);
+       schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
 }
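
The handler above recovers its netns_ipvs with container_of(), which presumes that the per-namespace control init (not shown in this hunk) primes the work item roughly as follows. This is a sketch of that assumption, not code quoted from the patch:

    /* Assumed wiring in the per-netns control init: */
    INIT_DELAYED_WORK(&ipvs->defense_work, defense_work_handler);
    schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
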
 
 int
@@ -287,33 +255,13 @@ static struct list_head ip_vs_svc_table[IP_VS_SVC_TAB_SIZE];
 /* the service table hashed by fwmark */
 static struct list_head ip_vs_svc_fwm_table[IP_VS_SVC_TAB_SIZE];
 
-/*
- *     Hash table: for real service lookups
- */
-#define IP_VS_RTAB_BITS 4
-#define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
-#define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
-
-static struct list_head ip_vs_rtable[IP_VS_RTAB_SIZE];
-
-/*
- *     Trash for destinations
- */
-static LIST_HEAD(ip_vs_dest_trash);
-
-/*
- *     FTP & NULL virtual service counters
- */
-static atomic_t ip_vs_ftpsvc_counter = ATOMIC_INIT(0);
-static atomic_t ip_vs_nullsvc_counter = ATOMIC_INIT(0);
-
 
 /*
  *     Returns hash value for virtual service
  */
-static __inline__ unsigned
-ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
-                 __be16 port)
+static inline unsigned
+ip_vs_svc_hashkey(struct net *net, int af, unsigned proto,
+                 const union nf_inet_addr *addr, __be16 port)
 {
        register unsigned porth = ntohs(port);
        __be32 addr_fold = addr->ip;
@@ -323,6 +271,7 @@ ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
                addr_fold = addr->ip6[0]^addr->ip6[1]^
                            addr->ip6[2]^addr->ip6[3];
 #endif
+       addr_fold ^= ((size_t)net>>8);
 
        return (proto^ntohl(addr_fold)^(porth>>IP_VS_SVC_TAB_BITS)^porth)
                & IP_VS_SVC_TAB_MASK;
@@ -331,13 +280,13 @@ ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
 /*
  *     Returns hash value of fwmark for virtual service lookup
  */
-static __inline__ unsigned ip_vs_svc_fwm_hashkey(__u32 fwmark)
+static inline unsigned ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark)
 {
-       return fwmark & IP_VS_SVC_TAB_MASK;
+       return (((size_t)net>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK;
 }
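
Note how both hash keys above fold the owning namespace into the bucket index: the net pointer is shifted right by 8 before the XOR, presumably because the low, alignment-dominated bits of a kernel pointer add little entropy. A stand-alone sketch of the same trick, reusing the existing IP_VS_SVC_TAB_MASK macro:

    /* Sketch of the netns folding used by ip_vs_svc_fwm_hashkey() above. */
    static inline unsigned example_fwm_hashkey(const void *net, __u32 fwmark)
    {
            return (((size_t)net >> 8) ^ fwmark) & IP_VS_SVC_TAB_MASK;
    }
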
 
 /*
- *     Hashes a service in the ip_vs_svc_table by <proto,addr,port>
+ *     Hashes a service in the ip_vs_svc_table by <netns,proto,addr,port>
  *     or in the ip_vs_svc_fwm_table by fwmark.
  *     Should be called with locked tables.
  */
@@ -353,16 +302,16 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc)
 
        if (svc->fwmark == 0) {
                /*
-                *  Hash it by <protocol,addr,port> in ip_vs_svc_table
+                *  Hash it by <netns,protocol,addr,port> in ip_vs_svc_table
                 */
-               hash = ip_vs_svc_hashkey(svc->af, svc->protocol, &svc->addr,
-                                        svc->port);
+               hash = ip_vs_svc_hashkey(svc->net, svc->af, svc->protocol,
+                                        &svc->addr, svc->port);
                list_add(&svc->s_list, &ip_vs_svc_table[hash]);
        } else {
                /*
-                *  Hash it by fwmark in ip_vs_svc_fwm_table
+                *  Hash it by fwmark in svc_fwm_table
                 */
-               hash = ip_vs_svc_fwm_hashkey(svc->fwmark);
+               hash = ip_vs_svc_fwm_hashkey(svc->net, svc->fwmark);
                list_add(&svc->f_list, &ip_vs_svc_fwm_table[hash]);
        }
 
@@ -374,7 +323,7 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc)
 
 
 /*
- *     Unhashes a service from ip_vs_svc_table/ip_vs_svc_fwm_table.
+ *     Unhashes a service from svc_table / svc_fwm_table.
  *     Should be called with locked tables.
  */
 static int ip_vs_svc_unhash(struct ip_vs_service *svc)
@@ -386,10 +335,10 @@ static int ip_vs_svc_unhash(struct ip_vs_service *svc)
        }
 
        if (svc->fwmark == 0) {
-               /* Remove it from the ip_vs_svc_table table */
+               /* Remove it from the svc_table */
                list_del(&svc->s_list);
        } else {
-               /* Remove it from the ip_vs_svc_fwm_table table */
+               /* Remove it from the svc_fwm_table */
                list_del(&svc->f_list);
        }
 
@@ -400,23 +349,24 @@ static int ip_vs_svc_unhash(struct ip_vs_service *svc)
 
 
 /*
- *     Get service by {proto,addr,port} in the service table.
+ *     Get service by {netns, proto,addr,port} in the service table.
  */
 static inline struct ip_vs_service *
-__ip_vs_service_find(int af, __u16 protocol, const union nf_inet_addr *vaddr,
-                   __be16 vport)
+__ip_vs_service_find(struct net *net, int af, __u16 protocol,
+                    const union nf_inet_addr *vaddr, __be16 vport)
 {
        unsigned hash;
        struct ip_vs_service *svc;
 
        /* Check for "full" addressed entries */
-       hash = ip_vs_svc_hashkey(af, protocol, vaddr, vport);
+       hash = ip_vs_svc_hashkey(net, af, protocol, vaddr, vport);
 
        list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list){
                if ((svc->af == af)
                    && ip_vs_addr_equal(af, &svc->addr, vaddr)
                    && (svc->port == vport)
-                   && (svc->protocol == protocol)) {
+                   && (svc->protocol == protocol)
+                   && net_eq(svc->net, net)) {
                        /* HIT */
                        return svc;
                }
@@ -430,16 +380,17 @@ __ip_vs_service_find(int af, __u16 protocol, const union nf_inet_addr *vaddr,
  *     Get service by {fwmark} in the service table.
  */
 static inline struct ip_vs_service *
-__ip_vs_svc_fwm_find(int af, __u32 fwmark)
+__ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark)
 {
        unsigned hash;
        struct ip_vs_service *svc;
 
        /* Check for fwmark addressed entries */
-       hash = ip_vs_svc_fwm_hashkey(fwmark);
+       hash = ip_vs_svc_fwm_hashkey(net, fwmark);
 
        list_for_each_entry(svc, &ip_vs_svc_fwm_table[hash], f_list) {
-               if (svc->fwmark == fwmark && svc->af == af) {
+               if (svc->fwmark == fwmark && svc->af == af
+                   && net_eq(svc->net, net)) {
                        /* HIT */
                        return svc;
                }
@@ -449,42 +400,44 @@ __ip_vs_svc_fwm_find(int af, __u32 fwmark)
 }
 
 struct ip_vs_service *
-ip_vs_service_get(int af, __u32 fwmark, __u16 protocol,
+ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol,
                  const union nf_inet_addr *vaddr, __be16 vport)
 {
        struct ip_vs_service *svc;
+       struct netns_ipvs *ipvs = net_ipvs(net);
 
        read_lock(&__ip_vs_svc_lock);
 
        /*
         *      Check the table hashed by fwmark first
         */
-       if (fwmark && (svc = __ip_vs_svc_fwm_find(af, fwmark)))
+       svc = __ip_vs_svc_fwm_find(net, af, fwmark);
+       if (fwmark && svc)
                goto out;
 
        /*
         *      Check the table hashed by <protocol,addr,port>
         *      for "full" addressed entries
         */
-       svc = __ip_vs_service_find(af, protocol, vaddr, vport);
+       svc = __ip_vs_service_find(net, af, protocol, vaddr, vport);
 
        if (svc == NULL
            && protocol == IPPROTO_TCP
-           && atomic_read(&ip_vs_ftpsvc_counter)
+           && atomic_read(&ipvs->ftpsvc_counter)
            && (vport == FTPDATA || ntohs(vport) >= PROT_SOCK)) {
                /*
                 * Check if ftp service entry exists, the packet
                 * might belong to FTP data connections.
                 */
-               svc = __ip_vs_service_find(af, protocol, vaddr, FTPPORT);
+               svc = __ip_vs_service_find(net, af, protocol, vaddr, FTPPORT);
        }
 
        if (svc == NULL
-           && atomic_read(&ip_vs_nullsvc_counter)) {
+           && atomic_read(&ipvs->nullsvc_counter)) {
                /*
                 * Check if the catch-all port (port zero) exists
                 */
-               svc = __ip_vs_service_find(af, protocol, vaddr, 0);
+               svc = __ip_vs_service_find(net, af, protocol, vaddr, 0);
        }
 
   out:
@@ -519,6 +472,7 @@ __ip_vs_unbind_svc(struct ip_vs_dest *dest)
                              svc->fwmark,
                              IP_VS_DBG_ADDR(svc->af, &svc->addr),
                              ntohs(svc->port), atomic_read(&svc->usecnt));
+               free_percpu(svc->stats.cpustats);
                kfree(svc);
        }
 }
@@ -545,10 +499,10 @@ static inline unsigned ip_vs_rs_hashkey(int af,
 }
 
 /*
- *     Hashes ip_vs_dest in ip_vs_rtable by <proto,addr,port>.
+ *     Hashes ip_vs_dest in rs_table by <proto,addr,port>.
  *     should be called with locked tables.
  */
-static int ip_vs_rs_hash(struct ip_vs_dest *dest)
+static int ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest)
 {
        unsigned hash;
 
@@ -562,19 +516,19 @@ static int ip_vs_rs_hash(struct ip_vs_dest *dest)
         */
        hash = ip_vs_rs_hashkey(dest->af, &dest->addr, dest->port);
 
-       list_add(&dest->d_list, &ip_vs_rtable[hash]);
+       list_add(&dest->d_list, &ipvs->rs_table[hash]);
 
        return 1;
 }
 
 /*
- *     UNhashes ip_vs_dest from ip_vs_rtable.
+ *     UNhashes ip_vs_dest from rs_table.
  *     should be called with locked tables.
  */
 static int ip_vs_rs_unhash(struct ip_vs_dest *dest)
 {
        /*
-        * Remove it from the ip_vs_rtable table.
+        * Remove it from the rs_table.
         */
        if (!list_empty(&dest->d_list)) {
                list_del(&dest->d_list);
@@ -588,10 +542,11 @@ static int ip_vs_rs_unhash(struct ip_vs_dest *dest)
  *     Lookup real service by <proto,addr,port> in the real service table.
  */
 struct ip_vs_dest *
-ip_vs_lookup_real_service(int af, __u16 protocol,
+ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol,
                          const union nf_inet_addr *daddr,
                          __be16 dport)
 {
+       struct netns_ipvs *ipvs = net_ipvs(net);
        unsigned hash;
        struct ip_vs_dest *dest;
 
@@ -601,19 +556,19 @@ ip_vs_lookup_real_service(int af, __u16 protocol,
         */
        hash = ip_vs_rs_hashkey(af, daddr, dport);
 
-       read_lock(&__ip_vs_rs_lock);
-       list_for_each_entry(dest, &ip_vs_rtable[hash], d_list) {
+       read_lock(&ipvs->rs_lock);
+       list_for_each_entry(dest, &ipvs->rs_table[hash], d_list) {
                if ((dest->af == af)
                    && ip_vs_addr_equal(af, &dest->addr, daddr)
                    && (dest->port == dport)
                    && ((dest->protocol == protocol) ||
                        dest->vfwmark)) {
                        /* HIT */
-                       read_unlock(&__ip_vs_rs_lock);
+                       read_unlock(&ipvs->rs_lock);
                        return dest;
                }
        }
-       read_unlock(&__ip_vs_rs_lock);
+       read_unlock(&ipvs->rs_lock);
 
        return NULL;
 }
@@ -652,15 +607,16 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
  * ip_vs_lookup_real_service() looked promising, but
  * seems not to work as expected.
  */
-struct ip_vs_dest *ip_vs_find_dest(int af, const union nf_inet_addr *daddr,
+struct ip_vs_dest *ip_vs_find_dest(struct net *net, int af,
+                                  const union nf_inet_addr *daddr,
                                   __be16 dport,
                                   const union nf_inet_addr *vaddr,
-                                  __be16 vport, __u16 protocol)
+                                  __be16 vport, __u16 protocol, __u32 fwmark)
 {
        struct ip_vs_dest *dest;
        struct ip_vs_service *svc;
 
-       svc = ip_vs_service_get(af, 0, protocol, vaddr, vport);
+       svc = ip_vs_service_get(net, af, fwmark, protocol, vaddr, vport);
        if (!svc)
                return NULL;
        dest = ip_vs_lookup_dest(svc, daddr, dport);
@@ -685,11 +641,12 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
                     __be16 dport)
 {
        struct ip_vs_dest *dest, *nxt;
+       struct netns_ipvs *ipvs = net_ipvs(svc->net);
 
        /*
         * Find the destination in trash
         */
-       list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) {
+       list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, n_list) {
                IP_VS_DBG_BUF(3, "Destination %u/%s:%u still in trash, "
                              "dest->refcnt=%d\n",
                              dest->vfwmark,
@@ -720,6 +677,7 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
                        list_del(&dest->n_list);
                        ip_vs_dst_reset(dest);
                        __ip_vs_unbind_svc(dest);
+                       free_percpu(dest->stats.cpustats);
                        kfree(dest);
                }
        }
@@ -737,14 +695,16 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
  *  are expired, and the refcnt of each destination in the trash must
  *  be 1, so we simply release them here.
  */
-static void ip_vs_trash_cleanup(void)
+static void ip_vs_trash_cleanup(struct net *net)
 {
        struct ip_vs_dest *dest, *nxt;
+       struct netns_ipvs *ipvs = net_ipvs(net);
 
-       list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) {
+       list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, n_list) {
                list_del(&dest->n_list);
                ip_vs_dst_reset(dest);
                __ip_vs_unbind_svc(dest);
+               free_percpu(dest->stats.cpustats);
                kfree(dest);
        }
 }
@@ -768,6 +728,7 @@ static void
 __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
                    struct ip_vs_dest_user_kern *udest, int add)
 {
+       struct netns_ipvs *ipvs = net_ipvs(svc->net);
        int conn_flags;
 
        /* set the weight and the flags */
@@ -780,12 +741,12 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
                conn_flags |= IP_VS_CONN_F_NOOUTPUT;
        } else {
                /*
-                *    Put the real service in ip_vs_rtable if not present.
+                *    Put the real service in rs_table if not present.
                 *    For now only for NAT!
                 */
-               write_lock_bh(&__ip_vs_rs_lock);
-               ip_vs_rs_hash(dest);
-               write_unlock_bh(&__ip_vs_rs_lock);
+               write_lock_bh(&ipvs->rs_lock);
+               ip_vs_rs_hash(ipvs, dest);
+               write_unlock_bh(&ipvs->rs_lock);
        }
        atomic_set(&dest->conn_flags, conn_flags);
 
@@ -813,7 +774,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
        spin_unlock_bh(&dest->dst_lock);
 
        if (add)
-               ip_vs_new_estimator(&dest->stats);
+               ip_vs_new_estimator(svc->net, &dest->stats);
 
        write_lock_bh(&__ip_vs_svc_lock);
 
@@ -850,12 +811,12 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
                atype = ipv6_addr_type(&udest->addr.in6);
                if ((!(atype & IPV6_ADDR_UNICAST) ||
                        atype & IPV6_ADDR_LINKLOCAL) &&
-                       !__ip_vs_addr_is_local_v6(&udest->addr.in6))
+                       !__ip_vs_addr_is_local_v6(svc->net, &udest->addr.in6))
                        return -EINVAL;
        } else
 #endif
        {
-               atype = inet_addr_type(&init_net, udest->addr.ip);
+               atype = inet_addr_type(svc->net, udest->addr.ip);
                if (atype != RTN_LOCAL && atype != RTN_UNICAST)
                        return -EINVAL;
        }
@@ -865,6 +826,11 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
                pr_err("%s(): no memory.\n", __func__);
                return -ENOMEM;
        }
+       dest->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+       if (!dest->stats.cpustats) {
+               pr_err("%s() alloc_percpu failed\n", __func__);
+               goto err_alloc;
+       }
 
        dest->af = svc->af;
        dest->protocol = svc->protocol;
@@ -888,6 +854,10 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
 
        LeaveFunction(2);
        return 0;
+
+err_alloc:
+       kfree(dest);
+       return -ENOMEM;
 }
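
The cpustats handling added above follows the usual per-CPU counter lifecycle: alloc_percpu() at creation and free_percpu() on every error and teardown path. A self-contained sketch with a hypothetical stats type (not the real ip_vs_cpu_stats layout):

    /* Sketch: hypothetical per-CPU counter mirroring the cpustats pattern. */
    struct example_cpu_stats {
            u64 pkts;
    };

    static struct example_cpu_stats __percpu *example_stats_alloc(void)
    {
            return alloc_percpu(struct example_cpu_stats); /* NULL on failure */
    }

    static void example_stats_free(struct example_cpu_stats __percpu *s)
    {
            free_percpu(s); /* matches every error/teardown path above */
    }
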
 
 
@@ -1006,16 +976,18 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
 /*
  *     Delete a destination (must be already unlinked from the service)
  */
-static void __ip_vs_del_dest(struct ip_vs_dest *dest)
+static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest)
 {
-       ip_vs_kill_estimator(&dest->stats);
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       ip_vs_kill_estimator(net, &dest->stats);
 
        /*
         *  Remove it from the d-linked list with the real services.
         */
-       write_lock_bh(&__ip_vs_rs_lock);
+       write_lock_bh(&ipvs->rs_lock);
        ip_vs_rs_unhash(dest);
-       write_unlock_bh(&__ip_vs_rs_lock);
+       write_unlock_bh(&ipvs->rs_lock);
 
        /*
         *  Decrease the refcnt of the dest, and free the dest
@@ -1034,6 +1006,7 @@ static void __ip_vs_del_dest(struct ip_vs_dest *dest)
                   and only one user context can update virtual service at a
                   time, so the operation here is OK */
                atomic_dec(&dest->svc->refcnt);
+               free_percpu(dest->stats.cpustats);
                kfree(dest);
        } else {
                IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, "
@@ -1041,7 +1014,7 @@ static void __ip_vs_del_dest(struct ip_vs_dest *dest)
                              IP_VS_DBG_ADDR(dest->af, &dest->addr),
                              ntohs(dest->port),
                              atomic_read(&dest->refcnt));
-               list_add(&dest->n_list, &ip_vs_dest_trash);
+               list_add(&dest->n_list, &ipvs->dest_trash);
                atomic_inc(&dest->refcnt);
        }
 }
@@ -1105,7 +1078,7 @@ ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
        /*
         *      Delete the destination
         */
-       __ip_vs_del_dest(dest);
+       __ip_vs_del_dest(svc->net, dest);
 
        LeaveFunction(2);
 
@@ -1117,13 +1090,14 @@ ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
  *     Add a service into the service hash table
  */
 static int
-ip_vs_add_service(struct ip_vs_service_user_kern *u,
+ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
                  struct ip_vs_service **svc_p)
 {
        int ret = 0;
        struct ip_vs_scheduler *sched = NULL;
        struct ip_vs_pe *pe = NULL;
        struct ip_vs_service *svc = NULL;
+       struct netns_ipvs *ipvs = net_ipvs(net);
 
        /* increase the module use count */
        ip_vs_use_count_inc();
@@ -1137,7 +1111,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
        }
 
        if (u->pe_name && *u->pe_name) {
-               pe = ip_vs_pe_get(u->pe_name);
+               pe = ip_vs_pe_getbyname(u->pe_name);
                if (pe == NULL) {
                        pr_info("persistence engine module ip_vs_pe_%s "
                                "not found\n", u->pe_name);
@@ -1159,6 +1133,11 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
                ret = -ENOMEM;
                goto out_err;
        }
+       svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+       if (!svc->stats.cpustats) {
+               pr_err("%s() alloc_percpu failed\n", __func__);
+               goto out_err;
+       }
 
        /* I'm the first user of the service */
        atomic_set(&svc->usecnt, 0);
@@ -1172,6 +1151,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
        svc->flags = u->flags;
        svc->timeout = u->timeout * HZ;
        svc->netmask = u->netmask;
+       svc->net = net;
 
        INIT_LIST_HEAD(&svc->destinations);
        rwlock_init(&svc->sched_lock);
@@ -1189,15 +1169,15 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
 
        /* Update the virtual service counters */
        if (svc->port == FTPPORT)
-               atomic_inc(&ip_vs_ftpsvc_counter);
+               atomic_inc(&ipvs->ftpsvc_counter);
        else if (svc->port == 0)
-               atomic_inc(&ip_vs_nullsvc_counter);
+               atomic_inc(&ipvs->nullsvc_counter);
 
-       ip_vs_new_estimator(&svc->stats);
+       ip_vs_new_estimator(net, &svc->stats);
 
        /* Count only IPv4 services for old get/setsockopt interface */
        if (svc->af == AF_INET)
-               ip_vs_num_services++;
+               ipvs->num_services++;
 
        /* Hash the service into the service table */
        write_lock_bh(&__ip_vs_svc_lock);
@@ -1207,6 +1187,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
        *svc_p = svc;
        return 0;
 
+
  out_err:
        if (svc != NULL) {
                ip_vs_unbind_scheduler(svc);
@@ -1215,6 +1196,8 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
                        ip_vs_app_inc_put(svc->inc);
                        local_bh_enable();
                }
+               if (svc->stats.cpustats)
+                       free_percpu(svc->stats.cpustats);
                kfree(svc);
        }
        ip_vs_scheduler_put(sched);
@@ -1248,7 +1231,7 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
        old_sched = sched;
 
        if (u->pe_name && *u->pe_name) {
-               pe = ip_vs_pe_get(u->pe_name);
+               pe = ip_vs_pe_getbyname(u->pe_name);
                if (pe == NULL) {
                        pr_info("persistence engine module ip_vs_pe_%s "
                                "not found\n", u->pe_name);
@@ -1334,14 +1317,15 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
        struct ip_vs_dest *dest, *nxt;
        struct ip_vs_scheduler *old_sched;
        struct ip_vs_pe *old_pe;
+       struct netns_ipvs *ipvs = net_ipvs(svc->net);
 
        pr_info("%s: enter\n", __func__);
 
        /* Count only IPv4 services for old get/setsockopt interface */
        if (svc->af == AF_INET)
-               ip_vs_num_services--;
+               ipvs->num_services--;
 
-       ip_vs_kill_estimator(&svc->stats);
+       ip_vs_kill_estimator(svc->net, &svc->stats);
 
        /* Unbind scheduler */
        old_sched = svc->scheduler;
@@ -1364,16 +1348,16 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
         */
        list_for_each_entry_safe(dest, nxt, &svc->destinations, n_list) {
                __ip_vs_unlink_dest(svc, dest, 0);
-               __ip_vs_del_dest(dest);
+               __ip_vs_del_dest(svc->net, dest);
        }
 
        /*
         *    Update the virtual service counters
         */
        if (svc->port == FTPPORT)
-               atomic_dec(&ip_vs_ftpsvc_counter);
+               atomic_dec(&ipvs->ftpsvc_counter);
        else if (svc->port == 0)
-               atomic_dec(&ip_vs_nullsvc_counter);
+               atomic_dec(&ipvs->nullsvc_counter);
 
        /*
         *    Free the service if nobody refers to it
@@ -1383,6 +1367,7 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
                              svc->fwmark,
                              IP_VS_DBG_ADDR(svc->af, &svc->addr),
                              ntohs(svc->port), atomic_read(&svc->usecnt));
+               free_percpu(svc->stats.cpustats);
                kfree(svc);
        }
 
@@ -1428,17 +1413,19 @@ static int ip_vs_del_service(struct ip_vs_service *svc)
 /*
  *     Flush all the virtual services
  */
-static int ip_vs_flush(void)
+static int ip_vs_flush(struct net *net)
 {
        int idx;
        struct ip_vs_service *svc, *nxt;
 
        /*
-        * Flush the service table hashed by <protocol,addr,port>
+        * Flush the service table hashed by <netns,protocol,addr,port>
         */
        for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
-               list_for_each_entry_safe(svc, nxt, &ip_vs_svc_table[idx], s_list) {
-                       ip_vs_unlink_service(svc);
+               list_for_each_entry_safe(svc, nxt, &ip_vs_svc_table[idx],
+                                        s_list) {
+                       if (net_eq(svc->net, net))
+                               ip_vs_unlink_service(svc);
                }
        }
 
@@ -1448,7 +1435,8 @@ static int ip_vs_flush(void)
        for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry_safe(svc, nxt,
                                         &ip_vs_svc_fwm_table[idx], f_list) {
-                       ip_vs_unlink_service(svc);
+                       if (net_eq(svc->net, net))
+                               ip_vs_unlink_service(svc);
                }
        }
 
@@ -1472,24 +1460,26 @@ static int ip_vs_zero_service(struct ip_vs_service *svc)
        return 0;
 }
 
-static int ip_vs_zero_all(void)
+static int ip_vs_zero_all(struct net *net)
 {
        int idx;
        struct ip_vs_service *svc;
 
        for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
-                       ip_vs_zero_service(svc);
+                       if (net_eq(svc->net, net))
+                               ip_vs_zero_service(svc);
                }
        }
 
        for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
-                       ip_vs_zero_service(svc);
+                       if (net_eq(svc->net, net))
+                               ip_vs_zero_service(svc);
                }
        }
 
-       ip_vs_zero_stats(&ip_vs_stats);
+       ip_vs_zero_stats(net_ipvs(net)->tot_stats);
        return 0;
 }
 
@@ -1498,6 +1488,7 @@ static int
 proc_do_defense_mode(ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
 {
+       struct net *net = current->nsproxy->net_ns;
        int *valp = table->data;
        int val = *valp;
        int rc;
@@ -1508,7 +1499,7 @@ proc_do_defense_mode(ctl_table *table, int write,
                        /* Restore the correct value */
                        *valp = val;
                } else {
-                       update_defense_level();
+                       update_defense_level(net_ipvs(net));
                }
        }
        return rc;
@@ -1534,45 +1525,54 @@ proc_do_sync_threshold(ctl_table *table, int write,
        return rc;
 }
 
+static int
+proc_do_sync_mode(ctl_table *table, int write,
+                    void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       int *valp = table->data;
+       int val = *valp;
+       int rc;
+
+       rc = proc_dointvec(table, write, buffer, lenp, ppos);
+       if (write && (*valp != val)) {
+               if ((*valp < 0) || (*valp > 1)) {
+                       /* Restore the correct value */
+                       *valp = val;
+               } else {
+                       struct net *net = current->nsproxy->net_ns;
+                       ip_vs_sync_switch_mode(net, val);
+               }
+       }
+       return rc;
+}
 
 /*
  *     IPVS sysctl table (under the /proc/sys/net/ipv4/vs/)
+ *     Do not change the order or insert new entries without
+ *     aligning with the netns init in __ip_vs_control_init()
  */
 
 static struct ctl_table vs_vars[] = {
        {
                .procname       = "amemthresh",
-               .data           = &sysctl_ip_vs_amemthresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
-#ifdef CONFIG_IP_VS_DEBUG
-       {
-               .procname       = "debug_level",
-               .data           = &sysctl_ip_vs_debug_level,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-       },
-#endif
        {
                .procname       = "am_droprate",
-               .data           = &sysctl_ip_vs_am_droprate,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "drop_entry",
-               .data           = &sysctl_ip_vs_drop_entry,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_do_defense_mode,
        },
        {
                .procname       = "drop_packet",
-               .data           = &sysctl_ip_vs_drop_packet,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_do_defense_mode,
@@ -1580,7 +1580,6 @@ static struct ctl_table vs_vars[] = {
 #ifdef CONFIG_IP_VS_NFCT
        {
                .procname       = "conntrack",
-               .data           = &sysctl_ip_vs_conntrack,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
@@ -1588,18 +1587,62 @@ static struct ctl_table vs_vars[] = {
 #endif
        {
                .procname       = "secure_tcp",
-               .data           = &sysctl_ip_vs_secure_tcp,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_do_defense_mode,
        },
        {
                .procname       = "snat_reroute",
-               .data           = &sysctl_ip_vs_snat_reroute,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
+       {
+               .procname       = "sync_version",
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = &proc_do_sync_mode,
+       },
+       {
+               .procname       = "cache_bypass",
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "expire_nodest_conn",
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "expire_quiescent_template",
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "sync_threshold",
+               .maxlen         =
+                       sizeof(((struct netns_ipvs *)0)->sysctl_sync_threshold),
+               .mode           = 0644,
+               .proc_handler   = proc_do_sync_threshold,
+       },
+       {
+               .procname       = "nat_icmp_send",
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+#ifdef CONFIG_IP_VS_DEBUG
+       {
+               .procname       = "debug_level",
+               .data           = &sysctl_ip_vs_debug_level,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+#endif
 #if 0
        {
                .procname       = "timeout_established",
@@ -1686,41 +1729,6 @@ static struct ctl_table vs_vars[] = {
                .proc_handler   = proc_dointvec_jiffies,
        },
 #endif
-       {
-               .procname       = "cache_bypass",
-               .data           = &sysctl_ip_vs_cache_bypass,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-       },
-       {
-               .procname       = "expire_nodest_conn",
-               .data           = &sysctl_ip_vs_expire_nodest_conn,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-       },
-       {
-               .procname       = "expire_quiescent_template",
-               .data           = &sysctl_ip_vs_expire_quiescent_template,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-       },
-       {
-               .procname       = "sync_threshold",
-               .data           = &sysctl_ip_vs_sync_threshold,
-               .maxlen         = sizeof(sysctl_ip_vs_sync_threshold),
-               .mode           = 0644,
-               .proc_handler   = proc_do_sync_threshold,
-       },
-       {
-               .procname       = "nat_icmp_send",
-               .data           = &sysctl_ip_vs_nat_icmp_send,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-       },
        { }
 };
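
Because the .data pointers were dropped from vs_vars above, each namespace presumably duplicates the table and re-points every entry at its own netns_ipvs fields inside __ip_vs_control_init(), which lies outside this hunk. A hedged sketch of that expected wiring, with only the first two indices shown for illustration:

    /* Illustrative sketch: per-netns copy of vs_vars. The entry order must
     * match the table above, hence the "do not change order" comment. */
    struct ctl_table *tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL);

    if (tbl) {
            tbl[0].data = &ipvs->sysctl_amemthresh;   /* "amemthresh"  */
            tbl[1].data = &ipvs->sysctl_am_droprate;  /* "am_droprate" */
            /* ... remaining entries follow vs_vars order ... */
    }
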
 
@@ -1732,11 +1740,10 @@ const struct ctl_path net_vs_ctl_path[] = {
 };
 EXPORT_SYMBOL_GPL(net_vs_ctl_path);
 
-static struct ctl_table_header * sysctl_header;
-
 #ifdef CONFIG_PROC_FS
 
 struct ip_vs_iter {
+       struct seq_net_private p;  /* Do not move this, netns depends upon it */
        struct list_head *table;
        int bucket;
 };
@@ -1763,6 +1770,7 @@ static inline const char *ip_vs_fwd_name(unsigned flags)
 /* Get the Nth entry in the two lists */
 static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
 {
+       struct net *net = seq_file_net(seq);
        struct ip_vs_iter *iter = seq->private;
        int idx;
        struct ip_vs_service *svc;
@@ -1770,7 +1778,7 @@ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
        /* look in hash by protocol */
        for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
-                       if (pos-- == 0){
+                       if (net_eq(svc->net, net) && pos-- == 0) {
                                iter->table = ip_vs_svc_table;
                                iter->bucket = idx;
                                return svc;
@@ -1781,7 +1789,7 @@ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
        /* keep looking in fwmark */
        for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
-                       if (pos-- == 0) {
+                       if (net_eq(svc->net, net) && pos-- == 0) {
                                iter->table = ip_vs_svc_fwm_table;
                                iter->bucket = idx;
                                return svc;
@@ -1935,7 +1943,7 @@ static const struct seq_operations ip_vs_info_seq_ops = {
 
 static int ip_vs_info_open(struct inode *inode, struct file *file)
 {
-       return seq_open_private(file, &ip_vs_info_seq_ops,
+       return seq_open_net(inode, file, &ip_vs_info_seq_ops,
                        sizeof(struct ip_vs_iter));
 }
 
@@ -1949,13 +1957,11 @@ static const struct file_operations ip_vs_info_fops = {
 
 #endif
 
-struct ip_vs_stats ip_vs_stats = {
-       .lock = __SPIN_LOCK_UNLOCKED(ip_vs_stats.lock),
-};
-
 #ifdef CONFIG_PROC_FS
 static int ip_vs_stats_show(struct seq_file *seq, void *v)
 {
+       struct net *net = seq_file_single_net(seq);
+       struct ip_vs_stats *tot_stats = net_ipvs(net)->tot_stats;
 
 /*               01234567 01234567 01234567 0123456701234567 0123456701234567 */
        seq_puts(seq,
@@ -1963,29 +1969,29 @@ static int ip_vs_stats_show(struct seq_file *seq, void *v)
        seq_printf(seq,
                   "   Conns  Packets  Packets            Bytes            Bytes\n");
 
-       spin_lock_bh(&ip_vs_stats.lock);
-       seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.ustats.conns,
-                  ip_vs_stats.ustats.inpkts, ip_vs_stats.ustats.outpkts,
-                  (unsigned long long) ip_vs_stats.ustats.inbytes,
-                  (unsigned long long) ip_vs_stats.ustats.outbytes);
+       spin_lock_bh(&tot_stats->lock);
+       seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", tot_stats->ustats.conns,
+                  tot_stats->ustats.inpkts, tot_stats->ustats.outpkts,
+                  (unsigned long long) tot_stats->ustats.inbytes,
+                  (unsigned long long) tot_stats->ustats.outbytes);
 
 /*                 01234567 01234567 01234567 0123456701234567 0123456701234567 */
        seq_puts(seq,
                   " Conns/s   Pkts/s   Pkts/s          Bytes/s          Bytes/s\n");
        seq_printf(seq,"%8X %8X %8X %16X %16X\n",
-                       ip_vs_stats.ustats.cps,
-                       ip_vs_stats.ustats.inpps,
-                       ip_vs_stats.ustats.outpps,
-                       ip_vs_stats.ustats.inbps,
-                       ip_vs_stats.ustats.outbps);
-       spin_unlock_bh(&ip_vs_stats.lock);
+                       tot_stats->ustats.cps,
+                       tot_stats->ustats.inpps,
+                       tot_stats->ustats.outpps,
+                       tot_stats->ustats.inbps,
+                       tot_stats->ustats.outbps);
+       spin_unlock_bh(&tot_stats->lock);
 
        return 0;
 }
 
 static int ip_vs_stats_seq_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, ip_vs_stats_show, NULL);
+       return single_open_net(inode, file, ip_vs_stats_show);
 }
 
 static const struct file_operations ip_vs_stats_fops = {
@@ -1996,13 +2002,70 @@ static const struct file_operations ip_vs_stats_fops = {
        .release = single_release,
 };
 
+static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
+{
+       struct net *net = seq_file_single_net(seq);
+       struct ip_vs_stats *tot_stats = net_ipvs(net)->tot_stats;
+       int i;
+
+/*               01234567 01234567 01234567 0123456701234567 0123456701234567 */
+       seq_puts(seq,
+                "       Total Incoming Outgoing         Incoming         Outgoing\n");
+       seq_printf(seq,
+                  "CPU    Conns  Packets  Packets            Bytes            Bytes\n");
+
+       for_each_possible_cpu(i) {
+               struct ip_vs_cpu_stats *u = per_cpu_ptr(net->ipvs->cpustats, i);
+               seq_printf(seq, "%3X %8X %8X %8X %16LX %16LX\n",
+                           i, u->ustats.conns, u->ustats.inpkts,
+                           u->ustats.outpkts, (__u64)u->ustats.inbytes,
+                           (__u64)u->ustats.outbytes);
+       }
+
+       spin_lock_bh(&tot_stats->lock);
+       seq_printf(seq, "  ~ %8X %8X %8X %16LX %16LX\n\n",
+                  tot_stats->ustats.conns, tot_stats->ustats.inpkts,
+                  tot_stats->ustats.outpkts,
+                  (unsigned long long) tot_stats->ustats.inbytes,
+                  (unsigned long long) tot_stats->ustats.outbytes);
+
+/*                 01234567 01234567 01234567 0123456701234567 0123456701234567 */
+       seq_puts(seq,
+                  "     Conns/s   Pkts/s   Pkts/s          Bytes/s          Bytes/s\n");
+       seq_printf(seq, "    %8X %8X %8X %16X %16X\n",
+                       tot_stats->ustats.cps,
+                       tot_stats->ustats.inpps,
+                       tot_stats->ustats.outpps,
+                       tot_stats->ustats.inbps,
+                       tot_stats->ustats.outbps);
+       spin_unlock_bh(&tot_stats->lock);
+
+       return 0;
+}
+
+static int ip_vs_stats_percpu_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open_net(inode, file, ip_vs_stats_percpu_show);
+}
+
+static const struct file_operations ip_vs_stats_percpu_fops = {
+       .owner = THIS_MODULE,
+       .open = ip_vs_stats_percpu_seq_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
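
The per-cpu file above walks every possible CPU with per_cpu_ptr(); the counters it reads are allocated with alloc_percpu() in the per-netns init routine later in this diff. A short sketch of that allocate/sum/free life cycle, outside the patch; example_cpu_stats and the helper names are hypothetical, the percpu API calls are real.

/* Sketch (not from this patch): life cycle of a percpu stats block. */
struct example_cpu_stats {
	u64 inbytes;
	u64 outbytes;
};

static struct example_cpu_stats __percpu *example_alloc(void)
{
	return alloc_percpu(struct example_cpu_stats);	/* zeroed on each CPU */
}

static u64 example_sum_inbytes(struct example_cpu_stats __percpu *stats)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)	/* offline CPUs keep their old counts */
		sum += per_cpu_ptr(stats, cpu)->inbytes;
	return sum;
}

static void example_free(struct example_cpu_stats __percpu *stats)
{
	free_percpu(stats);
}

On 32-bit machines a plain read of a 64-bit counter can tear, which is why the summing code in ip_vs_est.c further down wraps the byte counters in u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh().
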
 #endif
 
 /*
  *     Set timeout values for tcp tcpfin udp in the timeout_table.
  */
-static int ip_vs_set_timeout(struct ip_vs_timeout_user *u)
+static int ip_vs_set_timeout(struct net *net, struct ip_vs_timeout_user *u)
 {
+#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
+       struct ip_vs_proto_data *pd;
+#endif
+
        IP_VS_DBG(2, "Setting timeout tcp:%d tcpfin:%d udp:%d\n",
                  u->tcp_timeout,
                  u->tcp_fin_timeout,
@@ -2010,19 +2073,22 @@ static int ip_vs_set_timeout(struct ip_vs_timeout_user *u)
 
 #ifdef CONFIG_IP_VS_PROTO_TCP
        if (u->tcp_timeout) {
-               ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_ESTABLISHED]
+               pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+               pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]
                        = u->tcp_timeout * HZ;
        }
 
        if (u->tcp_fin_timeout) {
-               ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_FIN_WAIT]
+               pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+               pd->timeout_table[IP_VS_TCP_S_FIN_WAIT]
                        = u->tcp_fin_timeout * HZ;
        }
 #endif
 
 #ifdef CONFIG_IP_VS_PROTO_UDP
        if (u->udp_timeout) {
-               ip_vs_protocol_udp.timeout_table[IP_VS_UDP_S_NORMAL]
+               pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
+               pd->timeout_table[IP_VS_UDP_S_NORMAL]
                        = u->udp_timeout * HZ;
        }
 #endif
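
The only unit handling here is that userspace always speaks in seconds while timeout_table[] holds jiffies, hence the * HZ on this write path and the matching / HZ in __ip_vs_get_timeouts() below. A trivial sketch of the two conversions (hypothetical helper names):

/* Sketch: the seconds <-> jiffies conversions used by the two helpers. */
static unsigned long example_secs_to_jiffies(int secs)
{
	return secs * HZ;	/* set path: user supplies seconds */
}

static int example_jiffies_to_secs(unsigned long j)
{
	return j / HZ;		/* get path: report seconds back */
}
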
@@ -2087,6 +2153,7 @@ static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest,
 static int
 do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 {
+       struct net *net = sock_net(sk);
        int ret;
        unsigned char arg[MAX_ARG_LEN];
        struct ip_vs_service_user *usvc_compat;
@@ -2121,19 +2188,20 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 
        if (cmd == IP_VS_SO_SET_FLUSH) {
                /* Flush the virtual service */
-               ret = ip_vs_flush();
+               ret = ip_vs_flush(net);
                goto out_unlock;
        } else if (cmd == IP_VS_SO_SET_TIMEOUT) {
                /* Set timeout values for (tcp tcpfin udp) */
-               ret = ip_vs_set_timeout((struct ip_vs_timeout_user *)arg);
+               ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg);
                goto out_unlock;
        } else if (cmd == IP_VS_SO_SET_STARTDAEMON) {
                struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
-               ret = start_sync_thread(dm->state, dm->mcast_ifn, dm->syncid);
+               ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
+                                       dm->syncid);
                goto out_unlock;
        } else if (cmd == IP_VS_SO_SET_STOPDAEMON) {
                struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
-               ret = stop_sync_thread(dm->state);
+               ret = stop_sync_thread(net, dm->state);
                goto out_unlock;
        }
 
@@ -2148,7 +2216,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
        if (cmd == IP_VS_SO_SET_ZERO) {
                /* if no service address is set, zero counters in all */
                if (!usvc.fwmark && !usvc.addr.ip && !usvc.port) {
-                       ret = ip_vs_zero_all();
+                       ret = ip_vs_zero_all(net);
                        goto out_unlock;
                }
        }
@@ -2165,10 +2233,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 
        /* Lookup the exact service by <protocol, addr, port> or fwmark */
        if (usvc.fwmark == 0)
-               svc = __ip_vs_service_find(usvc.af, usvc.protocol,
+               svc = __ip_vs_service_find(net, usvc.af, usvc.protocol,
                                           &usvc.addr, usvc.port);
        else
-               svc = __ip_vs_svc_fwm_find(usvc.af, usvc.fwmark);
+               svc = __ip_vs_svc_fwm_find(net, usvc.af, usvc.fwmark);
 
        if (cmd != IP_VS_SO_SET_ADD
            && (svc == NULL || svc->protocol != usvc.protocol)) {
@@ -2181,7 +2249,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
                if (svc != NULL)
                        ret = -EEXIST;
                else
-                       ret = ip_vs_add_service(&usvc, &svc);
+                       ret = ip_vs_add_service(net, &usvc, &svc);
                break;
        case IP_VS_SO_SET_EDIT:
                ret = ip_vs_edit_service(svc, &usvc);
@@ -2241,7 +2309,8 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
 }
 
 static inline int
-__ip_vs_get_service_entries(const struct ip_vs_get_services *get,
+__ip_vs_get_service_entries(struct net *net,
+                           const struct ip_vs_get_services *get,
                            struct ip_vs_get_services __user *uptr)
 {
        int idx, count=0;
@@ -2252,7 +2321,7 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
        for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
                        /* Only expose IPv4 entries to old interface */
-                       if (svc->af != AF_INET)
+                       if (svc->af != AF_INET || !net_eq(svc->net, net))
                                continue;
 
                        if (count >= get->num_services)
@@ -2271,7 +2340,7 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
        for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
                        /* Only expose IPv4 entries to old interface */
-                       if (svc->af != AF_INET)
+                       if (svc->af != AF_INET || !net_eq(svc->net, net))
                                continue;
 
                        if (count >= get->num_services)
@@ -2291,7 +2360,7 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
 }
 
 static inline int
-__ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
+__ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
                         struct ip_vs_get_dests __user *uptr)
 {
        struct ip_vs_service *svc;
@@ -2299,9 +2368,9 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
        int ret = 0;
 
        if (get->fwmark)
-               svc = __ip_vs_svc_fwm_find(AF_INET, get->fwmark);
+               svc = __ip_vs_svc_fwm_find(net, AF_INET, get->fwmark);
        else
-               svc = __ip_vs_service_find(AF_INET, get->protocol, &addr,
+               svc = __ip_vs_service_find(net, AF_INET, get->protocol, &addr,
                                           get->port);
 
        if (svc) {
@@ -2336,17 +2405,21 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
 }
 
 static inline void
-__ip_vs_get_timeouts(struct ip_vs_timeout_user *u)
+__ip_vs_get_timeouts(struct net *net, struct ip_vs_timeout_user *u)
 {
+#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
+       struct ip_vs_proto_data *pd;
+#endif
+
 #ifdef CONFIG_IP_VS_PROTO_TCP
-       u->tcp_timeout =
-               ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
-       u->tcp_fin_timeout =
-               ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ;
+       pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+       u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
+       u->tcp_fin_timeout = pd->timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ;
 #endif
 #ifdef CONFIG_IP_VS_PROTO_UDP
+       pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
        u->udp_timeout =
-               ip_vs_protocol_udp.timeout_table[IP_VS_UDP_S_NORMAL] / HZ;
+                       pd->timeout_table[IP_VS_UDP_S_NORMAL] / HZ;
 #endif
 }
 
@@ -2375,7 +2448,10 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
        unsigned char arg[128];
        int ret = 0;
        unsigned int copylen;
+       struct net *net = sock_net(sk);
+       struct netns_ipvs *ipvs = net_ipvs(net);
 
+       BUG_ON(!net);
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
 
@@ -2418,7 +2494,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
                struct ip_vs_getinfo info;
                info.version = IP_VS_VERSION_CODE;
                info.size = ip_vs_conn_tab_size;
-               info.num_services = ip_vs_num_services;
+               info.num_services = ipvs->num_services;
                if (copy_to_user(user, &info, sizeof(info)) != 0)
                        ret = -EFAULT;
        }
@@ -2437,7 +2513,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
                        ret = -EINVAL;
                        goto out;
                }
-               ret = __ip_vs_get_service_entries(get, user);
+               ret = __ip_vs_get_service_entries(net, get, user);
        }
        break;
 
@@ -2450,10 +2526,11 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
                entry = (struct ip_vs_service_entry *)arg;
                addr.ip = entry->addr;
                if (entry->fwmark)
-                       svc = __ip_vs_svc_fwm_find(AF_INET, entry->fwmark);
+                       svc = __ip_vs_svc_fwm_find(net, AF_INET, entry->fwmark);
                else
-                       svc = __ip_vs_service_find(AF_INET, entry->protocol,
-                                                  &addr, entry->port);
+                       svc = __ip_vs_service_find(net, AF_INET,
+                                                  entry->protocol, &addr,
+                                                  entry->port);
                if (svc) {
                        ip_vs_copy_service(entry, svc);
                        if (copy_to_user(user, entry, sizeof(*entry)) != 0)
@@ -2476,7 +2553,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
                        ret = -EINVAL;
                        goto out;
                }
-               ret = __ip_vs_get_dest_entries(get, user);
+               ret = __ip_vs_get_dest_entries(net, get, user);
        }
        break;
 
@@ -2484,7 +2561,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
        {
                struct ip_vs_timeout_user t;
 
-               __ip_vs_get_timeouts(&t);
+               __ip_vs_get_timeouts(net, &t);
                if (copy_to_user(user, &t, sizeof(t)) != 0)
                        ret = -EFAULT;
        }
@@ -2495,15 +2572,17 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
                struct ip_vs_daemon_user d[2];
 
                memset(&d, 0, sizeof(d));
-               if (ip_vs_sync_state & IP_VS_STATE_MASTER) {
+               if (ipvs->sync_state & IP_VS_STATE_MASTER) {
                        d[0].state = IP_VS_STATE_MASTER;
-                       strlcpy(d[0].mcast_ifn, ip_vs_master_mcast_ifn, sizeof(d[0].mcast_ifn));
-                       d[0].syncid = ip_vs_master_syncid;
+                       strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
+                               sizeof(d[0].mcast_ifn));
+                       d[0].syncid = ipvs->master_syncid;
                }
-               if (ip_vs_sync_state & IP_VS_STATE_BACKUP) {
+               if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
                        d[1].state = IP_VS_STATE_BACKUP;
-                       strlcpy(d[1].mcast_ifn, ip_vs_backup_mcast_ifn, sizeof(d[1].mcast_ifn));
-                       d[1].syncid = ip_vs_backup_syncid;
+                       strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
+                               sizeof(d[1].mcast_ifn));
+                       d[1].syncid = ipvs->backup_syncid;
                }
                if (copy_to_user(user, &d, sizeof(d)) != 0)
                        ret = -EFAULT;
@@ -2542,6 +2621,7 @@ static struct genl_family ip_vs_genl_family = {
        .name           = IPVS_GENL_NAME,
        .version        = IPVS_GENL_VERSION,
        .maxattr        = IPVS_CMD_MAX,
+       .netnsok        = true,         /* Make ipvsadm work with netns */
 };
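
Setting .netnsok lets the IPVS generic netlink commands be issued from any network namespace; without it, genetlink refuses requests that do not originate in init_net. The handlers then have to resolve the caller's namespace themselves, which is what the skb_sknet()/skb_net() calls added below do. A hedged sketch of the generic shape, using the stock genl_info_net() helper; example_family and example_doit are hypothetical.

/* Sketch (not from this patch): a netns-aware generic netlink family. */
static struct genl_family example_family = {
	.id		= GENL_ID_GENERATE,
	.name		= "EXAMPLE",
	.version	= 1,
	.maxattr	= 3,
	.netnsok	= true,		/* accept requests from any netns */
};

static int example_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);	/* netns of the requester */

	/* ... look up and operate on per-netns state via "net" ... */
	return 0;
}
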
 
 /* Policy used for first-level command attributes */
@@ -2696,11 +2776,12 @@ static int ip_vs_genl_dump_services(struct sk_buff *skb,
        int idx = 0, i;
        int start = cb->args[0];
        struct ip_vs_service *svc;
+       struct net *net = skb_sknet(skb);
 
        mutex_lock(&__ip_vs_mutex);
        for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
                list_for_each_entry(svc, &ip_vs_svc_table[i], s_list) {
-                       if (++idx <= start)
+                       if (++idx <= start || !net_eq(svc->net, net))
                                continue;
                        if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
                                idx--;
@@ -2711,7 +2792,7 @@ static int ip_vs_genl_dump_services(struct sk_buff *skb,
 
        for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
                list_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) {
-                       if (++idx <= start)
+                       if (++idx <= start || !net_eq(svc->net, net))
                                continue;
                        if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
                                idx--;
@@ -2727,7 +2808,8 @@ nla_put_failure:
        return skb->len;
 }
 
-static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
+static int ip_vs_genl_parse_service(struct net *net,
+                                   struct ip_vs_service_user_kern *usvc,
                                    struct nlattr *nla, int full_entry,
                                    struct ip_vs_service **ret_svc)
 {
@@ -2770,9 +2852,9 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
        }
 
        if (usvc->fwmark)
-               svc = __ip_vs_svc_fwm_find(usvc->af, usvc->fwmark);
+               svc = __ip_vs_svc_fwm_find(net, usvc->af, usvc->fwmark);
        else
-               svc = __ip_vs_service_find(usvc->af, usvc->protocol,
+               svc = __ip_vs_service_find(net, usvc->af, usvc->protocol,
                                           &usvc->addr, usvc->port);
        *ret_svc = svc;
 
@@ -2809,13 +2891,14 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
        return 0;
 }
 
-static struct ip_vs_service *ip_vs_genl_find_service(struct nlattr *nla)
+static struct ip_vs_service *ip_vs_genl_find_service(struct net *net,
+                                                    struct nlattr *nla)
 {
        struct ip_vs_service_user_kern usvc;
        struct ip_vs_service *svc;
        int ret;
 
-       ret = ip_vs_genl_parse_service(&usvc, nla, 0, &svc);
+       ret = ip_vs_genl_parse_service(net, &usvc, nla, 0, &svc);
        return ret ? ERR_PTR(ret) : svc;
 }
 
@@ -2883,6 +2966,7 @@ static int ip_vs_genl_dump_dests(struct sk_buff *skb,
        struct ip_vs_service *svc;
        struct ip_vs_dest *dest;
        struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1];
+       struct net *net = skb_sknet(skb);
 
        mutex_lock(&__ip_vs_mutex);
 
@@ -2891,7 +2975,8 @@ static int ip_vs_genl_dump_dests(struct sk_buff *skb,
                        IPVS_CMD_ATTR_MAX, ip_vs_cmd_policy))
                goto out_err;
 
-       svc = ip_vs_genl_find_service(attrs[IPVS_CMD_ATTR_SERVICE]);
+
+       svc = ip_vs_genl_find_service(net, attrs[IPVS_CMD_ATTR_SERVICE]);
        if (IS_ERR(svc) || svc == NULL)
                goto out_err;
 
@@ -3005,20 +3090,23 @@ nla_put_failure:
 static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
                                   struct netlink_callback *cb)
 {
+       struct net *net = skb_net(skb);
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
        mutex_lock(&__ip_vs_mutex);
-       if ((ip_vs_sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
+       if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
                if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
-                                          ip_vs_master_mcast_ifn,
-                                          ip_vs_master_syncid, cb) < 0)
+                                          ipvs->master_mcast_ifn,
+                                          ipvs->master_syncid, cb) < 0)
                        goto nla_put_failure;
 
                cb->args[0] = 1;
        }
 
-       if ((ip_vs_sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
+       if ((ipvs->sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
                if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP,
-                                          ip_vs_backup_mcast_ifn,
-                                          ip_vs_backup_syncid, cb) < 0)
+                                          ipvs->backup_mcast_ifn,
+                                          ipvs->backup_syncid, cb) < 0)
                        goto nla_put_failure;
 
                cb->args[1] = 1;
@@ -3030,31 +3118,33 @@ nla_put_failure:
        return skb->len;
 }
 
-static int ip_vs_genl_new_daemon(struct nlattr **attrs)
+static int ip_vs_genl_new_daemon(struct net *net, struct nlattr **attrs)
 {
        if (!(attrs[IPVS_DAEMON_ATTR_STATE] &&
              attrs[IPVS_DAEMON_ATTR_MCAST_IFN] &&
              attrs[IPVS_DAEMON_ATTR_SYNC_ID]))
                return -EINVAL;
 
-       return start_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
+       return start_sync_thread(net,
+                                nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
                                 nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
                                 nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]));
 }
 
-static int ip_vs_genl_del_daemon(struct nlattr **attrs)
+static int ip_vs_genl_del_daemon(struct net *net, struct nlattr **attrs)
 {
        if (!attrs[IPVS_DAEMON_ATTR_STATE])
                return -EINVAL;
 
-       return stop_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
+       return stop_sync_thread(net,
+                               nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
 }
 
-static int ip_vs_genl_set_config(struct nlattr **attrs)
+static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs)
 {
        struct ip_vs_timeout_user t;
 
-       __ip_vs_get_timeouts(&t);
+       __ip_vs_get_timeouts(net, &t);
 
        if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP])
                t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]);
@@ -3066,7 +3156,7 @@ static int ip_vs_genl_set_config(struct nlattr **attrs)
        if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP])
                t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]);
 
-       return ip_vs_set_timeout(&t);
+       return ip_vs_set_timeout(net, &t);
 }
 
 static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
@@ -3076,16 +3166,20 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
        struct ip_vs_dest_user_kern udest;
        int ret = 0, cmd;
        int need_full_svc = 0, need_full_dest = 0;
+       struct net *net;
+       struct netns_ipvs *ipvs;
 
+       net = skb_sknet(skb);
+       ipvs = net_ipvs(net);
        cmd = info->genlhdr->cmd;
 
        mutex_lock(&__ip_vs_mutex);
 
        if (cmd == IPVS_CMD_FLUSH) {
-               ret = ip_vs_flush();
+               ret = ip_vs_flush(net);
                goto out;
        } else if (cmd == IPVS_CMD_SET_CONFIG) {
-               ret = ip_vs_genl_set_config(info->attrs);
+               ret = ip_vs_genl_set_config(net, info->attrs);
                goto out;
        } else if (cmd == IPVS_CMD_NEW_DAEMON ||
                   cmd == IPVS_CMD_DEL_DAEMON) {
@@ -3101,13 +3195,13 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
                }
 
                if (cmd == IPVS_CMD_NEW_DAEMON)
-                       ret = ip_vs_genl_new_daemon(daemon_attrs);
+                       ret = ip_vs_genl_new_daemon(net, daemon_attrs);
                else
-                       ret = ip_vs_genl_del_daemon(daemon_attrs);
+                       ret = ip_vs_genl_del_daemon(net, daemon_attrs);
                goto out;
        } else if (cmd == IPVS_CMD_ZERO &&
                   !info->attrs[IPVS_CMD_ATTR_SERVICE]) {
-               ret = ip_vs_zero_all();
+               ret = ip_vs_zero_all(net);
                goto out;
        }
 
@@ -3117,7 +3211,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
        if (cmd == IPVS_CMD_NEW_SERVICE || cmd == IPVS_CMD_SET_SERVICE)
                need_full_svc = 1;
 
-       ret = ip_vs_genl_parse_service(&usvc,
+       ret = ip_vs_genl_parse_service(net, &usvc,
                                       info->attrs[IPVS_CMD_ATTR_SERVICE],
                                       need_full_svc, &svc);
        if (ret)
@@ -3147,7 +3241,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
        switch (cmd) {
        case IPVS_CMD_NEW_SERVICE:
                if (svc == NULL)
-                       ret = ip_vs_add_service(&usvc, &svc);
+                       ret = ip_vs_add_service(net, &usvc, &svc);
                else
                        ret = -EEXIST;
                break;
@@ -3185,7 +3279,11 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
        struct sk_buff *msg;
        void *reply;
        int ret, cmd, reply_cmd;
+       struct net *net;
+       struct netns_ipvs *ipvs;
 
+       net = skb_sknet(skb);
+       ipvs = net_ipvs(net);
        cmd = info->genlhdr->cmd;
 
        if (cmd == IPVS_CMD_GET_SERVICE)
@@ -3214,7 +3312,8 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
        {
                struct ip_vs_service *svc;
 
-               svc = ip_vs_genl_find_service(info->attrs[IPVS_CMD_ATTR_SERVICE]);
+               svc = ip_vs_genl_find_service(net,
+                                             info->attrs[IPVS_CMD_ATTR_SERVICE]);
                if (IS_ERR(svc)) {
                        ret = PTR_ERR(svc);
                        goto out_err;
@@ -3234,7 +3333,7 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
        {
                struct ip_vs_timeout_user t;
 
-               __ip_vs_get_timeouts(&t);
+               __ip_vs_get_timeouts(net, &t);
 #ifdef CONFIG_IP_VS_PROTO_TCP
                NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout);
                NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
@@ -3380,62 +3479,173 @@ static void ip_vs_genl_unregister(void)
 
 /* End of Generic Netlink interface definitions */
 
+/*
+ * per netns init/exit func.
+ */
+int __net_init __ip_vs_control_init(struct net *net)
+{
+       int idx;
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ctl_table *tbl;
+
+       atomic_set(&ipvs->dropentry, 0);
+       spin_lock_init(&ipvs->dropentry_lock);
+       spin_lock_init(&ipvs->droppacket_lock);
+       spin_lock_init(&ipvs->securetcp_lock);
+       ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock);
+
+       /* Initialize rs_table */
+       for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
+               INIT_LIST_HEAD(&ipvs->rs_table[idx]);
+
+       INIT_LIST_HEAD(&ipvs->dest_trash);
+       atomic_set(&ipvs->ftpsvc_counter, 0);
+       atomic_set(&ipvs->nullsvc_counter, 0);
+
+       /* procfs stats */
+       ipvs->tot_stats = kzalloc(sizeof(struct ip_vs_stats), GFP_KERNEL);
+       if (ipvs->tot_stats == NULL) {
+               pr_err("%s(): no memory.\n", __func__);
+               return -ENOMEM;
+       }
+       ipvs->cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+       if (!ipvs->cpustats) {
+               pr_err("%s() alloc_percpu failed\n", __func__);
+               goto err_alloc;
+       }
+       spin_lock_init(&ipvs->tot_stats->lock);
+
+       proc_net_fops_create(net, "ip_vs", 0, &ip_vs_info_fops);
+       proc_net_fops_create(net, "ip_vs_stats", 0, &ip_vs_stats_fops);
+       proc_net_fops_create(net, "ip_vs_stats_percpu", 0,
+                            &ip_vs_stats_percpu_fops);
+
+       if (!net_eq(net, &init_net)) {
+               tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL);
+               if (tbl == NULL)
+                       goto err_dup;
+       } else
+               tbl = vs_vars;
+       /* Initialize sysctl defaults */
+       idx = 0;
+       ipvs->sysctl_amemthresh = 1024;
+       tbl[idx++].data = &ipvs->sysctl_amemthresh;
+       ipvs->sysctl_am_droprate = 10;
+       tbl[idx++].data = &ipvs->sysctl_am_droprate;
+       tbl[idx++].data = &ipvs->sysctl_drop_entry;
+       tbl[idx++].data = &ipvs->sysctl_drop_packet;
+#ifdef CONFIG_IP_VS_NFCT
+       tbl[idx++].data = &ipvs->sysctl_conntrack;
+#endif
+       tbl[idx++].data = &ipvs->sysctl_secure_tcp;
+       ipvs->sysctl_snat_reroute = 1;
+       tbl[idx++].data = &ipvs->sysctl_snat_reroute;
+       ipvs->sysctl_sync_ver = 1;
+       tbl[idx++].data = &ipvs->sysctl_sync_ver;
+       tbl[idx++].data = &ipvs->sysctl_cache_bypass;
+       tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn;
+       tbl[idx++].data = &ipvs->sysctl_expire_quiescent_template;
+       ipvs->sysctl_sync_threshold[0] = 3;
+       ipvs->sysctl_sync_threshold[1] = 50;
+       tbl[idx].data = &ipvs->sysctl_sync_threshold;
+       tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
+       tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
+
+
+#ifdef CONFIG_SYSCTL
+       ipvs->sysctl_hdr = register_net_sysctl_table(net, net_vs_ctl_path,
+                                                    tbl);
+       if (ipvs->sysctl_hdr == NULL) {
+               if (!net_eq(net, &init_net))
+                       kfree(tbl);
+               goto err_dup;
+       }
+#endif
+       ip_vs_new_estimator(net, ipvs->tot_stats);
+       ipvs->sysctl_tbl = tbl;
+       /* Schedule defense work */
+       INIT_DELAYED_WORK(&ipvs->defense_work, defense_work_handler);
+       schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
+       return 0;
+
+err_dup:
+       free_percpu(ipvs->cpustats);
+err_alloc:
+       kfree(ipvs->tot_stats);
+       return -ENOMEM;
+}
+
+static void __net_exit __ip_vs_control_cleanup(struct net *net)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       ip_vs_trash_cleanup(net);
+       ip_vs_kill_estimator(net, ipvs->tot_stats);
+       cancel_delayed_work_sync(&ipvs->defense_work);
+       cancel_work_sync(&ipvs->defense_work.work);
+#ifdef CONFIG_SYSCTL
+       unregister_net_sysctl_table(ipvs->sysctl_hdr);
+#endif
+       proc_net_remove(net, "ip_vs_stats_percpu");
+       proc_net_remove(net, "ip_vs_stats");
+       proc_net_remove(net, "ip_vs");
+       free_percpu(ipvs->cpustats);
+       kfree(ipvs->tot_stats);
+}
+
+static struct pernet_operations ipvs_control_ops = {
+       .init = __ip_vs_control_init,
+       .exit = __ip_vs_control_cleanup,
+};
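
This pernet_operations pair replaces the one-shot init/cleanup that used to touch only init_net: after register_pernet_subsys(), the .init hook runs for init_net and for every namespace created later, and .exit runs as each namespace is torn down (and for all remaining ones at unregister time). A minimal usage sketch with hypothetical example_* names, mirroring ipvs_control_ops above:

/* Sketch: how a module typically wires up pernet_operations. */
static int __net_init example_net_init(struct net *net)
{
	/* allocate and initialise this namespace's private state */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* tear down this namespace's private state */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};

static int __init example_module_init(void)
{
	/* .init fires for init_net and for every namespace created later */
	return register_pernet_subsys(&example_net_ops);
}

static void __exit example_module_exit(void)
{
	/* .exit fires for every namespace still alive */
	unregister_pernet_subsys(&example_net_ops);
}
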
 
 int __init ip_vs_control_init(void)
 {
-       int ret;
        int idx;
+       int ret;
 
        EnterFunction(2);
 
-       /* Initialize ip_vs_svc_table, ip_vs_svc_fwm_table, ip_vs_rtable */
+       /* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
        for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++)  {
                INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
                INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
        }
-       for(idx = 0; idx < IP_VS_RTAB_SIZE; idx++)  {
-               INIT_LIST_HEAD(&ip_vs_rtable[idx]);
+
+       ret = register_pernet_subsys(&ipvs_control_ops);
+       if (ret) {
+               pr_err("cannot register namespace.\n");
+               goto err;
        }
-       smp_wmb();
+
+       smp_wmb();      /* Do we really need it now? */
 
        ret = nf_register_sockopt(&ip_vs_sockopts);
        if (ret) {
                pr_err("cannot register sockopt.\n");
-               return ret;
+               goto err_net;
        }
 
        ret = ip_vs_genl_register();
        if (ret) {
                pr_err("cannot register Generic Netlink interface.\n");
                nf_unregister_sockopt(&ip_vs_sockopts);
-               return ret;
+               goto err_net;
        }
 
-       proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops);
-       proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops);
-
-       sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars);
-
-       ip_vs_new_estimator(&ip_vs_stats);
-
-       /* Hook the defense timer */
-       schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD);
-
        LeaveFunction(2);
        return 0;
+
+err_net:
+       unregister_pernet_subsys(&ipvs_control_ops);
+err:
+       return ret;
 }
 
 
 void ip_vs_control_cleanup(void)
 {
        EnterFunction(2);
-       ip_vs_trash_cleanup();
-       cancel_delayed_work_sync(&defense_work);
-       cancel_work_sync(&defense_work.work);
-       ip_vs_kill_estimator(&ip_vs_stats);
-       unregister_sysctl_table(sysctl_header);
-       proc_net_remove(&init_net, "ip_vs_stats");
-       proc_net_remove(&init_net, "ip_vs");
+       unregister_pernet_subsys(&ipvs_control_ops);
        ip_vs_genl_unregister();
        nf_unregister_sockopt(&ip_vs_sockopts);
        LeaveFunction(2);
index ff28801962e05883d1a469ac3b2848b51e4429fd..f560a05c965a61c528f5d8e12fac304d37b70492 100644 (file)
@@ -8,8 +8,12 @@
  *              as published by the Free Software Foundation; either version
  *              2 of the License, or (at your option) any later version.
  *
- * Changes:
- *
+ * Changes:     Hans Schillstrom <hans.schillstrom@ericsson.com>
+ *              Network name space (netns) aware.
+ *              Global data moved to netns, i.e. struct netns_ipvs.
+ *              Affected data: est_list and est_lock.
+ *              estimation_timer() now runs with one timer per netns.
+ *              get_stats() does the per-cpu summing.
  */
 
 #define KMSG_COMPONENT "IPVS"
  */
 
 
-static void estimation_timer(unsigned long arg);
+/*
+ * Sum up the per-cpu counters into one summary
+ */
+static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
+                                struct ip_vs_cpu_stats *stats)
+{
+       int i;
+
+       for_each_possible_cpu(i) {
+               struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i);
+               unsigned int start;
+               __u64 inbytes, outbytes;
+               if (i) {
+                       sum->conns += s->ustats.conns;
+                       sum->inpkts += s->ustats.inpkts;
+                       sum->outpkts += s->ustats.outpkts;
+                       do {
+                               start = u64_stats_fetch_begin_bh(&s->syncp);
+                               inbytes = s->ustats.inbytes;
+                               outbytes = s->ustats.outbytes;
+                       } while (u64_stats_fetch_retry_bh(&s->syncp, start));
+                       sum->inbytes += inbytes;
+                       sum->outbytes += outbytes;
+               } else {
+                       sum->conns = s->ustats.conns;
+                       sum->inpkts = s->ustats.inpkts;
+                       sum->outpkts = s->ustats.outpkts;
+                       do {
+                               start = u64_stats_fetch_begin_bh(&s->syncp);
+                               sum->inbytes = s->ustats.inbytes;
+                               sum->outbytes = s->ustats.outbytes;
+                       } while (u64_stats_fetch_retry_bh(&s->syncp, start));
+               }
+       }
+}
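
ip_vs_read_cpu_stats() is the reader half of the u64_stats seqcount protocol: on 32-bit the two 64-bit byte counters cannot be loaded atomically, so the reader retries whenever a writer raced with it (CPU 0 seeds the totals, the remaining CPUs are added on top). The matching writer side is not shown in this hunk; below is a hedged sketch of what it looks like, reusing the ip_vs_cpu_stats fields visible above and assuming it runs in the packet path with bottom halves already disabled, matching the _bh readers.

/* Sketch (not from this patch): writer side of the u64_stats seqcount
 * read by ip_vs_read_cpu_stats() above. */
static void example_count_packet(struct ip_vs_cpu_stats __percpu *cpustats,
				 unsigned int len)
{
	struct ip_vs_cpu_stats *s = this_cpu_ptr(cpustats);

	s->ustats.inpkts++;			/* 32-bit, no seqcount needed */
	u64_stats_update_begin(&s->syncp);	/* open the write section */
	s->ustats.inbytes += len;		/* 64-bit, guarded against torn reads */
	u64_stats_update_end(&s->syncp);
}
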
 
-static LIST_HEAD(est_list);
-static DEFINE_SPINLOCK(est_lock);
-static DEFINE_TIMER(est_timer, estimation_timer, 0, 0);
 
 static void estimation_timer(unsigned long arg)
 {
@@ -62,11 +97,16 @@ static void estimation_timer(unsigned long arg)
        u32 n_inpkts, n_outpkts;
        u64 n_inbytes, n_outbytes;
        u32 rate;
+       struct net *net = (struct net *)arg;
+       struct netns_ipvs *ipvs;
 
-       spin_lock(&est_lock);
-       list_for_each_entry(e, &est_list, list) {
+       ipvs = net_ipvs(net);
+       ip_vs_read_cpu_stats(&ipvs->tot_stats->ustats, ipvs->cpustats);
+       spin_lock(&ipvs->est_lock);
+       list_for_each_entry(e, &ipvs->est_list, list) {
                s = container_of(e, struct ip_vs_stats, est);
 
+               ip_vs_read_cpu_stats(&s->ustats, s->cpustats);
                spin_lock(&s->lock);
                n_conns = s->ustats.conns;
                n_inpkts = s->ustats.inpkts;
@@ -75,38 +115,39 @@ static void estimation_timer(unsigned long arg)
                n_outbytes = s->ustats.outbytes;
 
                /* scaled by 2^10, but divided 2 seconds */
-               rate = (n_conns - e->last_conns)<<9;
+               rate = (n_conns - e->last_conns) << 9;
                e->last_conns = n_conns;
-               e->cps += ((long)rate - (long)e->cps)>>2;
-               s->ustats.cps = (e->cps+0x1FF)>>10;
+               e->cps += ((long)rate - (long)e->cps) >> 2;
+               s->ustats.cps = (e->cps + 0x1FF) >> 10;
 
-               rate = (n_inpkts - e->last_inpkts)<<9;
+               rate = (n_inpkts - e->last_inpkts) << 9;
                e->last_inpkts = n_inpkts;
-               e->inpps += ((long)rate - (long)e->inpps)>>2;
-               s->ustats.inpps = (e->inpps+0x1FF)>>10;
+               e->inpps += ((long)rate - (long)e->inpps) >> 2;
+               s->ustats.inpps = (e->inpps + 0x1FF) >> 10;
 
-               rate = (n_outpkts - e->last_outpkts)<<9;
+               rate = (n_outpkts - e->last_outpkts) << 9;
                e->last_outpkts = n_outpkts;
-               e->outpps += ((long)rate - (long)e->outpps)>>2;
-               s->ustats.outpps = (e->outpps+0x1FF)>>10;
+               e->outpps += ((long)rate - (long)e->outpps) >> 2;
+               s->ustats.outpps = (e->outpps + 0x1FF) >> 10;
 
-               rate = (n_inbytes - e->last_inbytes)<<4;
+               rate = (n_inbytes - e->last_inbytes) << 4;
                e->last_inbytes = n_inbytes;
-               e->inbps += ((long)rate - (long)e->inbps)>>2;
-               s->ustats.inbps = (e->inbps+0xF)>>5;
+               e->inbps += ((long)rate - (long)e->inbps) >> 2;
+               s->ustats.inbps = (e->inbps + 0xF) >> 5;
 
-               rate = (n_outbytes - e->last_outbytes)<<4;
+               rate = (n_outbytes - e->last_outbytes) << 4;
                e->last_outbytes = n_outbytes;
-               e->outbps += ((long)rate - (long)e->outbps)>>2;
-               s->ustats.outbps = (e->outbps+0xF)>>5;
+               e->outbps += ((long)rate - (long)e->outbps) >> 2;
+               s->ustats.outbps = (e->outbps + 0xF) >> 5;
                spin_unlock(&s->lock);
        }
-       spin_unlock(&est_lock);
-       mod_timer(&est_timer, jiffies + 2*HZ);
+       spin_unlock(&ipvs->est_lock);
+       mod_timer(&ipvs->est_timer, jiffies + 2*HZ);
 }
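
Apart from the whitespace cleanup, the arithmetic in estimation_timer() is untouched: each rate is a fixed-point exponentially weighted moving average, scaled by 2^10 (connections and packets) or 2^5 (bytes) and sampled every two seconds, so the <<9 and <<4 shifts already fold in the division by the 2 s interval, and each tick moves the average a quarter of the way toward the newest sample. Here is a small stand-alone user-space program that reproduces the cps computation with made-up numbers:

/* Stand-alone demo (user space) of the fixed-point EWMA used above for
 * "cps": rate is delta << 9, i.e. (delta / 2 s) scaled by 2^10, and the
 * running average moves 1/4 of the way toward each new sample. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t last_conns = 0, cps_scaled = 0;
	int tick;

	for (tick = 1; tick <= 10; tick++) {
		/* pretend ~100 new connections arrived in this 2 s period */
		uint32_t n_conns = last_conns + 100;
		uint32_t rate = (n_conns - last_conns) << 9;

		last_conns = n_conns;
		cps_scaled += ((long)rate - (long)cps_scaled) >> 2;
		printf("tick %2d: cps = %u conns/s\n", tick,
		       (unsigned)((cps_scaled + 0x1FF) >> 10));
	}
	return 0;
}

With a steady 100 new connections per 2 s tick the printed value converges on 50 conns/s, matching the (e->cps + 0x1FF) >> 10 rounding used above.
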
 
-void ip_vs_new_estimator(struct ip_vs_stats *stats)
+void ip_vs_new_estimator(struct net *net, struct ip_vs_stats *stats)
 {
+       struct netns_ipvs *ipvs = net_ipvs(net);
        struct ip_vs_estimator *est = &stats->est;
 
        INIT_LIST_HEAD(&est->list);
@@ -126,18 +167,19 @@ void ip_vs_new_estimator(struct ip_vs_stats *stats)
        est->last_outbytes = stats->ustats.outbytes;
        est->outbps = stats->ustats.outbps<<5;
 
-       spin_lock_bh(&est_lock);
-       list_add(&est->list, &est_list);
-       spin_unlock_bh(&est_lock);
+       spin_lock_bh(&ipvs->est_lock);
+       list_add(&est->list, &ipvs->est_list);
+       spin_unlock_bh(&ipvs->est_lock);
 }
 
-void ip_vs_kill_estimator(struct ip_vs_stats *stats)
+void ip_vs_kill_estimator(struct net *net, struct ip_vs_stats *stats)
 {
+       struct netns_ipvs *ipvs = net_ipvs(net);
        struct ip_vs_estimator *est = &stats->est;
 
-       spin_lock_bh(&est_lock);
+       spin_lock_bh(&ipvs->est_lock);
        list_del(&est->list);
-       spin_unlock_bh(&est_lock);
+       spin_unlock_bh(&ipvs->est_lock);
 }
 
 void ip_vs_zero_estimator(struct ip_vs_stats *stats)
@@ -157,13 +199,35 @@ void ip_vs_zero_estimator(struct ip_vs_stats *stats)
        est->outbps = 0;
 }
 
-int __init ip_vs_estimator_init(void)
+static int __net_init __ip_vs_estimator_init(struct net *net)
 {
-       mod_timer(&est_timer, jiffies + 2 * HZ);
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       INIT_LIST_HEAD(&ipvs->est_list);
+       spin_lock_init(&ipvs->est_lock);
+       setup_timer(&ipvs->est_timer, estimation_timer, (unsigned long)net);
+       mod_timer(&ipvs->est_timer, jiffies + 2 * HZ);
        return 0;
 }
 
+static void __net_exit __ip_vs_estimator_exit(struct net *net)
+{
+       del_timer_sync(&net_ipvs(net)->est_timer);
+}
+static struct pernet_operations ip_vs_app_ops = {
+       .init = __ip_vs_estimator_init,
+       .exit = __ip_vs_estimator_exit,
+};
+
+int __init ip_vs_estimator_init(void)
+{
+       int rv;
+
+       rv = register_pernet_subsys(&ip_vs_app_ops);
+       return rv;
+}
+
 void ip_vs_estimator_cleanup(void)
 {
-       del_timer_sync(&est_timer);
+       unregister_pernet_subsys(&ip_vs_app_ops);
 }
index 75455000ad1c1cde82b2134970ab3a67a5a97b82..6b5dd6ddaae999b7153e68506be8cd26b59ceed6 100644 (file)
@@ -157,6 +157,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
        int ret = 0;
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;
+       struct net *net;
 
 #ifdef CONFIG_IP_VS_IPV6
        /* This application helper doesn't work with IPv6 yet,
@@ -197,18 +198,20 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
                 */
                {
                        struct ip_vs_conn_param p;
-                       ip_vs_conn_fill_param(AF_INET, iph->protocol,
-                                             &from, port, &cp->caddr, 0, &p);
+                       ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET,
+                                             iph->protocol, &from, port,
+                                             &cp->caddr, 0, &p);
                        n_cp = ip_vs_conn_out_get(&p);
                }
                if (!n_cp) {
                        struct ip_vs_conn_param p;
-                       ip_vs_conn_fill_param(AF_INET, IPPROTO_TCP, &cp->caddr,
+                       ip_vs_conn_fill_param(ip_vs_conn_net(cp),
+                                             AF_INET, IPPROTO_TCP, &cp->caddr,
                                              0, &cp->vaddr, port, &p);
                        n_cp = ip_vs_conn_new(&p, &from, port,
                                              IP_VS_CONN_F_NO_CPORT |
                                              IP_VS_CONN_F_NFCT,
-                                             cp->dest);
+                                             cp->dest, skb->mark);
                        if (!n_cp)
                                return 0;
 
@@ -257,8 +260,9 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
                 * would be adjusted twice.
                 */
 
+               net = skb_net(skb);
                cp->app_data = NULL;
-               ip_vs_tcp_conn_listen(n_cp);
+               ip_vs_tcp_conn_listen(net, n_cp);
                ip_vs_conn_put(n_cp);
                return ret;
        }
@@ -287,6 +291,7 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
        union nf_inet_addr to;
        __be16 port;
        struct ip_vs_conn *n_cp;
+       struct net *net;
 
 #ifdef CONFIG_IP_VS_IPV6
        /* This application helper doesn't work with IPv6 yet,
@@ -358,14 +363,15 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
 
        {
                struct ip_vs_conn_param p;
-               ip_vs_conn_fill_param(AF_INET, iph->protocol, &to, port,
-                                     &cp->vaddr, htons(ntohs(cp->vport)-1),
-                                     &p);
+               ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET,
+                                     iph->protocol, &to, port, &cp->vaddr,
+                                     htons(ntohs(cp->vport)-1), &p);
                n_cp = ip_vs_conn_in_get(&p);
                if (!n_cp) {
                        n_cp = ip_vs_conn_new(&p, &cp->daddr,
                                              htons(ntohs(cp->dport)-1),
-                                             IP_VS_CONN_F_NFCT, cp->dest);
+                                             IP_VS_CONN_F_NFCT, cp->dest,
+                                             skb->mark);
                        if (!n_cp)
                                return 0;
 
@@ -377,7 +383,8 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
        /*
         *      Move tunnel to listen state
         */
-       ip_vs_tcp_conn_listen(n_cp);
+       net = skb_net(skb);
+       ip_vs_tcp_conn_listen(net, n_cp);
        ip_vs_conn_put(n_cp);
 
        return 1;
@@ -398,23 +405,22 @@ static struct ip_vs_app ip_vs_ftp = {
        .pkt_in =       ip_vs_ftp_in,
 };
 
-
 /*
- *     ip_vs_ftp initialization
+ *     per netns ip_vs_ftp initialization
  */
-static int __init ip_vs_ftp_init(void)
+static int __net_init __ip_vs_ftp_init(struct net *net)
 {
        int i, ret;
        struct ip_vs_app *app = &ip_vs_ftp;
 
-       ret = register_ip_vs_app(app);
+       ret = register_ip_vs_app(net, app);
        if (ret)
                return ret;
 
        for (i=0; i<IP_VS_APP_MAX_PORTS; i++) {
                if (!ports[i])
                        continue;
-               ret = register_ip_vs_app_inc(app, app->protocol, ports[i]);
+               ret = register_ip_vs_app_inc(net, app, app->protocol, ports[i]);
                if (ret)
                        break;
                pr_info("%s: loaded support on port[%d] = %d\n",
@@ -422,18 +428,39 @@ static int __init ip_vs_ftp_init(void)
        }
 
        if (ret)
-               unregister_ip_vs_app(app);
+               unregister_ip_vs_app(net, app);
 
        return ret;
 }
+/*
+ *     netns exit
+ */
+static void __ip_vs_ftp_exit(struct net *net)
+{
+       struct ip_vs_app *app = &ip_vs_ftp;
+
+       unregister_ip_vs_app(net, app);
+}
+
+static struct pernet_operations ip_vs_ftp_ops = {
+       .init = __ip_vs_ftp_init,
+       .exit = __ip_vs_ftp_exit,
+};
 
+int __init ip_vs_ftp_init(void)
+{
+       int rv;
+
+       rv = register_pernet_subsys(&ip_vs_ftp_ops);
+       return rv;
+}
 
 /*
  *     ip_vs_ftp finish.
  */
 static void __exit ip_vs_ftp_exit(void)
 {
-       unregister_ip_vs_app(&ip_vs_ftp);
+       unregister_pernet_subsys(&ip_vs_ftp_ops);
 }
 
 
index 9323f8944199a6dcd6c99e0d126d21d697167bee..6bf7a807649c15833f8cf46d94e87e0f839d2d19 100644 (file)
@@ -70,7 +70,6 @@
  *    entries that haven't been touched for a day.
  */
 #define COUNT_FOR_FULL_EXPIRATION   30
-static int sysctl_ip_vs_lblc_expiration = 24*60*60*HZ;
 
 
 /*
@@ -117,7 +116,7 @@ struct ip_vs_lblc_table {
 static ctl_table vs_vars_table[] = {
        {
                .procname       = "lblc_expiration",
-               .data           = &sysctl_ip_vs_lblc_expiration,
+               .data           = NULL,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
@@ -125,8 +124,6 @@ static ctl_table vs_vars_table[] = {
        { }
 };
 
-static struct ctl_table_header * sysctl_header;
-
 static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
 {
        list_del(&en->list);
@@ -248,6 +245,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
        struct ip_vs_lblc_entry *en, *nxt;
        unsigned long now = jiffies;
        int i, j;
+       struct netns_ipvs *ipvs = net_ipvs(svc->net);
 
        for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLC_TAB_MASK;
@@ -255,7 +253,8 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
                write_lock(&svc->sched_lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_before(now,
-                                       en->lastuse + sysctl_ip_vs_lblc_expiration))
+                                       en->lastuse +
+                                       ipvs->sysctl_lblc_expiration))
                                continue;
 
                        ip_vs_lblc_free(en);
@@ -390,12 +389,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
        int loh, doh;
 
        /*
-        * We think the overhead of processing active connections is fifty
-        * times higher than that of inactive connections in average. (This
-        * fifty times might not be accurate, we will change it later.) We
-        * use the following formula to estimate the overhead:
-        *                dest->activeconns*50 + dest->inactconns
-        * and the load:
+        * We use the following formula to estimate the load:
         *                (dest overhead) / dest->weight
         *
         * Remember -- no floats in kernel mode!!!
@@ -411,8 +405,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
                        continue;
                if (atomic_read(&dest->weight) > 0) {
                        least = dest;
-                       loh = atomic_read(&least->activeconns) * 50
-                               + atomic_read(&least->inactconns);
+                       loh = ip_vs_dest_conn_overhead(least);
                        goto nextstage;
                }
        }
@@ -426,8 +419,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;
 
-               doh = atomic_read(&dest->activeconns) * 50
-                       + atomic_read(&dest->inactconns);
+               doh = ip_vs_dest_conn_overhead(dest);
                if (loh * atomic_read(&dest->weight) >
                    doh * atomic_read(&least->weight)) {
                        least = dest;
@@ -511,7 +503,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
        /* No cache entry or it is invalid, time to schedule */
        dest = __ip_vs_lblc_schedule(svc);
        if (!dest) {
-               IP_VS_ERR_RL("LBLC: no destination available\n");
+               ip_vs_scheduler_err(svc, "no destination available");
                return NULL;
        }
 
@@ -543,23 +535,73 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler =
        .schedule =             ip_vs_lblc_schedule,
 };
 
+/*
+ *  per netns init.
+ */
+static int __net_init __ip_vs_lblc_init(struct net *net)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       if (!net_eq(net, &init_net)) {
+               ipvs->lblc_ctl_table = kmemdup(vs_vars_table,
+                                               sizeof(vs_vars_table),
+                                               GFP_KERNEL);
+               if (ipvs->lblc_ctl_table == NULL)
+                       return -ENOMEM;
+       } else
+               ipvs->lblc_ctl_table = vs_vars_table;
+       ipvs->sysctl_lblc_expiration = 24*60*60*HZ;
+       ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration;
+
+#ifdef CONFIG_SYSCTL
+       ipvs->lblc_ctl_header =
+               register_net_sysctl_table(net, net_vs_ctl_path,
+                                         ipvs->lblc_ctl_table);
+       if (!ipvs->lblc_ctl_header) {
+               if (!net_eq(net, &init_net))
+                       kfree(ipvs->lblc_ctl_table);
+               return -ENOMEM;
+       }
+#endif
+
+       return 0;
+}
+
+static void __net_exit __ip_vs_lblc_exit(struct net *net)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+#ifdef CONFIG_SYSCTL
+       unregister_net_sysctl_table(ipvs->lblc_ctl_header);
+#endif
+
+       if (!net_eq(net, &init_net))
+               kfree(ipvs->lblc_ctl_table);
+}
+
+static struct pernet_operations ip_vs_lblc_ops = {
+       .init = __ip_vs_lblc_init,
+       .exit = __ip_vs_lblc_exit,
+};
 
 static int __init ip_vs_lblc_init(void)
 {
        int ret;
 
-       sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
+       ret = register_pernet_subsys(&ip_vs_lblc_ops);
+       if (ret)
+               return ret;
+
        ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
        if (ret)
-               unregister_sysctl_table(sysctl_header);
+               unregister_pernet_subsys(&ip_vs_lblc_ops);
        return ret;
 }
 
-
 static void __exit ip_vs_lblc_cleanup(void)
 {
-       unregister_sysctl_table(sysctl_header);
        unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
+       unregister_pernet_subsys(&ip_vs_lblc_ops);
 }
 
 
index dbeed8ea421aa7c1335d414622559f262f6fa728..00631765b92a281ed58f50f3a5a16f28e23782f1 100644 (file)
@@ -70,8 +70,6 @@
  *    entries that haven't been touched for a day.
  */
 #define COUNT_FOR_FULL_EXPIRATION   30
-static int sysctl_ip_vs_lblcr_expiration = 24*60*60*HZ;
-
 
 /*
  *     for IPVS lblcr entry hash table
@@ -180,8 +178,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
 
                if ((atomic_read(&least->weight) > 0)
                    && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
-                       loh = atomic_read(&least->activeconns) * 50
-                               + atomic_read(&least->inactconns);
+                       loh = ip_vs_dest_conn_overhead(least);
                        goto nextstage;
                }
        }
@@ -194,8 +191,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;
 
-               doh = atomic_read(&dest->activeconns) * 50
-                       + atomic_read(&dest->inactconns);
+               doh = ip_vs_dest_conn_overhead(dest);
                if ((loh * atomic_read(&dest->weight) >
                     doh * atomic_read(&least->weight))
                    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
@@ -230,8 +226,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
        list_for_each_entry(e, &set->list, list) {
                most = e->dest;
                if (atomic_read(&most->weight) > 0) {
-                       moh = atomic_read(&most->activeconns) * 50
-                               + atomic_read(&most->inactconns);
+                       moh = ip_vs_dest_conn_overhead(most);
                        goto nextstage;
                }
        }
@@ -241,8 +236,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
   nextstage:
        list_for_each_entry(e, &set->list, list) {
                dest = e->dest;
-               doh = atomic_read(&dest->activeconns) * 50
-                       + atomic_read(&dest->inactconns);
+               doh = ip_vs_dest_conn_overhead(dest);
                /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
                if ((moh * atomic_read(&dest->weight) <
                     doh * atomic_read(&most->weight))
@@ -296,7 +290,7 @@ struct ip_vs_lblcr_table {
 static ctl_table vs_vars_table[] = {
        {
                .procname       = "lblcr_expiration",
-               .data           = &sysctl_ip_vs_lblcr_expiration,
+               .data           = NULL,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
@@ -304,8 +298,6 @@ static ctl_table vs_vars_table[] = {
        { }
 };
 
-static struct ctl_table_header * sysctl_header;
-
 static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
 {
        list_del(&en->list);
@@ -425,14 +417,15 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
        unsigned long now = jiffies;
        int i, j;
        struct ip_vs_lblcr_entry *en, *nxt;
+       struct netns_ipvs *ipvs = net_ipvs(svc->net);
 
        for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
 
                write_lock(&svc->sched_lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
-                       if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration,
-                                      now))
+                       if (time_after(en->lastuse
+                                       + ipvs->sysctl_lblcr_expiration, now))
                                continue;
 
                        ip_vs_lblcr_free(en);
@@ -566,12 +559,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
        int loh, doh;
 
        /*
-        * We think the overhead of processing active connections is fifty
-        * times higher than that of inactive connections in average. (This
-        * fifty times might not be accurate, we will change it later.) We
-        * use the following formula to estimate the overhead:
-        *                dest->activeconns*50 + dest->inactconns
-        * and the load:
+        * We use the following formula to estimate the load:
         *                (dest overhead) / dest->weight
         *
         * Remember -- no floats in kernel mode!!!
@@ -588,8 +576,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
 
                if (atomic_read(&dest->weight) > 0) {
                        least = dest;
-                       loh = atomic_read(&least->activeconns) * 50
-                               + atomic_read(&least->inactconns);
+                       loh = ip_vs_dest_conn_overhead(least);
                        goto nextstage;
                }
        }
@@ -603,8 +590,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;
 
-               doh = atomic_read(&dest->activeconns) * 50
-                       + atomic_read(&dest->inactconns);
+               doh = ip_vs_dest_conn_overhead(dest);
                if (loh * atomic_read(&dest->weight) >
                    doh * atomic_read(&least->weight)) {
                        least = dest;
@@ -664,6 +650,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
        read_lock(&svc->sched_lock);
        en = ip_vs_lblcr_get(svc->af, tbl, &iph.daddr);
        if (en) {
+               struct netns_ipvs *ipvs = net_ipvs(svc->net);
                /* We only hold a read lock, but this is atomic */
                en->lastuse = jiffies;
 
@@ -675,7 +662,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
                /* More than one destination + enough time passed by, cleanup */
                if (atomic_read(&en->set.size) > 1 &&
                                time_after(jiffies, en->set.lastmod +
-                               sysctl_ip_vs_lblcr_expiration)) {
+                               ipvs->sysctl_lblcr_expiration)) {
                        struct ip_vs_dest *m;
 
                        write_lock(&en->set.lock);
@@ -694,7 +681,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
                /* The cache entry is invalid, time to schedule */
                dest = __ip_vs_lblcr_schedule(svc);
                if (!dest) {
-                       IP_VS_ERR_RL("LBLCR: no destination available\n");
+                       ip_vs_scheduler_err(svc, "no destination available");
                        read_unlock(&svc->sched_lock);
                        return NULL;
                }
@@ -744,23 +731,73 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
        .schedule =             ip_vs_lblcr_schedule,
 };
 
+/*
+ *  per netns init.
+ */
+static int __net_init __ip_vs_lblcr_init(struct net *net)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       if (!net_eq(net, &init_net)) {
+               ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
+                                               sizeof(vs_vars_table),
+                                               GFP_KERNEL);
+               if (ipvs->lblcr_ctl_table == NULL)
+                       return -ENOMEM;
+       } else
+               ipvs->lblcr_ctl_table = vs_vars_table;
+       ipvs->sysctl_lblcr_expiration = 24*60*60*HZ;
+       ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;
+
+#ifdef CONFIG_SYSCTL
+       ipvs->lblcr_ctl_header =
+               register_net_sysctl_table(net, net_vs_ctl_path,
+                                         ipvs->lblcr_ctl_table);
+       if (!ipvs->lblcr_ctl_header) {
+               if (!net_eq(net, &init_net))
+                       kfree(ipvs->lblcr_ctl_table);
+               return -ENOMEM;
+       }
+#endif
+
+       return 0;
+}
+
+static void __net_exit __ip_vs_lblcr_exit(struct net *net)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+#ifdef CONFIG_SYSCTL
+       unregister_net_sysctl_table(ipvs->lblcr_ctl_header);
+#endif
+
+       if (!net_eq(net, &init_net))
+               kfree(ipvs->lblcr_ctl_table);
+}
+
+static struct pernet_operations ip_vs_lblcr_ops = {
+       .init = __ip_vs_lblcr_init,
+       .exit = __ip_vs_lblcr_exit,
+};
 
 static int __init ip_vs_lblcr_init(void)
 {
        int ret;
 
-       sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
+       ret = register_pernet_subsys(&ip_vs_lblcr_ops);
+       if (ret)
+               return ret;
+
        ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
        if (ret)
-               unregister_sysctl_table(sysctl_header);
+               unregister_pernet_subsys(&ip_vs_lblcr_ops);
        return ret;
 }
 
-
 static void __exit ip_vs_lblcr_cleanup(void)
 {
-       unregister_sysctl_table(sysctl_header);
        unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
+       unregister_pernet_subsys(&ip_vs_lblcr_ops);
 }
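
The lblcr per-netns init above duplicates the sysctl template with kmemdup() for every namespace except init_net and points the table's .data at the namespace-private expiration value, so a sysctl write only affects that namespace. Below is a minimal userspace model of that pattern; struct ctl_entry and struct ns_state are inventions of the model, not the kernel's types, and the value is kept in plain seconds instead of jiffies.

/*
 * Userspace sketch of the per-netns sysctl pattern: each "namespace" gets
 * its own copy of a template control table whose .data pointer is re-aimed
 * at the namespace-private variable.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ctl_entry {
        const char *procname;
        int *data;                      /* where a write to the sysctl lands */
};

static struct ctl_entry template_table[] = {
        { "lblcr_expiration", NULL },   /* .data filled in per namespace */
        { NULL, NULL }
};

struct ns_state {
        int sysctl_lblcr_expiration;    /* per-namespace value */
        struct ctl_entry *ctl_table;
};

static int ns_init(struct ns_state *ns, int is_init_ns)
{
        if (!is_init_ns) {
                ns->ctl_table = malloc(sizeof(template_table));
                if (!ns->ctl_table)
                        return -1;
                memcpy(ns->ctl_table, template_table, sizeof(template_table));
        } else {
                ns->ctl_table = template_table; /* init ns reuses the static copy */
        }
        ns->sysctl_lblcr_expiration = 24 * 60 * 60;     /* default, in seconds here */
        ns->ctl_table[0].data = &ns->sysctl_lblcr_expiration;
        return 0;
}

static void ns_exit(struct ns_state *ns, int is_init_ns)
{
        if (!is_init_ns)
                free(ns->ctl_table);
}

int main(void)
{
        struct ns_state a, b;

        ns_init(&a, 1);
        ns_init(&b, 0);
        *b.ctl_table[0].data = 60;      /* "sysctl write" in namespace b only */
        printf("a=%d b=%d\n", a.sysctl_lblcr_expiration, b.sysctl_lblcr_expiration);
        ns_exit(&b, 0);
        ns_exit(&a, 1);
        return 0;
}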
 
 
index 4f69db1fac56f514bbbdb17d4be00521683a19a3..f391819c0ccad893ff1b07b59a0bfb7a8005ed66 100644 (file)
 
 #include <net/ip_vs.h>
 
-
-static inline unsigned int
-ip_vs_lc_dest_overhead(struct ip_vs_dest *dest)
-{
-       /*
-        * We think the overhead of processing active connections is 256
-        * times higher than that of inactive connections in average. (This
-        * 256 times might not be accurate, we will change it later) We
-        * use the following formula to estimate the overhead now:
-        *                dest->activeconns*256 + dest->inactconns
-        */
-       return (atomic_read(&dest->activeconns) << 8) +
-               atomic_read(&dest->inactconns);
-}
-
-
 /*
  *     Least Connection scheduling
  */
@@ -62,7 +46,7 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
                if ((dest->flags & IP_VS_DEST_F_OVERLOAD) ||
                    atomic_read(&dest->weight) == 0)
                        continue;
-               doh = ip_vs_lc_dest_overhead(dest);
+               doh = ip_vs_dest_conn_overhead(dest);
                if (!least || doh < loh) {
                        least = dest;
                        loh = doh;
@@ -70,7 +54,7 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
        }
 
        if (!least)
-               IP_VS_ERR_RL("LC: no destination available\n");
+               ip_vs_scheduler_err(svc, "no destination available");
        else
                IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d "
                              "inactconns %d\n",
index 4680647cd450ec3b397a40630090e95b967bcbab..f454c80df0a7868d29f639fed1555225462a36d6 100644 (file)
@@ -141,6 +141,7 @@ static void ip_vs_nfct_expect_callback(struct nf_conn *ct,
        struct nf_conntrack_tuple *orig, new_reply;
        struct ip_vs_conn *cp;
        struct ip_vs_conn_param p;
+       struct net *net = nf_ct_net(ct);
 
        if (exp->tuple.src.l3num != PF_INET)
                return;
@@ -155,7 +156,7 @@ static void ip_vs_nfct_expect_callback(struct nf_conn *ct,
 
        /* RS->CLIENT */
        orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
-       ip_vs_conn_fill_param(exp->tuple.src.l3num, orig->dst.protonum,
+       ip_vs_conn_fill_param(net, exp->tuple.src.l3num, orig->dst.protonum,
                              &orig->src.u3, orig->src.u.tcp.port,
                              &orig->dst.u3, orig->dst.u.tcp.port, &p);
        cp = ip_vs_conn_out_get(&p);
@@ -268,7 +269,8 @@ void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
                " for conn " FMT_CONN "\n",
                __func__, ARG_TUPLE(&tuple), ARG_CONN(cp));
 
-       h = nf_conntrack_find_get(&init_net, NF_CT_DEFAULT_ZONE, &tuple);
+       h = nf_conntrack_find_get(ip_vs_conn_net(cp), NF_CT_DEFAULT_ZONE,
+                                 &tuple);
        if (h) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                /* Show what happens instead of calling nf_ct_kill() */
index c413e18308230e910cef0a6e392e752826a57e5f..984d9c137d84ac65c140c6ee15c78a66d80c8775 100644 (file)
@@ -99,7 +99,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
        }
 
        if (!least) {
-               IP_VS_ERR_RL("NQ: no destination available\n");
+               ip_vs_scheduler_err(svc, "no destination available");
                return NULL;
        }
 
index 3414af70ee127c0f30f7b49cf0c15d91683bde5b..5cf859ccb31bbe096c9588643a9ed0f0d5f13342 100644 (file)
@@ -29,12 +29,11 @@ void ip_vs_unbind_pe(struct ip_vs_service *svc)
 }
 
 /* Get pe in the pe list by name */
-static struct ip_vs_pe *
-ip_vs_pe_getbyname(const char *pe_name)
+struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name)
 {
        struct ip_vs_pe *pe;
 
-       IP_VS_DBG(2, "%s(): pe_name \"%s\"\n", __func__,
+       IP_VS_DBG(10, "%s(): pe_name \"%s\"\n", __func__,
                  pe_name);
 
        spin_lock_bh(&ip_vs_pe_lock);
@@ -60,28 +59,22 @@ ip_vs_pe_getbyname(const char *pe_name)
 }
 
 /* Lookup pe and try to load it if it doesn't exist */
-struct ip_vs_pe *ip_vs_pe_get(const char *name)
+struct ip_vs_pe *ip_vs_pe_getbyname(const char *name)
 {
        struct ip_vs_pe *pe;
 
        /* Search for the pe by name */
-       pe = ip_vs_pe_getbyname(name);
+       pe = __ip_vs_pe_getbyname(name);
 
        /* If pe not found, load the module and search again */
        if (!pe) {
                request_module("ip_vs_pe_%s", name);
-               pe = ip_vs_pe_getbyname(name);
+               pe = __ip_vs_pe_getbyname(name);
        }
 
        return pe;
 }
 
-void ip_vs_pe_put(struct ip_vs_pe *pe)
-{
-       if (pe && pe->module)
-               module_put(pe->module);
-}
-
 /* Register a pe in the pe list */
 int register_ip_vs_pe(struct ip_vs_pe *pe)
 {
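
ip_vs_pe_get() is renamed to ip_vs_pe_getbyname() above but keeps the same flow: look the persistence engine up by name, and if it is not registered, try to load its module and look again. A userspace model of that flow follows; registry[], load_module() and struct pe are inventions of the model, not kernel symbols.

/*
 * Userspace sketch of the "look up by name, load on demand, retry"
 * pattern used by ip_vs_pe_getbyname().
 */
#include <stdio.h>
#include <string.h>

struct pe { const char *name; int loaded; };

static struct pe registry[] = {
        { "sip", 0 },                   /* known, but not loaded yet */
};

static struct pe *find_loaded(const char *name)
{
        size_t i;

        for (i = 0; i < sizeof(registry) / sizeof(registry[0]); i++)
                if (registry[i].loaded && !strcmp(registry[i].name, name))
                        return &registry[i];
        return NULL;
}

/* stands in for request_module("ip_vs_pe_%s", name) */
static void load_module(const char *name)
{
        size_t i;

        for (i = 0; i < sizeof(registry) / sizeof(registry[0]); i++)
                if (!strcmp(registry[i].name, name))
                        registry[i].loaded = 1;
}

static struct pe *pe_getbyname(const char *name)
{
        struct pe *pe = find_loaded(name);

        if (!pe) {                      /* not there: try to load it, then retry */
                load_module(name);
                pe = find_loaded(name);
        }
        return pe;
}

int main(void)
{
        printf("sip -> %s\n", pe_getbyname("sip") ? "found" : "missing");
        printf("foo -> %s\n", pe_getbyname("foo") ? "found" : "missing");
        return 0;
}
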
index b8b4e9620f3e936251da71d452f1d1f7aed68d04..0d83bc01fed4c14d7323debd536c5ca738cd5954 100644 (file)
@@ -71,6 +71,7 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
        struct ip_vs_iphdr iph;
        unsigned int dataoff, datalen, matchoff, matchlen;
        const char *dptr;
+       int retc;
 
        ip_vs_fill_iphdr(p->af, skb_network_header(skb), &iph);
 
@@ -83,6 +84,8 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
        if (dataoff >= skb->len)
                return -EINVAL;
 
+       if ((retc=skb_linearize(skb)) < 0)
+               return retc;
        dptr = skb->data + dataoff;
        datalen = skb->len - dataoff;
 
index c539983908771ead7df2e14b7d4eb9c259369064..17484a4416ef91158d03d868ee3c698e3179cf6e 100644 (file)
@@ -60,6 +60,35 @@ static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
        return 0;
 }
 
+#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) || \
+    defined(CONFIG_IP_VS_PROTO_SCTP) || defined(CONFIG_IP_VS_PROTO_AH) || \
+    defined(CONFIG_IP_VS_PROTO_ESP)
+/*
+ *     register an ipvs protocols netns related data
+ */
+static int
+register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
+       struct ip_vs_proto_data *pd =
+                       kzalloc(sizeof(struct ip_vs_proto_data), GFP_ATOMIC);
+
+       if (!pd) {
+               pr_err("%s(): no memory.\n", __func__);
+               return -ENOMEM;
+       }
+       pd->pp = pp;    /* For speed issues */
+       pd->next = ipvs->proto_data_table[hash];
+       ipvs->proto_data_table[hash] = pd;
+       atomic_set(&pd->appcnt, 0);     /* Init app counter */
+
+       if (pp->init_netns != NULL)
+               pp->init_netns(net, pd);
+
+       return 0;
+}
+#endif
 
 /*
  *     unregister an ipvs protocol
@@ -82,6 +111,29 @@ static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp)
        return -ESRCH;
 }
 
+/*
+ *     unregister an ipvs protocols netns data
+ */
+static int
+unregister_ip_vs_proto_netns(struct net *net, struct ip_vs_proto_data *pd)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_proto_data **pd_p;
+       unsigned hash = IP_VS_PROTO_HASH(pd->pp->protocol);
+
+       pd_p = &ipvs->proto_data_table[hash];
+       for (; *pd_p; pd_p = &(*pd_p)->next) {
+               if (*pd_p == pd) {
+                       *pd_p = pd->next;
+                       if (pd->pp->exit_netns != NULL)
+                               pd->pp->exit_netns(net, pd);
+                       kfree(pd);
+                       return 0;
+               }
+       }
+
+       return -ESRCH;
+}
 
 /*
  *     get ip_vs_protocol object by its proto.
@@ -100,19 +152,44 @@ struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto)
 }
 EXPORT_SYMBOL(ip_vs_proto_get);
 
+/*
+ *     get ip_vs_protocol object data by netns and proto
+ */
+struct ip_vs_proto_data *
+__ipvs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto)
+{
+       struct ip_vs_proto_data *pd;
+       unsigned hash = IP_VS_PROTO_HASH(proto);
+
+       for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) {
+               if (pd->pp->protocol == proto)
+                       return pd;
+       }
+
+       return NULL;
+}
+
+struct ip_vs_proto_data *
+ip_vs_proto_data_get(struct net *net, unsigned short proto)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       return __ipvs_proto_data_get(ipvs, proto);
+}
+EXPORT_SYMBOL(ip_vs_proto_data_get);
 
 /*
  *     Propagate event for state change to all protocols
  */
-void ip_vs_protocol_timeout_change(int flags)
+void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags)
 {
-       struct ip_vs_protocol *pp;
+       struct ip_vs_proto_data *pd;
        int i;
 
        for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
-               for (pp = ip_vs_proto_table[i]; pp; pp = pp->next) {
-                       if (pp->timeout_change)
-                               pp->timeout_change(pp, flags);
+               for (pd = ipvs->proto_data_table[i]; pd; pd = pd->next) {
+                       if (pd->pp->timeout_change)
+                               pd->pp->timeout_change(pd, flags);
                }
        }
 }
@@ -236,6 +313,46 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
                ip_vs_tcpudp_debug_packet_v4(pp, skb, offset, msg);
 }
 
+/*
+ * per network name-space init
+ */
+static int __net_init __ip_vs_protocol_init(struct net *net)
+{
+#ifdef CONFIG_IP_VS_PROTO_TCP
+       register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_UDP
+       register_ip_vs_proto_netns(net, &ip_vs_protocol_udp);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_SCTP
+       register_ip_vs_proto_netns(net, &ip_vs_protocol_sctp);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_AH
+       register_ip_vs_proto_netns(net, &ip_vs_protocol_ah);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_ESP
+       register_ip_vs_proto_netns(net, &ip_vs_protocol_esp);
+#endif
+       return 0;
+}
+
+static void __net_exit __ip_vs_protocol_cleanup(struct net *net)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_proto_data *pd;
+       int i;
+
+       /* unregister all the ipvs proto data for this netns */
+       for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
+               while ((pd = ipvs->proto_data_table[i]) != NULL)
+                       unregister_ip_vs_proto_netns(net, pd);
+       }
+}
+
+static struct pernet_operations ipvs_proto_ops = {
+       .init = __ip_vs_protocol_init,
+       .exit = __ip_vs_protocol_cleanup,
+};
 
 int __init ip_vs_protocol_init(void)
 {
@@ -265,6 +382,7 @@ int __init ip_vs_protocol_init(void)
        REGISTER_PROTOCOL(&ip_vs_protocol_esp);
 #endif
        pr_info("Registered protocols (%s)\n", &protocols[2]);
+       return register_pernet_subsys(&ipvs_proto_ops);
 
        return 0;
 }
@@ -275,6 +393,7 @@ void ip_vs_protocol_cleanup(void)
        struct ip_vs_protocol *pp;
        int i;
 
+       unregister_pernet_subsys(&ipvs_proto_ops);
        /* unregister all the ipvs protocols */
        for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
                while ((pp = ip_vs_proto_table[i]) != NULL)
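
register_ip_vs_proto_netns() and its lookup/unregister counterparts above hang one ip_vs_proto_data node per protocol off a small per-namespace hash table, with removal done through a pointer-to-pointer walk of the chain. A self-contained userspace model of the same structure is sketched below; the table size, hash and struct layout are illustrative only.

/*
 * Userspace model of the per-namespace protocol-data table: a small hash
 * table of singly linked proto_data nodes keyed by protocol number.
 */
#include <stdio.h>
#include <stdlib.h>

#define PROTO_TAB_SIZE 32
#define PROTO_HASH(p)  ((p) & (PROTO_TAB_SIZE - 1))

struct proto_data {
        int protocol;
        struct proto_data *next;
};

struct ns_ipvs {
        struct proto_data *proto_data_table[PROTO_TAB_SIZE];
};

static int register_proto(struct ns_ipvs *ns, int protocol)
{
        unsigned h = PROTO_HASH(protocol);
        struct proto_data *pd = calloc(1, sizeof(*pd));

        if (!pd)
                return -1;
        pd->protocol = protocol;
        pd->next = ns->proto_data_table[h];     /* push onto the chain */
        ns->proto_data_table[h] = pd;
        return 0;
}

static struct proto_data *proto_data_get(struct ns_ipvs *ns, int protocol)
{
        struct proto_data *pd;

        for (pd = ns->proto_data_table[PROTO_HASH(protocol)]; pd; pd = pd->next)
                if (pd->protocol == protocol)
                        return pd;
        return NULL;
}

static int unregister_proto(struct ns_ipvs *ns, struct proto_data *pd)
{
        struct proto_data **pd_p;

        if (!pd)
                return -1;
        pd_p = &ns->proto_data_table[PROTO_HASH(pd->protocol)];
        for (; *pd_p; pd_p = &(*pd_p)->next) {
                if (*pd_p == pd) {
                        *pd_p = pd->next;       /* unlink */
                        free(pd);
                        return 0;
                }
        }
        return -1;
}

int main(void)
{
        struct ns_ipvs ns = { { NULL } };

        register_proto(&ns, 6);         /* TCP */
        register_proto(&ns, 17);        /* UDP */
        printf("tcp %sfound\n", proto_data_get(&ns, 6) ? "" : "not ");
        unregister_proto(&ns, proto_data_get(&ns, 6));
        printf("tcp %sfound\n", proto_data_get(&ns, 6) ? "" : "not ");
        unregister_proto(&ns, proto_data_get(&ns, 17));
        return 0;
}
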
index 3a0461117d3fad6216747b60b5ec24856ca9d7fc..5b8eb8b12c3e6a1155df420f8b101b32c29f6def 100644 (file)
@@ -41,28 +41,30 @@ struct isakmp_hdr {
 #define PORT_ISAKMP    500
 
 static void
-ah_esp_conn_fill_param_proto(int af, const struct ip_vs_iphdr *iph,
-                            int inverse, struct ip_vs_conn_param *p)
+ah_esp_conn_fill_param_proto(struct net *net, int af,
+                            const struct ip_vs_iphdr *iph, int inverse,
+                            struct ip_vs_conn_param *p)
 {
        if (likely(!inverse))
-               ip_vs_conn_fill_param(af, IPPROTO_UDP,
+               ip_vs_conn_fill_param(net, af, IPPROTO_UDP,
                                      &iph->saddr, htons(PORT_ISAKMP),
                                      &iph->daddr, htons(PORT_ISAKMP), p);
        else
-               ip_vs_conn_fill_param(af, IPPROTO_UDP,
+               ip_vs_conn_fill_param(net, af, IPPROTO_UDP,
                                      &iph->daddr, htons(PORT_ISAKMP),
                                      &iph->saddr, htons(PORT_ISAKMP), p);
 }
 
 static struct ip_vs_conn *
-ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
+ah_esp_conn_in_get(int af, const struct sk_buff *skb,
                   const struct ip_vs_iphdr *iph, unsigned int proto_off,
                   int inverse)
 {
        struct ip_vs_conn *cp;
        struct ip_vs_conn_param p;
+       struct net *net = skb_net(skb);
 
-       ah_esp_conn_fill_param_proto(af, iph, inverse, &p);
+       ah_esp_conn_fill_param_proto(net, af, iph, inverse, &p);
        cp = ip_vs_conn_in_get(&p);
        if (!cp) {
                /*
@@ -72,7 +74,7 @@ ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
                IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for outin packet "
                              "%s%s %s->%s\n",
                              inverse ? "ICMP+" : "",
-                             pp->name,
+                             ip_vs_proto_get(iph->protocol)->name,
                              IP_VS_DBG_ADDR(af, &iph->saddr),
                              IP_VS_DBG_ADDR(af, &iph->daddr));
        }
@@ -83,21 +85,21 @@ ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
 
 static struct ip_vs_conn *
 ah_esp_conn_out_get(int af, const struct sk_buff *skb,
-                   struct ip_vs_protocol *pp,
                    const struct ip_vs_iphdr *iph,
                    unsigned int proto_off,
                    int inverse)
 {
        struct ip_vs_conn *cp;
        struct ip_vs_conn_param p;
+       struct net *net = skb_net(skb);
 
-       ah_esp_conn_fill_param_proto(af, iph, inverse, &p);
+       ah_esp_conn_fill_param_proto(net, af, iph, inverse, &p);
        cp = ip_vs_conn_out_get(&p);
        if (!cp) {
                IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for inout packet "
                              "%s%s %s->%s\n",
                              inverse ? "ICMP+" : "",
-                             pp->name,
+                             ip_vs_proto_get(iph->protocol)->name,
                              IP_VS_DBG_ADDR(af, &iph->saddr),
                              IP_VS_DBG_ADDR(af, &iph->daddr));
        }
@@ -107,7 +109,7 @@ ah_esp_conn_out_get(int af, const struct sk_buff *skb,
 
 
 static int
-ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
                     int *verdict, struct ip_vs_conn **cpp)
 {
        /*
@@ -117,26 +119,14 @@ ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
        return 0;
 }
 
-static void ah_esp_init(struct ip_vs_protocol *pp)
-{
-       /* nothing to do now */
-}
-
-
-static void ah_esp_exit(struct ip_vs_protocol *pp)
-{
-       /* nothing to do now */
-}
-
-
 #ifdef CONFIG_IP_VS_PROTO_AH
 struct ip_vs_protocol ip_vs_protocol_ah = {
        .name =                 "AH",
        .protocol =             IPPROTO_AH,
        .num_states =           1,
        .dont_defrag =          1,
-       .init =                 ah_esp_init,
-       .exit =                 ah_esp_exit,
+       .init =                 NULL,
+       .exit =                 NULL,
        .conn_schedule =        ah_esp_conn_schedule,
        .conn_in_get =          ah_esp_conn_in_get,
        .conn_out_get =         ah_esp_conn_out_get,
@@ -149,7 +139,6 @@ struct ip_vs_protocol ip_vs_protocol_ah = {
        .app_conn_bind =        NULL,
        .debug_packet =         ip_vs_tcpudp_debug_packet,
        .timeout_change =       NULL,           /* ISAKMP */
-       .set_state_timeout =    NULL,
 };
 #endif
 
@@ -159,8 +148,8 @@ struct ip_vs_protocol ip_vs_protocol_esp = {
        .protocol =             IPPROTO_ESP,
        .num_states =           1,
        .dont_defrag =          1,
-       .init =                 ah_esp_init,
-       .exit =                 ah_esp_exit,
+       .init =                 NULL,
+       .exit =                 NULL,
        .conn_schedule =        ah_esp_conn_schedule,
        .conn_in_get =          ah_esp_conn_in_get,
        .conn_out_get =         ah_esp_conn_out_get,
index 1ea96bcd342b8fc81eb437a6826fc6652c2be4c2..b027ccc49f43cba6fdbc3ceaa293b50fc7e1d857 100644 (file)
@@ -9,9 +9,10 @@
 #include <net/ip_vs.h>
 
 static int
-sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
                   int *verdict, struct ip_vs_conn **cpp)
 {
+       struct net *net;
        struct ip_vs_service *svc;
        sctp_chunkhdr_t _schunkh, *sch;
        sctp_sctphdr_t *sh, _sctph;
@@ -27,13 +28,13 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
                                 sizeof(_schunkh), &_schunkh);
        if (sch == NULL)
                return 0;
-
+       net = skb_net(skb);
        if ((sch->type == SCTP_CID_INIT) &&
-           (svc = ip_vs_service_get(af, skb->mark, iph.protocol,
+           (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
                                     &iph.daddr, sh->dest))) {
                int ignored;
 
-               if (ip_vs_todrop()) {
+               if (ip_vs_todrop(net_ipvs(net))) {
                        /*
                         * It seems that we are very loaded.
                         * We have to drop this packet :(
@@ -46,14 +47,19 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
                 * Let the virtual server select a real server for the
                 * incoming connection, and create a connection entry.
                 */
-               *cpp = ip_vs_schedule(svc, skb, pp, &ignored);
-               if (!*cpp && !ignored) {
-                       *verdict = ip_vs_leave(svc, skb, pp);
+               *cpp = ip_vs_schedule(svc, skb, pd, &ignored);
+               if (!*cpp && ignored <= 0) {
+                       if (!ignored)
+                               *verdict = ip_vs_leave(svc, skb, pd);
+                       else {
+                               ip_vs_service_put(svc);
+                               *verdict = NF_DROP;
+                       }
                        return 0;
                }
                ip_vs_service_put(svc);
        }
-
+       /* NF_ACCEPT */
        return 1;
 }
 
@@ -856,7 +862,7 @@ static struct ipvs_sctp_nextstate
 /*
  *      Timeout table[state]
  */
-static int sctp_timeouts[IP_VS_SCTP_S_LAST + 1] = {
+static const int sctp_timeouts[IP_VS_SCTP_S_LAST + 1] = {
        [IP_VS_SCTP_S_NONE]         =     2 * HZ,
        [IP_VS_SCTP_S_INIT_CLI]     =     1 * 60 * HZ,
        [IP_VS_SCTP_S_INIT_SER]     =     1 * 60 * HZ,
@@ -900,20 +906,8 @@ static const char *sctp_state_name(int state)
        return "?";
 }
 
-static void sctp_timeout_change(struct ip_vs_protocol *pp, int flags)
-{
-}
-
-static int
-sctp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
-{
-
-return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_SCTP_S_LAST,
-                               sctp_state_name_table, sname, to);
-}
-
 static inline int
-set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
+set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
                int direction, const struct sk_buff *skb)
 {
        sctp_chunkhdr_t _sctpch, *sch;
@@ -971,7 +965,7 @@ set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
 
                IP_VS_DBG_BUF(8, "%s %s  %s:%d->"
                                "%s:%d state: %s->%s conn->refcnt:%d\n",
-                               pp->name,
+                               pd->pp->name,
                                ((direction == IP_VS_DIR_OUTPUT) ?
                                 "output " : "input "),
                                IP_VS_DBG_ADDR(cp->af, &cp->daddr),
@@ -995,75 +989,73 @@ set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
                        }
                }
        }
+       if (likely(pd))
+               cp->timeout = pd->timeout_table[cp->state = next_state];
+       else    /* What to do ? */
+               cp->timeout = sctp_timeouts[cp->state = next_state];
 
-        cp->timeout = pp->timeout_table[cp->state = next_state];
-
-        return 1;
+       return 1;
 }
 
 static int
 sctp_state_transition(struct ip_vs_conn *cp, int direction,
-               const struct sk_buff *skb, struct ip_vs_protocol *pp)
+               const struct sk_buff *skb, struct ip_vs_proto_data *pd)
 {
        int ret = 0;
 
        spin_lock(&cp->lock);
-       ret = set_sctp_state(pp, cp, direction, skb);
+       ret = set_sctp_state(pd, cp, direction, skb);
        spin_unlock(&cp->lock);
 
        return ret;
 }
 
-/*
- *      Hash table for SCTP application incarnations
- */
-#define SCTP_APP_TAB_BITS        4
-#define SCTP_APP_TAB_SIZE        (1 << SCTP_APP_TAB_BITS)
-#define SCTP_APP_TAB_MASK        (SCTP_APP_TAB_SIZE - 1)
-
-static struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
-static DEFINE_SPINLOCK(sctp_app_lock);
-
 static inline __u16 sctp_app_hashkey(__be16 port)
 {
        return (((__force u16)port >> SCTP_APP_TAB_BITS) ^ (__force u16)port)
                & SCTP_APP_TAB_MASK;
 }
 
-static int sctp_register_app(struct ip_vs_app *inc)
+static int sctp_register_app(struct net *net, struct ip_vs_app *inc)
 {
        struct ip_vs_app *i;
        __u16 hash;
        __be16 port = inc->port;
        int ret = 0;
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP);
 
        hash = sctp_app_hashkey(port);
 
-       spin_lock_bh(&sctp_app_lock);
-       list_for_each_entry(i, &sctp_apps[hash], p_list) {
+       spin_lock_bh(&ipvs->sctp_app_lock);
+       list_for_each_entry(i, &ipvs->sctp_apps[hash], p_list) {
                if (i->port == port) {
                        ret = -EEXIST;
                        goto out;
                }
        }
-       list_add(&inc->p_list, &sctp_apps[hash]);
-       atomic_inc(&ip_vs_protocol_sctp.appcnt);
+       list_add(&inc->p_list, &ipvs->sctp_apps[hash]);
+       atomic_inc(&pd->appcnt);
 out:
-       spin_unlock_bh(&sctp_app_lock);
+       spin_unlock_bh(&ipvs->sctp_app_lock);
 
        return ret;
 }
 
-static void sctp_unregister_app(struct ip_vs_app *inc)
+static void sctp_unregister_app(struct net *net, struct ip_vs_app *inc)
 {
-       spin_lock_bh(&sctp_app_lock);
-       atomic_dec(&ip_vs_protocol_sctp.appcnt);
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP);
+
+       spin_lock_bh(&ipvs->sctp_app_lock);
+       atomic_dec(&pd->appcnt);
        list_del(&inc->p_list);
-       spin_unlock_bh(&sctp_app_lock);
+       spin_unlock_bh(&ipvs->sctp_app_lock);
 }
 
 static int sctp_app_conn_bind(struct ip_vs_conn *cp)
 {
+       struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
        int hash;
        struct ip_vs_app *inc;
        int result = 0;
@@ -1074,12 +1066,12 @@ static int sctp_app_conn_bind(struct ip_vs_conn *cp)
        /* Lookup application incarnations and bind the right one */
        hash = sctp_app_hashkey(cp->vport);
 
-       spin_lock(&sctp_app_lock);
-       list_for_each_entry(inc, &sctp_apps[hash], p_list) {
+       spin_lock(&ipvs->sctp_app_lock);
+       list_for_each_entry(inc, &ipvs->sctp_apps[hash], p_list) {
                if (inc->port == cp->vport) {
                        if (unlikely(!ip_vs_app_inc_get(inc)))
                                break;
-                       spin_unlock(&sctp_app_lock);
+                       spin_unlock(&ipvs->sctp_app_lock);
 
                        IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->"
                                        "%s:%u to app %s on port %u\n",
@@ -1095,43 +1087,50 @@ static int sctp_app_conn_bind(struct ip_vs_conn *cp)
                        goto out;
                }
        }
-       spin_unlock(&sctp_app_lock);
+       spin_unlock(&ipvs->sctp_app_lock);
 out:
        return result;
 }
 
-static void ip_vs_sctp_init(struct ip_vs_protocol *pp)
+/* ---------------------------------------------
+ *   timeouts are netns related now.
+ * ---------------------------------------------
+ */
+static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
 {
-       IP_VS_INIT_HASH_TABLE(sctp_apps);
-       pp->timeout_table = sctp_timeouts;
-}
+       struct netns_ipvs *ipvs = net_ipvs(net);
 
+       ip_vs_init_hash_table(ipvs->sctp_apps, SCTP_APP_TAB_SIZE);
+       spin_lock_init(&ipvs->sctp_app_lock);
+       pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts,
+                                                       sizeof(sctp_timeouts));
+}
 
-static void ip_vs_sctp_exit(struct ip_vs_protocol *pp)
+static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd)
 {
-
+       kfree(pd->timeout_table);
 }
 
 struct ip_vs_protocol ip_vs_protocol_sctp = {
-       .name = "SCTP",
-       .protocol = IPPROTO_SCTP,
-       .num_states = IP_VS_SCTP_S_LAST,
-       .dont_defrag = 0,
-       .appcnt = ATOMIC_INIT(0),
-       .init = ip_vs_sctp_init,
-       .exit = ip_vs_sctp_exit,
-       .register_app = sctp_register_app,
+       .name           = "SCTP",
+       .protocol       = IPPROTO_SCTP,
+       .num_states     = IP_VS_SCTP_S_LAST,
+       .dont_defrag    = 0,
+       .init           = NULL,
+       .exit           = NULL,
+       .init_netns     = __ip_vs_sctp_init,
+       .exit_netns     = __ip_vs_sctp_exit,
+       .register_app   = sctp_register_app,
        .unregister_app = sctp_unregister_app,
-       .conn_schedule = sctp_conn_schedule,
-       .conn_in_get = ip_vs_conn_in_get_proto,
-       .conn_out_get = ip_vs_conn_out_get_proto,
-       .snat_handler = sctp_snat_handler,
-       .dnat_handler = sctp_dnat_handler,
-       .csum_check = sctp_csum_check,
-       .state_name = sctp_state_name,
+       .conn_schedule  = sctp_conn_schedule,
+       .conn_in_get    = ip_vs_conn_in_get_proto,
+       .conn_out_get   = ip_vs_conn_out_get_proto,
+       .snat_handler   = sctp_snat_handler,
+       .dnat_handler   = sctp_dnat_handler,
+       .csum_check     = sctp_csum_check,
+       .state_name     = sctp_state_name,
        .state_transition = sctp_state_transition,
-       .app_conn_bind = sctp_app_conn_bind,
-       .debug_packet = ip_vs_tcpudp_debug_packet,
-       .timeout_change = sctp_timeout_change,
-       .set_state_timeout = sctp_set_state_timeout,
+       .app_conn_bind  = sctp_app_conn_bind,
+       .debug_packet   = ip_vs_tcpudp_debug_packet,
+       .timeout_change = NULL,
 };
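
sctp_timeouts becomes const above and each namespace receives its own writable copy from ip_vs_create_timeout_table() at init_netns time, freed again in exit_netns. The helper itself is not in this hunk; the sketch below assumes it is a plain copy, and shows why the copy matters: tuning the timeouts of one namespace leaves the defaults and the other namespaces untouched.

/*
 * Userspace sketch of the per-namespace timeout table: the compiled-in
 * defaults stay const, each namespace gets a private writable copy.
 * create_timeout_table() imitates what ip_vs_create_timeout_table() is
 * assumed to do (a straight copy); the real helper is not part of this diff.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { S_NONE, S_ESTABLISHED, S_LAST };

static const int default_timeouts[S_LAST + 1] = {
        [S_NONE]        = 2,
        [S_ESTABLISHED] = 15 * 60,
        [S_LAST]        = 2,
};

static int *create_timeout_table(const int *table, size_t size)
{
        int *copy = malloc(size);

        if (copy)
                memcpy(copy, table, size);
        return copy;
}

int main(void)
{
        /* two "namespaces", each with its own tunable copy */
        int *ns_a = create_timeout_table(default_timeouts, sizeof(default_timeouts));
        int *ns_b = create_timeout_table(default_timeouts, sizeof(default_timeouts));

        if (!ns_a || !ns_b)
                return 1;
        ns_b[S_ESTABLISHED] = 60;       /* tuning one namespace ...          */
        printf("a=%d b=%d\n",           /* ... leaves the other untouched    */
               ns_a[S_ESTABLISHED], ns_b[S_ESTABLISHED]);
        free(ns_a);
        free(ns_b);
        return 0;
}
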
index f6c5200e214663fe915b2136532c54d03861e5eb..c0cc341b840d38180a948c51694d3e4d877e10fb 100644 (file)
@@ -9,8 +9,12 @@
  *              as published by the Free Software Foundation; either version
  *              2 of the License, or (at your option) any later version.
  *
- * Changes:
+ * Changes:     Hans Schillstrom <hans.schillstrom@ericsson.com>
  *
+ *              Network name space (netns) aware.
+ *              Global data moved to netns, i.e. struct netns_ipvs.
+ *              The tcp_timeouts table now has a copy per netns, kept in the
+ *              per-protocol ip_vs_proto_data and handled per netns.
  */
 
 #define KMSG_COMPONENT "IPVS"
 #include <net/ip_vs.h>
 
 static int
-tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
                  int *verdict, struct ip_vs_conn **cpp)
 {
+       struct net *net;
        struct ip_vs_service *svc;
        struct tcphdr _tcph, *th;
        struct ip_vs_iphdr iph;
@@ -42,14 +47,14 @@ tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
                *verdict = NF_DROP;
                return 0;
        }
-
+       net = skb_net(skb);
        /* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */
        if (th->syn &&
-           (svc = ip_vs_service_get(af, skb->mark, iph.protocol, &iph.daddr,
-                                    th->dest))) {
+           (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
+                                    &iph.daddr, th->dest))) {
                int ignored;
 
-               if (ip_vs_todrop()) {
+               if (ip_vs_todrop(net_ipvs(net))) {
                        /*
                         * It seems that we are very loaded.
                         * We have to drop this packet :(
@@ -63,13 +68,19 @@ tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
                 * Let the virtual server select a real server for the
                 * incoming connection, and create a connection entry.
                 */
-               *cpp = ip_vs_schedule(svc, skb, pp, &ignored);
-               if (!*cpp && !ignored) {
-                       *verdict = ip_vs_leave(svc, skb, pp);
+               *cpp = ip_vs_schedule(svc, skb, pd, &ignored);
+               if (!*cpp && ignored <= 0) {
+                       if (!ignored)
+                               *verdict = ip_vs_leave(svc, skb, pd);
+                       else {
+                               ip_vs_service_put(svc);
+                               *verdict = NF_DROP;
+                       }
                        return 0;
                }
                ip_vs_service_put(svc);
        }
+       /* NF_ACCEPT */
        return 1;
 }
 
@@ -338,7 +349,7 @@ static const int tcp_state_off[IP_VS_DIR_LAST] = {
 /*
  *     Timeout table[state]
  */
-static int tcp_timeouts[IP_VS_TCP_S_LAST+1] = {
+static const int tcp_timeouts[IP_VS_TCP_S_LAST+1] = {
        [IP_VS_TCP_S_NONE]              =       2*HZ,
        [IP_VS_TCP_S_ESTABLISHED]       =       15*60*HZ,
        [IP_VS_TCP_S_SYN_SENT]          =       2*60*HZ,
@@ -437,10 +448,7 @@ static struct tcp_states_t tcp_states_dos [] = {
 /*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
 };
 
-static struct tcp_states_t *tcp_state_table = tcp_states;
-
-
-static void tcp_timeout_change(struct ip_vs_protocol *pp, int flags)
+static void tcp_timeout_change(struct ip_vs_proto_data *pd, int flags)
 {
        int on = (flags & 1);           /* secure_tcp */
 
@@ -450,14 +458,7 @@ static void tcp_timeout_change(struct ip_vs_protocol *pp, int flags)
        ** for most if not for all of the applications. Something
        ** like "capabilities" (flags) for each object.
        */
-       tcp_state_table = (on? tcp_states_dos : tcp_states);
-}
-
-static int
-tcp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
-{
-       return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_TCP_S_LAST,
-                                      tcp_state_name_table, sname, to);
+       pd->tcp_state_table = (on ? tcp_states_dos : tcp_states);
 }
 
 static inline int tcp_state_idx(struct tcphdr *th)
@@ -474,7 +475,7 @@ static inline int tcp_state_idx(struct tcphdr *th)
 }
 
 static inline void
-set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
+set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
              int direction, struct tcphdr *th)
 {
        int state_idx;
@@ -497,7 +498,8 @@ set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
                goto tcp_state_out;
        }
 
-       new_state = tcp_state_table[state_off+state_idx].next_state[cp->state];
+       new_state =
+               pd->tcp_state_table[state_off+state_idx].next_state[cp->state];
 
   tcp_state_out:
        if (new_state != cp->state) {
@@ -505,7 +507,7 @@ set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
 
                IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] %s:%d->"
                              "%s:%d state: %s->%s conn->refcnt:%d\n",
-                             pp->name,
+                             pd->pp->name,
                              ((state_off == TCP_DIR_OUTPUT) ?
                               "output " : "input "),
                              th->syn ? 'S' : '.',
@@ -535,17 +537,19 @@ set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
                }
        }
 
-       cp->timeout = pp->timeout_table[cp->state = new_state];
+       if (likely(pd))
+               cp->timeout = pd->timeout_table[cp->state = new_state];
+       else    /* What to do ? */
+               cp->timeout = tcp_timeouts[cp->state = new_state];
 }
 
-
 /*
  *     Handle state transitions
  */
 static int
 tcp_state_transition(struct ip_vs_conn *cp, int direction,
                     const struct sk_buff *skb,
-                    struct ip_vs_protocol *pp)
+                    struct ip_vs_proto_data *pd)
 {
        struct tcphdr _tcph, *th;
 
@@ -560,23 +564,12 @@ tcp_state_transition(struct ip_vs_conn *cp, int direction,
                return 0;
 
        spin_lock(&cp->lock);
-       set_tcp_state(pp, cp, direction, th);
+       set_tcp_state(pd, cp, direction, th);
        spin_unlock(&cp->lock);
 
        return 1;
 }
 
-
-/*
- *     Hash table for TCP application incarnations
- */
-#define        TCP_APP_TAB_BITS        4
-#define        TCP_APP_TAB_SIZE        (1 << TCP_APP_TAB_BITS)
-#define        TCP_APP_TAB_MASK        (TCP_APP_TAB_SIZE - 1)
-
-static struct list_head tcp_apps[TCP_APP_TAB_SIZE];
-static DEFINE_SPINLOCK(tcp_app_lock);
-
 static inline __u16 tcp_app_hashkey(__be16 port)
 {
        return (((__force u16)port >> TCP_APP_TAB_BITS) ^ (__force u16)port)
@@ -584,44 +577,50 @@ static inline __u16 tcp_app_hashkey(__be16 port)
 }
 
 
-static int tcp_register_app(struct ip_vs_app *inc)
+static int tcp_register_app(struct net *net, struct ip_vs_app *inc)
 {
        struct ip_vs_app *i;
        __u16 hash;
        __be16 port = inc->port;
        int ret = 0;
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
 
        hash = tcp_app_hashkey(port);
 
-       spin_lock_bh(&tcp_app_lock);
-       list_for_each_entry(i, &tcp_apps[hash], p_list) {
+       spin_lock_bh(&ipvs->tcp_app_lock);
+       list_for_each_entry(i, &ipvs->tcp_apps[hash], p_list) {
                if (i->port == port) {
                        ret = -EEXIST;
                        goto out;
                }
        }
-       list_add(&inc->p_list, &tcp_apps[hash]);
-       atomic_inc(&ip_vs_protocol_tcp.appcnt);
+       list_add(&inc->p_list, &ipvs->tcp_apps[hash]);
+       atomic_inc(&pd->appcnt);
 
   out:
-       spin_unlock_bh(&tcp_app_lock);
+       spin_unlock_bh(&ipvs->tcp_app_lock);
        return ret;
 }
 
 
 static void
-tcp_unregister_app(struct ip_vs_app *inc)
+tcp_unregister_app(struct net *net, struct ip_vs_app *inc)
 {
-       spin_lock_bh(&tcp_app_lock);
-       atomic_dec(&ip_vs_protocol_tcp.appcnt);
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+
+       spin_lock_bh(&ipvs->tcp_app_lock);
+       atomic_dec(&pd->appcnt);
        list_del(&inc->p_list);
-       spin_unlock_bh(&tcp_app_lock);
+       spin_unlock_bh(&ipvs->tcp_app_lock);
 }
 
 
 static int
 tcp_app_conn_bind(struct ip_vs_conn *cp)
 {
+       struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
        int hash;
        struct ip_vs_app *inc;
        int result = 0;
@@ -633,12 +632,12 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
        /* Lookup application incarnations and bind the right one */
        hash = tcp_app_hashkey(cp->vport);
 
-       spin_lock(&tcp_app_lock);
-       list_for_each_entry(inc, &tcp_apps[hash], p_list) {
+       spin_lock(&ipvs->tcp_app_lock);
+       list_for_each_entry(inc, &ipvs->tcp_apps[hash], p_list) {
                if (inc->port == cp->vport) {
                        if (unlikely(!ip_vs_app_inc_get(inc)))
                                break;
-                       spin_unlock(&tcp_app_lock);
+                       spin_unlock(&ipvs->tcp_app_lock);
 
                        IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
                                      "%s:%u to app %s on port %u\n",
@@ -655,7 +654,7 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
                        goto out;
                }
        }
-       spin_unlock(&tcp_app_lock);
+       spin_unlock(&ipvs->tcp_app_lock);
 
   out:
        return result;
@@ -665,24 +664,35 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
 /*
  *     Set LISTEN timeout. (ip_vs_conn_put will setup timer)
  */
-void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp)
+void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp)
 {
+       struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+
        spin_lock(&cp->lock);
        cp->state = IP_VS_TCP_S_LISTEN;
-       cp->timeout = ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_LISTEN];
+       cp->timeout = (pd ? pd->timeout_table[IP_VS_TCP_S_LISTEN]
+                          : tcp_timeouts[IP_VS_TCP_S_LISTEN]);
        spin_unlock(&cp->lock);
 }
 
-
-static void ip_vs_tcp_init(struct ip_vs_protocol *pp)
+/* ---------------------------------------------
+ *   timeouts are netns related now.
+ * ---------------------------------------------
+ */
+static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
 {
-       IP_VS_INIT_HASH_TABLE(tcp_apps);
-       pp->timeout_table = tcp_timeouts;
-}
+       struct netns_ipvs *ipvs = net_ipvs(net);
 
+       ip_vs_init_hash_table(ipvs->tcp_apps, TCP_APP_TAB_SIZE);
+       spin_lock_init(&ipvs->tcp_app_lock);
+       pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
+                                                       sizeof(tcp_timeouts));
+       pd->tcp_state_table =  tcp_states;
+}
 
-static void ip_vs_tcp_exit(struct ip_vs_protocol *pp)
+static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd)
 {
+       kfree(pd->timeout_table);
 }
 
 
@@ -691,9 +701,10 @@ struct ip_vs_protocol ip_vs_protocol_tcp = {
        .protocol =             IPPROTO_TCP,
        .num_states =           IP_VS_TCP_S_LAST,
        .dont_defrag =          0,
-       .appcnt =               ATOMIC_INIT(0),
-       .init =                 ip_vs_tcp_init,
-       .exit =                 ip_vs_tcp_exit,
+       .init =                 NULL,
+       .exit =                 NULL,
+       .init_netns =           __ip_vs_tcp_init,
+       .exit_netns =           __ip_vs_tcp_exit,
        .register_app =         tcp_register_app,
        .unregister_app =       tcp_unregister_app,
        .conn_schedule =        tcp_conn_schedule,
@@ -707,5 +718,4 @@ struct ip_vs_protocol ip_vs_protocol_tcp = {
        .app_conn_bind =        tcp_app_conn_bind,
        .debug_packet =         ip_vs_tcpudp_debug_packet,
        .timeout_change =       tcp_timeout_change,
-       .set_state_timeout =    tcp_set_state_timeout,
 };
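
The conn_schedule hooks for TCP above (and likewise for SCTP and UDP) now distinguish "no connection and not handled" (ignored == 0, hand the packet to ip_vs_leave()) from "no connection because scheduling failed hard" (ignored < 0, drop the packet). Below is a userspace model of that verdict decision only; the NF_* values are stand-in constants for the model, not the kernel's netfilter headers.

/*
 * Userspace model of the verdict logic added to the conn_schedule hooks:
 * no connection and ignored == 0 -> fall back to ip_vs_leave();
 * no connection and ignored <  0 -> drop;
 * otherwise continue (accept).
 */
#include <stdio.h>

#define NF_DROP   0
#define NF_ACCEPT 1
#define NF_LEAVE  2     /* stands in for whatever ip_vs_leave() decides */

static int conn_schedule_verdict(const void *cp, int ignored)
{
        if (!cp && ignored <= 0) {
                if (!ignored)
                        return NF_LEAVE;        /* no real server available */
                return NF_DROP;                 /* hard scheduling failure */
        }
        return NF_ACCEPT;                       /* scheduled, or not our business */
}

int main(void)
{
        int dummy_cp;

        printf("scheduled:  %d\n", conn_schedule_verdict(&dummy_cp, 0));
        printf("no dest:    %d\n", conn_schedule_verdict(NULL, 0));
        printf("hard error: %d\n", conn_schedule_verdict(NULL, -1));
        return 0;
}
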
index 9d106a06bb0a46376252b32f2d30882d921b8b16..f1282cbe6fe3f92d9ff74bf5fe8fbe9ed4e31176 100644 (file)
@@ -9,7 +9,8 @@
  *              as published by the Free Software Foundation; either version
  *              2 of the License, or (at your option) any later version.
  *
- * Changes:
+ * Changes:     Hans Schillstrom <hans.schillstrom@ericsson.com>
+ *              Network name space (netns) aware.
  *
  */
 
 #include <net/ip6_checksum.h>
 
 static int
-udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
                  int *verdict, struct ip_vs_conn **cpp)
 {
+       struct net *net;
        struct ip_vs_service *svc;
        struct udphdr _udph, *uh;
        struct ip_vs_iphdr iph;
@@ -42,13 +44,13 @@ udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
                *verdict = NF_DROP;
                return 0;
        }
-
-       svc = ip_vs_service_get(af, skb->mark, iph.protocol,
+       net = skb_net(skb);
+       svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
                                &iph.daddr, uh->dest);
        if (svc) {
                int ignored;
 
-               if (ip_vs_todrop()) {
+               if (ip_vs_todrop(net_ipvs(net))) {
                        /*
                         * It seems that we are very loaded.
                         * We have to drop this packet :(
@@ -62,13 +64,19 @@ udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
                 * Let the virtual server select a real server for the
                 * incoming connection, and create a connection entry.
                 */
-               *cpp = ip_vs_schedule(svc, skb, pp, &ignored);
-               if (!*cpp && !ignored) {
-                       *verdict = ip_vs_leave(svc, skb, pp);
+               *cpp = ip_vs_schedule(svc, skb, pd, &ignored);
+               if (!*cpp && ignored <= 0) {
+                       if (!ignored)
+                               *verdict = ip_vs_leave(svc, skb, pd);
+                       else {
+                               ip_vs_service_put(svc);
+                               *verdict = NF_DROP;
+                       }
                        return 0;
                }
                ip_vs_service_put(svc);
        }
+       /* NF_ACCEPT */
        return 1;
 }
 
@@ -338,19 +346,6 @@ udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
        return 1;
 }
 
-
-/*
- *     Note: the caller guarantees that only one of register_app,
- *     unregister_app or app_conn_bind is called each time.
- */
-
-#define        UDP_APP_TAB_BITS        4
-#define        UDP_APP_TAB_SIZE        (1 << UDP_APP_TAB_BITS)
-#define        UDP_APP_TAB_MASK        (UDP_APP_TAB_SIZE - 1)
-
-static struct list_head udp_apps[UDP_APP_TAB_SIZE];
-static DEFINE_SPINLOCK(udp_app_lock);
-
 static inline __u16 udp_app_hashkey(__be16 port)
 {
        return (((__force u16)port >> UDP_APP_TAB_BITS) ^ (__force u16)port)
@@ -358,44 +353,50 @@ static inline __u16 udp_app_hashkey(__be16 port)
 }
 
 
-static int udp_register_app(struct ip_vs_app *inc)
+static int udp_register_app(struct net *net, struct ip_vs_app *inc)
 {
        struct ip_vs_app *i;
        __u16 hash;
        __be16 port = inc->port;
        int ret = 0;
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
 
        hash = udp_app_hashkey(port);
 
 
-       spin_lock_bh(&udp_app_lock);
-       list_for_each_entry(i, &udp_apps[hash], p_list) {
+       spin_lock_bh(&ipvs->udp_app_lock);
+       list_for_each_entry(i, &ipvs->udp_apps[hash], p_list) {
                if (i->port == port) {
                        ret = -EEXIST;
                        goto out;
                }
        }
-       list_add(&inc->p_list, &udp_apps[hash]);
-       atomic_inc(&ip_vs_protocol_udp.appcnt);
+       list_add(&inc->p_list, &ipvs->udp_apps[hash]);
+       atomic_inc(&pd->appcnt);
 
   out:
-       spin_unlock_bh(&udp_app_lock);
+       spin_unlock_bh(&ipvs->udp_app_lock);
        return ret;
 }
 
 
 static void
-udp_unregister_app(struct ip_vs_app *inc)
+udp_unregister_app(struct net *net, struct ip_vs_app *inc)
 {
-       spin_lock_bh(&udp_app_lock);
-       atomic_dec(&ip_vs_protocol_udp.appcnt);
+       struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       spin_lock_bh(&ipvs->udp_app_lock);
+       atomic_dec(&pd->appcnt);
        list_del(&inc->p_list);
-       spin_unlock_bh(&udp_app_lock);
+       spin_unlock_bh(&ipvs->udp_app_lock);
 }
 
 
 static int udp_app_conn_bind(struct ip_vs_conn *cp)
 {
+       struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
        int hash;
        struct ip_vs_app *inc;
        int result = 0;
@@ -407,12 +408,12 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp)
        /* Lookup application incarnations and bind the right one */
        hash = udp_app_hashkey(cp->vport);
 
-       spin_lock(&udp_app_lock);
-       list_for_each_entry(inc, &udp_apps[hash], p_list) {
+       spin_lock(&ipvs->udp_app_lock);
+       list_for_each_entry(inc, &ipvs->udp_apps[hash], p_list) {
                if (inc->port == cp->vport) {
                        if (unlikely(!ip_vs_app_inc_get(inc)))
                                break;
-                       spin_unlock(&udp_app_lock);
+                       spin_unlock(&ipvs->udp_app_lock);
 
                        IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
                                      "%s:%u to app %s on port %u\n",
@@ -429,14 +430,14 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp)
                        goto out;
                }
        }
-       spin_unlock(&udp_app_lock);
+       spin_unlock(&ipvs->udp_app_lock);
 
   out:
        return result;
 }
 
 
-static int udp_timeouts[IP_VS_UDP_S_LAST+1] = {
+static const int udp_timeouts[IP_VS_UDP_S_LAST+1] = {
        [IP_VS_UDP_S_NORMAL]            =       5*60*HZ,
        [IP_VS_UDP_S_LAST]              =       2*HZ,
 };
@@ -446,14 +447,6 @@ static const char *const udp_state_name_table[IP_VS_UDP_S_LAST+1] = {
        [IP_VS_UDP_S_LAST]              =       "BUG!",
 };
 
-
-static int
-udp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
-{
-       return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_UDP_S_LAST,
-                                      udp_state_name_table, sname, to);
-}
-
 static const char * udp_state_name(int state)
 {
        if (state >= IP_VS_UDP_S_LAST)
@@ -464,20 +457,30 @@ static const char * udp_state_name(int state)
 static int
 udp_state_transition(struct ip_vs_conn *cp, int direction,
                     const struct sk_buff *skb,
-                    struct ip_vs_protocol *pp)
+                    struct ip_vs_proto_data *pd)
 {
-       cp->timeout = pp->timeout_table[IP_VS_UDP_S_NORMAL];
+       if (unlikely(!pd)) {
+               pr_err("UDP no ns data\n");
+               return 0;
+       }
+
+       cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
        return 1;
 }
 
-static void udp_init(struct ip_vs_protocol *pp)
+static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
 {
-       IP_VS_INIT_HASH_TABLE(udp_apps);
-       pp->timeout_table = udp_timeouts;
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       ip_vs_init_hash_table(ipvs->udp_apps, UDP_APP_TAB_SIZE);
+       spin_lock_init(&ipvs->udp_app_lock);
+       pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
+                                                       sizeof(udp_timeouts));
 }
 
-static void udp_exit(struct ip_vs_protocol *pp)
+static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd)
 {
+       kfree(pd->timeout_table);
 }
 
 
@@ -486,8 +489,10 @@ struct ip_vs_protocol ip_vs_protocol_udp = {
        .protocol =             IPPROTO_UDP,
        .num_states =           IP_VS_UDP_S_LAST,
        .dont_defrag =          0,
-       .init =                 udp_init,
-       .exit =                 udp_exit,
+       .init =                 NULL,
+       .exit =                 NULL,
+       .init_netns =           __udp_init,
+       .exit_netns =           __udp_exit,
        .conn_schedule =        udp_conn_schedule,
        .conn_in_get =          ip_vs_conn_in_get_proto,
        .conn_out_get =         ip_vs_conn_out_get_proto,
@@ -501,5 +506,4 @@ struct ip_vs_protocol ip_vs_protocol_udp = {
        .app_conn_bind =        udp_app_conn_bind,
        .debug_packet =         ip_vs_tcpudp_debug_packet,
        .timeout_change =       NULL,
-       .set_state_timeout =    udp_set_state_timeout,
 };
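
The TCP, UDP and SCTP application tables move into struct netns_ipvs, but the bucket selection is unchanged: the 16-bit port is folded with ((port >> TAB_BITS) ^ port) & TAB_MASK. A short demo of that key function follows (byte order is ignored in this userspace demo).

/*
 * Demonstration of the application hash key shared by the TCP, UDP and
 * SCTP helpers: XOR the top bits of the port onto its low bits, then mask
 * to the table size (16 buckets here).
 */
#include <stdio.h>
#include <stdint.h>

#define APP_TAB_BITS 4
#define APP_TAB_SIZE (1 << APP_TAB_BITS)
#define APP_TAB_MASK (APP_TAB_SIZE - 1)

static uint16_t app_hashkey(uint16_t port)
{
        return ((port >> APP_TAB_BITS) ^ port) & APP_TAB_MASK;
}

int main(void)
{
        uint16_t ports[] = { 21, 69, 5060, 8080 };
        size_t i;

        for (i = 0; i < sizeof(ports) / sizeof(ports[0]); i++)
                printf("port %5u -> bucket %u\n", ports[i], app_hashkey(ports[i]));
        return 0;
}
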
index e210f37d8ea29edca08996db3920d6ec363d234c..c49b388d1085238ce435fec81fa8f97da6a8ee93 100644 (file)
@@ -72,7 +72,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
                q = q->next;
        } while (q != p);
        write_unlock(&svc->sched_lock);
-       IP_VS_ERR_RL("RR: no destination available\n");
+       ip_vs_scheduler_err(svc, "no destination available");
        return NULL;
 
   out:
index 076ebe00435deef930f428fbb02414c550c14b98..08dbdd5bc18fc5dc9f23562ac86a66e20dafb897 100644 (file)
@@ -29,6 +29,7 @@
 
 #include <net/ip_vs.h>
 
+EXPORT_SYMBOL(ip_vs_scheduler_err);
 /*
  *  IPVS scheduler list
  */
@@ -146,6 +147,30 @@ void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
                module_put(scheduler->module);
 }
 
+/*
+ * Common error output helper for schedulers
+ */
+
+void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
+{
+       if (svc->fwmark) {
+               IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n",
+                            svc->scheduler->name, svc->fwmark,
+                            svc->fwmark, msg);
+#ifdef CONFIG_IP_VS_IPV6
+       } else if (svc->af == AF_INET6) {
+               IP_VS_ERR_RL("%s: %s [%pI6]:%d - %s\n",
+                            svc->scheduler->name,
+                            ip_vs_proto_name(svc->protocol),
+                            &svc->addr.in6, ntohs(svc->port), msg);
+#endif
+       } else {
+               IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n",
+                            svc->scheduler->name,
+                            ip_vs_proto_name(svc->protocol),
+                            &svc->addr.ip, ntohs(svc->port), msg);
+       }
+}
 
 /*
  *  Register a scheduler in the scheduler list
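
ip_vs_scheduler_err() above replaces the per-scheduler "no destination available" strings and prefixes the message with the scheduler name plus either the firewall mark or the virtual address and port. The userspace sketch below reproduces the same three-way formatting; struct svc_model, the pre-rendered address strings and the fixed "TCP" label are inventions of the sketch, not the kernel's types or output.

/*
 * Userspace sketch of the message shapes produced by the shared scheduler
 * error helper: one branch for fwmark services, one for IPv6, one for IPv4.
 */
#include <stdio.h>

struct svc_model {
        const char *sched;
        unsigned fwmark;                /* non-zero: fwmark-based service */
        int is_ipv6;
        const char *addr;               /* address already rendered as text */
        unsigned short port;
};

static void scheduler_err(const struct svc_model *svc, const char *msg)
{
        if (svc->fwmark)
                printf("%s: FWM %u 0x%08X - %s\n",
                       svc->sched, svc->fwmark, svc->fwmark, msg);
        else if (svc->is_ipv6)
                printf("%s: TCP [%s]:%d - %s\n",
                       svc->sched, svc->addr, svc->port, msg);
        else
                printf("%s: TCP %s:%d - %s\n",
                       svc->sched, svc->addr, svc->port, msg);
}

int main(void)
{
        struct svc_model fwm = { "wlc", 1, 0, NULL, 0 };
        struct svc_model v4  = { "rr",  0, 0, "192.0.2.10", 80 };
        struct svc_model v6  = { "lc",  0, 1, "2001:db8::1", 80 };

        scheduler_err(&fwm, "no destination available");
        scheduler_err(&v4,  "no destination available");
        scheduler_err(&v6,  "no destination available");
        return 0;
}
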
index 1ab75a9dc400d5c9a45bf5a02ff6e95949292cff..89ead246ed3d250aa24bc2e195f4901f7b73b1d3 100644 (file)
@@ -87,7 +87,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
                        goto nextstage;
                }
        }
-       IP_VS_ERR_RL("SED: no destination available\n");
+       ip_vs_scheduler_err(svc, "no destination available");
        return NULL;
 
        /*
index e6cc174fbc06d4410a54c1e09021811e5773e81c..b5e2556c581ad4c7ddabad5807d6af2d50add98f 100644 (file)
@@ -223,7 +223,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
            || !(dest->flags & IP_VS_DEST_F_AVAILABLE)
            || atomic_read(&dest->weight) <= 0
            || is_overloaded(dest)) {
-               IP_VS_ERR_RL("SH: no destination available\n");
+               ip_vs_scheduler_err(svc, "no destination available");
                return NULL;
        }
 
index ab85aedea17eea6100eb1aefe48b028d371f29d2..fecf24de4af32a5b39db3a7f58edc6c4c9b81e08 100644 (file)
@@ -5,6 +5,18 @@
  *              high-performance and highly available server based on a
  *              cluster of servers.
  *
+ * Version 1    is capable of handling both version 0 and 1 messages.
+ *              Version 0 is the plain old format.
+ *              Note: Version 0 receivers will just drop Ver. 1 messages.
+ *              Version 1 is capable of handling IPv6, persistence data,
+ *              time-outs, and firewall marks.
+ *              In ver. 1 "ip_vs_sync_conn_options" is sent in network order.
+ *              Ver. 0 can be turned on by sysctl -w net.ipv4.vs.sync_version=0
+ *
+ * Definitions  Message:    a complete datagram
+ *              Sync_conn:  a part of a Message
+ *              Param Data: an option to a Sync_conn.
+ *
  * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
  *
  * ip_vs_sync:  sync connection info from master load balancer to backups
@@ -15,6 +27,8 @@
  *     Alexandre Cassen        :       Added SyncID support for incoming sync
  *                                     messages filtering.
  *     Justin Ossevoort        :       Fix endian problem on sync message size.
+ *     Hans Schillstrom        :       Added Version 1: i.e. IPv6,
+ *                                     Persistence support, fwmark and time-out.
  */
 
 #define KMSG_COMPONENT "IPVS"
@@ -35,6 +49,8 @@
 #include <linux/wait.h>
 #include <linux/kernel.h>
 
+#include <asm/unaligned.h>             /* Used for ntoh_seq and hton_seq */
+
 #include <net/ip.h>
 #include <net/sock.h>
 
 #define IP_VS_SYNC_GROUP 0xe0000051    /* multicast addr - 224.0.0.81 */
 #define IP_VS_SYNC_PORT  8848          /* multicast port */
 
+#define SYNC_PROTO_VER  1              /* Protocol version in header */
 
 /*
  *     IPVS sync connection entry
+ *     Version 0, i.e. original version.
  */
-struct ip_vs_sync_conn {
+struct ip_vs_sync_conn_v0 {
        __u8                    reserved;
 
        /* Protocol, addresses and port numbers */
@@ -71,41 +89,159 @@ struct ip_vs_sync_conn_options {
        struct ip_vs_seq        out_seq;        /* outgoing seq. struct */
 };
 
+/*
+     Sync Connection format (sync_conn)
+
+       0                   1                   2                   3
+       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |    Type       |    Protocol   | Ver.  |        Size           |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |                             Flags                             |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |            State              |         cport                 |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |            vport              |         dport                 |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |                             fwmark                            |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |                             timeout  (in sec.)                |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |                              ...                              |
+      |                        IP-Addresses  (v4 or v6)               |
+      |                              ...                              |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Optional Parameters.
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      | Param. Type    | Param. Length |   Param. data                |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+                               |
+      |                              ...                              |
+      |                               +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |                               | Param Type    | Param. Length |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |                           Param  data                         |
+      |         Last Param data should be padded for 32 bit alignment |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+
+/*
+ *  Type 0, IPv4 sync connection format
+ */
+struct ip_vs_sync_v4 {
+       __u8                    type;
+       __u8                    protocol;       /* Which protocol (TCP/UDP) */
+       __be16                  ver_size;       /* Version msb 4 bits */
+       /* Flags and state transition */
+       __be32                  flags;          /* status flags */
+       __be16                  state;          /* state info   */
+       /* Protocol, addresses and port numbers */
+       __be16                  cport;
+       __be16                  vport;
+       __be16                  dport;
+       __be32                  fwmark;         /* Firewall mark from skb */
+       __be32                  timeout;        /* cp timeout */
+       __be32                  caddr;          /* client address */
+       __be32                  vaddr;          /* virtual address */
+       __be32                  daddr;          /* destination address */
+       /* The sequence options start here */
+       /* PE data padded to 32bit alignment after seq. options */
+};
+/*
+ * Type 2 messages IPv6
+ */
+struct ip_vs_sync_v6 {
+       __u8                    type;
+       __u8                    protocol;       /* Which protocol (TCP/UDP) */
+       __be16                  ver_size;       /* Version msb 4 bits */
+       /* Flags and state transition */
+       __be32                  flags;          /* status flags */
+       __be16                  state;          /* state info   */
+       /* Protocol, addresses and port numbers */
+       __be16                  cport;
+       __be16                  vport;
+       __be16                  dport;
+       __be32                  fwmark;         /* Firewall mark from skb */
+       __be32                  timeout;        /* cp timeout */
+       struct in6_addr         caddr;          /* client address */
+       struct in6_addr         vaddr;          /* virtual address */
+       struct in6_addr         daddr;          /* destination address */
+       /* The sequence options start here */
+       /* PE data padded to 32bit alignment after seq. options */
+};
+
+union ip_vs_sync_conn {
+       struct ip_vs_sync_v4    v4;
+       struct ip_vs_sync_v6    v6;
+};
+
+/* Bits in Type field in above */
+#define STYPE_INET6            0
+#define STYPE_F_INET6          (1 << STYPE_INET6)
+
+#define SVER_SHIFT             12              /* Shift to get version */
+#define SVER_MASK              0x0fff          /* Mask to strip version */
+
+#define IPVS_OPT_SEQ_DATA      1
+#define IPVS_OPT_PE_DATA       2
+#define IPVS_OPT_PE_NAME       3
+#define IPVS_OPT_PARAM         7
+
+#define IPVS_OPT_F_SEQ_DATA    (1 << (IPVS_OPT_SEQ_DATA-1))
+#define IPVS_OPT_F_PE_DATA     (1 << (IPVS_OPT_PE_DATA-1))
+#define IPVS_OPT_F_PE_NAME     (1 << (IPVS_OPT_PE_NAME-1))
+#define IPVS_OPT_F_PARAM       (1 << (IPVS_OPT_PARAM-1))
+
 struct ip_vs_sync_thread_data {
+       struct net *net;
        struct socket *sock;
        char *buf;
 };
 
-#define SIMPLE_CONN_SIZE  (sizeof(struct ip_vs_sync_conn))
+/* Version 0 definition of packet sizes */
+#define SIMPLE_CONN_SIZE  (sizeof(struct ip_vs_sync_conn_v0))
 #define FULL_CONN_SIZE  \
-(sizeof(struct ip_vs_sync_conn) + sizeof(struct ip_vs_sync_conn_options))
+(sizeof(struct ip_vs_sync_conn_v0) + sizeof(struct ip_vs_sync_conn_options))
 
 
 /*
-  The master mulitcasts messages to the backup load balancers in the
-  following format.
+  The master multicasts messages (datagrams) to the backup load balancers
+  in the following format.
+
+ Version 1:
+  Note: the first byte should be zero, so ver 0 receivers will drop the packet.
 
        0                   1                   2                   3
        0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-      |  Count Conns  |    SyncID     |            Size               |
+      |      0        |    SyncID     |            Size               |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |  Count Conns  |    Version    |    Reserved, set to Zero      |
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       |                                                               |
       |                    IPVS Sync Connection (1)                   |
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       |                            .                                  |
-      |                            .                                  |
+      ~                            .                                  ~
       |                            .                                  |
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       |                                                               |
       |                    IPVS Sync Connection (n)                   |
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Version 0 Header
+       0                   1                   2                   3
+       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |  Count Conns  |    SyncID     |            Size               |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |                    IPVS Sync Connection (1)                   |
 */
 
 #define SYNC_MESG_HEADER_LEN   4
 #define MAX_CONNS_PER_SYNCBUFF 255 /* nr_conns in ip_vs_sync_mesg is 8 bit */
 
-struct ip_vs_sync_mesg {
+/* Version 0 header */
+struct ip_vs_sync_mesg_v0 {
        __u8                    nr_conns;
        __u8                    syncid;
        __u16                   size;
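The per-entry header above packs a 4-bit protocol version and a 12-bit length into the single ver_size field, which is what the SVER_SHIFT/SVER_MASK definitions imply. A small sketch of the packing and unpacking (hypothetical helpers; the sender and receiver later in this file open-code these operations):

/* Sketch only: ver_size carries the version in its top 4 bits and the
 * entry size in bytes in its low 12 bits, in network byte order.
 */
static inline __be16 sync_pack_ver_size(unsigned int ver, unsigned int size)
{
        return htons((ver << SVER_SHIFT) | (size & SVER_MASK));
}

static inline unsigned int sync_unpack_size(__be16 ver_size)
{
        return ntohs(ver_size) & SVER_MASK;
}

static inline unsigned int sync_unpack_ver(__be16 ver_size)
{
        return ntohs(ver_size) >> SVER_SHIFT;
}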
@@ -113,9 +249,16 @@ struct ip_vs_sync_mesg {
        /* ip_vs_sync_conn entries start here */
 };
 
-/* the maximum length of sync (sending/receiving) message */
-static int sync_send_mesg_maxlen;
-static int sync_recv_mesg_maxlen;
+/* Version 1 header */
+struct ip_vs_sync_mesg {
+       __u8                    reserved;       /* must be zero */
+       __u8                    syncid;
+       __u16                   size;
+       __u8                    nr_conns;
+       __s8                    version;        /* SYNC_PROTO_VER  */
+       __u16                   spare;
+       /* ip_vs_sync_conn entries start here */
+};
 
 struct ip_vs_sync_buff {
        struct list_head        list;
@@ -127,28 +270,6 @@ struct ip_vs_sync_buff {
        unsigned char           *end;
 };
 
-
-/* the sync_buff list head and the lock */
-static LIST_HEAD(ip_vs_sync_queue);
-static DEFINE_SPINLOCK(ip_vs_sync_lock);
-
-/* current sync_buff for accepting new conn entries */
-static struct ip_vs_sync_buff   *curr_sb = NULL;
-static DEFINE_SPINLOCK(curr_sb_lock);
-
-/* ipvs sync daemon state */
-volatile int ip_vs_sync_state = IP_VS_STATE_NONE;
-volatile int ip_vs_master_syncid = 0;
-volatile int ip_vs_backup_syncid = 0;
-
-/* multicast interface name */
-char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-
-/* sync daemon tasks */
-static struct task_struct *sync_master_thread;
-static struct task_struct *sync_backup_thread;
-
 /* multicast addr */
 static struct sockaddr_in mcast_addr = {
        .sin_family             = AF_INET,
@@ -156,41 +277,71 @@ static struct sockaddr_in mcast_addr = {
        .sin_addr.s_addr        = cpu_to_be32(IP_VS_SYNC_GROUP),
 };
 
+/*
+ * Copy of struct ip_vs_seq
+ * From unaligned network order to aligned host order
+ */
+static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho)
+{
+       ho->init_seq       = get_unaligned_be32(&no->init_seq);
+       ho->delta          = get_unaligned_be32(&no->delta);
+       ho->previous_delta = get_unaligned_be32(&no->previous_delta);
+}
+
+/*
+ * Copy of struct ip_vs_seq
+ * From Aligned host order to unaligned network order
+ */
+static void hton_seq(struct ip_vs_seq *ho, struct ip_vs_seq *no)
+{
+       put_unaligned_be32(ho->init_seq, &no->init_seq);
+       put_unaligned_be32(ho->delta, &no->delta);
+       put_unaligned_be32(ho->previous_delta, &no->previous_delta);
+}
 
-static inline struct ip_vs_sync_buff *sb_dequeue(void)
+static inline struct ip_vs_sync_buff *sb_dequeue(struct netns_ipvs *ipvs)
 {
        struct ip_vs_sync_buff *sb;
 
-       spin_lock_bh(&ip_vs_sync_lock);
-       if (list_empty(&ip_vs_sync_queue)) {
+       spin_lock_bh(&ipvs->sync_lock);
+       if (list_empty(&ipvs->sync_queue)) {
                sb = NULL;
        } else {
-               sb = list_entry(ip_vs_sync_queue.next,
+               sb = list_entry(ipvs->sync_queue.next,
                                struct ip_vs_sync_buff,
                                list);
                list_del(&sb->list);
        }
-       spin_unlock_bh(&ip_vs_sync_lock);
+       spin_unlock_bh(&ipvs->sync_lock);
 
        return sb;
 }
 
-static inline struct ip_vs_sync_buff * ip_vs_sync_buff_create(void)
+/*
+ * Create a new sync buffer for Version 1 proto.
+ */
+static inline struct ip_vs_sync_buff *
+ip_vs_sync_buff_create(struct netns_ipvs *ipvs)
 {
        struct ip_vs_sync_buff *sb;
 
        if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
                return NULL;
 
-       if (!(sb->mesg=kmalloc(sync_send_mesg_maxlen, GFP_ATOMIC))) {
+       sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC);
+       if (!sb->mesg) {
                kfree(sb);
                return NULL;
        }
+       sb->mesg->reserved = 0;  /* old nr_conns i.e. must be zero now */
+       sb->mesg->version = SYNC_PROTO_VER;
+       sb->mesg->syncid = ipvs->master_syncid;
+       sb->mesg->size = sizeof(struct ip_vs_sync_mesg);
        sb->mesg->nr_conns = 0;
-       sb->mesg->syncid = ip_vs_master_syncid;
-       sb->mesg->size = 4;
-       sb->head = (unsigned char *)sb->mesg + 4;
-       sb->end = (unsigned char *)sb->mesg + sync_send_mesg_maxlen;
+       sb->mesg->spare = 0;
+       sb->head = (unsigned char *)sb->mesg + sizeof(struct ip_vs_sync_mesg);
+       sb->end = (unsigned char *)sb->mesg + ipvs->send_mesg_maxlen;
+
        sb->firstuse = jiffies;
        return sb;
 }
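The ntoh_seq()/hton_seq() helpers above go through <asm/unaligned.h> because a version-1 entry is only padded to 32-bit boundaries inside the packed datagram, so a struct ip_vs_seq copied out of the receive buffer may not be naturally aligned. A minimal sketch of the same idea (read_seq_field() is hypothetical):

#include <asm/unaligned.h>

/* Sketch only: read one 32-bit field from a possibly unaligned
 * struct ip_vs_seq inside a received sync message.  A plain load could
 * trap on strict-alignment CPUs; get_unaligned_be32() is safe on all
 * architectures and converts from network byte order as well.
 */
static u32 read_seq_field(const void *field)
{
        return get_unaligned_be32(field);
}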
@@ -201,14 +352,16 @@ static inline void ip_vs_sync_buff_release(struct ip_vs_sync_buff *sb)
        kfree(sb);
 }
 
-static inline void sb_queue_tail(struct ip_vs_sync_buff *sb)
+static inline void sb_queue_tail(struct netns_ipvs *ipvs)
 {
-       spin_lock(&ip_vs_sync_lock);
-       if (ip_vs_sync_state & IP_VS_STATE_MASTER)
-               list_add_tail(&sb->list, &ip_vs_sync_queue);
+       struct ip_vs_sync_buff *sb = ipvs->sync_buff;
+
+       spin_lock(&ipvs->sync_lock);
+       if (ipvs->sync_state & IP_VS_STATE_MASTER)
+               list_add_tail(&sb->list, &ipvs->sync_queue);
        else
                ip_vs_sync_buff_release(sb);
-       spin_unlock(&ip_vs_sync_lock);
+       spin_unlock(&ipvs->sync_lock);
 }
 
 /*
@@ -216,36 +369,101 @@ static inline void sb_queue_tail(struct ip_vs_sync_buff *sb)
  *     than the specified time or the specified time is zero.
  */
 static inline struct ip_vs_sync_buff *
-get_curr_sync_buff(unsigned long time)
+get_curr_sync_buff(struct netns_ipvs *ipvs, unsigned long time)
 {
        struct ip_vs_sync_buff *sb;
 
-       spin_lock_bh(&curr_sb_lock);
-       if (curr_sb && (time == 0 ||
-                       time_before(jiffies - curr_sb->firstuse, time))) {
-               sb = curr_sb;
-               curr_sb = NULL;
+       spin_lock_bh(&ipvs->sync_buff_lock);
+       if (ipvs->sync_buff &&
+           time_after_eq(jiffies - ipvs->sync_buff->firstuse, time)) {
+               sb = ipvs->sync_buff;
+               ipvs->sync_buff = NULL;
        } else
                sb = NULL;
-       spin_unlock_bh(&curr_sb_lock);
+       spin_unlock_bh(&ipvs->sync_buff_lock);
        return sb;
 }
 
+/*
+ * Switch mode from sending version 0 or 1
+ *  - must handle sync_buf
+ */
+void ip_vs_sync_switch_mode(struct net *net, int mode)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       if (!(ipvs->sync_state & IP_VS_STATE_MASTER))
+               return;
+       if (mode == ipvs->sysctl_sync_ver || !ipvs->sync_buff)
+               return;
+
+       spin_lock_bh(&ipvs->sync_buff_lock);
+       /* Buffer empty? Then let buf_create do the job */
+       if (ipvs->sync_buff->mesg->size <=  sizeof(struct ip_vs_sync_mesg)) {
+               kfree(ipvs->sync_buff);
+               ipvs->sync_buff = NULL;
+       } else {
+               spin_lock_bh(&ipvs->sync_lock);
+               if (ipvs->sync_state & IP_VS_STATE_MASTER)
+                       list_add_tail(&ipvs->sync_buff->list,
+                                     &ipvs->sync_queue);
+               else
+                       ip_vs_sync_buff_release(ipvs->sync_buff);
+               spin_unlock_bh(&ipvs->sync_lock);
+       }
+       spin_unlock_bh(&ipvs->sync_buff_lock);
+}
 
 /*
+ * Create a new sync buffer for Version 0 proto.
+ */
+static inline struct ip_vs_sync_buff *
+ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs)
+{
+       struct ip_vs_sync_buff *sb;
+       struct ip_vs_sync_mesg_v0 *mesg;
+
+       if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
+               return NULL;
+
+       sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC);
+       if (!sb->mesg) {
+               kfree(sb);
+               return NULL;
+       }
+       mesg = (struct ip_vs_sync_mesg_v0 *)sb->mesg;
+       mesg->nr_conns = 0;
+       mesg->syncid = ipvs->master_syncid;
+       mesg->size = sizeof(struct ip_vs_sync_mesg_v0);
+       sb->head = (unsigned char *)mesg + sizeof(struct ip_vs_sync_mesg_v0);
+       sb->end = (unsigned char *)mesg + ipvs->send_mesg_maxlen;
+       sb->firstuse = jiffies;
+       return sb;
+}
+
+/*
+ *      Version 0, can be switched on via sysctl.
  *      Add an ip_vs_conn information into the current sync_buff.
- *      Called by ip_vs_in.
  */
-void ip_vs_sync_conn(struct ip_vs_conn *cp)
+void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp)
 {
-       struct ip_vs_sync_mesg *m;
-       struct ip_vs_sync_conn *s;
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_sync_mesg_v0 *m;
+       struct ip_vs_sync_conn_v0 *s;
        int len;
 
-       spin_lock(&curr_sb_lock);
-       if (!curr_sb) {
-               if (!(curr_sb=ip_vs_sync_buff_create())) {
-                       spin_unlock(&curr_sb_lock);
+       if (unlikely(cp->af != AF_INET))
+               return;
+       /* Do not sync ONE PACKET */
+       if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+               return;
+
+       spin_lock(&ipvs->sync_buff_lock);
+       if (!ipvs->sync_buff) {
+               ipvs->sync_buff =
+                       ip_vs_sync_buff_create_v0(ipvs);
+               if (!ipvs->sync_buff) {
+                       spin_unlock(&ipvs->sync_buff_lock);
                        pr_err("ip_vs_sync_buff_create failed.\n");
                        return;
                }
@@ -253,10 +471,11 @@ void ip_vs_sync_conn(struct ip_vs_conn *cp)
 
        len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE :
                SIMPLE_CONN_SIZE;
-       m = curr_sb->mesg;
-       s = (struct ip_vs_sync_conn *)curr_sb->head;
+       m = (struct ip_vs_sync_mesg_v0 *)ipvs->sync_buff->mesg;
+       s = (struct ip_vs_sync_conn_v0 *)ipvs->sync_buff->head;
 
        /* copy members */
+       s->reserved = 0;
        s->protocol = cp->protocol;
        s->cport = cp->cport;
        s->vport = cp->vport;
@@ -274,83 +493,366 @@ void ip_vs_sync_conn(struct ip_vs_conn *cp)
 
        m->nr_conns++;
        m->size += len;
-       curr_sb->head += len;
+       ipvs->sync_buff->head += len;
 
        /* check if there is a space for next one */
-       if (curr_sb->head+FULL_CONN_SIZE > curr_sb->end) {
-               sb_queue_tail(curr_sb);
-               curr_sb = NULL;
+       if (ipvs->sync_buff->head + FULL_CONN_SIZE > ipvs->sync_buff->end) {
+               sb_queue_tail(ipvs);
+               ipvs->sync_buff = NULL;
        }
-       spin_unlock(&curr_sb_lock);
+       spin_unlock(&ipvs->sync_buff_lock);
 
        /* synchronize its controller if it has */
        if (cp->control)
-               ip_vs_sync_conn(cp->control);
+               ip_vs_sync_conn(net, cp->control);
+}
+
+/*
+ *      Add an ip_vs_conn information into the current sync_buff.
+ *      Called by ip_vs_in.
+ *      Sending Version 1 messages
+ */
+void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_sync_mesg *m;
+       union ip_vs_sync_conn *s;
+       __u8 *p;
+       unsigned int len, pe_name_len, pad;
+
+       /* Handle old version of the protocol */
+       if (ipvs->sysctl_sync_ver == 0) {
+               ip_vs_sync_conn_v0(net, cp);
+               return;
+       }
+       /* Do not sync ONE PACKET */
+       if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+               goto control;
+sloop:
+       /* Sanity checks */
+       pe_name_len = 0;
+       if (cp->pe_data_len) {
+               if (!cp->pe_data || !cp->dest) {
+                       IP_VS_ERR_RL("SYNC, connection pe_data invalid\n");
+                       return;
+               }
+               pe_name_len = strnlen(cp->pe->name, IP_VS_PENAME_MAXLEN);
+       }
+
+       spin_lock(&ipvs->sync_buff_lock);
+
+#ifdef CONFIG_IP_VS_IPV6
+       if (cp->af == AF_INET6)
+               len = sizeof(struct ip_vs_sync_v6);
+       else
+#endif
+               len = sizeof(struct ip_vs_sync_v4);
+
+       if (cp->flags & IP_VS_CONN_F_SEQ_MASK)
+               len += sizeof(struct ip_vs_sync_conn_options) + 2;
+
+       if (cp->pe_data_len)
+               len += cp->pe_data_len + 2;     /* + Param hdr field */
+       if (pe_name_len)
+               len += pe_name_len + 2;
+
+       /* check if there is a space for this one  */
+       pad = 0;
+       if (ipvs->sync_buff) {
+               pad = (4 - (size_t)ipvs->sync_buff->head) & 3;
+               if (ipvs->sync_buff->head + len + pad > ipvs->sync_buff->end) {
+                       sb_queue_tail(ipvs);
+                       ipvs->sync_buff = NULL;
+                       pad = 0;
+               }
+       }
+
+       if (!ipvs->sync_buff) {
+               ipvs->sync_buff = ip_vs_sync_buff_create(ipvs);
+               if (!ipvs->sync_buff) {
+                       spin_unlock(&ipvs->sync_buff_lock);
+                       pr_err("ip_vs_sync_buff_create failed.\n");
+                       return;
+               }
+       }
+
+       m = ipvs->sync_buff->mesg;
+       p = ipvs->sync_buff->head;
+       ipvs->sync_buff->head += pad + len;
+       m->size += pad + len;
+       /* Add padding (if any) left over from the prev. sync_conn */
+       while (pad--)
+               *(p++) = 0;
+
+       s = (union ip_vs_sync_conn *)p;
+
+       /* Set message type  & copy members */
+       s->v4.type = (cp->af == AF_INET6 ? STYPE_F_INET6 : 0);
+       s->v4.ver_size = htons(len & SVER_MASK);        /* Version 0 */
+       s->v4.flags = htonl(cp->flags & ~IP_VS_CONN_F_HASHED);
+       s->v4.state = htons(cp->state);
+       s->v4.protocol = cp->protocol;
+       s->v4.cport = cp->cport;
+       s->v4.vport = cp->vport;
+       s->v4.dport = cp->dport;
+       s->v4.fwmark = htonl(cp->fwmark);
+       s->v4.timeout = htonl(cp->timeout / HZ);
+       m->nr_conns++;
+
+#ifdef CONFIG_IP_VS_IPV6
+       if (cp->af == AF_INET6) {
+               p += sizeof(struct ip_vs_sync_v6);
+               ipv6_addr_copy(&s->v6.caddr, &cp->caddr.in6);
+               ipv6_addr_copy(&s->v6.vaddr, &cp->vaddr.in6);
+               ipv6_addr_copy(&s->v6.daddr, &cp->daddr.in6);
+       } else
+#endif
+       {
+               p += sizeof(struct ip_vs_sync_v4);      /* options ptr */
+               s->v4.caddr = cp->caddr.ip;
+               s->v4.vaddr = cp->vaddr.ip;
+               s->v4.daddr = cp->daddr.ip;
+       }
+       if (cp->flags & IP_VS_CONN_F_SEQ_MASK) {
+               *(p++) = IPVS_OPT_SEQ_DATA;
+               *(p++) = sizeof(struct ip_vs_sync_conn_options);
+               hton_seq((struct ip_vs_seq *)p, &cp->in_seq);
+               p += sizeof(struct ip_vs_seq);
+               hton_seq((struct ip_vs_seq *)p, &cp->out_seq);
+               p += sizeof(struct ip_vs_seq);
+       }
+       /* Handle pe data */
+       if (cp->pe_data_len && cp->pe_data) {
+               *(p++) = IPVS_OPT_PE_DATA;
+               *(p++) = cp->pe_data_len;
+               memcpy(p, cp->pe_data, cp->pe_data_len);
+               p += cp->pe_data_len;
+               if (pe_name_len) {
+                       /* Add PE_NAME */
+                       *(p++) = IPVS_OPT_PE_NAME;
+                       *(p++) = pe_name_len;
+                       memcpy(p, cp->pe->name, pe_name_len);
+                       p += pe_name_len;
+               }
+       }
+
+       spin_unlock(&ipvs->sync_buff_lock);
+
+control:
+       /* synchronize its controller if it has */
+       cp = cp->control;
+       if (!cp)
+               return;
+       /*
+        * Reduce sync rate for templates
+        * i.e only increment in_pkts for Templates.
+        */
+       if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
+               int pkts = atomic_add_return(1, &cp->in_pkts);
+
+               if (pkts % ipvs->sysctl_sync_threshold[1] != 1)
+                       return;
+       }
+       goto sloop;
 }
 
+/*
+ *  fill_param used by version 1
+ */
 static inline int
-ip_vs_conn_fill_param_sync(int af, int protocol,
-                          const union nf_inet_addr *caddr, __be16 cport,
-                          const union nf_inet_addr *vaddr, __be16 vport,
-                          struct ip_vs_conn_param *p)
+ip_vs_conn_fill_param_sync(struct net *net, int af, union ip_vs_sync_conn *sc,
+                          struct ip_vs_conn_param *p,
+                          __u8 *pe_data, unsigned int pe_data_len,
+                          __u8 *pe_name, unsigned int pe_name_len)
 {
-       /* XXX: Need to take into account persistence engine */
-       ip_vs_conn_fill_param(af, protocol, caddr, cport, vaddr, vport, p);
+#ifdef CONFIG_IP_VS_IPV6
+       if (af == AF_INET6)
+               ip_vs_conn_fill_param(net, af, sc->v6.protocol,
+                                     (const union nf_inet_addr *)&sc->v6.caddr,
+                                     sc->v6.cport,
+                                     (const union nf_inet_addr *)&sc->v6.vaddr,
+                                     sc->v6.vport, p);
+       else
+#endif
+               ip_vs_conn_fill_param(net, af, sc->v4.protocol,
+                                     (const union nf_inet_addr *)&sc->v4.caddr,
+                                     sc->v4.cport,
+                                     (const union nf_inet_addr *)&sc->v4.vaddr,
+                                     sc->v4.vport, p);
+       /* Handle pe data */
+       if (pe_data_len) {
+               if (pe_name_len) {
+                       char buff[IP_VS_PENAME_MAXLEN+1];
+
+                       memcpy(buff, pe_name, pe_name_len);
+                       buff[pe_name_len]=0;
+                       p->pe = __ip_vs_pe_getbyname(buff);
+                       if (!p->pe) {
+                               IP_VS_DBG(3, "BACKUP, no %s engine found/loaded\n",
+                                            buff);
+                               return 1;
+                       }
+               } else {
+                       IP_VS_ERR_RL("BACKUP, Invalid PE parameters\n");
+                       return 1;
+               }
+
+               p->pe_data = kmalloc(pe_data_len, GFP_ATOMIC);
+               if (!p->pe_data) {
+                       if (p->pe->module)
+                               module_put(p->pe->module);
+                       return -ENOMEM;
+               }
+               memcpy(p->pe_data, pe_data, pe_data_len);
+               p->pe_data_len = pe_data_len;
+       }
        return 0;
 }
 
 /*
- *      Process received multicast message and create the corresponding
- *      ip_vs_conn entries.
+ *  Connection Add / Update.
+ *  Common for version 0 and 1 reception of backup sync_conns.
+ *  Param: ...
+ *         timeout is in sec.
  */
-static void ip_vs_process_message(const char *buffer, const size_t buflen)
+static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
+                           unsigned int flags, unsigned int state,
+                           unsigned int protocol, unsigned int type,
+                           const union nf_inet_addr *daddr, __be16 dport,
+                           unsigned long timeout, __u32 fwmark,
+                           struct ip_vs_sync_conn_options *opt)
 {
-       struct ip_vs_sync_mesg *m = (struct ip_vs_sync_mesg *)buffer;
-       struct ip_vs_sync_conn *s;
-       struct ip_vs_sync_conn_options *opt;
-       struct ip_vs_conn *cp;
-       struct ip_vs_protocol *pp;
        struct ip_vs_dest *dest;
-       struct ip_vs_conn_param param;
-       char *p;
-       int i;
+       struct ip_vs_conn *cp;
+       struct netns_ipvs *ipvs = net_ipvs(net);
 
-       if (buflen < sizeof(struct ip_vs_sync_mesg)) {
-               IP_VS_ERR_RL("sync message header too short\n");
-               return;
-       }
+       if (!(flags & IP_VS_CONN_F_TEMPLATE))
+               cp = ip_vs_conn_in_get(param);
+       else
+               cp = ip_vs_ct_in_get(param);
 
-       /* Convert size back to host byte order */
-       m->size = ntohs(m->size);
+       if (cp && param->pe_data)       /* Free pe_data */
+               kfree(param->pe_data);
+       if (!cp) {
+               /*
+                * Find the appropriate destination for the connection.
+                * If it is not found the connection will remain unbound
+                * but still handled.
+                */
+               dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr,
+                                      param->vport, protocol, fwmark);
 
-       if (buflen != m->size) {
-               IP_VS_ERR_RL("bogus sync message size\n");
-               return;
+               /* Set the appropriate activity flag */
+               if (protocol == IPPROTO_TCP) {
+                       if (state != IP_VS_TCP_S_ESTABLISHED)
+                               flags |= IP_VS_CONN_F_INACTIVE;
+                       else
+                               flags &= ~IP_VS_CONN_F_INACTIVE;
+               } else if (protocol == IPPROTO_SCTP) {
+                       if (state != IP_VS_SCTP_S_ESTABLISHED)
+                               flags |= IP_VS_CONN_F_INACTIVE;
+                       else
+                               flags &= ~IP_VS_CONN_F_INACTIVE;
+               }
+               cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark);
+               if (dest)
+                       atomic_dec(&dest->refcnt);
+               if (!cp) {
+                       if (param->pe_data)
+                               kfree(param->pe_data);
+                       IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
+                       return;
+               }
+       } else if (!cp->dest) {
+               dest = ip_vs_try_bind_dest(cp);
+               if (dest)
+                       atomic_dec(&dest->refcnt);
+       } else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) &&
+               (cp->state != state)) {
+               /* update active/inactive flag for the connection */
+               dest = cp->dest;
+               if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
+                       (state != IP_VS_TCP_S_ESTABLISHED)) {
+                       atomic_dec(&dest->activeconns);
+                       atomic_inc(&dest->inactconns);
+                       cp->flags |= IP_VS_CONN_F_INACTIVE;
+               } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
+                       (state == IP_VS_TCP_S_ESTABLISHED)) {
+                       atomic_inc(&dest->activeconns);
+                       atomic_dec(&dest->inactconns);
+                       cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+               }
+       } else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) &&
+               (cp->state != state)) {
+               dest = cp->dest;
+               if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
+               (state != IP_VS_SCTP_S_ESTABLISHED)) {
+                       atomic_dec(&dest->activeconns);
+                       atomic_inc(&dest->inactconns);
+                       cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+               }
        }
 
-       /* SyncID sanity check */
-       if (ip_vs_backup_syncid != 0 && m->syncid != ip_vs_backup_syncid) {
-               IP_VS_DBG(7, "Ignoring incoming msg with syncid = %d\n",
-                         m->syncid);
-               return;
+       if (opt)
+               memcpy(&cp->in_seq, opt, sizeof(*opt));
+       atomic_set(&cp->in_pkts, ipvs->sysctl_sync_threshold[0]);
+       cp->state = state;
+       cp->old_state = cp->state;
+       /*
+        * For Ver 0 messages style
+        *  - Not possible to recover the right timeout for templates
+        *  - can not find the right fwmark
+        *    virtual service. If needed, we can do it for
+        *    non-fwmark persistent services.
+        * Ver 1 messages style.
+        *  - No problem.
+        */
+       if (timeout) {
+               if (timeout > MAX_SCHEDULE_TIMEOUT / HZ)
+                       timeout = MAX_SCHEDULE_TIMEOUT / HZ;
+               cp->timeout = timeout*HZ;
+       } else {
+               struct ip_vs_proto_data *pd;
+
+               pd = ip_vs_proto_data_get(net, protocol);
+               if (!(flags & IP_VS_CONN_F_TEMPLATE) && pd && pd->timeout_table)
+                       cp->timeout = pd->timeout_table[state];
+               else
+                       cp->timeout = (3*60*HZ);
        }
+       ip_vs_conn_put(cp);
+}
 
-       p = (char *)buffer + sizeof(struct ip_vs_sync_mesg);
+/*
+ *  Process received multicast message for Version 0
+ */
+static void ip_vs_process_message_v0(struct net *net, const char *buffer,
+                                    const size_t buflen)
+{
+       struct ip_vs_sync_mesg_v0 *m = (struct ip_vs_sync_mesg_v0 *)buffer;
+       struct ip_vs_sync_conn_v0 *s;
+       struct ip_vs_sync_conn_options *opt;
+       struct ip_vs_protocol *pp;
+       struct ip_vs_conn_param param;
+       char *p;
+       int i;
+
+       p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0);
        for (i=0; i<m->nr_conns; i++) {
                unsigned flags, state;
 
                if (p + SIMPLE_CONN_SIZE > buffer+buflen) {
-                       IP_VS_ERR_RL("bogus conn in sync message\n");
+                       IP_VS_ERR_RL("BACKUP v0, bogus conn\n");
                        return;
                }
-               s = (struct ip_vs_sync_conn *) p;
+               s = (struct ip_vs_sync_conn_v0 *) p;
                flags = ntohs(s->flags) | IP_VS_CONN_F_SYNC;
                flags &= ~IP_VS_CONN_F_HASHED;
                if (flags & IP_VS_CONN_F_SEQ_MASK) {
                        opt = (struct ip_vs_sync_conn_options *)&s[1];
                        p += FULL_CONN_SIZE;
                        if (p > buffer+buflen) {
-                               IP_VS_ERR_RL("bogus conn options in sync message\n");
+                               IP_VS_ERR_RL("BACKUP v0, Dropping buffer, bogus conn options\n");
                                return;
                        }
                } else {
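The version-1 sender in ip_vs_sync_conn() above starts every entry on a 4-byte boundary: it computes pad = (4 - (size_t)head) & 3 and writes that many zero bytes before the fixed part and the optional type/length/data parameters. A small sketch of the rule (sync_conn_pad() is hypothetical):

/* Sketch only: number of padding bytes the sender emits so the next
 * sync_conn entry starts 32-bit aligned within the message.
 */
static unsigned int sync_conn_pad(const unsigned char *head)
{
        return (4 - (size_t)head) & 3;  /* 0, 1, 2 or 3 bytes */
}
/* e.g. a head address ending in ...6 gives pad == 2, so the next entry
 * starts on the following 4-byte boundary.
 */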
@@ -362,118 +864,286 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
                if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
                        pp = ip_vs_proto_get(s->protocol);
                        if (!pp) {
-                               IP_VS_ERR_RL("Unsupported protocol %u in sync msg\n",
+                               IP_VS_DBG(2, "BACKUP v0, Unsupported protocol %u\n",
                                        s->protocol);
                                continue;
                        }
                        if (state >= pp->num_states) {
-                               IP_VS_DBG(2, "Invalid %s state %u in sync msg\n",
+                               IP_VS_DBG(2, "BACKUP v0, Invalid %s state %u\n",
                                        pp->name, state);
                                continue;
                        }
                } else {
                        /* protocol in templates is not used for state/timeout */
-                       pp = NULL;
                        if (state > 0) {
-                               IP_VS_DBG(2, "Invalid template state %u in sync msg\n",
+                               IP_VS_DBG(2, "BACKUP v0, Invalid template state %u\n",
                                        state);
                                state = 0;
                        }
                }
 
-               {
-                       if (ip_vs_conn_fill_param_sync(AF_INET, s->protocol,
-                                             (union nf_inet_addr *)&s->caddr,
-                                             s->cport,
-                                             (union nf_inet_addr *)&s->vaddr,
-                                             s->vport, &param)) {
-                               pr_err("ip_vs_conn_fill_param_sync failed");
-                               return;
+               ip_vs_conn_fill_param(net, AF_INET, s->protocol,
+                                     (const union nf_inet_addr *)&s->caddr,
+                                     s->cport,
+                                     (const union nf_inet_addr *)&s->vaddr,
+                                     s->vport, &param);
+
+               /* Send timeout as Zero */
+               ip_vs_proc_conn(net, &param, flags, state, s->protocol, AF_INET,
+                               (union nf_inet_addr *)&s->daddr, s->dport,
+                               0, 0, opt);
+       }
+}
+
+/*
+ * Handle options
+ */
+static inline int ip_vs_proc_seqopt(__u8 *p, unsigned int plen,
+                                   __u32 *opt_flags,
+                                   struct ip_vs_sync_conn_options *opt)
+{
+       struct ip_vs_sync_conn_options *topt;
+
+       topt = (struct ip_vs_sync_conn_options *)p;
+
+       if (plen != sizeof(struct ip_vs_sync_conn_options)) {
+               IP_VS_DBG(2, "BACKUP, bogus conn options length\n");
+               return -EINVAL;
+       }
+       if (*opt_flags & IPVS_OPT_F_SEQ_DATA) {
+               IP_VS_DBG(2, "BACKUP, conn options found twice\n");
+               return -EINVAL;
+       }
+       ntoh_seq(&topt->in_seq, &opt->in_seq);
+       ntoh_seq(&topt->out_seq, &opt->out_seq);
+       *opt_flags |= IPVS_OPT_F_SEQ_DATA;
+       return 0;
+}
+
+static int ip_vs_proc_str(__u8 *p, unsigned int plen, unsigned int *data_len,
+                         __u8 **data, unsigned int maxlen,
+                         __u32 *opt_flags, __u32 flag)
+{
+       if (plen > maxlen) {
+               IP_VS_DBG(2, "BACKUP, bogus par.data len > %d\n", maxlen);
+               return -EINVAL;
+       }
+       if (*opt_flags & flag) {
+               IP_VS_DBG(2, "BACKUP, Par.data found twice 0x%x\n", flag);
+               return -EINVAL;
+       }
+       *data_len = plen;
+       *data = p;
+       *opt_flags |= flag;
+       return 0;
+}
+/*
+ *   Process a Version 1 sync. connection
+ */
+static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end)
+{
+       struct ip_vs_sync_conn_options opt;
+       union  ip_vs_sync_conn *s;
+       struct ip_vs_protocol *pp;
+       struct ip_vs_conn_param param;
+       __u32 flags;
+       unsigned int af, state, pe_data_len=0, pe_name_len=0;
+       __u8 *pe_data=NULL, *pe_name=NULL;
+       __u32 opt_flags=0;
+       int retc=0;
+
+       s = (union ip_vs_sync_conn *) p;
+
+       if (s->v6.type & STYPE_F_INET6) {
+#ifdef CONFIG_IP_VS_IPV6
+               af = AF_INET6;
+               p += sizeof(struct ip_vs_sync_v6);
+#else
+               IP_VS_DBG(3, "BACKUP, IPv6 msg received, and IPVS is not compiled for IPv6\n");
+               retc = 10;
+               goto out;
+#endif
+       } else if (!s->v4.type) {
+               af = AF_INET;
+               p += sizeof(struct ip_vs_sync_v4);
+       } else {
+               return -10;
+       }
+       if (p > msg_end)
+               return -20;
+
+       /* Process optional params check Type & Len. */
+       while (p < msg_end) {
+               int ptype;
+               int plen;
+
+               if (p+2 > msg_end)
+                       return -30;
+               ptype = *(p++);
+               plen  = *(p++);
+
+               if (!plen || ((p + plen) > msg_end))
+                       return -40;
+               /* Handle seq option  p = param data */
+               switch (ptype & ~IPVS_OPT_F_PARAM) {
+               case IPVS_OPT_SEQ_DATA:
+                       if (ip_vs_proc_seqopt(p, plen, &opt_flags, &opt))
+                               return -50;
+                       break;
+
+               case IPVS_OPT_PE_DATA:
+                       if (ip_vs_proc_str(p, plen, &pe_data_len, &pe_data,
+                                          IP_VS_PEDATA_MAXLEN, &opt_flags,
+                                          IPVS_OPT_F_PE_DATA))
+                               return -60;
+                       break;
+
+               case IPVS_OPT_PE_NAME:
+                       if (ip_vs_proc_str(p, plen, &pe_name_len, &pe_name,
+                                          IP_VS_PENAME_MAXLEN, &opt_flags,
+                                          IPVS_OPT_F_PE_NAME))
+                               return -70;
+                       break;
+
+               default:
+                       /* Param data mandatory ? */
+                       if (!(ptype & IPVS_OPT_F_PARAM)) {
+                               IP_VS_DBG(3, "BACKUP, Unknown mandatory param %d found\n",
+                                         ptype & ~IPVS_OPT_F_PARAM);
+                               retc = 20;
+                               goto out;
                        }
-                       if (!(flags & IP_VS_CONN_F_TEMPLATE))
-                               cp = ip_vs_conn_in_get(&param);
-                       else
-                               cp = ip_vs_ct_in_get(&param);
                }
-               if (!cp) {
-                       /*
-                        * Find the appropriate destination for the connection.
-                        * If it is not found the connection will remain unbound
-                        * but still handled.
-                        */
-                       dest = ip_vs_find_dest(AF_INET,
-                                              (union nf_inet_addr *)&s->daddr,
-                                              s->dport,
-                                              (union nf_inet_addr *)&s->vaddr,
-                                              s->vport,
-                                              s->protocol);
-                       /*  Set the approprite ativity flag */
-                       if (s->protocol == IPPROTO_TCP) {
-                               if (state != IP_VS_TCP_S_ESTABLISHED)
-                                       flags |= IP_VS_CONN_F_INACTIVE;
-                               else
-                                       flags &= ~IP_VS_CONN_F_INACTIVE;
-                       } else if (s->protocol == IPPROTO_SCTP) {
-                               if (state != IP_VS_SCTP_S_ESTABLISHED)
-                                       flags |= IP_VS_CONN_F_INACTIVE;
-                               else
-                                       flags &= ~IP_VS_CONN_F_INACTIVE;
+               p += plen;  /* Next option */
+       }
+
+       /* Get flags and Mask off unsupported */
+       flags  = ntohl(s->v4.flags) & IP_VS_CONN_F_BACKUP_MASK;
+       flags |= IP_VS_CONN_F_SYNC;
+       state = ntohs(s->v4.state);
+
+       if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
+               pp = ip_vs_proto_get(s->v4.protocol);
+               if (!pp) {
+                       IP_VS_DBG(3,"BACKUP, Unsupported protocol %u\n",
+                               s->v4.protocol);
+                       retc = 30;
+                       goto out;
+               }
+               if (state >= pp->num_states) {
+                       IP_VS_DBG(3, "BACKUP, Invalid %s state %u\n",
+                               pp->name, state);
+                       retc = 40;
+                       goto out;
+               }
+       } else {
+               /* protocol in templates is not used for state/timeout */
+               if (state > 0) {
+                       IP_VS_DBG(3, "BACKUP, Invalid template state %u\n",
+                               state);
+                       state = 0;
+               }
+       }
+       if (ip_vs_conn_fill_param_sync(net, af, s, &param, pe_data,
+                                      pe_data_len, pe_name, pe_name_len)) {
+               retc = 50;
+               goto out;
+       }
+       /* If only IPv4, just silent skip IPv6 */
+       if (af == AF_INET)
+               ip_vs_proc_conn(net, &param, flags, state, s->v4.protocol, af,
+                               (union nf_inet_addr *)&s->v4.daddr, s->v4.dport,
+                               ntohl(s->v4.timeout), ntohl(s->v4.fwmark),
+                               (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
+                               );
+#ifdef CONFIG_IP_VS_IPV6
+       else
+               ip_vs_proc_conn(net, &param, flags, state, s->v6.protocol, af,
+                               (union nf_inet_addr *)&s->v6.daddr, s->v6.dport,
+                               ntohl(s->v6.timeout), ntohl(s->v6.fwmark),
+                               (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
+                               );
+#endif
+       return 0;
+       /* Error exit */
+out:
+       IP_VS_DBG(2, "BACKUP, Single msg dropped err:%d\n", retc);
+       return retc;
+
+}
+/*
+ *      Process received multicast message and create the corresponding
+ *      ip_vs_conn entries.
+ *      Handles Version 0 & 1
+ */
+static void ip_vs_process_message(struct net *net, __u8 *buffer,
+                                 const size_t buflen)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_sync_mesg *m2 = (struct ip_vs_sync_mesg *)buffer;
+       __u8 *p, *msg_end;
+       int i, nr_conns;
+
+       if (buflen < sizeof(struct ip_vs_sync_mesg_v0)) {
+               IP_VS_DBG(2, "BACKUP, message header too short\n");
+               return;
+       }
+       /* Convert size back to host byte order */
+       m2->size = ntohs(m2->size);
+
+       if (buflen != m2->size) {
+               IP_VS_DBG(2, "BACKUP, bogus message size\n");
+               return;
+       }
+       /* SyncID sanity check */
+       if (ipvs->backup_syncid != 0 && m2->syncid != ipvs->backup_syncid) {
+               IP_VS_DBG(7, "BACKUP, Ignoring syncid = %d\n", m2->syncid);
+               return;
+       }
+       /* Handle version 1  message */
+       if ((m2->version == SYNC_PROTO_VER) && (m2->reserved == 0)
+           && (m2->spare == 0)) {
+
+               msg_end = buffer + sizeof(struct ip_vs_sync_mesg);
+               nr_conns = m2->nr_conns;
+
+               for (i=0; i<nr_conns; i++) {
+                       union ip_vs_sync_conn *s;
+                       unsigned size;
+                       int retc;
+
+                       p = msg_end;
+                       if (p + sizeof(s->v4) > buffer+buflen) {
+                               IP_VS_ERR_RL("BACKUP, Dropping buffer, too small\n");
+                               return;
                        }
-                       cp = ip_vs_conn_new(&param,
-                                           (union nf_inet_addr *)&s->daddr,
-                                           s->dport, flags, dest);
-                       if (dest)
-                               atomic_dec(&dest->refcnt);
-                       if (!cp) {
-                               pr_err("ip_vs_conn_new failed\n");
+                       s = (union ip_vs_sync_conn *)p;
+                       size = ntohs(s->v4.ver_size) & SVER_MASK;
+                       msg_end = p + size;
+                       /* Basic sanity checks */
+                       if (msg_end  > buffer+buflen) {
+                               IP_VS_ERR_RL("BACKUP, Dropping buffer, msg > buffer\n");
                                return;
                        }
-               } else if (!cp->dest) {
-                       dest = ip_vs_try_bind_dest(cp);
-                       if (dest)
-                               atomic_dec(&dest->refcnt);
-               } else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) &&
-                          (cp->state != state)) {
-                       /* update active/inactive flag for the connection */
-                       dest = cp->dest;
-                       if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
-                               (state != IP_VS_TCP_S_ESTABLISHED)) {
-                               atomic_dec(&dest->activeconns);
-                               atomic_inc(&dest->inactconns);
-                               cp->flags |= IP_VS_CONN_F_INACTIVE;
-                       } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
-                               (state == IP_VS_TCP_S_ESTABLISHED)) {
-                               atomic_inc(&dest->activeconns);
-                               atomic_dec(&dest->inactconns);
-                               cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+                       if (ntohs(s->v4.ver_size) >> SVER_SHIFT) {
+                               IP_VS_ERR_RL("BACKUP, Dropping buffer, Unknown version %d\n",
+                                             ntohs(s->v4.ver_size) >> SVER_SHIFT);
+                               return;
                        }
-               } else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) &&
-                          (cp->state != state)) {
-                       dest = cp->dest;
-                       if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
-                            (state != IP_VS_SCTP_S_ESTABLISHED)) {
-                           atomic_dec(&dest->activeconns);
-                           atomic_inc(&dest->inactconns);
-                           cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+                       /* Process a single sync_conn */
+                       retc = ip_vs_proc_sync_conn(net, p, msg_end);
+                       if (retc < 0) {
+                               IP_VS_ERR_RL("BACKUP, Dropping buffer, Err: %d in decoding\n",
+                                            retc);
+                               return;
                        }
+                       /* Make sure we have 32 bit alignment */
+                       msg_end = p + ((size + 3) & ~3);
                }
-
-               if (opt)
-                       memcpy(&cp->in_seq, opt, sizeof(*opt));
-               atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
-               cp->state = state;
-               cp->old_state = cp->state;
-               /*
-                * We can not recover the right timeout for templates
-                * in all cases, we can not find the right fwmark
-                * virtual service. If needed, we can do it for
-                * non-fwmark persistent services.
-                */
-               if (!(flags & IP_VS_CONN_F_TEMPLATE) && pp->timeout_table)
-                       cp->timeout = pp->timeout_table[state];
-               else
-                       cp->timeout = (3*60*HZ);
-               ip_vs_conn_put(cp);
+       } else {
+               /* Old type of message */
+               ip_vs_process_message_v0(net, buffer, buflen);
+               return;
        }
 }
 
@@ -511,8 +1181,10 @@ static int set_mcast_if(struct sock *sk, char *ifname)
 {
        struct net_device *dev;
        struct inet_sock *inet = inet_sk(sk);
+       struct net *net = sock_net(sk);
 
-       if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
+       dev = __dev_get_by_name(net, ifname);
+       if (!dev)
                return -ENODEV;
 
        if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
@@ -531,30 +1203,33 @@ static int set_mcast_if(struct sock *sk, char *ifname)
  *     Set the maximum length of sync message according to the
  *     specified interface's MTU.
  */
-static int set_sync_mesg_maxlen(int sync_state)
+static int set_sync_mesg_maxlen(struct net *net, int sync_state)
 {
+       struct netns_ipvs *ipvs = net_ipvs(net);
        struct net_device *dev;
        int num;
 
        if (sync_state == IP_VS_STATE_MASTER) {
-               if ((dev = __dev_get_by_name(&init_net, ip_vs_master_mcast_ifn)) == NULL)
+               dev = __dev_get_by_name(net, ipvs->master_mcast_ifn);
+               if (!dev)
                        return -ENODEV;
 
                num = (dev->mtu - sizeof(struct iphdr) -
                       sizeof(struct udphdr) -
                       SYNC_MESG_HEADER_LEN - 20) / SIMPLE_CONN_SIZE;
-               sync_send_mesg_maxlen = SYNC_MESG_HEADER_LEN +
+               ipvs->send_mesg_maxlen = SYNC_MESG_HEADER_LEN +
                        SIMPLE_CONN_SIZE * min(num, MAX_CONNS_PER_SYNCBUFF);
                IP_VS_DBG(7, "setting the maximum length of sync sending "
-                         "message %d.\n", sync_send_mesg_maxlen);
+                         "message %d.\n", ipvs->send_mesg_maxlen);
        } else if (sync_state == IP_VS_STATE_BACKUP) {
-               if ((dev = __dev_get_by_name(&init_net, ip_vs_backup_mcast_ifn)) == NULL)
+               dev = __dev_get_by_name(net, ipvs->backup_mcast_ifn);
+               if (!dev)
                        return -ENODEV;
 
-               sync_recv_mesg_maxlen = dev->mtu -
+               ipvs->recv_mesg_maxlen = dev->mtu -
                        sizeof(struct iphdr) - sizeof(struct udphdr);
                IP_VS_DBG(7, "setting the maximum length of sync receiving "
-                         "message %d.\n", sync_recv_mesg_maxlen);
+                         "message %d.\n", ipvs->recv_mesg_maxlen);
        }
 
        return 0;
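set_sync_mesg_maxlen() above caps a master datagram so it fits the multicast interface's MTU, leaving room for the IP and UDP headers, the 4-byte sync header and 20 extra bytes of slack (presumably for IP options). A worked example (illustrative; assumes a 1500-byte Ethernet MTU and SIMPLE_CONN_SIZE == 24, i.e. sizeof(struct ip_vs_sync_conn_v0) on common builds):

/* Sketch only -- plugging typical numbers into the formula above:
 *
 *   num              = (1500 - 20 - 8 - 4 - 20) / 24 = 60
 *   send_mesg_maxlen = 4 + 24 * min(60, 255)         = 1444
 *
 * so one datagram carries at most 60 version-0 sized entries and still
 * fits the interface MTU.  On the backup side, recv_mesg_maxlen is
 * simply MTU - 20 - 8 = 1472 bytes.
 */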
@@ -569,6 +1244,7 @@ static int set_sync_mesg_maxlen(int sync_state)
 static int
 join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
 {
+       struct net *net = sock_net(sk);
        struct ip_mreqn mreq;
        struct net_device *dev;
        int ret;
@@ -576,7 +1252,8 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
        memset(&mreq, 0, sizeof(mreq));
        memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
 
-       if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
+       dev = __dev_get_by_name(net, ifname);
+       if (!dev)
                return -ENODEV;
        if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
                return -EINVAL;
@@ -593,11 +1270,13 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
 
 static int bind_mcastif_addr(struct socket *sock, char *ifname)
 {
+       struct net *net = sock_net(sock->sk);
        struct net_device *dev;
        __be32 addr;
        struct sockaddr_in sin;
 
-       if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
+       dev = __dev_get_by_name(net, ifname);
+       if (!dev)
                return -ENODEV;
 
        addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
@@ -619,19 +1298,20 @@ static int bind_mcastif_addr(struct socket *sock, char *ifname)
 /*
  *      Set up sending multicast socket over UDP
  */
-static struct socket * make_send_sock(void)
+static struct socket *make_send_sock(struct net *net)
 {
+       struct netns_ipvs *ipvs = net_ipvs(net);
        struct socket *sock;
        int result;
 
        /* First create a socket */
-       result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+       result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
        if (result < 0) {
                pr_err("Error during creation of socket; terminating\n");
                return ERR_PTR(result);
        }
 
-       result = set_mcast_if(sock->sk, ip_vs_master_mcast_ifn);
+       result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn);
        if (result < 0) {
                pr_err("Error setting outbound mcast interface\n");
                goto error;
@@ -640,7 +1320,7 @@ static struct socket * make_send_sock(void)
        set_mcast_loop(sock->sk, 0);
        set_mcast_ttl(sock->sk, 1);
 
-       result = bind_mcastif_addr(sock, ip_vs_master_mcast_ifn);
+       result = bind_mcastif_addr(sock, ipvs->master_mcast_ifn);
        if (result < 0) {
                pr_err("Error binding address of the mcast interface\n");
                goto error;
@@ -664,13 +1344,14 @@ static struct socket * make_send_sock(void)
 /*
  *      Set up receiving multicast socket over UDP
  */
-static struct socket * make_receive_sock(void)
+static struct socket *make_receive_sock(struct net *net)
 {
+       struct netns_ipvs *ipvs = net_ipvs(net);
        struct socket *sock;
        int result;
 
        /* First create a socket */
-       result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+       result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
        if (result < 0) {
                pr_err("Error during creation of socket; terminating\n");
                return ERR_PTR(result);
@@ -689,7 +1370,7 @@ static struct socket * make_receive_sock(void)
        /* join the multicast group */
        result = join_mcast_group(sock->sk,
                        (struct in_addr *) &mcast_addr.sin_addr,
-                       ip_vs_backup_mcast_ifn);
+                       ipvs->backup_mcast_ifn);
        if (result < 0) {
                pr_err("Error joining to the multicast group\n");
                goto error;
@@ -760,20 +1441,21 @@ ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
 static int sync_thread_master(void *data)
 {
        struct ip_vs_sync_thread_data *tinfo = data;
+       struct netns_ipvs *ipvs = net_ipvs(tinfo->net);
        struct ip_vs_sync_buff *sb;
 
        pr_info("sync thread started: state = MASTER, mcast_ifn = %s, "
                "syncid = %d\n",
-               ip_vs_master_mcast_ifn, ip_vs_master_syncid);
+               ipvs->master_mcast_ifn, ipvs->master_syncid);
 
        while (!kthread_should_stop()) {
-               while ((sb = sb_dequeue())) {
+               while ((sb = sb_dequeue(ipvs))) {
                        ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
                        ip_vs_sync_buff_release(sb);
                }
 
-               /* check if entries stay in curr_sb for 2 seconds */
-               sb = get_curr_sync_buff(2 * HZ);
+               /* check if entries stay in ipvs->sync_buff for 2 seconds */
+               sb = get_curr_sync_buff(ipvs, 2 * HZ);
                if (sb) {
                        ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
                        ip_vs_sync_buff_release(sb);
@@ -783,14 +1465,13 @@ static int sync_thread_master(void *data)
        }
 
        /* clean up the sync_buff queue */
-       while ((sb=sb_dequeue())) {
+       while ((sb = sb_dequeue(ipvs)))
                ip_vs_sync_buff_release(sb);
-       }
 
        /* clean up the current sync_buff */
-       if ((sb = get_curr_sync_buff(0))) {
+       sb = get_curr_sync_buff(ipvs, 0);
+       if (sb)
                ip_vs_sync_buff_release(sb);
-       }
 
        /* release the sending multicast socket */
        sock_release(tinfo->sock);
@@ -803,11 +1484,12 @@ static int sync_thread_master(void *data)
 static int sync_thread_backup(void *data)
 {
        struct ip_vs_sync_thread_data *tinfo = data;
+       struct netns_ipvs *ipvs = net_ipvs(tinfo->net);
        int len;
 
        pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
                "syncid = %d\n",
-               ip_vs_backup_mcast_ifn, ip_vs_backup_syncid);
+               ipvs->backup_mcast_ifn, ipvs->backup_syncid);
 
        while (!kthread_should_stop()) {
                wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
@@ -817,7 +1499,7 @@ static int sync_thread_backup(void *data)
                /* do we have data now? */
                while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) {
                        len = ip_vs_receive(tinfo->sock, tinfo->buf,
-                                       sync_recv_mesg_maxlen);
+                                       ipvs->recv_mesg_maxlen);
                        if (len <= 0) {
                                pr_err("receiving message error\n");
                                break;
@@ -826,7 +1508,7 @@ static int sync_thread_backup(void *data)
                        /* disable bottom half, because it accesses the data
                           shared by softirq while getting/creating conns */
                        local_bh_disable();
-                       ip_vs_process_message(tinfo->buf, len);
+                       ip_vs_process_message(tinfo->net, tinfo->buf, len);
                        local_bh_enable();
                }
        }
@@ -840,41 +1522,42 @@ static int sync_thread_backup(void *data)
 }
 
 
-int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
+int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
 {
        struct ip_vs_sync_thread_data *tinfo;
        struct task_struct **realtask, *task;
        struct socket *sock;
+       struct netns_ipvs *ipvs = net_ipvs(net);
        char *name, *buf = NULL;
        int (*threadfn)(void *data);
        int result = -ENOMEM;
 
        IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
        IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
-                 sizeof(struct ip_vs_sync_conn));
+                 sizeof(struct ip_vs_sync_conn_v0));
 
        if (state == IP_VS_STATE_MASTER) {
-               if (sync_master_thread)
+               if (ipvs->master_thread)
                        return -EEXIST;
 
-               strlcpy(ip_vs_master_mcast_ifn, mcast_ifn,
-                       sizeof(ip_vs_master_mcast_ifn));
-               ip_vs_master_syncid = syncid;
-               realtask = &sync_master_thread;
-               name = "ipvs_syncmaster";
+               strlcpy(ipvs->master_mcast_ifn, mcast_ifn,
+                       sizeof(ipvs->master_mcast_ifn));
+               ipvs->master_syncid = syncid;
+               realtask = &ipvs->master_thread;
+               name = "ipvs_master:%d";
                threadfn = sync_thread_master;
-               sock = make_send_sock();
+               sock = make_send_sock(net);
        } else if (state == IP_VS_STATE_BACKUP) {
-               if (sync_backup_thread)
+               if (ipvs->backup_thread)
                        return -EEXIST;
 
-               strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn,
-                       sizeof(ip_vs_backup_mcast_ifn));
-               ip_vs_backup_syncid = syncid;
-               realtask = &sync_backup_thread;
-               name = "ipvs_syncbackup";
+               strlcpy(ipvs->backup_mcast_ifn, mcast_ifn,
+                       sizeof(ipvs->backup_mcast_ifn));
+               ipvs->backup_syncid = syncid;
+               realtask = &ipvs->backup_thread;
+               name = "ipvs_backup:%d";
                threadfn = sync_thread_backup;
-               sock = make_receive_sock();
+               sock = make_receive_sock(net);
        } else {
                return -EINVAL;
        }
@@ -884,9 +1567,9 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
                goto out;
        }
 
-       set_sync_mesg_maxlen(state);
+       set_sync_mesg_maxlen(net, state);
        if (state == IP_VS_STATE_BACKUP) {
-               buf = kmalloc(sync_recv_mesg_maxlen, GFP_KERNEL);
+               buf = kmalloc(ipvs->recv_mesg_maxlen, GFP_KERNEL);
                if (!buf)
                        goto outsocket;
        }
@@ -895,10 +1578,11 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
        if (!tinfo)
                goto outbuf;
 
+       tinfo->net = net;
        tinfo->sock = sock;
        tinfo->buf = buf;
 
-       task = kthread_run(threadfn, tinfo, name);
+       task = kthread_run(threadfn, tinfo, name, ipvs->gen);
        if (IS_ERR(task)) {
                result = PTR_ERR(task);
                goto outtinfo;
@@ -906,7 +1590,7 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
 
        /* mark as active */
        *realtask = task;
-       ip_vs_sync_state |= state;
+       ipvs->sync_state |= state;
 
        /* increase the module use count */
        ip_vs_use_count_inc();
@@ -924,16 +1608,18 @@ out:
 }
 
 
-int stop_sync_thread(int state)
+int stop_sync_thread(struct net *net, int state)
 {
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
        IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
 
        if (state == IP_VS_STATE_MASTER) {
-               if (!sync_master_thread)
+               if (!ipvs->master_thread)
                        return -ESRCH;
 
                pr_info("stopping master sync thread %d ...\n",
-                       task_pid_nr(sync_master_thread));
+                       task_pid_nr(ipvs->master_thread));
 
                /*
                 * The lock synchronizes with sb_queue_tail(), so that we don't
@@ -941,21 +1627,21 @@ int stop_sync_thread(int state)
                 * progress of stopping the master sync daemon.
                 */
 
-               spin_lock_bh(&ip_vs_sync_lock);
-               ip_vs_sync_state &= ~IP_VS_STATE_MASTER;
-               spin_unlock_bh(&ip_vs_sync_lock);
-               kthread_stop(sync_master_thread);
-               sync_master_thread = NULL;
+               spin_lock_bh(&ipvs->sync_lock);
+               ipvs->sync_state &= ~IP_VS_STATE_MASTER;
+               spin_unlock_bh(&ipvs->sync_lock);
+               kthread_stop(ipvs->master_thread);
+               ipvs->master_thread = NULL;
        } else if (state == IP_VS_STATE_BACKUP) {
-               if (!sync_backup_thread)
+               if (!ipvs->backup_thread)
                        return -ESRCH;
 
                pr_info("stopping backup sync thread %d ...\n",
-                       task_pid_nr(sync_backup_thread));
+                       task_pid_nr(ipvs->backup_thread));
 
-               ip_vs_sync_state &= ~IP_VS_STATE_BACKUP;
-               kthread_stop(sync_backup_thread);
-               sync_backup_thread = NULL;
+               ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
+               kthread_stop(ipvs->backup_thread);
+               ipvs->backup_thread = NULL;
        } else {
                return -EINVAL;
        }
@@ -965,3 +1651,42 @@ int stop_sync_thread(int state)
 
        return 0;
 }
+
+/*
+ * Initialize data struct for each netns
+ */
+static int __net_init __ip_vs_sync_init(struct net *net)
+{
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       INIT_LIST_HEAD(&ipvs->sync_queue);
+       spin_lock_init(&ipvs->sync_lock);
+       spin_lock_init(&ipvs->sync_buff_lock);
+
+       ipvs->sync_mcast_addr.sin_family = AF_INET;
+       ipvs->sync_mcast_addr.sin_port = cpu_to_be16(IP_VS_SYNC_PORT);
+       ipvs->sync_mcast_addr.sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP);
+       return 0;
+}
+
+static void __ip_vs_sync_cleanup(struct net *net)
+{
+       stop_sync_thread(net, IP_VS_STATE_MASTER);
+       stop_sync_thread(net, IP_VS_STATE_BACKUP);
+}
+
+static struct pernet_operations ipvs_sync_ops = {
+       .init = __ip_vs_sync_init,
+       .exit = __ip_vs_sync_cleanup,
+};
+
+
+int __init ip_vs_sync_init(void)
+{
+       return register_pernet_subsys(&ipvs_sync_ops);
+}
+
+void ip_vs_sync_cleanup(void)
+{
+       unregister_pernet_subsys(&ipvs_sync_ops);
+}
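
The registration above is the standard pernet_operations pattern: .init runs for every namespace (existing and future) once register_pernet_subsys() returns, and .exit runs as each namespace is torn down. A minimal self-contained sketch of the same idiom for a hypothetical module (names are illustrative, not from this patch):

#include <linux/kernel.h>
#include <linux/module.h>
#include <net/net_namespace.h>

static int __net_init foo_net_init(struct net *net)
{
	pr_info("foo: init for netns %p\n", net);
	return 0;
}

static void __net_exit foo_net_exit(struct net *net)
{
	pr_info("foo: cleanup for netns %p\n", net);
}

static struct pernet_operations foo_net_ops = {
	.init = foo_net_init,
	.exit = foo_net_exit,
};

static int __init foo_init(void)
{
	/* .init is called for each existing and future netns after this */
	return register_pernet_subsys(&foo_net_ops);
}

static void __exit foo_exit(void)
{
	/* .exit is called for each remaining netns before this returns */
	unregister_pernet_subsys(&foo_net_ops);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
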
index bbddfdb10db2b1afc026618456f2bc99a8698c48..bc1bfc48a17fcc2c01024dadc80592ded11b27f3 100644 (file)
 
 #include <net/ip_vs.h>
 
-
-static inline unsigned int
-ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest)
-{
-       /*
-        * We think the overhead of processing active connections is 256
-        * times higher than that of inactive connections in average. (This
-        * 256 times might not be accurate, we will change it later) We
-        * use the following formula to estimate the overhead now:
-        *                dest->activeconns*256 + dest->inactconns
-        */
-       return (atomic_read(&dest->activeconns) << 8) +
-               atomic_read(&dest->inactconns);
-}
-
-
 /*
  *     Weighted Least Connection scheduling
  */
@@ -71,11 +55,11 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
                if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
                    atomic_read(&dest->weight) > 0) {
                        least = dest;
-                       loh = ip_vs_wlc_dest_overhead(least);
+                       loh = ip_vs_dest_conn_overhead(least);
                        goto nextstage;
                }
        }
-       IP_VS_ERR_RL("WLC: no destination available\n");
+       ip_vs_scheduler_err(svc, "no destination available");
        return NULL;
 
        /*
@@ -85,7 +69,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
        list_for_each_entry_continue(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;
-               doh = ip_vs_wlc_dest_overhead(dest);
+               doh = ip_vs_dest_conn_overhead(dest);
                if (loh * atomic_read(&dest->weight) >
                    doh * atomic_read(&least->weight)) {
                        least = dest;
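
The scheduler now leans on the shared ip_vs_dest_conn_overhead() helper instead of its private copy. Assuming that helper keeps the (activeconns << 8) + inactconns heuristic the removed function used, the weighted-least-connection decision reduces to the cross-multiplied comparison shown in this standalone sketch (illustration only, not kernel code):

#include <stdio.h>

struct dest { int activeconns, inactconns, weight; };

static unsigned int conn_overhead(const struct dest *d)
{
	/* active connections are assumed ~256x as expensive as inactive */
	return ((unsigned int)d->activeconns << 8) + d->inactconns;
}

/* pick the destination with the lowest overhead/weight ratio,
 * compared cross-multiplied to avoid division */
static const struct dest *wlc_pick(const struct dest *d, int n)
{
	const struct dest *least = NULL;
	unsigned int loh = 0, doh;
	int i;

	for (i = 0; i < n; i++) {
		if (d[i].weight <= 0)
			continue;
		doh = conn_overhead(&d[i]);
		if (!least || loh * (unsigned int)d[i].weight >
			      doh * (unsigned int)least->weight) {
			least = &d[i];
			loh = doh;
		}
	}
	return least;
}

int main(void)
{
	struct dest pool[] = { { 10, 5, 1 }, { 3, 50, 1 }, { 8, 0, 2 } };
	const struct dest *pick = wlc_pick(pool, 3);

	printf("picked dest with %d active conns\n",
	       pick ? pick->activeconns : -1);	/* picks the 3-conn server */
	return 0;
}
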
index 30db633f88f10c948e3e15c01ffb2b37bd0c4d13..1ef41f50723c04c13cfcff46ac1ca387aa6def7a 100644 (file)
@@ -147,8 +147,9 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 
                        if (mark->cl == mark->cl->next) {
                                /* no dest entry */
-                               IP_VS_ERR_RL("WRR: no destination available: "
-                                            "no destinations present\n");
+                               ip_vs_scheduler_err(svc,
+                                       "no destination available: "
+                                       "no destinations present");
                                dest = NULL;
                                goto out;
                        }
@@ -162,8 +163,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
                                 */
                                if (mark->cw == 0) {
                                        mark->cl = &svc->destinations;
-                                       IP_VS_ERR_RL("WRR: no destination "
-                                                    "available\n");
+                                       ip_vs_scheduler_err(svc,
+                                               "no destination available");
                                        dest = NULL;
                                        goto out;
                                }
@@ -185,8 +186,9 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
                        /* back to the start, and no dest is found.
                           It is only possible when all dests are OVERLOADED */
                        dest = NULL;
-                       IP_VS_ERR_RL("WRR: no destination available: "
-                                    "all destinations are overloaded\n");
+                       ip_vs_scheduler_err(svc,
+                               "no destination available: "
+                               "all destinations are overloaded");
                        goto out;
                }
        }
index 5325a3fbe4ac8e8ab5a2e8175f9a5b93e1cc663c..878f6dd9dbadd4994e25f76ac97351c72cde8ddb 100644 (file)
 
 #include <net/ip_vs.h>
 
+enum {
+       IP_VS_RT_MODE_LOCAL     = 1, /* Allow local dest */
+       IP_VS_RT_MODE_NON_LOCAL = 2, /* Allow non-local dest */
+       IP_VS_RT_MODE_RDR       = 4, /* Allow redirect from remote daddr to
+                                     * local
+                                     */
+};
 
 /*
  *      Destination cache to speed up outgoing route lookup
@@ -77,11 +84,7 @@ __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos)
        return dst;
 }
 
-/*
- * Get route to destination or remote server
- * rt_mode: flags, &1=Allow local dest, &2=Allow non-local dest,
- *         &4=Allow redirect from remote daddr to local
- */
+/* Get route to destination or remote server */
 static struct rtable *
 __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
                   __be32 daddr, u32 rtos, int rt_mode)
@@ -100,7 +103,8 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
                                .fl4_tos = rtos,
                        };
 
-                       if (ip_route_output_key(net, &rt, &fl)) {
+                       rt = ip_route_output_key(net, &fl);
+                       if (IS_ERR(rt)) {
                                spin_unlock(&dest->dst_lock);
                                IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
                                             &dest->addr.ip);
@@ -118,7 +122,8 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
                        .fl4_tos = rtos,
                };
 
-               if (ip_route_output_key(net, &rt, &fl)) {
+               rt = ip_route_output_key(net, &fl);
+               if (IS_ERR(rt)) {
                        IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
                                     &daddr);
                        return NULL;
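
These hunks follow the tree-wide conversion in which ip_route_output_key() hands back the rtable (or an ERR_PTR) instead of filling an output parameter. In isolation, the calling shape is roughly the following hypothetical wrapper, mirroring the calls above:

#include <net/route.h>
#include <linux/err.h>

static struct rtable *ipvs_route_lookup(struct net *net, struct flowi *fl)
{
	struct rtable *rt;

	rt = ip_route_output_key(net, fl);
	if (IS_ERR(rt))
		return NULL;	/* callers above log the failure and bail out */
	return rt;		/* caller drops the reference with ip_rt_put() */
}
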
@@ -126,15 +131,16 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
        }
 
        local = rt->rt_flags & RTCF_LOCAL;
-       if (!((local ? 1 : 2) & rt_mode)) {
+       if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
+             rt_mode)) {
                IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n",
                             (rt->rt_flags & RTCF_LOCAL) ?
                             "local":"non-local", &rt->rt_dst);
                ip_rt_put(rt);
                return NULL;
        }
-       if (local && !(rt_mode & 4) && !((ort = skb_rtable(skb)) &&
-                                        ort->rt_flags & RTCF_LOCAL)) {
+       if (local && !(rt_mode & IP_VS_RT_MODE_RDR) &&
+           !((ort = skb_rtable(skb)) && ort->rt_flags & RTCF_LOCAL)) {
                IP_VS_DBG_RL("Redirect from non-local address %pI4 to local "
                             "requires NAT method, dest: %pI4\n",
                             &ip_hdr(skb)->daddr, &rt->rt_dst);
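
A small sketch of how the named bits replace the old magic 1|2|4 values (the flag values mirror the enum added above; the helper itself is illustrative, not part of the patch):

/* values from the IP_VS_RT_MODE_* enum added above:
 * LOCAL = 1, NON_LOCAL = 2, RDR = 4 */
static int rt_mode_allows(int rt_mode, int dest_is_local)
{
	int needed = dest_is_local ? IP_VS_RT_MODE_LOCAL
				   : IP_VS_RT_MODE_NON_LOCAL;

	return (rt_mode & needed) != 0;
}

Callers such as ip_vs_nat_xmit() and ip_vs_icmp_xmit() then pass IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL | IP_VS_RT_MODE_RDR where they previously passed 1|2|4, and only allow a redirect from a remote daddr to a local one when IP_VS_RT_MODE_RDR is set.
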
@@ -175,9 +181,9 @@ __ip_vs_reroute_locally(struct sk_buff *skb)
                        .fl4_tos = RT_TOS(iph->tos),
                        .mark = skb->mark,
                };
-               struct rtable *rt;
 
-               if (ip_route_output_key(net, &rt, &fl))
+               rt = ip_route_output_key(net, &fl);
+               if (IS_ERR(rt))
                        return 0;
                if (!(rt->rt_flags & RTCF_LOCAL)) {
                        ip_rt_put(rt);
@@ -215,8 +221,13 @@ __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
            ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
                               &fl.fl6_dst, 0, &fl.fl6_src) < 0)
                goto out_err;
-       if (do_xfrm && xfrm_lookup(net, &dst, &fl, NULL, 0) < 0)
-               goto out_err;
+       if (do_xfrm) {
+               dst = xfrm_lookup(net, dst, &fl, NULL, 0);
+               if (IS_ERR(dst)) {
+                       dst = NULL;
+                       goto out_err;
+               }
+       }
        ipv6_addr_copy(ret_saddr, &fl.fl6_src);
        return dst;
 
@@ -384,13 +395,14 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        EnterFunction(10);
 
-       if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr,
-                                     RT_TOS(iph->tos), 2)))
+       if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr, RT_TOS(iph->tos),
+                                     IP_VS_RT_MODE_NON_LOCAL)))
                goto tx_error_icmp;
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
+       if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
+           !skb_is_gso(skb)) {
                ip_rt_put(rt);
                icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
                IP_VS_DBG_RL("%s(): frag needed\n", __func__);
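
Each of the IPv4 xmit paths in this file converges on the same MTU check in this patch: an oversized skb only triggers ICMP_FRAG_NEEDED when DF is set and the skb is not GSO, since a GSO skb is segmented into sub-MTU frames further down the stack. The shape of that check, as an illustrative helper that the patch does not actually add:

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <net/ip.h>

static bool ipvs_frag_needed(const struct sk_buff *skb,
			     const struct iphdr *iph, unsigned int mtu)
{
	return skb->len > mtu && (iph->frag_off & htons(IP_DF)) &&
	       !skb_is_gso(skb);
}
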
@@ -443,7 +455,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if (skb->len > mtu) {
+       if (skb->len > mtu && !skb_is_gso(skb)) {
                if (!skb->dev) {
                        struct net *net = dev_net(skb_dst(skb)->dev);
 
@@ -512,7 +524,10 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        }
 
        if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
-                                     RT_TOS(iph->tos), 1|2|4)))
+                                     RT_TOS(iph->tos),
+                                     IP_VS_RT_MODE_LOCAL |
+                                       IP_VS_RT_MODE_NON_LOCAL |
+                                       IP_VS_RT_MODE_RDR)))
                goto tx_error_icmp;
        local = rt->rt_flags & RTCF_LOCAL;
        /*
@@ -543,7 +558,8 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
+       if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
+           !skb_is_gso(skb)) {
                icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
                IP_VS_DBG_RL_PKT(0, AF_INET, pp, skb, 0,
                                 "ip_vs_nat_xmit(): frag needed for");
@@ -658,7 +674,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if (skb->len > mtu) {
+       if (skb->len > mtu && !skb_is_gso(skb)) {
                if (!skb->dev) {
                        struct net *net = dev_net(skb_dst(skb)->dev);
 
@@ -754,7 +770,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        EnterFunction(10);
 
        if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
-                                     RT_TOS(tos), 1|2)))
+                                     RT_TOS(tos), IP_VS_RT_MODE_LOCAL |
+                                                  IP_VS_RT_MODE_NON_LOCAL)))
                goto tx_error_icmp;
        if (rt->rt_flags & RTCF_LOCAL) {
                ip_rt_put(rt);
@@ -773,8 +790,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        df |= (old_iph->frag_off & htons(IP_DF));
 
-       if ((old_iph->frag_off & htons(IP_DF))
-           && mtu < ntohs(old_iph->tot_len)) {
+       if ((old_iph->frag_off & htons(IP_DF)) &&
+           mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb)) {
                icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
                IP_VS_DBG_RL("%s(): frag needed\n", __func__);
                goto tx_error_put;
@@ -886,7 +903,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
        if (skb_dst(skb))
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
 
-       if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
+       if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr) &&
+           !skb_is_gso(skb)) {
                if (!skb->dev) {
                        struct net *net = dev_net(skb_dst(skb)->dev);
 
@@ -982,7 +1000,9 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        EnterFunction(10);
 
        if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
-                                     RT_TOS(iph->tos), 1|2)))
+                                     RT_TOS(iph->tos),
+                                     IP_VS_RT_MODE_LOCAL |
+                                       IP_VS_RT_MODE_NON_LOCAL)))
                goto tx_error_icmp;
        if (rt->rt_flags & RTCF_LOCAL) {
                ip_rt_put(rt);
@@ -991,7 +1011,8 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) {
+       if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu &&
+           !skb_is_gso(skb)) {
                icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
                ip_rt_put(rt);
                IP_VS_DBG_RL("%s(): frag needed\n", __func__);
@@ -1125,7 +1146,10 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
         */
 
        if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
-                                     RT_TOS(ip_hdr(skb)->tos), 1|2|4)))
+                                     RT_TOS(ip_hdr(skb)->tos),
+                                     IP_VS_RT_MODE_LOCAL |
+                                       IP_VS_RT_MODE_NON_LOCAL |
+                                       IP_VS_RT_MODE_RDR)))
                goto tx_error_icmp;
        local = rt->rt_flags & RTCF_LOCAL;
 
@@ -1158,7 +1182,8 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) {
+       if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF)) &&
+           !skb_is_gso(skb)) {
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
                IP_VS_DBG_RL("%s(): frag needed\n", __func__);
                goto tx_error_put;
@@ -1272,7 +1297,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if (skb->len > mtu) {
+       if (skb->len > mtu && !skb_is_gso(skb)) {
                if (!skb->dev) {
                        struct net *net = dev_net(skb_dst(skb)->dev);
 
diff --git a/net/netfilter/nf_conntrack_broadcast.c b/net/netfilter/nf_conntrack_broadcast.c
new file mode 100644 (file)
index 0000000..4e99cca
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ *      broadcast connection tracking helper
+ *
+ *      (c) 2005 Patrick McHardy <kaber@trash.net>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <net/route.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+
+int nf_conntrack_broadcast_help(struct sk_buff *skb,
+                               unsigned int protoff,
+                               struct nf_conn *ct,
+                               enum ip_conntrack_info ctinfo,
+                               unsigned int timeout)
+{
+       struct nf_conntrack_expect *exp;
+       struct iphdr *iph = ip_hdr(skb);
+       struct rtable *rt = skb_rtable(skb);
+       struct in_device *in_dev;
+       struct nf_conn_help *help = nfct_help(ct);
+       __be32 mask = 0;
+
+       /* we're only interested in locally generated packets */
+       if (skb->sk == NULL)
+               goto out;
+       if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST))
+               goto out;
+       if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
+               goto out;
+
+       rcu_read_lock();
+       in_dev = __in_dev_get_rcu(rt->dst.dev);
+       if (in_dev != NULL) {
+               for_primary_ifa(in_dev) {
+                       if (ifa->ifa_broadcast == iph->daddr) {
+                               mask = ifa->ifa_mask;
+                               break;
+                       }
+               } endfor_ifa(in_dev);
+       }
+       rcu_read_unlock();
+
+       if (mask == 0)
+               goto out;
+
+       exp = nf_ct_expect_alloc(ct);
+       if (exp == NULL)
+               goto out;
+
+       exp->tuple                = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+       exp->tuple.src.u.udp.port = help->helper->tuple.src.u.udp.port;
+
+       exp->mask.src.u3.ip       = mask;
+       exp->mask.src.u.udp.port  = htons(0xFFFF);
+
+       exp->expectfn             = NULL;
+       exp->flags                = NF_CT_EXPECT_PERMANENT;
+       exp->class                = NF_CT_EXPECT_CLASS_DEFAULT;
+       exp->helper               = NULL;
+
+       nf_ct_expect_related(exp);
+       nf_ct_expect_put(exp);
+
+       nf_ct_refresh(ct, skb, timeout * HZ);
+out:
+       return NF_ACCEPT;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_broadcast_help);
+
+MODULE_LICENSE("GPL");
index 84f4fcc5884be4fd6a4a08774eb91ba1facd2f7b..2f454efa1a8bc92460179761939cf3931379e064 100644 (file)
@@ -43,6 +43,7 @@
 #include <net/netfilter/nf_conntrack_acct.h>
 #include <net/netfilter/nf_conntrack_ecache.h>
 #include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_core.h>
 
@@ -282,6 +283,11 @@ EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
 static void death_by_timeout(unsigned long ul_conntrack)
 {
        struct nf_conn *ct = (void *)ul_conntrack;
+       struct nf_conn_tstamp *tstamp;
+
+       tstamp = nf_conn_tstamp_find(ct);
+       if (tstamp && tstamp->stop == 0)
+               tstamp->stop = ktime_to_ns(ktime_get_real());
 
        if (!test_bit(IPS_DYING_BIT, &ct->status) &&
            unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
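
The timestamp extension filled in here (stop on destruction, start on confirm further below) is just a pair of ktime_get_real() values in nanoseconds; a consumer could derive a flow duration roughly like this (hypothetical helper, not part of the patch):

#include <stdint.h>

/* start_ns/stop_ns as stored in struct nf_conn_tstamp by this patch */
static uint64_t flow_duration_ms(uint64_t start_ns, uint64_t stop_ns)
{
	return stop_ns > start_ns ? (stop_ns - start_ns) / 1000000 : 0;
}
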
@@ -419,6 +425,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct nf_conn_help *help;
+       struct nf_conn_tstamp *tstamp;
        struct hlist_nulls_node *n;
        enum ip_conntrack_info ctinfo;
        struct net *net;
@@ -486,8 +493,16 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        ct->timeout.expires += jiffies;
        add_timer(&ct->timeout);
        atomic_inc(&ct->ct_general.use);
-       set_bit(IPS_CONFIRMED_BIT, &ct->status);
+       ct->status |= IPS_CONFIRMED;
+
+       /* set conntrack timestamp, if enabled. */
+       tstamp = nf_conn_tstamp_find(ct);
+       if (tstamp) {
+               if (skb->tstamp.tv64 == 0)
+                       __net_timestamp((struct sk_buff *)skb);
 
+               tstamp->start = ktime_to_ns(skb->tstamp);
+       }
        /* Since the lookup is lockless, hash insertion must be done after
         * starting the timer and setting the CONFIRMED bit. The RCU barriers
         * guarantee that no other CPU can find the conntrack before the above
@@ -655,7 +670,8 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
         * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
         */
        memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
-              sizeof(*ct) - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
+              offsetof(struct nf_conn, proto) -
+              offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
        spin_lock_init(&ct->lock);
        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
@@ -745,6 +761,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
        }
 
        nf_ct_acct_ext_add(ct, GFP_ATOMIC);
+       nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
 
        ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
        nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
@@ -1192,6 +1209,11 @@ struct __nf_ct_flush_report {
 static int kill_report(struct nf_conn *i, void *data)
 {
        struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
+       struct nf_conn_tstamp *tstamp;
+
+       tstamp = nf_conn_tstamp_find(i);
+       if (tstamp && tstamp->stop == 0)
+               tstamp->stop = ktime_to_ns(ktime_get_real());
 
        /* If we fail to deliver the event, death_by_timeout() will retry */
        if (nf_conntrack_event_report(IPCT_DESTROY, i,
@@ -1208,9 +1230,9 @@ static int kill_all(struct nf_conn *i, void *data)
        return 1;
 }
 
-void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
+void nf_ct_free_hashtable(void *hash, unsigned int size)
 {
-       if (vmalloced)
+       if (is_vmalloc_addr(hash))
                vfree(hash);
        else
                free_pages((unsigned long)hash,
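
Dropping the vmalloced flag works because the free path can classify the pointer itself with is_vmalloc_addr(). A condensed sketch of the resulting alloc/free pair, shaped like nf_ct_alloc_hashtable()/nf_ct_free_hashtable() after this patch (hypothetical wrapper, error handling trimmed):

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>

static void *table_alloc(size_t sz)
{
	void *p = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN |
					   __GFP_ZERO, get_order(sz));
	if (!p)		/* fall back to vmalloc for large tables */
		p = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			      PAGE_KERNEL);
	return p;
}

static void table_free(void *p, size_t sz)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		free_pages((unsigned long)p, get_order(sz));
}
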
@@ -1277,8 +1299,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
                goto i_see_dead_people;
        }
 
-       nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
-                            net->ct.htable_size);
+       nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
        nf_conntrack_ecache_fini(net);
        nf_conntrack_acct_fini(net);
        nf_conntrack_expect_fini(net);
@@ -1307,21 +1328,18 @@ void nf_conntrack_cleanup(struct net *net)
        }
 }
 
-void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
+void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
 {
        struct hlist_nulls_head *hash;
        unsigned int nr_slots, i;
        size_t sz;
 
-       *vmalloced = 0;
-
        BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
        nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
        sz = nr_slots * sizeof(struct hlist_nulls_head);
        hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
                                        get_order(sz));
        if (!hash) {
-               *vmalloced = 1;
                printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
                hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
                                 PAGE_KERNEL);
@@ -1337,7 +1355,7 @@ EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
 
 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 {
-       int i, bucket, vmalloced, old_vmalloced;
+       int i, bucket;
        unsigned int hashsize, old_size;
        struct hlist_nulls_head *hash, *old_hash;
        struct nf_conntrack_tuple_hash *h;
@@ -1354,7 +1372,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
        if (!hashsize)
                return -EINVAL;
 
-       hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1);
+       hash = nf_ct_alloc_hashtable(&hashsize, 1);
        if (!hash)
                return -ENOMEM;
 
@@ -1376,15 +1394,13 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
                }
        }
        old_size = init_net.ct.htable_size;
-       old_vmalloced = init_net.ct.hash_vmalloc;
        old_hash = init_net.ct.hash;
 
        init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
-       init_net.ct.hash_vmalloc = vmalloced;
        init_net.ct.hash = hash;
        spin_unlock_bh(&nf_conntrack_lock);
 
-       nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
+       nf_ct_free_hashtable(old_hash, old_size);
        return 0;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
@@ -1497,8 +1513,7 @@ static int nf_conntrack_init_net(struct net *net)
        }
 
        net->ct.htable_size = nf_conntrack_htable_size;
-       net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
-                                            &net->ct.hash_vmalloc, 1);
+       net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
        if (!net->ct.hash) {
                ret = -ENOMEM;
                printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
@@ -1510,6 +1525,9 @@ static int nf_conntrack_init_net(struct net *net)
        ret = nf_conntrack_acct_init(net);
        if (ret < 0)
                goto err_acct;
+       ret = nf_conntrack_tstamp_init(net);
+       if (ret < 0)
+               goto err_tstamp;
        ret = nf_conntrack_ecache_init(net);
        if (ret < 0)
                goto err_ecache;
@@ -1517,12 +1535,13 @@ static int nf_conntrack_init_net(struct net *net)
        return 0;
 
 err_ecache:
+       nf_conntrack_tstamp_fini(net);
+err_tstamp:
        nf_conntrack_acct_fini(net);
 err_acct:
        nf_conntrack_expect_fini(net);
 err_expect:
-       nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
-                            net->ct.htable_size);
+       nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
 err_hash:
        kmem_cache_destroy(net->ct.nf_conntrack_cachep);
 err_cache:
index a20fb0bd1efe850543a9dea62fdf4bf53330a335..cd1e8e0970f226b5a2dd5e82aa2d121680ff24a3 100644 (file)
@@ -319,7 +319,8 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
        const struct nf_conntrack_expect_policy *p;
        unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);
 
-       atomic_inc(&exp->use);
+       /* two references : one for hash insert, one for the timer */
+       atomic_add(2, &exp->use);
 
        if (master_help) {
                hlist_add_head(&exp->lnode, &master_help->expectations);
@@ -333,12 +334,14 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
        setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
                    (unsigned long)exp);
        if (master_help) {
-               p = &master_help->helper->expect_policy[exp->class];
+               p = &rcu_dereference_protected(
+                               master_help->helper,
+                               lockdep_is_held(&nf_conntrack_lock)
+                               )->expect_policy[exp->class];
                exp->timeout.expires = jiffies + p->timeout * HZ;
        }
        add_timer(&exp->timeout);
 
-       atomic_inc(&exp->use);
        NF_CT_STAT_INC(net, expect_create);
 }
 
@@ -369,7 +372,10 @@ static inline int refresh_timer(struct nf_conntrack_expect *i)
        if (!del_timer(&i->timeout))
                return 0;
 
-       p = &master_help->helper->expect_policy[i->class];
+       p = &rcu_dereference_protected(
+               master_help->helper,
+               lockdep_is_held(&nf_conntrack_lock)
+               )->expect_policy[i->class];
        i->timeout.expires = jiffies + p->timeout * HZ;
        add_timer(&i->timeout);
        return 1;
@@ -407,7 +413,10 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
        }
        /* Will be over limit? */
        if (master_help) {
-               p = &master_help->helper->expect_policy[expect->class];
+               p = &rcu_dereference_protected(
+                       master_help->helper,
+                       lockdep_is_held(&nf_conntrack_lock)
+                       )->expect_policy[expect->class];
                if (p->max_expected &&
                    master_help->expecting[expect->class] >= p->max_expected) {
                        evict_oldest_expect(master, expect);
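
The rcu_dereference_protected(..., lockdep_is_held(&nf_conntrack_lock)) conversions in this file (and in nf_conntrack_helper.c and nf_conntrack_proto.c below) annotate update-side reads of __rcu pointers with the lock that protects them, so sparse and lockdep can verify the access instead of it being a bare pointer read. A minimal sketch of the idiom with a hypothetical pointer:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct cfg { int value; };

static struct cfg __rcu *active_cfg;
static DEFINE_SPINLOCK(cfg_lock);

static int cfg_value_locked(void)
{
	struct cfg *c;

	/* caller must hold cfg_lock; this both documents and checks that */
	c = rcu_dereference_protected(active_cfg,
				      lockdep_is_held(&cfg_lock));
	return c ? c->value : 0;
}
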
@@ -478,7 +487,7 @@ static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
        struct hlist_node *n;
 
        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
-               n = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+               n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
                if (n)
                        return n;
        }
@@ -491,11 +500,11 @@ static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;
 
-       head = rcu_dereference(head->next);
+       head = rcu_dereference(hlist_next_rcu(head));
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
-               head = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+               head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
        }
        return head;
 }
@@ -630,8 +639,7 @@ int nf_conntrack_expect_init(struct net *net)
        }
 
        net->ct.expect_count = 0;
-       net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize,
-                                                 &net->ct.expect_vmalloc, 0);
+       net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
        if (net->ct.expect_hash == NULL)
                goto err1;
 
@@ -653,8 +661,7 @@ err3:
        if (net_eq(net, &init_net))
                kmem_cache_destroy(nf_ct_expect_cachep);
 err2:
-       nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
-                            nf_ct_expect_hsize);
+       nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
 err1:
        return err;
 }
@@ -666,6 +673,5 @@ void nf_conntrack_expect_fini(struct net *net)
                rcu_barrier(); /* Wait for call_rcu() before destroy */
                kmem_cache_destroy(nf_ct_expect_cachep);
        }
-       nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
-                            nf_ct_expect_hsize);
+       nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
 }
index bd82450c193f5dbb4895f4fc7565040686fa740d..80a23ed62bb0739c2d9d97f195dbea57216230b5 100644 (file)
@@ -140,15 +140,16 @@ static void update_alloc_size(struct nf_ct_ext_type *type)
        /* This assumes that extended areas in conntrack for the types
           whose NF_CT_EXT_F_PREALLOC bit set are allocated in order */
        for (i = min; i <= max; i++) {
-               t1 = nf_ct_ext_types[i];
+               t1 = rcu_dereference_protected(nf_ct_ext_types[i],
+                               lockdep_is_held(&nf_ct_ext_type_mutex));
                if (!t1)
                        continue;
 
-               t1->alloc_size = sizeof(struct nf_ct_ext)
-                                + ALIGN(sizeof(struct nf_ct_ext), t1->align)
-                                + t1->len;
+               t1->alloc_size = ALIGN(sizeof(struct nf_ct_ext), t1->align) +
+                                t1->len;
                for (j = 0; j < NF_CT_EXT_NUM; j++) {
-                       t2 = nf_ct_ext_types[j];
+                       t2 = rcu_dereference_protected(nf_ct_ext_types[j],
+                               lockdep_is_held(&nf_ct_ext_type_mutex));
                        if (t2 == NULL || t2 == t1 ||
                            (t2->flags & NF_CT_EXT_F_PREALLOC) == 0)
                                continue;
index 59e1a4cd4e8b8b115c77e788fe2b821d50f32f65..1bdfea3579552e2ab1048bffb392ba469d0673c1 100644 (file)
@@ -33,7 +33,6 @@ static DEFINE_MUTEX(nf_ct_helper_mutex);
 static struct hlist_head *nf_ct_helper_hash __read_mostly;
 static unsigned int nf_ct_helper_hsize __read_mostly;
 static unsigned int nf_ct_helper_count __read_mostly;
-static int nf_ct_helper_vmalloc;
 
 
 /* Stupid hash, but collision free for the default registrations of the
@@ -158,7 +157,10 @@ static inline int unhelp(struct nf_conntrack_tuple_hash *i,
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(i);
        struct nf_conn_help *help = nfct_help(ct);
 
-       if (help && help->helper == me) {
+       if (help && rcu_dereference_protected(
+                       help->helper,
+                       lockdep_is_held(&nf_conntrack_lock)
+                       ) == me) {
                nf_conntrack_event(IPCT_HELPER, ct);
                rcu_assign_pointer(help->helper, NULL);
        }
@@ -210,7 +212,10 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
                hlist_for_each_entry_safe(exp, n, next,
                                          &net->ct.expect_hash[i], hnode) {
                        struct nf_conn_help *help = nfct_help(exp->master);
-                       if ((help->helper == me || exp->helper == me) &&
+                       if ((rcu_dereference_protected(
+                                       help->helper,
+                                       lockdep_is_held(&nf_conntrack_lock)
+                                       ) == me || exp->helper == me) &&
                            del_timer(&exp->timeout)) {
                                nf_ct_unlink_expect(exp);
                                nf_ct_expect_put(exp);
@@ -261,8 +266,7 @@ int nf_conntrack_helper_init(void)
        int err;
 
        nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
-       nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize,
-                                                 &nf_ct_helper_vmalloc, 0);
+       nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0);
        if (!nf_ct_helper_hash)
                return -ENOMEM;
 
@@ -273,14 +277,12 @@ int nf_conntrack_helper_init(void)
        return 0;
 
 err1:
-       nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc,
-                            nf_ct_helper_hsize);
+       nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
        return err;
 }
 
 void nf_conntrack_helper_fini(void)
 {
        nf_ct_extend_unregister(&helper_extend);
-       nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc,
-                            nf_ct_helper_hsize);
+       nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
 }
index aadde018a0720bd6e6a35dea4d639eb3d1eab293..4c8f30a3d6d2762e69604c4b1f6cd6821663bc64 100644 (file)
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/if_addr.h>
 #include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/netfilter.h>
-#include <net/route.h>
 
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_helper.h>
@@ -40,75 +33,26 @@ MODULE_ALIAS("ip_conntrack_netbios_ns");
 MODULE_ALIAS_NFCT_HELPER("netbios_ns");
 
 static unsigned int timeout __read_mostly = 3;
-module_param(timeout, uint, 0400);
+module_param(timeout, uint, S_IRUSR);
 MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
 
-static int help(struct sk_buff *skb, unsigned int protoff,
-               struct nf_conn *ct, enum ip_conntrack_info ctinfo)
-{
-       struct nf_conntrack_expect *exp;
-       struct iphdr *iph = ip_hdr(skb);
-       struct rtable *rt = skb_rtable(skb);
-       struct in_device *in_dev;
-       __be32 mask = 0;
-
-       /* we're only interested in locally generated packets */
-       if (skb->sk == NULL)
-               goto out;
-       if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST))
-               goto out;
-       if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
-               goto out;
-
-       rcu_read_lock();
-       in_dev = __in_dev_get_rcu(rt->dst.dev);
-       if (in_dev != NULL) {
-               for_primary_ifa(in_dev) {
-                       if (ifa->ifa_broadcast == iph->daddr) {
-                               mask = ifa->ifa_mask;
-                               break;
-                       }
-               } endfor_ifa(in_dev);
-       }
-       rcu_read_unlock();
-
-       if (mask == 0)
-               goto out;
-
-       exp = nf_ct_expect_alloc(ct);
-       if (exp == NULL)
-               goto out;
-
-       exp->tuple                = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
-       exp->tuple.src.u.udp.port = htons(NMBD_PORT);
-
-       exp->mask.src.u3.ip       = mask;
-       exp->mask.src.u.udp.port  = htons(0xFFFF);
-
-       exp->expectfn             = NULL;
-       exp->flags                = NF_CT_EXPECT_PERMANENT;
-       exp->class                = NF_CT_EXPECT_CLASS_DEFAULT;
-       exp->helper               = NULL;
-
-       nf_ct_expect_related(exp);
-       nf_ct_expect_put(exp);
-
-       nf_ct_refresh(ct, skb, timeout * HZ);
-out:
-       return NF_ACCEPT;
-}
-
 static struct nf_conntrack_expect_policy exp_policy = {
        .max_expected   = 1,
 };
 
+static int netbios_ns_help(struct sk_buff *skb, unsigned int protoff,
+                  struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+       return nf_conntrack_broadcast_help(skb, protoff, ct, ctinfo, timeout);
+}
+
 static struct nf_conntrack_helper helper __read_mostly = {
        .name                   = "netbios-ns",
-       .tuple.src.l3num        = AF_INET,
+       .tuple.src.l3num        = NFPROTO_IPV4,
        .tuple.src.u.udp.port   = cpu_to_be16(NMBD_PORT),
        .tuple.dst.protonum     = IPPROTO_UDP,
        .me                     = THIS_MODULE,
-       .help                   = help,
+       .help                   = netbios_ns_help,
        .expect_policy          = &exp_policy,
 };
 
index eead9db6f899ad71869013dec0b3e2de480361a1..30bf8a167fc8800ff26b4ab72cafe3e219f38139 100644 (file)
@@ -42,6 +42,7 @@
 #include <net/netfilter/nf_conntrack_tuple.h>
 #include <net/netfilter/nf_conntrack_acct.h>
 #include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
 #ifdef CONFIG_NF_NAT_NEEDED
 #include <net/netfilter/nf_nat_core.h>
 #include <net/netfilter/nf_nat_protocol.h>
@@ -230,6 +231,33 @@ nla_put_failure:
        return -1;
 }
 
+static int
+ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
+{
+       struct nlattr *nest_count;
+       const struct nf_conn_tstamp *tstamp;
+
+       tstamp = nf_conn_tstamp_find(ct);
+       if (!tstamp)
+               return 0;
+
+       nest_count = nla_nest_start(skb, CTA_TIMESTAMP | NLA_F_NESTED);
+       if (!nest_count)
+               goto nla_put_failure;
+
+       NLA_PUT_BE64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start));
+       if (tstamp->stop != 0) {
+               NLA_PUT_BE64(skb, CTA_TIMESTAMP_STOP,
+                            cpu_to_be64(tstamp->stop));
+       }
+       nla_nest_end(skb, nest_count);
+
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
 #ifdef CONFIG_NF_CONNTRACK_MARK
 static inline int
 ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
@@ -404,6 +432,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
            ctnetlink_dump_timeout(skb, ct) < 0 ||
            ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
            ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
+           ctnetlink_dump_timestamp(skb, ct) < 0 ||
            ctnetlink_dump_protoinfo(skb, ct) < 0 ||
            ctnetlink_dump_helpinfo(skb, ct) < 0 ||
            ctnetlink_dump_mark(skb, ct) < 0 ||
@@ -470,6 +499,18 @@ ctnetlink_secctx_size(const struct nf_conn *ct)
 #endif
 }
 
+static inline size_t
+ctnetlink_timestamp_size(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+       if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
+               return 0;
+       return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t));
+#else
+       return 0;
+#endif
+}
+
 static inline size_t
 ctnetlink_nlmsg_size(const struct nf_conn *ct)
 {
@@ -481,6 +522,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
               + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
               + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
               + ctnetlink_counters_size(ct)
+              + ctnetlink_timestamp_size(ct)
               + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
               + nla_total_size(0) /* CTA_PROTOINFO */
               + nla_total_size(0) /* CTA_HELP */
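
Back-of-the-envelope for the new ctnetlink_timestamp_size() above, assuming the usual netlink attribute layout (4-byte attribute header, 4-byte alignment): the nest costs one zero-payload attribute plus two 64-bit attributes.

#include <stdio.h>

#define NLA_ALIGNTO	4
#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN	4	/* assumed: aligned sizeof(struct nlattr) */

static int nla_total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	/* nest header + CTA_TIMESTAMP_START + CTA_TIMESTAMP_STOP */
	int sz = nla_total_size(0) + 2 * nla_total_size(8);

	printf("CTA_TIMESTAMP nest worst case: %d bytes\n", sz);	/* 28 */
	return 0;
}
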
@@ -571,7 +613,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
 
        if (events & (1 << IPCT_DESTROY)) {
                if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
-                   ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
+                   ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
+                   ctnetlink_dump_timestamp(skb, ct) < 0)
                        goto nla_put_failure;
        } else {
                if (ctnetlink_dump_timeout(skb, ct) < 0)
@@ -761,7 +804,7 @@ static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
 static int
 ctnetlink_parse_tuple(const struct nlattr * const cda[],
                      struct nf_conntrack_tuple *tuple,
-                     enum ctattr_tuple type, u_int8_t l3num)
+                     enum ctattr_type type, u_int8_t l3num)
 {
        struct nlattr *tb[CTA_TUPLE_MAX+1];
        int err;
@@ -1358,6 +1401,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
        }
 
        nf_ct_acct_ext_add(ct, GFP_ATOMIC);
+       nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
        nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
        /* we must add conntrack extensions before confirmation. */
        ct->status |= IPS_CONFIRMED;
@@ -1376,6 +1420,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
        }
 #endif
 
+       memset(&ct->proto, 0, sizeof(ct->proto));
        if (cda[CTA_PROTOINFO]) {
                err = ctnetlink_change_protoinfo(ct, cda);
                if (err < 0)
index dc7bb74110df22818b42222450f0141068b79b6d..5701c8dd783c02df1ef6d055f473845754e37d30 100644 (file)
@@ -166,6 +166,7 @@ static void nf_ct_l3proto_unregister_sysctl(struct nf_conntrack_l3proto *l3proto
 int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
 {
        int ret = 0;
+       struct nf_conntrack_l3proto *old;
 
        if (proto->l3proto >= AF_MAX)
                return -EBUSY;
@@ -174,7 +175,9 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
                return -EINVAL;
 
        mutex_lock(&nf_ct_proto_mutex);
-       if (nf_ct_l3protos[proto->l3proto] != &nf_conntrack_l3proto_generic) {
+       old = rcu_dereference_protected(nf_ct_l3protos[proto->l3proto],
+                                       lockdep_is_held(&nf_ct_proto_mutex));
+       if (old != &nf_conntrack_l3proto_generic) {
                ret = -EBUSY;
                goto out_unlock;
        }
@@ -201,7 +204,9 @@ void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
        BUG_ON(proto->l3proto >= AF_MAX);
 
        mutex_lock(&nf_ct_proto_mutex);
-       BUG_ON(nf_ct_l3protos[proto->l3proto] != proto);
+       BUG_ON(rcu_dereference_protected(nf_ct_l3protos[proto->l3proto],
+                                        lockdep_is_held(&nf_ct_proto_mutex)
+                                        ) != proto);
        rcu_assign_pointer(nf_ct_l3protos[proto->l3proto],
                           &nf_conntrack_l3proto_generic);
        nf_ct_l3proto_unregister_sysctl(proto);
@@ -279,7 +284,7 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
        mutex_lock(&nf_ct_proto_mutex);
        if (!nf_ct_protos[l4proto->l3proto]) {
                /* l3proto may be loaded latter. */
-               struct nf_conntrack_l4proto **proto_array;
+               struct nf_conntrack_l4proto __rcu **proto_array;
                int i;
 
                proto_array = kmalloc(MAX_NF_CT_PROTO *
@@ -291,7 +296,7 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
                }
 
                for (i = 0; i < MAX_NF_CT_PROTO; i++)
-                       proto_array[i] = &nf_conntrack_l4proto_generic;
+                       RCU_INIT_POINTER(proto_array[i], &nf_conntrack_l4proto_generic);
 
                /* Before making proto_array visible to lockless readers,
                 * we must make sure its content is committed to memory.
@@ -299,8 +304,10 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
                smp_wmb();
 
                nf_ct_protos[l4proto->l3proto] = proto_array;
-       } else if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] !=
-                                       &nf_conntrack_l4proto_generic) {
+       } else if (rcu_dereference_protected(
+                       nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
+                       lockdep_is_held(&nf_ct_proto_mutex)
+                       ) != &nf_conntrack_l4proto_generic) {
                ret = -EBUSY;
                goto out_unlock;
        }
@@ -331,7 +338,10 @@ void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
        BUG_ON(l4proto->l3proto >= PF_MAX);
 
        mutex_lock(&nf_ct_proto_mutex);
-       BUG_ON(nf_ct_protos[l4proto->l3proto][l4proto->l4proto] != l4proto);
+       BUG_ON(rcu_dereference_protected(
+                       nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
+                       lockdep_is_held(&nf_ct_proto_mutex)
+                       ) != l4proto);
        rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
                           &nf_conntrack_l4proto_generic);
        nf_ct_l4proto_unregister_sysctl(l4proto);
index 5292560d6d4aedbfa33c1369a66fc2d72657df8f..9ae57c57c50eaf2b1cd20013a41d72d3acfdb584 100644 (file)
@@ -452,6 +452,9 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
        ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
        ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER;
        ct->proto.dccp.state = CT_DCCP_NONE;
+       ct->proto.dccp.last_pkt = DCCP_PKT_REQUEST;
+       ct->proto.dccp.last_dir = IP_CT_DIR_ORIGINAL;
+       ct->proto.dccp.handshake_seq = 0;
        return true;
 
 out_invalid:
index c6049c2d5ea8d9be54aa6f1d77accafa3b2eb920..6f4ee70f460b028b3b45c7e8f2adbea47a108cbd 100644 (file)
@@ -413,6 +413,7 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
            test_bit(SCTP_CID_COOKIE_ACK, map))
                return false;
 
+       memset(&ct->proto.sctp, 0, sizeof(ct->proto.sctp));
        new_state = SCTP_CONNTRACK_MAX;
        for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
                /* Don't need lock here: this conntrack not in circulation yet */
index 3fb2b73b24dc982629dca06073d82a60eb42d358..37bf94394be0d04a3285c0761e804225ef9c2e48 100644 (file)
@@ -227,11 +227,11 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
  *     sCL -> sIV
  */
 /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
-/*synack*/ { sIV, sSR, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
+/*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
 /*
  *     sSS -> sSR      Standard open.
  *     sS2 -> sSR      Simultaneous open
- *     sSR -> sSR      Retransmitted SYN/ACK.
+ *     sSR -> sIG      Retransmitted SYN/ACK, ignore it.
  *     sES -> sIG      Late retransmitted SYN/ACK?
  *     sFW -> sIG      Might be SYN/ACK answering ignored SYN
  *     sCW -> sIG
@@ -1066,9 +1066,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
        BUG_ON(th == NULL);
 
        /* Don't need lock here: this conntrack not in circulation yet */
-       new_state
-               = tcp_conntracks[0][get_conntrack_index(th)]
-               [TCP_CONNTRACK_NONE];
+       new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];
 
        /* Invalid: delete conntrack */
        if (new_state >= TCP_CONNTRACK_MAX) {
@@ -1077,6 +1075,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
        }
 
        if (new_state == TCP_CONNTRACK_SYN_SENT) {
+               memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
                /* SYN packet */
                ct->proto.tcp.seen[0].td_end =
                        segment_seq_plus_len(ntohl(th->seq), skb->len,
@@ -1088,11 +1087,11 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
                        ct->proto.tcp.seen[0].td_end;
 
                tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
-               ct->proto.tcp.seen[1].flags = 0;
        } else if (nf_ct_tcp_loose == 0) {
                /* Don't try to pick up connections. */
                return false;
        } else {
+               memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
                /*
                 * We are in the middle of a connection,
                 * its history is lost for us.
@@ -1107,7 +1106,6 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
                ct->proto.tcp.seen[0].td_maxend =
                        ct->proto.tcp.seen[0].td_end +
                        ct->proto.tcp.seen[0].td_maxwin;
-               ct->proto.tcp.seen[0].td_scale = 0;
 
                /* We assume SACK and liberal window checking to handle
                 * window scaling */
@@ -1116,13 +1114,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
                                              IP_CT_TCP_FLAG_BE_LIBERAL;
        }
 
-       ct->proto.tcp.seen[1].td_end = 0;
-       ct->proto.tcp.seen[1].td_maxend = 0;
-       ct->proto.tcp.seen[1].td_maxwin = 0;
-       ct->proto.tcp.seen[1].td_scale = 0;
-
        /* tcp_packet will set them */
-       ct->proto.tcp.state = TCP_CONNTRACK_NONE;
        ct->proto.tcp.last_index = TCP_NONE_SET;
 
        pr_debug("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i "
diff --git a/net/netfilter/nf_conntrack_snmp.c b/net/netfilter/nf_conntrack_snmp.c
new file mode 100644 (file)
index 0000000..6e545e2
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ *      SNMP service broadcast connection tracking helper
+ *
+ *      (c) 2011 Jiri Olsa <jolsa@redhat.com>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/in.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+
+#define SNMP_PORT      161
+
+MODULE_AUTHOR("Jiri Olsa <jolsa@redhat.com>");
+MODULE_DESCRIPTION("SNMP service broadcast connection tracking helper");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NFCT_HELPER("snmp");
+
+static unsigned int timeout __read_mostly = 30;
+module_param(timeout, uint, S_IRUSR);
+MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
+
+int (*nf_nat_snmp_hook)(struct sk_buff *skb,
+                       unsigned int protoff,
+                       struct nf_conn *ct,
+                       enum ip_conntrack_info ctinfo);
+EXPORT_SYMBOL_GPL(nf_nat_snmp_hook);
+
+static int snmp_conntrack_help(struct sk_buff *skb, unsigned int protoff,
+               struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+       typeof(nf_nat_snmp_hook) nf_nat_snmp;
+
+       nf_conntrack_broadcast_help(skb, protoff, ct, ctinfo, timeout);
+
+       nf_nat_snmp = rcu_dereference(nf_nat_snmp_hook);
+       if (nf_nat_snmp && ct->status & IPS_NAT_MASK)
+               return nf_nat_snmp(skb, protoff, ct, ctinfo);
+
+       return NF_ACCEPT;
+}
+
+static struct nf_conntrack_expect_policy exp_policy = {
+       .max_expected   = 1,
+};
+
+static struct nf_conntrack_helper helper __read_mostly = {
+       .name                   = "snmp",
+       .tuple.src.l3num        = NFPROTO_IPV4,
+       .tuple.src.u.udp.port   = cpu_to_be16(SNMP_PORT),
+       .tuple.dst.protonum     = IPPROTO_UDP,
+       .me                     = THIS_MODULE,
+       .help                   = snmp_conntrack_help,
+       .expect_policy          = &exp_policy,
+};
+
+static int __init nf_conntrack_snmp_init(void)
+{
+       exp_policy.timeout = timeout;
+       return nf_conntrack_helper_register(&helper);
+}
+
+static void __exit nf_conntrack_snmp_fini(void)
+{
+       nf_conntrack_helper_unregister(&helper);
+}
+
+module_init(nf_conntrack_snmp_init);
+module_exit(nf_conntrack_snmp_fini);
index b4d7f0f24b27e9534a97851e7830714c8135a7bf..0ae14282588115a8baee1bc92314a5e9727fd779 100644 (file)
@@ -29,6 +29,8 @@
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_acct.h>
 #include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <linux/rculist_nulls.h>
 
 MODULE_LICENSE("GPL");
 
@@ -45,6 +47,7 @@ EXPORT_SYMBOL_GPL(print_tuple);
 struct ct_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
+       u_int64_t time_now;
 };
 
 static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
@@ -56,7 +59,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
        for (st->bucket = 0;
             st->bucket < net->ct.htable_size;
             st->bucket++) {
-               n = rcu_dereference(net->ct.hash[st->bucket].first);
+               n = rcu_dereference(hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
                if (!is_a_nulls(n))
                        return n;
        }
@@ -69,13 +72,15 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
        struct net *net = seq_file_net(seq);
        struct ct_iter_state *st = seq->private;
 
-       head = rcu_dereference(head->next);
+       head = rcu_dereference(hlist_nulls_next_rcu(head));
        while (is_a_nulls(head)) {
                if (likely(get_nulls_value(head) == st->bucket)) {
                        if (++st->bucket >= net->ct.htable_size)
                                return NULL;
                }
-               head = rcu_dereference(net->ct.hash[st->bucket].first);
+               head = rcu_dereference(
+                               hlist_nulls_first_rcu(
+                                       &net->ct.hash[st->bucket]));
        }
        return head;
 }
@@ -93,6 +98,9 @@ static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
 static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
 {
+       struct ct_iter_state *st = seq->private;
+
+       st->time_now = ktime_to_ns(ktime_get_real());
        rcu_read_lock();
        return ct_get_idx(seq, *pos);
 }
@@ -132,6 +140,34 @@ static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
 }
 #endif
 
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+static int ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
+{
+       struct ct_iter_state *st = s->private;
+       struct nf_conn_tstamp *tstamp;
+       s64 delta_time;
+
+       tstamp = nf_conn_tstamp_find(ct);
+       if (tstamp) {
+               delta_time = st->time_now - tstamp->start;
+               if (delta_time > 0)
+                       delta_time = div_s64(delta_time, NSEC_PER_SEC);
+               else
+                       delta_time = 0;
+
+               return seq_printf(s, "delta-time=%llu ",
+                                 (unsigned long long)delta_time);
+       }
+       return 0;
+}
+#else
+static inline int
+ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
+{
+       return 0;
+}
+#endif
+
 /* return 0 on success, 1 in case of error */
 static int ct_seq_show(struct seq_file *s, void *v)
 {
@@ -200,6 +236,9 @@ static int ct_seq_show(struct seq_file *s, void *v)
                goto release;
 #endif
 
+       if (ct_show_delta_time(s, ct))
+               goto release;
+
        if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
                goto release;
 
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
new file mode 100644 (file)
index 0000000..af7dd31
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+ * (C) 2010 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation (or any later at your option).
+ */
+
+#include <linux/netfilter.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+
+static int nf_ct_tstamp __read_mostly;
+
+module_param_named(tstamp, nf_ct_tstamp, bool, 0644);
+MODULE_PARM_DESC(tstamp, "Enable connection tracking flow timestamping.");
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table tstamp_sysctl_table[] = {
+       {
+               .procname       = "nf_conntrack_timestamp",
+               .data           = &init_net.ct.sysctl_tstamp,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {}
+};
+#endif /* CONFIG_SYSCTL */
+
+static struct nf_ct_ext_type tstamp_extend __read_mostly = {
+       .len    = sizeof(struct nf_conn_tstamp),
+       .align  = __alignof__(struct nf_conn_tstamp),
+       .id     = NF_CT_EXT_TSTAMP,
+};
+
+#ifdef CONFIG_SYSCTL
+static int nf_conntrack_tstamp_init_sysctl(struct net *net)
+{
+       struct ctl_table *table;
+
+       table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
+                       GFP_KERNEL);
+       if (!table)
+               goto out;
+
+       table[0].data = &net->ct.sysctl_tstamp;
+
+       net->ct.tstamp_sysctl_header = register_net_sysctl_table(net,
+                       nf_net_netfilter_sysctl_path, table);
+       if (!net->ct.tstamp_sysctl_header) {
+               printk(KERN_ERR "nf_ct_tstamp: can't register to sysctl.\n");
+               goto out_register;
+       }
+       return 0;
+
+out_register:
+       kfree(table);
+out:
+       return -ENOMEM;
+}
+
+static void nf_conntrack_tstamp_fini_sysctl(struct net *net)
+{
+       struct ctl_table *table;
+
+       table = net->ct.tstamp_sysctl_header->ctl_table_arg;
+       unregister_net_sysctl_table(net->ct.tstamp_sysctl_header);
+       kfree(table);
+}
+#else
+static int nf_conntrack_tstamp_init_sysctl(struct net *net)
+{
+       return 0;
+}
+
+static void nf_conntrack_tstamp_fini_sysctl(struct net *net)
+{
+}
+#endif
+
+int nf_conntrack_tstamp_init(struct net *net)
+{
+       int ret;
+
+       net->ct.sysctl_tstamp = nf_ct_tstamp;
+
+       if (net_eq(net, &init_net)) {
+               ret = nf_ct_extend_register(&tstamp_extend);
+               if (ret < 0) {
+                       printk(KERN_ERR "nf_ct_tstamp: Unable to register "
+                                       "extension\n");
+                       goto out_extend_register;
+               }
+       }
+
+       ret = nf_conntrack_tstamp_init_sysctl(net);
+       if (ret < 0)
+               goto out_sysctl;
+
+       return 0;
+
+out_sysctl:
+       if (net_eq(net, &init_net))
+               nf_ct_extend_unregister(&tstamp_extend);
+out_extend_register:
+       return ret;
+}
+
+void nf_conntrack_tstamp_fini(struct net *net)
+{
+       nf_conntrack_tstamp_fini_sysctl(net);
+       if (net_eq(net, &init_net))
+               nf_ct_extend_unregister(&tstamp_extend);
+}
index 91816998ed86c065041acd185eb1a197672cadca..20714edf6cd2a16699100380b90f77f305898162 100644 (file)
@@ -165,7 +165,8 @@ static int seq_show(struct seq_file *s, void *v)
        struct nf_logger *t;
        int ret;
 
-       logger = nf_loggers[*pos];
+       logger = rcu_dereference_protected(nf_loggers[*pos],
+                                          lockdep_is_held(&nf_log_mutex));
 
        if (!logger)
                ret = seq_printf(s, "%2lld NONE (", *pos);
@@ -253,7 +254,8 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
                mutex_unlock(&nf_log_mutex);
        } else {
                mutex_lock(&nf_log_mutex);
-               logger = nf_loggers[tindex];
+               logger = rcu_dereference_protected(nf_loggers[tindex],
+                                                  lockdep_is_held(&nf_log_mutex));
                if (!logger)
                        table->data = "NONE";
                else
index 74aebed5bd28bb5c0c924cec7d908615b82ffdd6..5ab22e2bbd7de8814d5cc2f3d17f4fe9158b2f68 100644 (file)
@@ -27,14 +27,17 @@ static DEFINE_MUTEX(queue_handler_mutex);
 int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
 {
        int ret;
+       const struct nf_queue_handler *old;
 
        if (pf >= ARRAY_SIZE(queue_handler))
                return -EINVAL;
 
        mutex_lock(&queue_handler_mutex);
-       if (queue_handler[pf] == qh)
+       old = rcu_dereference_protected(queue_handler[pf],
+                                       lockdep_is_held(&queue_handler_mutex));
+       if (old == qh)
                ret = -EEXIST;
-       else if (queue_handler[pf])
+       else if (old)
                ret = -EBUSY;
        else {
                rcu_assign_pointer(queue_handler[pf], qh);
@@ -49,11 +52,15 @@ EXPORT_SYMBOL(nf_register_queue_handler);
 /* The caller must flush their queue before this */
 int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
 {
+       const struct nf_queue_handler *old;
+
        if (pf >= ARRAY_SIZE(queue_handler))
                return -EINVAL;
 
        mutex_lock(&queue_handler_mutex);
-       if (queue_handler[pf] && queue_handler[pf] != qh) {
+       old = rcu_dereference_protected(queue_handler[pf],
+                                       lockdep_is_held(&queue_handler_mutex));
+       if (old && old != qh) {
                mutex_unlock(&queue_handler_mutex);
                return -EINVAL;
        }
@@ -73,7 +80,10 @@ void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
 
        mutex_lock(&queue_handler_mutex);
        for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++)  {
-               if (queue_handler[pf] == qh)
+               if (rcu_dereference_protected(
+                               queue_handler[pf],
+                               lockdep_is_held(&queue_handler_mutex)
+                               ) == qh)
                        rcu_assign_pointer(queue_handler[pf], NULL);
        }
        mutex_unlock(&queue_handler_mutex);
@@ -115,7 +125,7 @@ static int __nf_queue(struct sk_buff *skb,
                      int (*okfn)(struct sk_buff *),
                      unsigned int queuenum)
 {
-       int status;
+       int status = -ENOENT;
        struct nf_queue_entry *entry = NULL;
 #ifdef CONFIG_BRIDGE_NETFILTER
        struct net_device *physindev;
@@ -128,16 +138,20 @@ static int __nf_queue(struct sk_buff *skb,
        rcu_read_lock();
 
        qh = rcu_dereference(queue_handler[pf]);
-       if (!qh)
+       if (!qh) {
+               status = -ESRCH;
                goto err_unlock;
+       }
 
        afinfo = nf_get_afinfo(pf);
        if (!afinfo)
                goto err_unlock;
 
        entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
-       if (!entry)
+       if (!entry) {
+               status = -ENOMEM;
                goto err_unlock;
+       }
 
        *entry = (struct nf_queue_entry) {
                .skb    = skb,
@@ -151,11 +165,9 @@ static int __nf_queue(struct sk_buff *skb,
 
        /* If it's going away, ignore hook. */
        if (!try_module_get(entry->elem->owner)) {
-               rcu_read_unlock();
-               kfree(entry);
-               return 0;
+               status = -ECANCELED;
+               goto err_unlock;
        }
-
        /* Bump dev refs so they don't vanish while packet is out */
        if (indev)
                dev_hold(indev);
@@ -182,14 +194,13 @@ static int __nf_queue(struct sk_buff *skb,
                goto err;
        }
 
-       return 1;
+       return 0;
 
 err_unlock:
        rcu_read_unlock();
 err:
-       kfree_skb(skb);
        kfree(entry);
-       return 1;
+       return status;
 }
 
 int nf_queue(struct sk_buff *skb,
@@ -201,6 +212,8 @@ int nf_queue(struct sk_buff *skb,
             unsigned int queuenum)
 {
        struct sk_buff *segs;
+       int err;
+       unsigned int queued;
 
        if (!skb_is_gso(skb))
                return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
@@ -216,20 +229,35 @@ int nf_queue(struct sk_buff *skb,
        }
 
        segs = skb_gso_segment(skb, 0);
-       kfree_skb(skb);
+       /* Does not use PTR_ERR to limit the number of error codes that can be
+        * returned by nf_queue.  For instance, callers rely on -ECANCELED to mean
+        * 'ignore this hook'.
+        */
        if (IS_ERR(segs))
-               return 1;
+               return -EINVAL;
 
+       queued = 0;
+       err = 0;
        do {
                struct sk_buff *nskb = segs->next;
 
                segs->next = NULL;
-               if (!__nf_queue(segs, elem, pf, hook, indev, outdev, okfn,
-                               queuenum))
+               if (err == 0)
+                       err = __nf_queue(segs, elem, pf, hook, indev,
+                                          outdev, okfn, queuenum);
+               if (err == 0)
+                       queued++;
+               else
                        kfree_skb(segs);
                segs = nskb;
        } while (segs);
-       return 1;
+
+       /* also free orig skb if only some segments were queued */
+       if (unlikely(err && queued))
+               err = 0;
+       if (err == 0)
+               kfree_skb(skb);
+       return err;
 }
 
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
@@ -237,6 +265,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
        struct sk_buff *skb = entry->skb;
        struct list_head *elem = &entry->elem->list;
        const struct nf_afinfo *afinfo;
+       int err;
 
        rcu_read_lock();
 
@@ -270,10 +299,17 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
                local_bh_enable();
                break;
        case NF_QUEUE:
-               if (!__nf_queue(skb, elem, entry->pf, entry->hook,
-                               entry->indev, entry->outdev, entry->okfn,
-                               verdict >> NF_VERDICT_BITS))
-                       goto next_hook;
+               err = __nf_queue(skb, elem, entry->pf, entry->hook,
+                                entry->indev, entry->outdev, entry->okfn,
+                                verdict >> NF_VERDICT_QBITS);
+               if (err < 0) {
+                       if (err == -ECANCELED)
+                               goto next_hook;
+                       if (err == -ESRCH &&
+                          (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
+                               goto next_hook;
+                       kfree_skb(skb);
+               }
                break;
        case NF_STOLEN:
        default:
index 6a1572b0ab416a65425abc135cca9d93a4769911..985e9b76c9162f253f839401a40b1f1eb51dc364 100644 (file)
@@ -376,7 +376,6 @@ __build_packet_message(struct nfulnl_instance *inst,
                        unsigned int hooknum,
                        const struct net_device *indev,
                        const struct net_device *outdev,
-                       const struct nf_loginfo *li,
                        const char *prefix, unsigned int plen)
 {
        struct nfulnl_msg_packet_hdr pmsg;
@@ -652,7 +651,7 @@ nfulnl_log_packet(u_int8_t pf,
        inst->qlen++;
 
        __build_packet_message(inst, skb, data_len, pf,
-                               hooknum, in, out, li, prefix, plen);
+                               hooknum, in, out, prefix, plen);
 
        if (inst->qlen >= qthreshold)
                __nfulnl_flush(inst);
@@ -874,19 +873,19 @@ static struct hlist_node *get_first(struct iter_state *st)
 
        for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
                if (!hlist_empty(&instance_table[st->bucket]))
-                       return rcu_dereference_bh(instance_table[st->bucket].first);
+                       return rcu_dereference_bh(hlist_first_rcu(&instance_table[st->bucket]));
        }
        return NULL;
 }
 
 static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h)
 {
-       h = rcu_dereference_bh(h->next);
+       h = rcu_dereference_bh(hlist_next_rcu(h));
        while (!h) {
                if (++st->bucket >= INSTANCE_BUCKETS)
                        return NULL;
 
-               h = rcu_dereference_bh(instance_table[st->bucket].first);
+               h = rcu_dereference_bh(hlist_first_rcu(&instance_table[st->bucket]));
        }
        return h;
 }
index 68e67d19724d83f844c25a5aa0f603156421f57e..b83123f12b42e30612481796bc6c9a4d3bacab60 100644 (file)
@@ -387,25 +387,31 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 {
        struct sk_buff *nskb;
        struct nfqnl_instance *queue;
-       int err;
+       int err = -ENOBUFS;
 
        /* rcu_read_lock()ed by nf_hook_slow() */
        queue = instance_lookup(queuenum);
-       if (!queue)
+       if (!queue) {
+               err = -ESRCH;
                goto err_out;
+       }
 
-       if (queue->copy_mode == NFQNL_COPY_NONE)
+       if (queue->copy_mode == NFQNL_COPY_NONE) {
+               err = -EINVAL;
                goto err_out;
+       }
 
        nskb = nfqnl_build_packet_message(queue, entry);
-       if (nskb == NULL)
+       if (nskb == NULL) {
+               err = -ENOMEM;
                goto err_out;
-
+       }
        spin_lock_bh(&queue->lock);
 
-       if (!queue->peer_pid)
+       if (!queue->peer_pid) {
+               err = -EINVAL;
                goto err_out_free_nskb;
-
+       }
        if (queue->queue_total >= queue->queue_maxlen) {
                queue->queue_dropped++;
                if (net_ratelimit())
@@ -432,7 +438,7 @@ err_out_free_nskb:
 err_out_unlock:
        spin_unlock_bh(&queue->lock);
 err_out:
-       return -1;
+       return err;
 }
 
 static int
index c94237631077242a624bcab29326dd548f7df4ee..0a77d2ff21543937ba00d6ee09230c9124f7ed5b 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/mutex.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/audit.h>
 #include <net/net_namespace.h>
 
 #include <linux/netfilter/x_tables.h>
@@ -38,9 +39,8 @@ MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
 
 struct compat_delta {
-       struct compat_delta *next;
-       unsigned int offset;
-       int delta;
+       unsigned int offset; /* offset in kernel */
+       int delta; /* delta in 32bit user land */
 };
 
 struct xt_af {
@@ -49,7 +49,9 @@ struct xt_af {
        struct list_head target;
 #ifdef CONFIG_COMPAT
        struct mutex compat_mutex;
-       struct compat_delta *compat_offsets;
+       struct compat_delta *compat_tab;
+       unsigned int number; /* number of slots in compat_tab[] */
+       unsigned int cur; /* number of used slots in compat_tab[] */
 #endif
 };
 
@@ -414,54 +416,67 @@ int xt_check_match(struct xt_mtchk_param *par,
 EXPORT_SYMBOL_GPL(xt_check_match);
 
 #ifdef CONFIG_COMPAT
-int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta)
+int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
 {
-       struct compat_delta *tmp;
+       struct xt_af *xp = &xt[af];
 
-       tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
-       if (!tmp)
-               return -ENOMEM;
+       if (!xp->compat_tab) {
+               if (!xp->number)
+                       return -EINVAL;
+               xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
+               if (!xp->compat_tab)
+                       return -ENOMEM;
+               xp->cur = 0;
+       }
 
-       tmp->offset = offset;
-       tmp->delta = delta;
+       if (xp->cur >= xp->number)
+               return -EINVAL;
 
-       if (xt[af].compat_offsets) {
-               tmp->next = xt[af].compat_offsets->next;
-               xt[af].compat_offsets->next = tmp;
-       } else {
-               xt[af].compat_offsets = tmp;
-               tmp->next = NULL;
-       }
+       if (xp->cur)
+               delta += xp->compat_tab[xp->cur - 1].delta;
+       xp->compat_tab[xp->cur].offset = offset;
+       xp->compat_tab[xp->cur].delta = delta;
+       xp->cur++;
        return 0;
 }
 EXPORT_SYMBOL_GPL(xt_compat_add_offset);
 
 void xt_compat_flush_offsets(u_int8_t af)
 {
-       struct compat_delta *tmp, *next;
-
-       if (xt[af].compat_offsets) {
-               for (tmp = xt[af].compat_offsets; tmp; tmp = next) {
-                       next = tmp->next;
-                       kfree(tmp);
-               }
-               xt[af].compat_offsets = NULL;
+       if (xt[af].compat_tab) {
+               vfree(xt[af].compat_tab);
+               xt[af].compat_tab = NULL;
+               xt[af].number = 0;
        }
 }
 EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
 
 int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
 {
-       struct compat_delta *tmp;
-       int delta;
-
-       for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next)
-               if (tmp->offset < offset)
-                       delta += tmp->delta;
-       return delta;
+       struct compat_delta *tmp = xt[af].compat_tab;
+       int mid, left = 0, right = xt[af].cur - 1;
+
+       while (left <= right) {
+               mid = (left + right) >> 1;
+               if (offset > tmp[mid].offset)
+                       left = mid + 1;
+               else if (offset < tmp[mid].offset)
+                       right = mid - 1;
+               else
+                       return mid ? tmp[mid - 1].delta : 0;
+       }
+       WARN_ON_ONCE(1);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
 
+void xt_compat_init_offsets(u_int8_t af, unsigned int number)
+{
+       xt[af].number = number;
+       xt[af].cur = 0;
+}
+EXPORT_SYMBOL(xt_compat_init_offsets);
+
 int xt_compat_match_offset(const struct xt_match *match)
 {
        u_int16_t csize = match->compatsize ? : match->matchsize;
@@ -820,6 +835,21 @@ xt_replace_table(struct xt_table *table,
         */
        local_bh_enable();
 
+#ifdef CONFIG_AUDIT
+       if (audit_enabled) {
+               struct audit_buffer *ab;
+
+               ab = audit_log_start(current->audit_context, GFP_KERNEL,
+                                    AUDIT_NETFILTER_CFG);
+               if (ab) {
+                       audit_log_format(ab, "table=%s family=%u entries=%u",
+                                        table->name, table->af,
+                                        private->number);
+                       audit_log_end(ab);
+               }
+       }
+#endif
+
        return private;
 }
 EXPORT_SYMBOL_GPL(xt_replace_table);
@@ -1338,7 +1368,7 @@ static int __init xt_init(void)
                mutex_init(&xt[i].mutex);
 #ifdef CONFIG_COMPAT
                mutex_init(&xt[i].compat_mutex);
-               xt[i].compat_offsets = NULL;
+               xt[i].compat_tab = NULL;
 #endif
                INIT_LIST_HEAD(&xt[i].target);
                INIT_LIST_HEAD(&xt[i].match);
diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c
new file mode 100644 (file)
index 0000000..81802d2
--- /dev/null
@@ -0,0 +1,204 @@
+/*
+ * Creates audit record for dropped/accepted packets
+ *
+ * (C) 2010-2011 Thomas Graf <tgraf@redhat.com>
+ * (C) 2010-2011 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/audit.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_AUDIT.h>
+#include <net/ipv6.h>
+#include <net/ip.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thomas Graf <tgraf@redhat.com>");
+MODULE_DESCRIPTION("Xtables: creates audit records for dropped/accepted packets");
+MODULE_ALIAS("ipt_AUDIT");
+MODULE_ALIAS("ip6t_AUDIT");
+MODULE_ALIAS("ebt_AUDIT");
+MODULE_ALIAS("arpt_AUDIT");
+
+static void audit_proto(struct audit_buffer *ab, struct sk_buff *skb,
+                       unsigned int proto, unsigned int offset)
+{
+       switch (proto) {
+       case IPPROTO_TCP:
+       case IPPROTO_UDP:
+       case IPPROTO_UDPLITE: {
+               const __be16 *pptr;
+               __be16 _ports[2];
+
+               pptr = skb_header_pointer(skb, offset, sizeof(_ports), _ports);
+               if (pptr == NULL) {
+                       audit_log_format(ab, " truncated=1");
+                       return;
+               }
+
+               audit_log_format(ab, " sport=%hu dport=%hu",
+                                ntohs(pptr[0]), ntohs(pptr[1]));
+               }
+               break;
+
+       case IPPROTO_ICMP:
+       case IPPROTO_ICMPV6: {
+               const u8 *iptr;
+               u8 _ih[2];
+
+               iptr = skb_header_pointer(skb, offset, sizeof(_ih), &_ih);
+               if (iptr == NULL) {
+                       audit_log_format(ab, " truncated=1");
+                       return;
+               }
+
+               audit_log_format(ab, " icmptype=%hhu icmpcode=%hhu",
+                                iptr[0], iptr[1]);
+
+               }
+               break;
+       }
+}
+
+static void audit_ip4(struct audit_buffer *ab, struct sk_buff *skb)
+{
+       struct iphdr _iph;
+       const struct iphdr *ih;
+
+       ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
+       if (!ih) {
+               audit_log_format(ab, " truncated=1");
+               return;
+       }
+
+       audit_log_format(ab, " saddr=%pI4 daddr=%pI4 ipid=%hu proto=%hhu",
+               &ih->saddr, &ih->daddr, ntohs(ih->id), ih->protocol);
+
+       if (ntohs(ih->frag_off) & IP_OFFSET) {
+               audit_log_format(ab, " frag=1");
+               return;
+       }
+
+       audit_proto(ab, skb, ih->protocol, ih->ihl * 4);
+}
+
+static void audit_ip6(struct audit_buffer *ab, struct sk_buff *skb)
+{
+       struct ipv6hdr _ip6h;
+       const struct ipv6hdr *ih;
+       u8 nexthdr;
+       int offset;
+
+       ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h);
+       if (!ih) {
+               audit_log_format(ab, " truncated=1");
+               return;
+       }
+
+       nexthdr = ih->nexthdr;
+       offset = ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h),
+                                 &nexthdr);
+
+       audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu",
+                        &ih->saddr, &ih->daddr, nexthdr);
+
+       if (offset)
+               audit_proto(ab, skb, nexthdr, offset);
+}
+
+static unsigned int
+audit_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct xt_audit_info *info = par->targinfo;
+       struct audit_buffer *ab;
+
+       ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT);
+       if (ab == NULL)
+               goto errout;
+
+       audit_log_format(ab, "action=%hhu hook=%u len=%u inif=%s outif=%s",
+                        info->type, par->hooknum, skb->len,
+                        par->in ? par->in->name : "?",
+                        par->out ? par->out->name : "?");
+
+       if (skb->mark)
+               audit_log_format(ab, " mark=%#x", skb->mark);
+
+       if (skb->dev && skb->dev->type == ARPHRD_ETHER) {
+               audit_log_format(ab, " smac=%pM dmac=%pM macproto=0x%04x",
+                                eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+                                ntohs(eth_hdr(skb)->h_proto));
+
+               if (par->family == NFPROTO_BRIDGE) {
+                       switch (eth_hdr(skb)->h_proto) {
+                       case __constant_htons(ETH_P_IP):
+                               audit_ip4(ab, skb);
+                               break;
+
+                       case __constant_htons(ETH_P_IPV6):
+                               audit_ip6(ab, skb);
+                               break;
+                       }
+               }
+       }
+
+       switch (par->family) {
+       case NFPROTO_IPV4:
+               audit_ip4(ab, skb);
+               break;
+
+       case NFPROTO_IPV6:
+               audit_ip6(ab, skb);
+               break;
+       }
+
+       audit_log_end(ab);
+
+errout:
+       return XT_CONTINUE;
+}
+
+static int audit_tg_check(const struct xt_tgchk_param *par)
+{
+       const struct xt_audit_info *info = par->targinfo;
+
+       if (info->type > XT_AUDIT_TYPE_MAX) {
+               pr_info("Audit type out of range (valid range: 0..%hhu)\n",
+                       XT_AUDIT_TYPE_MAX);
+               return -ERANGE;
+       }
+
+       return 0;
+}
+
+static struct xt_target audit_tg_reg __read_mostly = {
+       .name           = "AUDIT",
+       .family         = NFPROTO_UNSPEC,
+       .target         = audit_tg,
+       .targetsize     = sizeof(struct xt_audit_info),
+       .checkentry     = audit_tg_check,
+       .me             = THIS_MODULE,
+};
+
+static int __init audit_tg_init(void)
+{
+       return xt_register_target(&audit_tg_reg);
+}
+
+static void __exit audit_tg_exit(void)
+{
+       xt_unregister_target(&audit_tg_reg);
+}
+
+module_init(audit_tg_init);
+module_exit(audit_tg_exit);
index c2c0e4abeb996fe689ebac8e964a9aa11163ce9f..af9c4dadf8165922af9ee23b02abc48047496a87 100644 (file)
 #include <linux/netfilter_ipv6.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_CLASSIFY.h>
+#include <linux/netfilter_arp.h>
 
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Xtables: Qdisc classification");
 MODULE_ALIAS("ipt_CLASSIFY");
 MODULE_ALIAS("ip6t_CLASSIFY");
+MODULE_ALIAS("arpt_CLASSIFY");
 
 static unsigned int
 classify_tg(struct sk_buff *skb, const struct xt_action_param *par)
@@ -35,26 +37,36 @@ classify_tg(struct sk_buff *skb, const struct xt_action_param *par)
        return XT_CONTINUE;
 }
 
-static struct xt_target classify_tg_reg __read_mostly = {
-       .name       = "CLASSIFY",
-       .revision   = 0,
-       .family     = NFPROTO_UNSPEC,
-       .table      = "mangle",
-       .hooks      = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD) |
-                     (1 << NF_INET_POST_ROUTING),
-       .target     = classify_tg,
-       .targetsize = sizeof(struct xt_classify_target_info),
-       .me         = THIS_MODULE,
+static struct xt_target classify_tg_reg[] __read_mostly = {
+       {
+               .name       = "CLASSIFY",
+               .revision   = 0,
+               .family     = NFPROTO_UNSPEC,
+               .hooks      = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD) |
+                             (1 << NF_INET_POST_ROUTING),
+               .target     = classify_tg,
+               .targetsize = sizeof(struct xt_classify_target_info),
+               .me         = THIS_MODULE,
+       },
+       {
+               .name       = "CLASSIFY",
+               .revision   = 0,
+               .family     = NFPROTO_ARP,
+               .hooks      = (1 << NF_ARP_OUT) | (1 << NF_ARP_FORWARD),
+               .target     = classify_tg,
+               .targetsize = sizeof(struct xt_classify_target_info),
+               .me         = THIS_MODULE,
+       },
 };
 
 static int __init classify_tg_init(void)
 {
-       return xt_register_target(&classify_tg_reg);
+       return xt_register_targets(classify_tg_reg, ARRAY_SIZE(classify_tg_reg));
 }
 
 static void __exit classify_tg_exit(void)
 {
-       xt_unregister_target(&classify_tg_reg);
+       xt_unregister_targets(classify_tg_reg, ARRAY_SIZE(classify_tg_reg));
 }
 
 module_init(classify_tg_init);
index be1f22e13545055601a7feca73a6085361216d82..3bdd443aaf154d7946abdd802bd080df4e2c8fad 100644 (file)
@@ -313,3 +313,5 @@ MODULE_AUTHOR("Timo Teras <ext-timo.teras@nokia.com>");
 MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
 MODULE_DESCRIPTION("Xtables: idle time monitor");
 MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ipt_IDLETIMER");
+MODULE_ALIAS("ip6t_IDLETIMER");
index a4140509eea1f3e821bd6ffb56b5b747dfd8c9fc..993de2ba89d33bb3fa532ded0952bfb83de63836 100644 (file)
@@ -31,6 +31,8 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Adam Nielsen <a.nielsen@shikadi.net>");
 MODULE_DESCRIPTION("Xtables: trigger LED devices on packet match");
+MODULE_ALIAS("ipt_LED");
+MODULE_ALIAS("ip6t_LED");
 
 static LIST_HEAD(xt_led_triggers);
 static DEFINE_MUTEX(xt_led_mutex);
index 039cce1bde3dc60ccdbb3e6857bb2685dc123079..d4f4b5d66b2075cb816421387a2e0f6f78bc01f8 100644 (file)
@@ -72,18 +72,31 @@ nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
 
        if (info->queues_total > 1) {
                if (par->family == NFPROTO_IPV4)
-                       queue = hash_v4(skb) % info->queues_total + queue;
+                       queue = (((u64) hash_v4(skb) * info->queues_total) >>
+                                32) + queue;
 #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
                else if (par->family == NFPROTO_IPV6)
-                       queue = hash_v6(skb) % info->queues_total + queue;
+                       queue = (((u64) hash_v6(skb) * info->queues_total) >>
+                                32) + queue;
 #endif
        }
        return NF_QUEUE_NR(queue);
 }
 
-static int nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
+static unsigned int
+nfqueue_tg_v2(struct sk_buff *skb, const struct xt_action_param *par)
 {
-       const struct xt_NFQ_info_v1 *info = par->targinfo;
+       const struct xt_NFQ_info_v2 *info = par->targinfo;
+       unsigned int ret = nfqueue_tg_v1(skb, par);
+
+       if (info->bypass)
+               ret |= NF_VERDICT_FLAG_QUEUE_BYPASS;
+       return ret;
+}
+
+static int nfqueue_tg_check(const struct xt_tgchk_param *par)
+{
+       const struct xt_NFQ_info_v2 *info = par->targinfo;
        u32 maxid;
 
        if (unlikely(!rnd_inited)) {
@@ -100,6 +113,8 @@ static int nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
                       info->queues_total, maxid);
                return -ERANGE;
        }
+       if (par->target->revision == 2 && info->bypass > 1)
+               return -EINVAL;
        return 0;
 }
 
@@ -115,11 +130,20 @@ static struct xt_target nfqueue_tg_reg[] __read_mostly = {
                .name           = "NFQUEUE",
                .revision       = 1,
                .family         = NFPROTO_UNSPEC,
-               .checkentry     = nfqueue_tg_v1_check,
+               .checkentry     = nfqueue_tg_check,
                .target         = nfqueue_tg_v1,
                .targetsize     = sizeof(struct xt_NFQ_info_v1),
                .me             = THIS_MODULE,
        },
+       {
+               .name           = "NFQUEUE",
+               .revision       = 2,
+               .family         = NFPROTO_UNSPEC,
+               .checkentry     = nfqueue_tg_check,
+               .target         = nfqueue_tg_v2,
+               .targetsize     = sizeof(struct xt_NFQ_info_v2),
+               .me             = THIS_MODULE,
+       },
 };
 
 static int __init nfqueue_tg_init(void)
index 5128a6c4cb2cd9011b87131ad334e762d1dc7d2f..624725b5286f10ee7c05af2529ea149010a64d60 100644 (file)
@@ -73,7 +73,8 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
        fl.fl4_dst = info->gw.ip;
        fl.fl4_tos = RT_TOS(iph->tos);
        fl.fl4_scope = RT_SCOPE_UNIVERSE;
-       if (ip_route_output_key(net, &rt, &fl) != 0)
+       rt = ip_route_output_key(net, &fl);
+       if (IS_ERR(rt))
                return false;
 
        skb_dst_drop(skb);
index 5c5b6b921b845b2fe1ba438ef9c13a2161e5b0ea..e029c4807404f3a4c5644cff7e9f8ef2618d1e7f 100644 (file)
@@ -185,18 +185,24 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
        int connections;
 
        ct = nf_ct_get(skb, &ctinfo);
-       if (ct != NULL)
-               tuple_ptr = &ct->tuplehash[0].tuple;
-       else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
-                                   par->family, &tuple))
+       if (ct != NULL) {
+               if (info->flags & XT_CONNLIMIT_DADDR)
+                       tuple_ptr = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+               else
+                       tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+       } else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+                                   par->family, &tuple)) {
                goto hotdrop;
+       }
 
        if (par->family == NFPROTO_IPV6) {
                const struct ipv6hdr *iph = ipv6_hdr(skb);
-               memcpy(&addr.ip6, &iph->saddr, sizeof(iph->saddr));
+               memcpy(&addr.ip6, (info->flags & XT_CONNLIMIT_DADDR) ?
+                      &iph->daddr : &iph->saddr, sizeof(addr.ip6));
        } else {
                const struct iphdr *iph = ip_hdr(skb);
-               addr.ip = iph->saddr;
+               addr.ip = (info->flags & XT_CONNLIMIT_DADDR) ?
+                         iph->daddr : iph->saddr;
        }
 
        spin_lock_bh(&info->data->lock);
@@ -204,13 +210,12 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
                                 &info->mask, par->family);
        spin_unlock_bh(&info->data->lock);
 
-       if (connections < 0) {
+       if (connections < 0)
                /* kmalloc failed, drop it entirely */
-               par->hotdrop = true;
-               return false;
-       }
+               goto hotdrop;
 
-       return (connections > info->limit) ^ info->inverse;
+       return (connections > info->limit) ^
+              !!(info->flags & XT_CONNLIMIT_INVERT);
 
  hotdrop:
        par->hotdrop = true;
@@ -268,25 +273,38 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
        kfree(info->data);
 }
 
-static struct xt_match connlimit_mt_reg __read_mostly = {
-       .name       = "connlimit",
-       .revision   = 0,
-       .family     = NFPROTO_UNSPEC,
-       .checkentry = connlimit_mt_check,
-       .match      = connlimit_mt,
-       .matchsize  = sizeof(struct xt_connlimit_info),
-       .destroy    = connlimit_mt_destroy,
-       .me         = THIS_MODULE,
+static struct xt_match connlimit_mt_reg[] __read_mostly = {
+       {
+               .name       = "connlimit",
+               .revision   = 0,
+               .family     = NFPROTO_UNSPEC,
+               .checkentry = connlimit_mt_check,
+               .match      = connlimit_mt,
+               .matchsize  = sizeof(struct xt_connlimit_info),
+               .destroy    = connlimit_mt_destroy,
+               .me         = THIS_MODULE,
+       },
+       {
+               .name       = "connlimit",
+               .revision   = 1,
+               .family     = NFPROTO_UNSPEC,
+               .checkentry = connlimit_mt_check,
+               .match      = connlimit_mt,
+               .matchsize  = sizeof(struct xt_connlimit_info),
+               .destroy    = connlimit_mt_destroy,
+               .me         = THIS_MODULE,
+       },
 };
 
 static int __init connlimit_mt_init(void)
 {
-       return xt_register_match(&connlimit_mt_reg);
+       return xt_register_matches(connlimit_mt_reg,
+              ARRAY_SIZE(connlimit_mt_reg));
 }
 
 static void __exit connlimit_mt_exit(void)
 {
-       xt_unregister_match(&connlimit_mt_reg);
+       xt_unregister_matches(connlimit_mt_reg, ARRAY_SIZE(connlimit_mt_reg));
 }
 
 module_init(connlimit_mt_init);
index e536710ad916246a4f558eaad02092d4c81168df..2c0086a4751e3a846d517236869d52a5ae6f548a 100644 (file)
@@ -112,6 +112,54 @@ ct_proto_port_check(const struct xt_conntrack_mtinfo2 *info,
        return true;
 }
 
+static inline bool
+port_match(u16 min, u16 max, u16 port, bool invert)
+{
+       return (port >= min && port <= max) ^ invert;
+}
+
+static inline bool
+ct_proto_port_check_v3(const struct xt_conntrack_mtinfo3 *info,
+                      const struct nf_conn *ct)
+{
+       const struct nf_conntrack_tuple *tuple;
+
+       tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+       if ((info->match_flags & XT_CONNTRACK_PROTO) &&
+           (nf_ct_protonum(ct) == info->l4proto) ^
+           !(info->invert_flags & XT_CONNTRACK_PROTO))
+               return false;
+
+       /* Shortcut to match all recognized protocols by using ->src.all. */
+       if ((info->match_flags & XT_CONNTRACK_ORIGSRC_PORT) &&
+           !port_match(info->origsrc_port, info->origsrc_port_high,
+                       ntohs(tuple->src.u.all),
+                       info->invert_flags & XT_CONNTRACK_ORIGSRC_PORT))
+               return false;
+
+       if ((info->match_flags & XT_CONNTRACK_ORIGDST_PORT) &&
+           !port_match(info->origdst_port, info->origdst_port_high,
+                       ntohs(tuple->dst.u.all),
+                       info->invert_flags & XT_CONNTRACK_ORIGDST_PORT))
+               return false;
+
+       tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+       if ((info->match_flags & XT_CONNTRACK_REPLSRC_PORT) &&
+           !port_match(info->replsrc_port, info->replsrc_port_high,
+                       ntohs(tuple->src.u.all),
+                       info->invert_flags & XT_CONNTRACK_REPLSRC_PORT))
+               return false;
+
+       if ((info->match_flags & XT_CONNTRACK_REPLDST_PORT) &&
+           !port_match(info->repldst_port, info->repldst_port_high,
+                       ntohs(tuple->dst.u.all),
+                       info->invert_flags & XT_CONNTRACK_REPLDST_PORT))
+               return false;
+
+       return true;
+}
+
 static bool
 conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
              u16 state_mask, u16 status_mask)
@@ -170,8 +218,13 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
                    !(info->invert_flags & XT_CONNTRACK_REPLDST))
                        return false;
 
-       if (!ct_proto_port_check(info, ct))
-               return false;
+       if (par->match->revision != 3) {
+               if (!ct_proto_port_check(info, ct))
+                       return false;
+       } else {
+               if (!ct_proto_port_check_v3(par->matchinfo, ct))
+                       return false;
+       }
 
        if ((info->match_flags & XT_CONNTRACK_STATUS) &&
            (!!(status_mask & ct->status) ^
@@ -207,10 +260,23 @@ conntrack_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
        return conntrack_mt(skb, par, info->state_mask, info->status_mask);
 }
 
+static bool
+conntrack_mt_v3(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_conntrack_mtinfo3 *info = par->matchinfo;
+
+       return conntrack_mt(skb, par, info->state_mask, info->status_mask);
+}
+
 static int conntrack_mt_check(const struct xt_mtchk_param *par)
 {
        int ret;
 
+       if (strcmp(par->table, "raw") == 0) {
+               pr_info("state is undetermined at the time of raw table\n");
+               return -EINVAL;
+       }
+
        ret = nf_ct_l3proto_try_module_get(par->family);
        if (ret < 0)
                pr_info("cannot load conntrack support for proto=%u\n",
@@ -244,6 +310,16 @@ static struct xt_match conntrack_mt_reg[] __read_mostly = {
                .destroy    = conntrack_mt_destroy,
                .me         = THIS_MODULE,
        },
+       {
+               .name       = "conntrack",
+               .revision   = 3,
+               .family     = NFPROTO_UNSPEC,
+               .matchsize  = sizeof(struct xt_conntrack_mtinfo3),
+               .match      = conntrack_mt_v3,
+               .checkentry = conntrack_mt_check,
+               .destroy    = conntrack_mt_destroy,
+               .me         = THIS_MODULE,
+       },
 };
 
 static int __init conntrack_mt_init(void)
index b39db8a5cbae8bb36618880effef34aaff1dc606..c7a2e5466bc47013887a40684b19e189edc5e994 100644 (file)
@@ -22,6 +22,8 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Eric Dumazet <eric.dumazet@gmail.com>");
 MODULE_DESCRIPTION("Xtables: CPU match");
+MODULE_ALIAS("ipt_cpu");
+MODULE_ALIAS("ip6t_cpu");
 
 static int cpu_mt_check(const struct xt_mtchk_param *par)
 {
diff --git a/net/netfilter/xt_devgroup.c b/net/netfilter/xt_devgroup.c
new file mode 100644 (file)
index 0000000..d9202cd
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include <linux/netfilter/xt_devgroup.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xtables: Device group match");
+MODULE_ALIAS("ipt_devgroup");
+MODULE_ALIAS("ip6t_devgroup");
+
+static bool devgroup_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_devgroup_info *info = par->matchinfo;
+
+       if (info->flags & XT_DEVGROUP_MATCH_SRC &&
+           (((info->src_group ^ par->in->group) & info->src_mask ? 1 : 0) ^
+            ((info->flags & XT_DEVGROUP_INVERT_SRC) ? 1 : 0)))
+               return false;
+
+       if (info->flags & XT_DEVGROUP_MATCH_DST &&
+           (((info->dst_group ^ par->out->group) & info->dst_mask ? 1 : 0) ^
+            ((info->flags & XT_DEVGROUP_INVERT_DST) ? 1 : 0)))
+               return false;
+
+       return true;
+}
+
+static int devgroup_mt_checkentry(const struct xt_mtchk_param *par)
+{
+       const struct xt_devgroup_info *info = par->matchinfo;
+
+       if (info->flags & ~(XT_DEVGROUP_MATCH_SRC | XT_DEVGROUP_INVERT_SRC |
+                           XT_DEVGROUP_MATCH_DST | XT_DEVGROUP_INVERT_DST))
+               return -EINVAL;
+
+       if (info->flags & XT_DEVGROUP_MATCH_SRC &&
+           par->hook_mask & ~((1 << NF_INET_PRE_ROUTING) |
+                              (1 << NF_INET_LOCAL_IN) |
+                              (1 << NF_INET_FORWARD)))
+               return -EINVAL;
+
+       if (info->flags & XT_DEVGROUP_MATCH_DST &&
+           par->hook_mask & ~((1 << NF_INET_FORWARD) |
+                              (1 << NF_INET_LOCAL_OUT) |
+                              (1 << NF_INET_POST_ROUTING)))
+               return -EINVAL;
+
+       return 0;
+}
+
+static struct xt_match devgroup_mt_reg __read_mostly = {
+       .name           = "devgroup",
+       .match          = devgroup_mt,
+       .checkentry     = devgroup_mt_checkentry,
+       .matchsize      = sizeof(struct xt_devgroup_info),
+       .family         = NFPROTO_UNSPEC,
+       .me             = THIS_MODULE
+};
+
+static int __init devgroup_mt_init(void)
+{
+       return xt_register_match(&devgroup_mt_reg);
+}
+
+static void __exit devgroup_mt_exit(void)
+{
+       xt_unregister_match(&devgroup_mt_reg);
+}
+
+module_init(devgroup_mt_init);
+module_exit(devgroup_mt_exit);
index 73c33a42f87f8fddbe901b7403a9041182842f54..b46626cddd933022d796c2f4e0f54ddd2c1ae8e1 100644 (file)
@@ -31,7 +31,7 @@ iprange_mt4(const struct sk_buff *skb, struct xt_action_param *par)
                        pr_debug("src IP %pI4 NOT in range %s%pI4-%pI4\n",
                                 &iph->saddr,
                                 (info->flags & IPRANGE_SRC_INV) ? "(INV) " : "",
-                                &info->src_max.ip,
+                                &info->src_min.ip,
                                 &info->src_max.ip);
                        return false;
                }
@@ -76,15 +76,27 @@ iprange_mt6(const struct sk_buff *skb, struct xt_action_param *par)
                m  = iprange_ipv6_lt(&iph->saddr, &info->src_min.in6);
                m |= iprange_ipv6_lt(&info->src_max.in6, &iph->saddr);
                m ^= !!(info->flags & IPRANGE_SRC_INV);
-               if (m)
+               if (m) {
+                       pr_debug("src IP %pI6 NOT in range %s%pI6-%pI6\n",
+                                &iph->saddr,
+                                (info->flags & IPRANGE_SRC_INV) ? "(INV) " : "",
+                                &info->src_min.in6,
+                                &info->src_max.in6);
                        return false;
+               }
        }
        if (info->flags & IPRANGE_DST) {
                m  = iprange_ipv6_lt(&iph->daddr, &info->dst_min.in6);
                m |= iprange_ipv6_lt(&info->dst_max.in6, &iph->daddr);
                m ^= !!(info->flags & IPRANGE_DST_INV);
-               if (m)
+               if (m) {
+                       pr_debug("dst IP %pI6 NOT in range %s%pI6-%pI6\n",
+                                &iph->daddr,
+                                (info->flags & IPRANGE_DST_INV) ? "(INV) " : "",
+                                &info->dst_min.in6,
+                                &info->dst_max.in6);
                        return false;
+               }
        }
        return true;
 }
index 9127a3d8aa355d5ff94612bae755d69cb89fa402..bb10b0717f1bd1472411ce9b0892750722400bab 100644 (file)
@@ -85,7 +85,7 @@ ipvs_mt(const struct sk_buff *skb, struct xt_action_param *par)
        /*
         * Check if the packet belongs to an existing entry
         */
-       cp = pp->conn_out_get(family, skb, pp, &iph, iph.len, 1 /* inverse */);
+       cp = pp->conn_out_get(family, skb, &iph, iph.len, 1 /* inverse */);
        if (unlikely(cp == NULL)) {
                match = false;
                goto out;
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
new file mode 100644 (file)
index 0000000..061d48c
--- /dev/null
@@ -0,0 +1,359 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ *                         Patrick Schaaf <bof@bof.de>
+ *                         Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module which implements the set match and SET target
+ * for netfilter/iptables. */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_set.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("Xtables: IP set match and target module");
+MODULE_ALIAS("xt_SET");
+MODULE_ALIAS("ipt_set");
+MODULE_ALIAS("ip6t_set");
+MODULE_ALIAS("ipt_SET");
+MODULE_ALIAS("ip6t_SET");
+
+static inline int
+match_set(ip_set_id_t index, const struct sk_buff *skb,
+         u8 pf, u8 dim, u8 flags, int inv)
+{
+       if (ip_set_test(index, skb, pf, dim, flags))
+               inv = !inv;
+       return inv;
+}
+
+/* Revision 0 interface: backward compatible with netfilter/iptables */
+
+static bool
+set_match_v0(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_set_info_match_v0 *info = par->matchinfo;
+
+       return match_set(info->match_set.index, skb, par->family,
+                        info->match_set.u.compat.dim,
+                        info->match_set.u.compat.flags,
+                        info->match_set.u.compat.flags & IPSET_INV_MATCH);
+}
+
+static void
+compat_flags(struct xt_set_info_v0 *info)
+{
+       u_int8_t i;
+
+       /* Fill out compatibility data according to enum ip_set_kopt */
+       info->u.compat.dim = IPSET_DIM_ZERO;
+       if (info->u.flags[0] & IPSET_MATCH_INV)
+               info->u.compat.flags |= IPSET_INV_MATCH;
+       for (i = 0; i < IPSET_DIM_MAX-1 && info->u.flags[i]; i++) {
+               info->u.compat.dim++;
+               if (info->u.flags[i] & IPSET_SRC)
+                       info->u.compat.flags |= (1<<info->u.compat.dim);
+       }
+}
+
+static int
+set_match_v0_checkentry(const struct xt_mtchk_param *par)
+{
+       struct xt_set_info_match_v0 *info = par->matchinfo;
+       ip_set_id_t index;
+
+       index = ip_set_nfnl_get_byindex(info->match_set.index);
+
+       if (index == IPSET_INVALID_ID) {
+               pr_warning("Cannot find set identified by id %u to match\n",
+                          info->match_set.index);
+               return -ENOENT;
+       }
+       if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
+               pr_warning("Protocol error: set match dimension "
+                          "is over the limit!\n");
+               return -ERANGE;
+       }
+
+       /* Fill out compatibility data */
+       compat_flags(&info->match_set);
+
+       return 0;
+}
+
+static void
+set_match_v0_destroy(const struct xt_mtdtor_param *par)
+{
+       struct xt_set_info_match_v0 *info = par->matchinfo;
+
+       ip_set_nfnl_put(info->match_set.index);
+}
+
+static unsigned int
+set_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct xt_set_info_target_v0 *info = par->targinfo;
+
+       if (info->add_set.index != IPSET_INVALID_ID)
+               ip_set_add(info->add_set.index, skb, par->family,
+                          info->add_set.u.compat.dim,
+                          info->add_set.u.compat.flags);
+       if (info->del_set.index != IPSET_INVALID_ID)
+               ip_set_del(info->del_set.index, skb, par->family,
+                          info->del_set.u.compat.dim,
+                          info->del_set.u.compat.flags);
+
+       return XT_CONTINUE;
+}
+
+static int
+set_target_v0_checkentry(const struct xt_tgchk_param *par)
+{
+       struct xt_set_info_target_v0 *info = par->targinfo;
+       ip_set_id_t index;
+
+       if (info->add_set.index != IPSET_INVALID_ID) {
+               index = ip_set_nfnl_get_byindex(info->add_set.index);
+               if (index == IPSET_INVALID_ID) {
+                       pr_warning("Cannot find add_set index %u as target\n",
+                                  info->add_set.index);
+                       return -ENOENT;
+               }
+       }
+
+       if (info->del_set.index != IPSET_INVALID_ID) {
+               index = ip_set_nfnl_get_byindex(info->del_set.index);
+               if (index == IPSET_INVALID_ID) {
+                       pr_warning("Cannot find del_set index %u as target\n",
+                                  info->del_set.index);
+                       return -ENOENT;
+               }
+       }
+       if (info->add_set.u.flags[IPSET_DIM_MAX-1] != 0 ||
+           info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) {
+               pr_warning("Protocol error: SET target dimension "
+                          "is over the limit!\n");
+               return -ERANGE;
+       }
+
+       /* Fill out compatibility data */
+       compat_flags(&info->add_set);
+       compat_flags(&info->del_set);
+
+       return 0;
+}
+
+static void
+set_target_v0_destroy(const struct xt_tgdtor_param *par)
+{
+       const struct xt_set_info_target_v0 *info = par->targinfo;
+
+       if (info->add_set.index != IPSET_INVALID_ID)
+               ip_set_nfnl_put(info->add_set.index);
+       if (info->del_set.index != IPSET_INVALID_ID)
+               ip_set_nfnl_put(info->del_set.index);
+}
+
+/* Revision 1: current interface to netfilter/iptables */
+
+static bool
+set_match(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_set_info_match *info = par->matchinfo;
+
+       return match_set(info->match_set.index, skb, par->family,
+                        info->match_set.dim,
+                        info->match_set.flags,
+                        info->match_set.flags & IPSET_INV_MATCH);
+}
+
+static int
+set_match_checkentry(const struct xt_mtchk_param *par)
+{
+       struct xt_set_info_match *info = par->matchinfo;
+       ip_set_id_t index;
+
+       index = ip_set_nfnl_get_byindex(info->match_set.index);
+
+       if (index == IPSET_INVALID_ID) {
+               pr_warning("Cannot find set identified by id %u to match\n",
+                          info->match_set.index);
+               return -ENOENT;
+       }
+       if (info->match_set.dim > IPSET_DIM_MAX) {
+               pr_warning("Protocol error: set match dimension "
+                          "is over the limit!\n");
+               return -ERANGE;
+       }
+
+       return 0;
+}
+
+static void
+set_match_destroy(const struct xt_mtdtor_param *par)
+{
+       struct xt_set_info_match *info = par->matchinfo;
+
+       ip_set_nfnl_put(info->match_set.index);
+}
+
+static unsigned int
+set_target(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct xt_set_info_target *info = par->targinfo;
+
+       if (info->add_set.index != IPSET_INVALID_ID)
+               ip_set_add(info->add_set.index,
+                          skb, par->family,
+                          info->add_set.dim,
+                          info->add_set.flags);
+       if (info->del_set.index != IPSET_INVALID_ID)
+               ip_set_del(info->del_set.index,
+                          skb, par->family,
+                          info->del_set.dim,
+                          info->del_set.flags);
+
+       return XT_CONTINUE;
+}
+
+static int
+set_target_checkentry(const struct xt_tgchk_param *par)
+{
+       const struct xt_set_info_target *info = par->targinfo;
+       ip_set_id_t index;
+
+       if (info->add_set.index != IPSET_INVALID_ID) {
+               index = ip_set_nfnl_get_byindex(info->add_set.index);
+               if (index == IPSET_INVALID_ID) {
+                       pr_warning("Cannot find add_set index %u as target\n",
+                                  info->add_set.index);
+                       return -ENOENT;
+               }
+       }
+
+       if (info->del_set.index != IPSET_INVALID_ID) {
+               index = ip_set_nfnl_get_byindex(info->del_set.index);
+               if (index == IPSET_INVALID_ID) {
+                       pr_warning("Cannot find del_set index %u as target\n",
+                                  info->del_set.index);
+                       return -ENOENT;
+               }
+       }
+       if (info->add_set.dim > IPSET_DIM_MAX ||
+           info->del_set.dim > IPSET_DIM_MAX) {
+               pr_warning("Protocol error: SET target dimension "
+                          "is over the limit!\n");
+               return -ERANGE;
+       }
+
+       return 0;
+}
+
+static void
+set_target_destroy(const struct xt_tgdtor_param *par)
+{
+       const struct xt_set_info_target *info = par->targinfo;
+
+       if (info->add_set.index != IPSET_INVALID_ID)
+               ip_set_nfnl_put(info->add_set.index);
+       if (info->del_set.index != IPSET_INVALID_ID)
+               ip_set_nfnl_put(info->del_set.index);
+}
+
+static struct xt_match set_matches[] __read_mostly = {
+       {
+               .name           = "set",
+               .family         = NFPROTO_IPV4,
+               .revision       = 0,
+               .match          = set_match_v0,
+               .matchsize      = sizeof(struct xt_set_info_match_v0),
+               .checkentry     = set_match_v0_checkentry,
+               .destroy        = set_match_v0_destroy,
+               .me             = THIS_MODULE
+       },
+       {
+               .name           = "set",
+               .family         = NFPROTO_IPV4,
+               .revision       = 1,
+               .match          = set_match,
+               .matchsize      = sizeof(struct xt_set_info_match),
+               .checkentry     = set_match_checkentry,
+               .destroy        = set_match_destroy,
+               .me             = THIS_MODULE
+       },
+       {
+               .name           = "set",
+               .family         = NFPROTO_IPV6,
+               .revision       = 1,
+               .match          = set_match,
+               .matchsize      = sizeof(struct xt_set_info_match),
+               .checkentry     = set_match_checkentry,
+               .destroy        = set_match_destroy,
+               .me             = THIS_MODULE
+       },
+};
+
+static struct xt_target set_targets[] __read_mostly = {
+       {
+               .name           = "SET",
+               .revision       = 0,
+               .family         = NFPROTO_IPV4,
+               .target         = set_target_v0,
+               .targetsize     = sizeof(struct xt_set_info_target_v0),
+               .checkentry     = set_target_v0_checkentry,
+               .destroy        = set_target_v0_destroy,
+               .me             = THIS_MODULE
+       },
+       {
+               .name           = "SET",
+               .revision       = 1,
+               .family         = NFPROTO_IPV4,
+               .target         = set_target,
+               .targetsize     = sizeof(struct xt_set_info_target),
+               .checkentry     = set_target_checkentry,
+               .destroy        = set_target_destroy,
+               .me             = THIS_MODULE
+       },
+       {
+               .name           = "SET",
+               .revision       = 1,
+               .family         = NFPROTO_IPV6,
+               .target         = set_target,
+               .targetsize     = sizeof(struct xt_set_info_target),
+               .checkentry     = set_target_checkentry,
+               .destroy        = set_target_destroy,
+               .me             = THIS_MODULE
+       },
+};
+
+static int __init xt_set_init(void)
+{
+       int ret = xt_register_matches(set_matches, ARRAY_SIZE(set_matches));
+
+       if (!ret) {
+               ret = xt_register_targets(set_targets,
+                                         ARRAY_SIZE(set_targets));
+               if (ret)
+                       xt_unregister_matches(set_matches,
+                                             ARRAY_SIZE(set_matches));
+       }
+       return ret;
+}
+
+static void __exit xt_set_fini(void)
+{
+       xt_unregister_matches(set_matches, ARRAY_SIZE(set_matches));
+       xt_unregister_targets(set_targets, ARRAY_SIZE(set_targets));
+}
+
+module_init(xt_set_init);
+module_exit(xt_set_fini);
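For reference, match_set() near the top of this new file folds the IPSET_INV_MATCH flag into the result of the set lookup: the rule matches when the lookup result and the invert flag disagree. A stand-alone sketch of that truth table in plain C (illustrative only; 'in_set' stands in for the result of ip_set_test()):

#include <assert.h>
#include <stdbool.h>

static bool set_match_result(bool in_set, bool inverted)
{
        /* Same effect as: if (in_set) inverted = !inverted; return inverted; */
        return in_set != inverted;
}

int main(void)
{
        assert(set_match_result(true, false));   /* in set, no invert -> match    */
        assert(!set_match_result(false, false)); /* not in set        -> no match */
        assert(!set_match_result(true, true));   /* in set, inverted  -> no match */
        assert(set_match_result(false, true));   /* not in set, inv.  -> match    */
        return 0;
}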
index 6caef8b20611c55443fb1d4617f33cebfa42d835..f4fc4c9ad5670340f356bd854a89db1f93416e3f 100644 (file)
@@ -49,9 +49,9 @@
 static inline void netlbl_netlink_auditinfo(struct sk_buff *skb,
                                            struct netlbl_audit *audit_info)
 {
-       audit_info->secid = NETLINK_CB(skb).sid;
-       audit_info->loginuid = NETLINK_CB(skb).loginuid;
-       audit_info->sessionid = NETLINK_CB(skb).sessionid;
+       security_task_getsecid(current, &audit_info->secid);
+       audit_info->loginuid = audit_get_loginuid(current);
+       audit_info->sessionid = audit_get_sessionid(current);
 }
 
 /* NetLabel NETLINK I/O functions */
index 1f924595bdefd8e6f632563b29aebf670c566869..c8f35b5d2ee9b4398eb28a8fe3de02b9f532f947 100644 (file)
@@ -1362,17 +1362,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 
        NETLINK_CB(skb).pid     = nlk->pid;
        NETLINK_CB(skb).dst_group = dst_group;
-       NETLINK_CB(skb).loginuid = audit_get_loginuid(current);
-       NETLINK_CB(skb).sessionid = audit_get_sessionid(current);
-       security_task_getsecid(current, &(NETLINK_CB(skb).sid));
        memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
 
-       /* What can I do? Netlink is asynchronous, so that
-          we will have to save current capabilities to
-          check them, when this message will be delivered
-          to corresponding kernel module.   --ANK (980802)
-        */
-
        err = -EFAULT;
        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                kfree_skb(skb);
index 91cb1d71f018d8b6b77c029dca1b99cb2855ef21..b5362e96022b84259739770ff87114ccbe743bae 100644 (file)
@@ -164,7 +164,6 @@ struct packet_mreq_max {
 static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
                int closing, int tx_ring);
 
-#define PGV_FROM_VMALLOC 1
 struct pgv {
        char *buffer;
 };
@@ -466,7 +465,7 @@ retry:
         */
 
        err = -EMSGSIZE;
-       if (len > dev->mtu + dev->hard_header_len)
+       if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN)
                goto out_unlock;
 
        if (!skb) {
@@ -497,6 +496,19 @@ retry:
                goto retry;
        }
 
+       if (len > (dev->mtu + dev->hard_header_len)) {
+               /* Earlier code assumed this would be a VLAN pkt,
+                * double-check this now that we have the actual
+                * packet in hand.
+                */
+               struct ethhdr *ehdr;
+               skb_reset_mac_header(skb);
+               ehdr = eth_hdr(skb);
+               if (ehdr->h_proto != htons(ETH_P_8021Q)) {
+                       err = -EMSGSIZE;
+                       goto out_unlock;
+               }
+       }
 
        skb->protocol = proto;
        skb->dev = dev;
@@ -523,11 +535,11 @@ static inline unsigned int run_filter(const struct sk_buff *skb,
 {
        struct sk_filter *filter;
 
-       rcu_read_lock_bh();
-       filter = rcu_dereference_bh(sk->sk_filter);
+       rcu_read_lock();
+       filter = rcu_dereference(sk->sk_filter);
        if (filter != NULL)
                res = sk_run_filter(skb, filter->insns);
-       rcu_read_unlock_bh();
+       rcu_read_unlock();
 
        return res;
 }
@@ -954,7 +966,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
 
 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 {
-       struct socket *sock;
        struct sk_buff *skb;
        struct net_device *dev;
        __be16 proto;
@@ -966,8 +977,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
        int len_sum = 0;
        int status = 0;
 
-       sock = po->sk.sk_socket;
-
        mutex_lock(&po->pg_vec_lock);
 
        err = -EBUSY;
@@ -1200,7 +1209,7 @@ static int packet_snd(struct socket *sock,
        }
 
        err = -EMSGSIZE;
-       if (!gso_type && (len > dev->mtu+reserve))
+       if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN))
                goto out_unlock;
 
        err = -ENOBUFS;
@@ -1225,6 +1234,20 @@ static int packet_snd(struct socket *sock,
        if (err < 0)
                goto out_free;
 
+       if (!gso_type && (len > dev->mtu + reserve)) {
+               /* Earlier code assumed this would be a VLAN pkt,
+                * double-check this now that we have the actual
+                * packet in hand.
+                */
+               struct ethhdr *ehdr;
+               skb_reset_mac_header(skb);
+               ehdr = eth_hdr(skb);
+               if (ehdr->h_proto != htons(ETH_P_8021Q)) {
+                       err = -EMSGSIZE;
+                       goto out_free;
+               }
+       }
+
        skb->protocol = proto;
        skb->dev = dev;
        skb->priority = sk->sk_priority;
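The two af_packet hunks above relax the up-front size check to MTU + hard header + VLAN_HLEN, and then, once the frame has actually been built, allow the extra room only if the Ethernet type really is 802.1Q. A stand-alone sketch of that decision (illustrative only; the constants are local stand-ins, not taken from kernel headers):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define VLAN_TAG_LEN   4        /* 802.1Q tag size, as VLAN_HLEN in the kernel */
#define ETHERTYPE_VLAN 0x8100   /* 802.1Q EtherType                            */

static bool frame_len_ok(size_t len, size_t mtu, size_t hard_header_len,
                         uint16_t ethertype)
{
        if (len <= mtu + hard_header_len)
                return true;                      /* always within limit       */
        if (len > mtu + hard_header_len + VLAN_TAG_LEN)
                return false;                     /* too long even with a tag  */
        return ethertype == ETHERTYPE_VLAN;       /* only tagged frames may use
                                                     the extra four bytes      */
}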
index 0d9b8a220a7871a5bed180567a50036a1520d920..6ec7d55b1769933f399af5f4673b95c2de83150c 100644 (file)
@@ -14,15 +14,3 @@ config PHONET
 
          To compile this driver as a module, choose M here: the module
          will be called phonet. If unsure, say N.
-
-config PHONET_PIPECTRLR
-       bool "Phonet Pipe Controller (EXPERIMENTAL)"
-       depends on PHONET && EXPERIMENTAL
-       default N
-       help
-         The Pipe Controller implementation in Phonet stack to support Pipe
-         data with Nokia Slim modems like WG2.5 used on ST-Ericsson U8500
-         platform.
-
-         This option is incompatible with older Nokia modems.
-         Say N here unless you really know what you are doing.
index 1072b2c19d31d1ac87b4a546c6333fc35c756891..c6fffd946d42e4dc44b818a7b194b3a92274aa2e 100644 (file)
@@ -110,6 +110,7 @@ static int pn_socket_create(struct net *net, struct socket *sock, int protocol,
        sk->sk_protocol = protocol;
        pn = pn_sk(sk);
        pn->sobject = 0;
+       pn->dobject = 0;
        pn->resource = 0;
        sk->sk_prot->init(sk);
        err = 0;
@@ -194,11 +195,7 @@ static int pn_send(struct sk_buff *skb, struct net_device *dev,
        if (skb->pkt_type == PACKET_LOOPBACK) {
                skb_reset_mac_header(skb);
                skb_orphan(skb);
-               if (irq)
-                       netif_rx(skb);
-               else
-                       netif_rx_ni(skb);
-               err = 0;
+               err = (irq ? netif_rx(skb) : netif_rx_ni(skb)) ? -ENOBUFS : 0;
        } else {
                err = dev_hard_header(skb, dev, ntohs(skb->protocol),
                                        NULL, NULL, skb->len);
@@ -207,6 +204,8 @@ static int pn_send(struct sk_buff *skb, struct net_device *dev,
                        goto drop;
                }
                err = dev_queue_xmit(skb);
+               if (unlikely(err > 0))
+                       err = net_xmit_errno(err);
        }
 
        return err;
@@ -242,8 +241,18 @@ int pn_skb_send(struct sock *sk, struct sk_buff *skb,
        struct net_device *dev;
        struct pn_sock *pn = pn_sk(sk);
        int err;
-       u16 src;
-       u8 daddr = pn_sockaddr_get_addr(target), saddr = PN_NO_ADDR;
+       u16 src, dst;
+       u8 daddr, saddr, res;
+
+       src = pn->sobject;
+       if (target != NULL) {
+               dst = pn_sockaddr_get_object(target);
+               res = pn_sockaddr_get_resource(target);
+       } else {
+               dst = pn->dobject;
+               res = pn->resource;
+       }
+       daddr = pn_addr(dst);
 
        err = -EHOSTUNREACH;
        if (sk->sk_bound_dev_if)
@@ -251,10 +260,9 @@ int pn_skb_send(struct sock *sk, struct sk_buff *skb,
        else if (phonet_address_lookup(net, daddr) == 0) {
                dev = phonet_device_get(net);
                skb->pkt_type = PACKET_LOOPBACK;
-       } else if (pn_sockaddr_get_object(target) == 0) {
+       } else if (dst == 0) {
                /* Resource routing (small race until phonet_rcv()) */
-               struct sock *sk = pn_find_sock_by_res(net,
-                                                       target->spn_resource);
+               struct sock *sk = pn_find_sock_by_res(net, res);
                if (sk) {
                        sock_put(sk);
                        dev = phonet_device_get(net);
@@ -271,12 +279,10 @@ int pn_skb_send(struct sock *sk, struct sk_buff *skb,
        if (saddr == PN_NO_ADDR)
                goto drop;
 
-       src = pn->sobject;
        if (!pn_addr(src))
                src = pn_object(saddr, pn_obj(src));
 
-       err = pn_send(skb, dev, pn_sockaddr_get_object(target),
-                       src, pn_sockaddr_get_resource(target), 0);
+       err = pn_send(skb, dev, dst, src, res, 0);
        dev_put(dev);
        return err;
 
index 3e60f2e4e6c2d1e5982b2a56aed64fc38aa49259..68e635f11de8958f08f46909fab96bbc0ea4d004 100644 (file)
@@ -42,7 +42,7 @@
  * TCP_ESTABLISHED     connected pipe in enabled state
  *
  * pep_sock locking:
- *  - sk_state, ackq, hlist: sock lock needed
+ *  - sk_state, hlist: sock lock needed
  *  - listener: read only
  *  - pipe_handle: read only
  */
 #define CREDITS_MAX    10
 #define CREDITS_THR    7
 
-static const struct sockaddr_pn pipe_srv = {
-       .spn_family = AF_PHONET,
-       .spn_resource = 0xD9, /* pipe service */
-};
-
 #define pep_sb_size(s) (((s) + 5) & ~3) /* 2-bytes head, 32-bits aligned */
 
 /* Get the next TLV sub-block. */
@@ -82,236 +77,95 @@ static unsigned char *pep_get_sb(struct sk_buff *skb, u8 *ptype, u8 *plen,
        return data;
 }
 
-static int pep_reply(struct sock *sk, struct sk_buff *oskb,
-                       u8 code, const void *data, int len, gfp_t priority)
+static struct sk_buff *pep_alloc_skb(struct sock *sk, const void *payload,
+                                       int len, gfp_t priority)
 {
-       const struct pnpipehdr *oph = pnp_hdr(oskb);
-       struct pnpipehdr *ph;
-       struct sk_buff *skb;
-
-       skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
+       struct sk_buff *skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
        if (!skb)
-               return -ENOMEM;
+               return NULL;
        skb_set_owner_w(skb, sk);
 
        skb_reserve(skb, MAX_PNPIPE_HEADER);
        __skb_put(skb, len);
-       skb_copy_to_linear_data(skb, data, len);
-       __skb_push(skb, sizeof(*ph));
+       skb_copy_to_linear_data(skb, payload, len);
+       __skb_push(skb, sizeof(struct pnpipehdr));
        skb_reset_transport_header(skb);
-       ph = pnp_hdr(skb);
-       ph->utid = oph->utid;
-       ph->message_id = oph->message_id + 1; /* REQ -> RESP */
-       ph->pipe_handle = oph->pipe_handle;
-       ph->error_code = code;
-
-       return pn_skb_send(sk, skb, &pipe_srv);
-}
-
-#define PAD 0x00
-
-#ifdef CONFIG_PHONET_PIPECTRLR
-static u8 pipe_negotiate_fc(u8 *host_fc, u8 *remote_fc, int len)
-{
-       int i, j;
-       u8 base_fc, final_fc;
-
-       for (i = 0; i < len; i++) {
-               base_fc = host_fc[i];
-               for (j = 0; j < len; j++) {
-                       if (remote_fc[j] == base_fc) {
-                               final_fc = base_fc;
-                               goto done;
-                       }
-               }
-       }
-       return -EINVAL;
-
-done:
-       return final_fc;
-
-}
-
-static int pipe_get_flow_info(struct sock *sk, struct sk_buff *skb,
-               u8 *pref_rx_fc, u8 *req_tx_fc)
-{
-       struct pnpipehdr *hdr;
-       u8 n_sb;
-
-       if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
-               return -EINVAL;
-
-       hdr = pnp_hdr(skb);
-       n_sb = hdr->data[4];
-
-       __skb_pull(skb, sizeof(*hdr) + 4);
-       while (n_sb > 0) {
-               u8 type, buf[3], len = sizeof(buf);
-               u8 *data = pep_get_sb(skb, &type, &len, buf);
-
-               if (data == NULL)
-                       return -EINVAL;
-
-               switch (type) {
-               case PN_PIPE_SB_REQUIRED_FC_TX:
-                       if (len < 3 || (data[2] | data[3] | data[4]) > 3)
-                               break;
-                       req_tx_fc[0] = data[2];
-                       req_tx_fc[1] = data[3];
-                       req_tx_fc[2] = data[4];
-                       break;
-
-               case PN_PIPE_SB_PREFERRED_FC_RX:
-                       if (len < 3 || (data[2] | data[3] | data[4]) > 3)
-                               break;
-                       pref_rx_fc[0] = data[2];
-                       pref_rx_fc[1] = data[3];
-                       pref_rx_fc[2] = data[4];
-                       break;
-
-               }
-               n_sb--;
-       }
-       return 0;
+       return skb;
 }
 
-static int pipe_handler_send_req(struct sock *sk, u8 utid,
-               u8 msg_id, gfp_t priority)
+static int pep_reply(struct sock *sk, struct sk_buff *oskb, u8 code,
+                       const void *data, int len, gfp_t priority)
 {
-       int len;
+       const struct pnpipehdr *oph = pnp_hdr(oskb);
        struct pnpipehdr *ph;
        struct sk_buff *skb;
-       struct pep_sock *pn = pep_sk(sk);
-
-       static const u8 data[4] = {
-               PAD, PAD, PAD, PAD,
-       };
+       struct sockaddr_pn peer;
 
-       switch (msg_id) {
-       case PNS_PEP_CONNECT_REQ:
-               len = sizeof(data);
-               break;
-
-       case PNS_PEP_DISCONNECT_REQ:
-       case PNS_PEP_ENABLE_REQ:
-       case PNS_PEP_DISABLE_REQ:
-               len = 0;
-               break;
-
-       default:
-               return -EINVAL;
-       }
-
-       skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
+       skb = pep_alloc_skb(sk, data, len, priority);
        if (!skb)
                return -ENOMEM;
-       skb_set_owner_w(skb, sk);
 
-       skb_reserve(skb, MAX_PNPIPE_HEADER);
-       if (len) {
-               __skb_put(skb, len);
-               skb_copy_to_linear_data(skb, data, len);
-       }
-       __skb_push(skb, sizeof(*ph));
-       skb_reset_transport_header(skb);
        ph = pnp_hdr(skb);
-       ph->utid = utid;
-       ph->message_id = msg_id;
-       ph->pipe_handle = pn->pipe_handle;
-       ph->error_code = PN_PIPE_NO_ERROR;
+       ph->utid = oph->utid;
+       ph->message_id = oph->message_id + 1; /* REQ -> RESP */
+       ph->pipe_handle = oph->pipe_handle;
+       ph->error_code = code;
 
-       return pn_skb_send(sk, skb, &pn->remote_pep);
+       pn_skb_get_src_sockaddr(oskb, &peer);
+       return pn_skb_send(sk, skb, &peer);
 }
 
-static int pipe_handler_send_created_ind(struct sock *sk,
-               u8 utid, u8 msg_id)
+static int pep_indicate(struct sock *sk, u8 id, u8 code,
+                       const void *data, int len, gfp_t priority)
 {
-       int err_code;
+       struct pep_sock *pn = pep_sk(sk);
        struct pnpipehdr *ph;
        struct sk_buff *skb;
 
-       struct pep_sock *pn = pep_sk(sk);
-       static u8 data[4] = {
-               0x03, 0x04,
-       };
-       data[2] = pn->tx_fc;
-       data[3] = pn->rx_fc;
-
-       /*
-        * actually, below is number of sub-blocks and not error code.
-        * Pipe_created_ind message format does not have any
-        * error code field. However, the Phonet stack will always send
-        * an error code as part of pnpipehdr. So, use that err_code to
-        * specify the number of sub-blocks.
-        */
-       err_code = 0x01;
-
-       skb = alloc_skb(MAX_PNPIPE_HEADER + sizeof(data), GFP_ATOMIC);
+       skb = pep_alloc_skb(sk, data, len, priority);
        if (!skb)
                return -ENOMEM;
-       skb_set_owner_w(skb, sk);
 
-       skb_reserve(skb, MAX_PNPIPE_HEADER);
-       __skb_put(skb, sizeof(data));
-       skb_copy_to_linear_data(skb, data, sizeof(data));
-       __skb_push(skb, sizeof(*ph));
-       skb_reset_transport_header(skb);
        ph = pnp_hdr(skb);
-       ph->utid = utid;
-       ph->message_id = msg_id;
+       ph->utid = 0;
+       ph->message_id = id;
        ph->pipe_handle = pn->pipe_handle;
-       ph->error_code = err_code;
-
-       return pn_skb_send(sk, skb, &pn->remote_pep);
+       ph->data[0] = code;
+       return pn_skb_send(sk, skb, NULL);
 }
 
-static int pipe_handler_send_ind(struct sock *sk, u8 utid, u8 msg_id)
+#define PAD 0x00
+
+static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
+                               const void *data, int len)
 {
-       int err_code;
+       struct pep_sock *pn = pep_sk(sk);
        struct pnpipehdr *ph;
        struct sk_buff *skb;
-       struct pep_sock *pn = pep_sk(sk);
-
-       /*
-        * actually, below is a filler.
-        * Pipe_enabled/disabled_ind message format does not have any
-        * error code field. However, the Phonet stack will always send
-        * an error code as part of pnpipehdr. So, use that err_code to
-        * specify the filler value.
-        */
-       err_code = 0x0;
 
-       skb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
+       skb = pep_alloc_skb(sk, data, len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
-       skb_set_owner_w(skb, sk);
 
-       skb_reserve(skb, MAX_PNPIPE_HEADER);
-       __skb_push(skb, sizeof(*ph));
-       skb_reset_transport_header(skb);
        ph = pnp_hdr(skb);
-       ph->utid = utid;
-       ph->message_id = msg_id;
+       ph->utid = id; /* whatever */
+       ph->message_id = id;
        ph->pipe_handle = pn->pipe_handle;
-       ph->error_code = err_code;
-
-       return pn_skb_send(sk, skb, &pn->remote_pep);
+       ph->data[0] = code;
+       return pn_skb_send(sk, skb, NULL);
 }
 
-static int pipe_handler_enable_pipe(struct sock *sk, int enable)
+static int pipe_handler_send_created_ind(struct sock *sk)
 {
-       int utid, req;
-
-       if (enable) {
-               utid = PNS_PIPE_ENABLE_UTID;
-               req = PNS_PEP_ENABLE_REQ;
-       } else {
-               utid = PNS_PIPE_DISABLE_UTID;
-               req = PNS_PEP_DISABLE_REQ;
-       }
-       return pipe_handler_send_req(sk, utid, req, GFP_ATOMIC);
+       struct pep_sock *pn = pep_sk(sk);
+       u8 data[4] = {
+               PN_PIPE_SB_NEGOTIATED_FC, pep_sb_size(2),
+               pn->tx_fc, pn->rx_fc,
+       };
+
+       return pep_indicate(sk, PNS_PIPE_CREATED_IND, 1 /* sub-blocks */,
+                               data, 4, GFP_ATOMIC);
 }
-#endif
 
 static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
 {
@@ -334,11 +188,12 @@ static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
                                GFP_KERNEL);
 }
 
-static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code)
+static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code,
+                               gfp_t priority)
 {
        static const u8 data[4] = { PAD, PAD, PAD, 0 /* sub-blocks */ };
        WARN_ON(code == PN_PIPE_NO_ERROR);
-       return pep_reply(sk, skb, code, data, sizeof(data), GFP_ATOMIC);
+       return pep_reply(sk, skb, code, data, sizeof(data), priority);
 }
 
 /* Control requests are not sent by the pipe service and have a specific
@@ -350,23 +205,21 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
        struct sk_buff *skb;
        struct pnpipehdr *ph;
        struct sockaddr_pn dst;
+       u8 data[4] = {
+               oph->data[0], /* PEP type */
+               code, /* error code, at an unusual offset */
+               PAD, PAD,
+       };
 
-       skb = alloc_skb(MAX_PNPIPE_HEADER + 4, priority);
+       skb = pep_alloc_skb(sk, data, 4, priority);
        if (!skb)
                return -ENOMEM;
-       skb_set_owner_w(skb, sk);
-
-       skb_reserve(skb, MAX_PHONET_HEADER);
-       ph = (struct pnpipehdr *)skb_put(skb, sizeof(*ph) + 4);
 
+       ph = pnp_hdr(skb);
        ph->utid = oph->utid;
        ph->message_id = PNS_PEP_CTRL_RESP;
        ph->pipe_handle = oph->pipe_handle;
        ph->data[0] = oph->data[1]; /* CTRL id */
-       ph->data[1] = oph->data[0]; /* PEP type */
-       ph->data[2] = code; /* error code, at an usual offset */
-       ph->data[3] = PAD;
-       ph->data[4] = PAD;
 
        pn_skb_get_src_sockaddr(oskb, &dst);
        return pn_skb_send(sk, skb, &dst);
@@ -374,38 +227,15 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
 
 static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
 {
-       struct pep_sock *pn = pep_sk(sk);
-       struct pnpipehdr *ph;
-       struct sk_buff *skb;
+       u8 data[4] = { type, PAD, PAD, status };
 
-       skb = alloc_skb(MAX_PNPIPE_HEADER + 4, priority);
-       if (!skb)
-               return -ENOMEM;
-       skb_set_owner_w(skb, sk);
-
-       skb_reserve(skb, MAX_PNPIPE_HEADER + 4);
-       __skb_push(skb, sizeof(*ph) + 4);
-       skb_reset_transport_header(skb);
-       ph = pnp_hdr(skb);
-       ph->utid = 0;
-       ph->message_id = PNS_PEP_STATUS_IND;
-       ph->pipe_handle = pn->pipe_handle;
-       ph->pep_type = PN_PEP_TYPE_COMMON;
-       ph->data[1] = type;
-       ph->data[2] = PAD;
-       ph->data[3] = PAD;
-       ph->data[4] = status;
-
-#ifdef CONFIG_PHONET_PIPECTRLR
-       return pn_skb_send(sk, skb, &pn->remote_pep);
-#else
-       return pn_skb_send(sk, skb, &pipe_srv);
-#endif
+       return pep_indicate(sk, PNS_PEP_STATUS_IND, PN_PEP_TYPE_COMMON,
+                               data, 4, priority);
 }
 
 /* Send our RX flow control information to the sender.
  * Socket must be locked. */
-static void pipe_grant_credits(struct sock *sk)
+static void pipe_grant_credits(struct sock *sk, gfp_t priority)
 {
        struct pep_sock *pn = pep_sk(sk);
 
@@ -415,16 +245,16 @@ static void pipe_grant_credits(struct sock *sk)
        case PN_LEGACY_FLOW_CONTROL: /* TODO */
                break;
        case PN_ONE_CREDIT_FLOW_CONTROL:
-               pipe_snd_status(sk, PN_PEP_IND_FLOW_CONTROL,
-                               PEP_IND_READY, GFP_ATOMIC);
-               pn->rx_credits = 1;
+               if (pipe_snd_status(sk, PN_PEP_IND_FLOW_CONTROL,
+                                       PEP_IND_READY, priority) == 0)
+                       pn->rx_credits = 1;
                break;
        case PN_MULTI_CREDIT_FLOW_CONTROL:
                if ((pn->rx_credits + CREDITS_THR) > CREDITS_MAX)
                        break;
                if (pipe_snd_status(sk, PN_PEP_IND_ID_MCFC_GRANT_CREDITS,
                                        CREDITS_MAX - pn->rx_credits,
-                                       GFP_ATOMIC) == 0)
+                                       priority) == 0)
                        pn->rx_credits = CREDITS_MAX;
                break;
        }
@@ -522,7 +352,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
 
        switch (hdr->message_id) {
        case PNS_PEP_CONNECT_REQ:
-               pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE);
+               pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_ATOMIC);
                break;
 
        case PNS_PEP_DISCONNECT_REQ:
@@ -532,35 +362,11 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
                        sk->sk_state_change(sk);
                break;
 
-#ifdef CONFIG_PHONET_PIPECTRLR
-       case PNS_PEP_DISCONNECT_RESP:
-               pn->pipe_state = PIPE_IDLE;
-               sk->sk_state = TCP_CLOSE;
-               break;
-#endif
-
        case PNS_PEP_ENABLE_REQ:
                /* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */
                pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
                break;
 
-#ifdef CONFIG_PHONET_PIPECTRLR
-       case PNS_PEP_ENABLE_RESP:
-               pn->pipe_state = PIPE_ENABLED;
-               pipe_handler_send_ind(sk, PNS_PIPE_ENABLED_IND_UTID,
-                               PNS_PIPE_ENABLED_IND);
-
-               if (!pn_flow_safe(pn->tx_fc)) {
-                       atomic_set(&pn->tx_credits, 1);
-                       sk->sk_write_space(sk);
-               }
-               if (sk->sk_state == TCP_ESTABLISHED)
-                       break; /* Nothing to do */
-               sk->sk_state = TCP_ESTABLISHED;
-               pipe_grant_credits(sk);
-               break;
-#endif
-
        case PNS_PEP_RESET_REQ:
                switch (hdr->state_after_reset) {
                case PN_PIPE_DISABLE:
@@ -579,17 +385,6 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
                pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
                break;
 
-#ifdef CONFIG_PHONET_PIPECTRLR
-       case PNS_PEP_DISABLE_RESP:
-               pn->pipe_state = PIPE_DISABLED;
-               atomic_set(&pn->tx_credits, 0);
-               pipe_handler_send_ind(sk, PNS_PIPE_DISABLED_IND_UTID,
-                               PNS_PIPE_DISABLED_IND);
-               sk->sk_state = TCP_SYN_RECV;
-               pn->rx_credits = 0;
-               break;
-#endif
-
        case PNS_PEP_CTRL_REQ:
                if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
                        atomic_inc(&sk->sk_drops);
@@ -607,7 +402,8 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
                if (!pn_flow_safe(pn->rx_fc)) {
                        err = sock_queue_rcv_skb(sk, skb);
                        if (!err)
-                               return 0;
+                               return NET_RX_SUCCESS;
+                       err = -ENOBUFS;
                        break;
                }
 
@@ -645,7 +441,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
                if (sk->sk_state == TCP_ESTABLISHED)
                        break; /* Nothing to do */
                sk->sk_state = TCP_ESTABLISHED;
-               pipe_grant_credits(sk);
+               pipe_grant_credits(sk, GFP_ATOMIC);
                break;
 
        case PNS_PIPE_DISABLED_IND:
@@ -660,7 +456,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
        }
 out:
        kfree_skb(skb);
-       return err;
+       return (err == -ENOBUFS) ? NET_RX_DROP : NET_RX_SUCCESS;
 
 queue:
        skb->dev = NULL;
@@ -669,7 +465,7 @@ queue:
        skb_queue_tail(queue, skb);
        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_data_ready(sk, err);
-       return 0;
+       return NET_RX_SUCCESS;
 }
 
 /* Destroy connected sock. */
@@ -681,133 +477,126 @@ static void pipe_destruct(struct sock *sk)
        skb_queue_purge(&pn->ctrlreq_queue);
 }
 
-#ifdef CONFIG_PHONET_PIPECTRLR
-static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
+static u8 pipe_negotiate_fc(const u8 *fcs, unsigned n)
 {
-       struct pep_sock *pn = pep_sk(sk);
-       u8 host_pref_rx_fc[3] = {3, 2, 1}, host_req_tx_fc[3] = {3, 2, 1};
-       u8 remote_pref_rx_fc[3], remote_req_tx_fc[3];
-       u8 negotiated_rx_fc, negotiated_tx_fc;
-       int ret;
-
-       pipe_get_flow_info(sk, skb, remote_pref_rx_fc,
-                       remote_req_tx_fc);
-       negotiated_tx_fc = pipe_negotiate_fc(remote_req_tx_fc,
-                       host_pref_rx_fc,
-                       sizeof(host_pref_rx_fc));
-       negotiated_rx_fc = pipe_negotiate_fc(host_req_tx_fc,
-                       remote_pref_rx_fc,
-                       sizeof(host_pref_rx_fc));
-
-       pn->pipe_state = PIPE_DISABLED;
-       sk->sk_state = TCP_SYN_RECV;
-       sk->sk_backlog_rcv = pipe_do_rcv;
-       sk->sk_destruct = pipe_destruct;
-       pn->rx_credits = 0;
-       pn->rx_fc = negotiated_rx_fc;
-       pn->tx_fc = negotiated_tx_fc;
-       sk->sk_state_change(sk);
+       unsigned i;
+       u8 final_fc = PN_NO_FLOW_CONTROL;
 
-       ret = pipe_handler_send_created_ind(sk,
-                       PNS_PIPE_CREATED_IND_UTID,
-                       PNS_PIPE_CREATED_IND
-                       );
+       for (i = 0; i < n; i++) {
+               u8 fc = fcs[i];
 
-       return ret;
+               if (fc > final_fc && fc < PN_MAX_FLOW_CONTROL)
+                       final_fc = fc;
+       }
+       return final_fc;
 }
-#endif
 
-static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
+static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
 {
-       struct sock *newsk;
-       struct pep_sock *newpn, *pn = pep_sk(sk);
+       struct pep_sock *pn = pep_sk(sk);
        struct pnpipehdr *hdr;
-       struct sockaddr_pn dst;
-       u16 peer_type;
-       u8 pipe_handle, enabled, n_sb;
-       u8 aligned = 0;
+       u8 n_sb;
 
        if (!pskb_pull(skb, sizeof(*hdr) + 4))
                return -EINVAL;
 
        hdr = pnp_hdr(skb);
-       pipe_handle = hdr->pipe_handle;
-       switch (hdr->state_after_connect) {
-       case PN_PIPE_DISABLE:
-               enabled = 0;
-               break;
-       case PN_PIPE_ENABLE:
-               enabled = 1;
-               break;
-       default:
-               pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM);
-               return -EINVAL;
-       }
-       peer_type = hdr->other_pep_type << 8;
-
-       if (unlikely(sk->sk_state != TCP_LISTEN) || sk_acceptq_is_full(sk)) {
-               pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE);
-               return -ENOBUFS;
-       }
+       if (hdr->error_code != PN_PIPE_NO_ERROR)
+               return -ECONNREFUSED;
 
-       /* Parse sub-blocks (options) */
+       /* Parse sub-blocks */
        n_sb = hdr->data[4];
        while (n_sb > 0) {
-               u8 type, buf[1], len = sizeof(buf);
+               u8 type, buf[6], len = sizeof(buf);
                const u8 *data = pep_get_sb(skb, &type, &len, buf);
 
                if (data == NULL)
                        return -EINVAL;
+
                switch (type) {
-               case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
-                       if (len < 1)
-                               return -EINVAL;
-                       peer_type = (peer_type & 0xff00) | data[0];
+               case PN_PIPE_SB_REQUIRED_FC_TX:
+                       if (len < 2 || len < data[0])
+                               break;
+                       pn->tx_fc = pipe_negotiate_fc(data + 2, len - 2);
                        break;
-               case PN_PIPE_SB_ALIGNED_DATA:
-                       aligned = data[0] != 0;
+
+               case PN_PIPE_SB_PREFERRED_FC_RX:
+                       if (len < 2 || len < data[0])
+                               break;
+                       pn->rx_fc = pipe_negotiate_fc(data + 2, len - 2);
                        break;
+
                }
                n_sb--;
        }
 
-       skb = skb_clone(skb, GFP_ATOMIC);
-       if (!skb)
-               return -ENOMEM;
+       return pipe_handler_send_created_ind(sk);
+}
 
-       /* Create a new to-be-accepted sock */
-       newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_ATOMIC, sk->sk_prot);
-       if (!newsk) {
-               kfree_skb(skb);
-               return -ENOMEM;
-       }
-       sock_init_data(NULL, newsk);
-       newsk->sk_state = TCP_SYN_RECV;
-       newsk->sk_backlog_rcv = pipe_do_rcv;
-       newsk->sk_protocol = sk->sk_protocol;
-       newsk->sk_destruct = pipe_destruct;
+/* Queue an skb to an actively connected sock.
+ * Socket lock must be held. */
+static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
+{
+       struct pep_sock *pn = pep_sk(sk);
+       struct pnpipehdr *hdr = pnp_hdr(skb);
+       int err = NET_RX_SUCCESS;
 
-       newpn = pep_sk(newsk);
-       pn_skb_get_dst_sockaddr(skb, &dst);
-       newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
-       newpn->pn_sk.resource = pn->pn_sk.resource;
-       skb_queue_head_init(&newpn->ctrlreq_queue);
-       newpn->pipe_handle = pipe_handle;
-       atomic_set(&newpn->tx_credits, 0);
-       newpn->peer_type = peer_type;
-       newpn->rx_credits = 0;
-       newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
-       newpn->init_enable = enabled;
-       newpn->aligned = aligned;
+       switch (hdr->message_id) {
+       case PNS_PIPE_ALIGNED_DATA:
+               __skb_pull(skb, 1);
+               /* fall through */
+       case PNS_PIPE_DATA:
+               __skb_pull(skb, 3); /* Pipe data header */
+               if (!pn_flow_safe(pn->rx_fc)) {
+                       err = sock_queue_rcv_skb(sk, skb);
+                       if (!err)
+                               return NET_RX_SUCCESS;
+                       err = NET_RX_DROP;
+                       break;
+               }
 
-       BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue));
-       skb_queue_head(&newsk->sk_receive_queue, skb);
-       if (!sock_flag(sk, SOCK_DEAD))
-               sk->sk_data_ready(sk, 0);
+               if (pn->rx_credits == 0) {
+                       atomic_inc(&sk->sk_drops);
+                       err = NET_RX_DROP;
+                       break;
+               }
+               pn->rx_credits--;
+               skb->dev = NULL;
+               skb_set_owner_r(skb, sk);
+               err = skb->len;
+               skb_queue_tail(&sk->sk_receive_queue, skb);
+               if (!sock_flag(sk, SOCK_DEAD))
+                       sk->sk_data_ready(sk, err);
+               return NET_RX_SUCCESS;
 
-       sk_acceptq_added(sk);
-       sk_add_node(newsk, &pn->ackq);
-       return 0;
+       case PNS_PEP_CONNECT_RESP:
+               if (sk->sk_state != TCP_SYN_SENT)
+                       break;
+               if (!sock_flag(sk, SOCK_DEAD))
+                       sk->sk_state_change(sk);
+               if (pep_connresp_rcv(sk, skb)) {
+                       sk->sk_state = TCP_CLOSE_WAIT;
+                       break;
+               }
+
+               sk->sk_state = TCP_ESTABLISHED;
+               if (!pn_flow_safe(pn->tx_fc)) {
+                       atomic_set(&pn->tx_credits, 1);
+                       sk->sk_write_space(sk);
+               }
+               pipe_grant_credits(sk, GFP_ATOMIC);
+               break;
+
+       case PNS_PEP_DISCONNECT_RESP:
+               /* sock should already be dead, nothing to do */
+               break;
+
+       case PNS_PEP_STATUS_IND:
+               pipe_rcv_status(sk, skb);
+               break;
+       }
+       kfree_skb(skb);
+       return err;
 }
 
 /* Listening sock must be locked */
@@ -847,7 +636,6 @@ static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
        struct sock *sknode;
        struct pnpipehdr *hdr;
        struct sockaddr_pn dst;
-       int err = NET_RX_SUCCESS;
        u8 pipe_handle;
 
        if (!pskb_may_pull(skb, sizeof(*hdr)))
@@ -865,26 +653,18 @@ static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
        if (sknode)
                return sk_receive_skb(sknode, skb, 1);
 
-       /* Look for a pipe handle pending accept */
-       sknode = pep_find_pipe(&pn->ackq, &dst, pipe_handle);
-       if (sknode) {
-               sock_put(sknode);
-               if (net_ratelimit())
-                       printk(KERN_WARNING"Phonet unconnected PEP ignored");
-               err = NET_RX_DROP;
-               goto drop;
-       }
-
        switch (hdr->message_id) {
        case PNS_PEP_CONNECT_REQ:
-               err = pep_connreq_rcv(sk, skb);
-               break;
-
-#ifdef CONFIG_PHONET_PIPECTRLR
-       case PNS_PEP_CONNECT_RESP:
-               err = pep_connresp_rcv(sk, skb);
-               break;
-#endif
+               if (sk->sk_state != TCP_LISTEN || sk_acceptq_is_full(sk)) {
+                       pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE,
+                                       GFP_ATOMIC);
+                       break;
+               }
+               skb_queue_head(&sk->sk_receive_queue, skb);
+               sk_acceptq_added(sk);
+               if (!sock_flag(sk, SOCK_DEAD))
+                       sk->sk_data_ready(sk, 0);
+               return NET_RX_SUCCESS;
 
        case PNS_PEP_DISCONNECT_REQ:
                pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
@@ -898,12 +678,17 @@ static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
        case PNS_PEP_ENABLE_REQ:
        case PNS_PEP_DISABLE_REQ:
                /* invalid handle is not even allowed here! */
+               break;
+
        default:
-               err = NET_RX_DROP;
+               if ((1 << sk->sk_state)
+                               & ~(TCPF_CLOSE|TCPF_LISTEN|TCPF_CLOSE_WAIT))
+                       /* actively connected socket */
+                       return pipe_handler_do_rcv(sk, skb);
        }
 drop:
        kfree_skb(skb);
-       return err;
+       return NET_RX_SUCCESS;
 }
 
 static int pipe_do_remove(struct sock *sk)
@@ -912,20 +697,16 @@ static int pipe_do_remove(struct sock *sk)
        struct pnpipehdr *ph;
        struct sk_buff *skb;
 
-       skb = alloc_skb(MAX_PNPIPE_HEADER, GFP_KERNEL);
+       skb = pep_alloc_skb(sk, NULL, 0, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
 
-       skb_reserve(skb, MAX_PNPIPE_HEADER);
-       __skb_push(skb, sizeof(*ph));
-       skb_reset_transport_header(skb);
        ph = pnp_hdr(skb);
        ph->utid = 0;
        ph->message_id = PNS_PIPE_REMOVE_REQ;
        ph->pipe_handle = pn->pipe_handle;
        ph->data[0] = PAD;
-
-       return pn_skb_send(sk, skb, &pipe_srv);
+       return pn_skb_send(sk, skb, NULL);
 }
 
 /* associated socket ceases to exist */
@@ -938,29 +719,15 @@ static void pep_sock_close(struct sock *sk, long timeout)
        sk_common_release(sk);
 
        lock_sock(sk);
-       if (sk->sk_state == TCP_LISTEN) {
-               /* Destroy the listen queue */
-               struct sock *sknode;
-               struct hlist_node *p, *n;
-
-               sk_for_each_safe(sknode, p, n, &pn->ackq)
-                       sk_del_node_init(sknode);
-               sk->sk_state = TCP_CLOSE;
-       } else if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
-               /* Forcefully remove dangling Phonet pipe */
-               pipe_do_remove(sk);
-
-#ifdef CONFIG_PHONET_PIPECTRLR
-       if (pn->pipe_state != PIPE_IDLE) {
-               /* send pep disconnect request */
-               pipe_handler_send_req(sk,
-                               PNS_PEP_DISCONNECT_UTID, PNS_PEP_DISCONNECT_REQ,
-                               GFP_KERNEL);
-
-               pn->pipe_state = PIPE_IDLE;
-               sk->sk_state = TCP_CLOSE;
+       if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED)) {
+               if (sk->sk_backlog_rcv == pipe_do_rcv)
+                       /* Forcefully remove dangling Phonet pipe */
+                       pipe_do_remove(sk);
+               else
+                       pipe_handler_request(sk, PNS_PEP_DISCONNECT_REQ, PAD,
+                                               NULL, 0);
        }
-#endif
+       sk->sk_state = TCP_CLOSE;
 
        ifindex = pn->ifindex;
        pn->ifindex = 0;
@@ -971,86 +738,141 @@ static void pep_sock_close(struct sock *sk, long timeout)
        sock_put(sk);
 }
 
-static int pep_wait_connreq(struct sock *sk, int noblock)
+static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
 {
-       struct task_struct *tsk = current;
-       struct pep_sock *pn = pep_sk(sk);
-       long timeo = sock_rcvtimeo(sk, noblock);
-
-       for (;;) {
-               DEFINE_WAIT(wait);
+       struct pep_sock *pn = pep_sk(sk), *newpn;
+       struct sock *newsk = NULL;
+       struct sk_buff *skb;
+       struct pnpipehdr *hdr;
+       struct sockaddr_pn dst, src;
+       int err;
+       u16 peer_type;
+       u8 pipe_handle, enabled, n_sb;
+       u8 aligned = 0;
 
-               if (sk->sk_state != TCP_LISTEN)
-                       return -EINVAL;
-               if (!hlist_empty(&pn->ackq))
-                       break;
-               if (!timeo)
-                       return -EWOULDBLOCK;
-               if (signal_pending(tsk))
-                       return sock_intr_errno(timeo);
+       skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, errp);
+       if (!skb)
+               return NULL;
 
-               prepare_to_wait_exclusive(sk_sleep(sk), &wait,
-                                               TASK_INTERRUPTIBLE);
-               release_sock(sk);
-               timeo = schedule_timeout(timeo);
-               lock_sock(sk);
-               finish_wait(sk_sleep(sk), &wait);
+       lock_sock(sk);
+       if (sk->sk_state != TCP_LISTEN) {
+               err = -EINVAL;
+               goto drop;
        }
+       sk_acceptq_removed(sk);
 
-       return 0;
-}
+       err = -EPROTO;
+       if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
+               goto drop;
 
-static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
-{
-       struct pep_sock *pn = pep_sk(sk);
-       struct sock *newsk = NULL;
-       struct sk_buff *oskb;
-       int err;
+       hdr = pnp_hdr(skb);
+       pipe_handle = hdr->pipe_handle;
+       switch (hdr->state_after_connect) {
+       case PN_PIPE_DISABLE:
+               enabled = 0;
+               break;
+       case PN_PIPE_ENABLE:
+               enabled = 1;
+               break;
+       default:
+               pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM,
+                               GFP_KERNEL);
+               goto drop;
+       }
+       peer_type = hdr->other_pep_type << 8;
 
-       lock_sock(sk);
-       err = pep_wait_connreq(sk, flags & O_NONBLOCK);
-       if (err)
-               goto out;
+       /* Parse sub-blocks (options) */
+       n_sb = hdr->data[4];
+       while (n_sb > 0) {
+               u8 type, buf[1], len = sizeof(buf);
+               const u8 *data = pep_get_sb(skb, &type, &len, buf);
 
-       newsk = __sk_head(&pn->ackq);
+               if (data == NULL)
+                       goto drop;
+               switch (type) {
+               case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
+                       if (len < 1)
+                               goto drop;
+                       peer_type = (peer_type & 0xff00) | data[0];
+                       break;
+               case PN_PIPE_SB_ALIGNED_DATA:
+                       aligned = data[0] != 0;
+                       break;
+               }
+               n_sb--;
+       }
 
-       oskb = skb_dequeue(&newsk->sk_receive_queue);
-       err = pep_accept_conn(newsk, oskb);
-       if (err) {
-               skb_queue_head(&newsk->sk_receive_queue, oskb);
+       /* Check for duplicate pipe handle */
+       newsk = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
+       if (unlikely(newsk)) {
+               __sock_put(newsk);
                newsk = NULL;
-               goto out;
+               pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_KERNEL);
+               goto drop;
+       }
+
+       /* Create a new to-be-accepted sock */
+       newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot);
+       if (!newsk) {
+               pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
+               err = -ENOBUFS;
+               goto drop;
        }
-       kfree_skb(oskb);
 
+       sock_init_data(NULL, newsk);
+       newsk->sk_state = TCP_SYN_RECV;
+       newsk->sk_backlog_rcv = pipe_do_rcv;
+       newsk->sk_protocol = sk->sk_protocol;
+       newsk->sk_destruct = pipe_destruct;
+
+       newpn = pep_sk(newsk);
+       pn_skb_get_dst_sockaddr(skb, &dst);
+       pn_skb_get_src_sockaddr(skb, &src);
+       newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
+       newpn->pn_sk.dobject = pn_sockaddr_get_object(&src);
+       newpn->pn_sk.resource = pn_sockaddr_get_resource(&dst);
        sock_hold(sk);
-       pep_sk(newsk)->listener = sk;
+       newpn->listener = sk;
+       skb_queue_head_init(&newpn->ctrlreq_queue);
+       newpn->pipe_handle = pipe_handle;
+       atomic_set(&newpn->tx_credits, 0);
+       newpn->ifindex = 0;
+       newpn->peer_type = peer_type;
+       newpn->rx_credits = 0;
+       newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
+       newpn->init_enable = enabled;
+       newpn->aligned = aligned;
 
-       sock_hold(newsk);
-       sk_del_node_init(newsk);
-       sk_acceptq_removed(sk);
+       err = pep_accept_conn(newsk, skb);
+       if (err) {
+               sock_put(newsk);
+               newsk = NULL;
+               goto drop;
+       }
        sk_add_node(newsk, &pn->hlist);
-       __sock_put(newsk);
-
-out:
+drop:
        release_sock(sk);
+       kfree_skb(skb);
        *errp = err;
        return newsk;
 }
 
-#ifdef CONFIG_PHONET_PIPECTRLR
 static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len)
 {
        struct pep_sock *pn = pep_sk(sk);
-       struct sockaddr_pn *spn =  (struct sockaddr_pn *)addr;
-
-       memcpy(&pn->remote_pep, spn, sizeof(struct sockaddr_pn));
+       int err;
+       u8 data[4] = { 0 /* sub-blocks */, PAD, PAD, PAD };
 
-       return pipe_handler_send_req(sk,
-                       PNS_PEP_CONNECT_UTID, PNS_PEP_CONNECT_REQ,
-                       GFP_ATOMIC);
+       pn->pipe_handle = 1; /* anything but INVALID_HANDLE */
+       err = pipe_handler_request(sk, PNS_PEP_CONNECT_REQ,
+                                       PN_PIPE_ENABLE, data, 4);
+       if (err) {
+               pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
+               return err;
+       }
+       sk->sk_state = TCP_SYN_SENT;
+       return 0;
 }
-#endif
 
 static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
@@ -1081,10 +903,18 @@ static int pep_init(struct sock *sk)
 {
        struct pep_sock *pn = pep_sk(sk);
 
-       INIT_HLIST_HEAD(&pn->ackq);
+       sk->sk_destruct = pipe_destruct;
        INIT_HLIST_HEAD(&pn->hlist);
+       pn->listener = NULL;
        skb_queue_head_init(&pn->ctrlreq_queue);
+       atomic_set(&pn->tx_credits, 0);
+       pn->ifindex = 0;
+       pn->peer_type = 0;
        pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
+       pn->rx_credits = 0;
+       pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
+       pn->init_enable = 1;
+       pn->aligned = 0;
        return 0;
 }
 
@@ -1103,18 +933,6 @@ static int pep_setsockopt(struct sock *sk, int level, int optname,
 
        lock_sock(sk);
        switch (optname) {
-#ifdef CONFIG_PHONET_PIPECTRLR
-       case PNPIPE_PIPE_HANDLE:
-               if (val) {
-                       if (pn->pipe_state > PIPE_IDLE) {
-                               err = -EFAULT;
-                               break;
-                       }
-                       pn->pipe_handle = val;
-                       break;
-               }
-#endif
-
        case PNPIPE_ENCAP:
                if (val && val != PNPIPE_ENCAP_IP) {
                        err = -EINVAL;
@@ -1141,16 +959,6 @@ static int pep_setsockopt(struct sock *sk, int level, int optname,
                }
                goto out_norel;
 
-#ifdef CONFIG_PHONET_PIPECTRLR
-       case PNPIPE_ENABLE:
-               if (pn->pipe_state <= PIPE_IDLE) {
-                       err = -ENOTCONN;
-                       break;
-               }
-               err = pipe_handler_enable_pipe(sk, val);
-               break;
-#endif
-
        default:
                err = -ENOPROTOOPT;
        }
@@ -1180,13 +988,11 @@ static int pep_getsockopt(struct sock *sk, int level, int optname,
                val = pn->ifindex;
                break;
 
-#ifdef CONFIG_PHONET_PIPECTRLR
-       case PNPIPE_ENABLE:
-               if (pn->pipe_state <= PIPE_IDLE)
-                       return -ENOTCONN;
-               val = pn->pipe_state != PIPE_DISABLED;
+       case PNPIPE_HANDLE:
+               val = pn->pipe_handle;
+               if (val == PN_PIPE_INVALID_HANDLE)
+                       return -EINVAL;
                break;
-#endif
 
        default:
                return -ENOPROTOOPT;
@@ -1222,11 +1028,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
        } else
                ph->message_id = PNS_PIPE_DATA;
        ph->pipe_handle = pn->pipe_handle;
-#ifdef CONFIG_PHONET_PIPECTRLR
-       err = pn_skb_send(sk, skb, &pn->remote_pep);
-#else
-       err = pn_skb_send(sk, skb, &pipe_srv);
-#endif
+       err = pn_skb_send(sk, skb, NULL);
 
        if (err && pn_flow_safe(pn->tx_fc))
                atomic_inc(&pn->tx_credits);
@@ -1355,7 +1157,7 @@ struct sk_buff *pep_read(struct sock *sk)
        struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
 
        if (sk->sk_state == TCP_ESTABLISHED)
-               pipe_grant_credits(sk);
+               pipe_grant_credits(sk, GFP_ATOMIC);
        return skb;
 }
 
@@ -1400,7 +1202,7 @@ static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
        }
 
        if (sk->sk_state == TCP_ESTABLISHED)
-               pipe_grant_credits(sk);
+               pipe_grant_credits(sk, GFP_KERNEL);
        release_sock(sk);
 copy:
        msg->msg_flags |= MSG_EOR;
@@ -1424,9 +1226,9 @@ static void pep_sock_unhash(struct sock *sk)
 
        lock_sock(sk);
 
-#ifndef CONFIG_PHONET_PIPECTRLR
-       if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) {
+       if (pn->listener != NULL) {
                skparent = pn->listener;
+               pn->listener = NULL;
                release_sock(sk);
 
                pn = pep_sk(skparent);
@@ -1434,7 +1236,7 @@ static void pep_sock_unhash(struct sock *sk)
                sk_del_node_init(sk);
                sk = skparent;
        }
-#endif
+
        /* Unhash a listening sock only when it is closed
         * and all of its active connected pipes are closed. */
        if (hlist_empty(&pn->hlist))
@@ -1448,9 +1250,7 @@ static void pep_sock_unhash(struct sock *sk)
 static struct proto pep_proto = {
        .close          = pep_sock_close,
        .accept         = pep_sock_accept,
-#ifdef CONFIG_PHONET_PIPECTRLR
        .connect        = pep_sock_connect,
-#endif
        .ioctl          = pep_ioctl,
        .init           = pep_init,
        .setsockopt     = pep_setsockopt,
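For reference, a minimal userspace sketch of driving the reworked pipe connect() path shown above. This is an illustration under assumptions, not code from the patch: AF_PHONET, PN_PROTO_PIPE, SOL_PNPIPE and PNPIPE_HANDLE are taken to come from <linux/phonet.h> as extended by this series, and the 0xD9 resource value is a placeholder.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/phonet.h>

int main(void)
{
	struct sockaddr_pn spn;
	int fd, handle;
	socklen_t len = sizeof(handle);

	fd = socket(AF_PHONET, SOCK_SEQPACKET, PN_PROTO_PIPE);
	if (fd < 0)
		return 1;

	memset(&spn, 0, sizeof(spn));
	spn.spn_family = AF_PHONET;
	spn.spn_resource = 0xD9;	/* placeholder pipe-server resource */

	/* Blocks in pn_socket_connect() until the peer confirms or rejects. */
	if (connect(fd, (struct sockaddr *)&spn, sizeof(spn)) == 0 &&
	    getsockopt(fd, SOL_PNPIPE, PNPIPE_HANDLE, &handle, &len) == 0)
		printf("connected, pipe handle %d\n", handle);

	close(fd);
	return 0;
}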
index 25f746d20c1f2dcbf42826cf33f07a35e44ace1c..b1adafab377c528714192d749874355a283c0a17 100644 (file)
@@ -225,15 +225,18 @@ static int pn_socket_autobind(struct socket *sock)
        return 0; /* socket was already bound */
 }
 
-#ifdef CONFIG_PHONET_PIPECTRLR
 static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
                int len, int flags)
 {
        struct sock *sk = sock->sk;
+       struct pn_sock *pn = pn_sk(sk);
        struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
-       long timeo;
+       struct task_struct *tsk = current;
+       long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
        int err;
 
+       if (pn_socket_autobind(sock))
+               return -ENOBUFS;
        if (len < sizeof(struct sockaddr_pn))
                return -EINVAL;
        if (spn->spn_family != AF_PHONET)
@@ -243,82 +246,61 @@ static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
 
        switch (sock->state) {
        case SS_UNCONNECTED:
-               sk->sk_state = TCP_CLOSE;
-               break;
-       case SS_CONNECTING:
-               switch (sk->sk_state) {
-               case TCP_SYN_RECV:
-                       sock->state = SS_CONNECTED;
+               if (sk->sk_state != TCP_CLOSE) {
                        err = -EISCONN;
                        goto out;
-               case TCP_CLOSE:
-                       err = -EALREADY;
-                       if (flags & O_NONBLOCK)
-                               goto out;
-                       goto wait_connect;
                }
                break;
-       case SS_CONNECTED:
-               switch (sk->sk_state) {
-               case TCP_SYN_RECV:
-                       err = -EISCONN;
-                       goto out;
-               case TCP_CLOSE:
-                       sock->state = SS_UNCONNECTED;
-                       break;
-               }
-               break;
-       case SS_DISCONNECTING:
-       case SS_FREE:
-               break;
+       case SS_CONNECTING:
+               err = -EALREADY;
+               goto out;
+       default:
+               err = -EISCONN;
+               goto out;
        }
-       sk->sk_state = TCP_CLOSE;
-       sk_stream_kill_queues(sk);
 
+       pn->dobject = pn_sockaddr_get_object(spn);
+       pn->resource = pn_sockaddr_get_resource(spn);
        sock->state = SS_CONNECTING;
+
        err = sk->sk_prot->connect(sk, addr, len);
-       if (err < 0) {
+       if (err) {
                sock->state = SS_UNCONNECTED;
-               sk->sk_state = TCP_CLOSE;
+               pn->dobject = 0;
                goto out;
        }
 
-       err = -EINPROGRESS;
-wait_connect:
-       if (sk->sk_state != TCP_SYN_RECV && (flags & O_NONBLOCK))
-               goto out;
-
-       timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
-       release_sock(sk);
-
-       err = -ERESTARTSYS;
-       timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
-                       sk->sk_state != TCP_CLOSE,
-                       timeo);
-
-       lock_sock(sk);
-       if (timeo < 0)
-               goto out; /* -ERESTARTSYS */
+       while (sk->sk_state == TCP_SYN_SENT) {
+               DEFINE_WAIT(wait);
 
-       err = -ETIMEDOUT;
-       if (timeo == 0 && sk->sk_state != TCP_SYN_RECV)
-               goto out;
+               if (!timeo) {
+                       err = -EINPROGRESS;
+                       goto out;
+               }
+               if (signal_pending(tsk)) {
+                       err = sock_intr_errno(timeo);
+                       goto out;
+               }
 
-       if (sk->sk_state != TCP_SYN_RECV) {
-               sock->state = SS_UNCONNECTED;
-               err = sock_error(sk);
-               if (!err)
-                       err = -ECONNREFUSED;
-               goto out;
+               prepare_to_wait_exclusive(sk_sleep(sk), &wait,
+                                               TASK_INTERRUPTIBLE);
+               release_sock(sk);
+               timeo = schedule_timeout(timeo);
+               lock_sock(sk);
+               finish_wait(sk_sleep(sk), &wait);
        }
-       sock->state = SS_CONNECTED;
-       err = 0;
 
+       if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
+               err = 0;
+       else if (sk->sk_state == TCP_CLOSE_WAIT)
+               err = -ECONNRESET;
+       else
+               err = -ECONNREFUSED;
+       sock->state = err ? SS_UNCONNECTED : SS_CONNECTED;
 out:
        release_sock(sk);
        return err;
 }
-#endif
 
 static int pn_socket_accept(struct socket *sock, struct socket *newsock,
                                int flags)
@@ -327,6 +309,9 @@ static int pn_socket_accept(struct socket *sock, struct socket *newsock,
        struct sock *newsk;
        int err;
 
+       if (unlikely(sk->sk_state != TCP_LISTEN))
+               return -EINVAL;
+
        newsk = sk->sk_prot->accept(sk, flags, &err);
        if (!newsk)
                return err;
@@ -363,13 +348,8 @@ static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
 
        poll_wait(file, sk_sleep(sk), wait);
 
-       switch (sk->sk_state) {
-       case TCP_LISTEN:
-               return hlist_empty(&pn->ackq) ? 0 : POLLIN;
-       case TCP_CLOSE:
+       if (sk->sk_state == TCP_CLOSE)
                return POLLERR;
-       }
-
        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;
        if (!skb_queue_empty(&pn->ctrlreq_queue))
@@ -428,19 +408,19 @@ static int pn_socket_listen(struct socket *sock, int backlog)
        struct sock *sk = sock->sk;
        int err = 0;
 
-       if (sock->state != SS_UNCONNECTED)
-               return -EINVAL;
        if (pn_socket_autobind(sock))
                return -ENOBUFS;
 
        lock_sock(sk);
-       if (sk->sk_state != TCP_CLOSE) {
+       if (sock->state != SS_UNCONNECTED) {
                err = -EINVAL;
                goto out;
        }
 
-       sk->sk_state = TCP_LISTEN;
-       sk->sk_ack_backlog = 0;
+       if (sk->sk_state != TCP_LISTEN) {
+               sk->sk_state = TCP_LISTEN;
+               sk->sk_ack_backlog = 0;
+       }
        sk->sk_max_ack_backlog = backlog;
 out:
        release_sock(sk);
@@ -488,11 +468,7 @@ const struct proto_ops phonet_stream_ops = {
        .owner          = THIS_MODULE,
        .release        = pn_socket_release,
        .bind           = pn_socket_bind,
-#ifdef CONFIG_PHONET_PIPECTRLR
        .connect        = pn_socket_connect,
-#else
-       .connect        = sock_no_connect,
-#endif
        .socketpair     = sock_no_socketpair,
        .accept         = pn_socket_accept,
        .getname        = pn_socket_getname,
@@ -633,8 +609,8 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
 
                seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
                        "%d %p %d%n",
-                       sk->sk_protocol, pn->sobject, 0, pn->resource,
-                       sk->sk_state,
+                       sk->sk_protocol, pn->sobject, pn->dobject,
+                       pn->resource, sk->sk_state,
                        sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
                        sock_i_uid(sk), sock_i_ino(sk),
                        atomic_read(&sk->sk_refcnt), sk,
index 9542449c0720864af4697dd73cfb0685fd821a55..da8adac2bf06f95504ba21e8fdc6c5e09435ca9e 100644 (file)
@@ -50,7 +50,6 @@ rdsdebug(char *fmt, ...)
 #define RDS_FRAG_SIZE  ((unsigned int)(1 << RDS_FRAG_SHIFT))
 
 #define RDS_CONG_MAP_BYTES     (65536 / 8)
-#define RDS_CONG_MAP_LONGS     (RDS_CONG_MAP_BYTES / sizeof(unsigned long))
 #define RDS_CONG_MAP_PAGES     (PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
 #define RDS_CONG_MAP_PAGE_BITS (PAGE_SIZE * 8)
 
index d952e7eac18867501a0bf7d2034c621c2fd08df7..5ee0c62046a03d0d17c4abe78782fd7e7f07c9c5 100644 (file)
@@ -803,7 +803,6 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
 
                rose_insert_socket(sk);         /* Finish the bind */
        }
-rose_try_next_neigh:
        rose->dest_addr   = addr->srose_addr;
        rose->dest_call   = addr->srose_call;
        rose->rand        = ((long)rose & 0xFFFF) + rose->lci;
@@ -865,12 +864,6 @@ rose_try_next_neigh:
        }
 
        if (sk->sk_state != TCP_ESTABLISHED) {
-       /* Try next neighbour */
-               rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic, 0);
-               if (rose->neighbour)
-                       goto rose_try_next_neigh;
-
-               /* No more neighbours */
                sock->state = SS_UNCONNECTED;
                err = sock_error(sk);   /* Always set at this point */
                goto out_release;
index b4fdaac233f77ec3dbe4bd86b0f8d5bc178bb9f7..88a77e90e7e86835587aabdb17f6805377f8df82 100644 (file)
@@ -674,29 +674,34 @@ struct rose_route *rose_route_free_lci(unsigned int lci, struct rose_neigh *neig
  *     Find a neighbour or a route given a ROSE address.
  */
 struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
-       unsigned char *diagnostic, int new)
+       unsigned char *diagnostic, int route_frame)
 {
        struct rose_neigh *res = NULL;
        struct rose_node *node;
        int failed = 0;
        int i;
 
-       if (!new) spin_lock_bh(&rose_node_list_lock);
+       if (!route_frame) spin_lock_bh(&rose_node_list_lock);
        for (node = rose_node_list; node != NULL; node = node->next) {
                if (rosecmpm(addr, &node->address, node->mask) == 0) {
                        for (i = 0; i < node->count; i++) {
-                               if (new) {
-                                       if (node->neighbour[i]->restarted) {
-                                               res = node->neighbour[i];
-                                               goto out;
-                                       }
+                               if (node->neighbour[i]->restarted) {
+                                       res = node->neighbour[i];
+                                       goto out;
                                }
-                               else {
+                       }
+               }
+       }
+       if (!route_frame) { /* connect request */
+               for (node = rose_node_list; node != NULL; node = node->next) {
+                       if (rosecmpm(addr, &node->address, node->mask) == 0) {
+                               for (i = 0; i < node->count; i++) {
                                        if (!rose_ftimer_running(node->neighbour[i])) {
                                                res = node->neighbour[i];
+                                               failed = 0;
                                                goto out;
-                                       } else
-                                               failed = 1;
+                                       }
+                                       failed = 1;
                                }
                        }
                }
@@ -711,8 +716,7 @@ struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
        }
 
 out:
-       if (!new) spin_unlock_bh(&rose_node_list_lock);
-
+       if (!route_frame) spin_unlock_bh(&rose_node_list_lock);
        return res;
 }
 
index a53fb25a64edb114ee2d0b7d531081cee6426227..3620c569275f58908eb19b735d475ef50b80bd83 100644 (file)
@@ -37,7 +37,6 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
 {
        struct rtable *rt;
        struct flowi fl;
-       int ret;
 
        peer->if_mtu = 1500;
 
@@ -58,9 +57,9 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
                BUG();
        }
 
-       ret = ip_route_output_key(&init_net, &rt, &fl);
-       if (ret < 0) {
-               _leave(" [route err %d]", ret);
+       rt = ip_route_output_key(&init_net, &fl);
+       if (IS_ERR(rt)) {
+               _leave(" [route err %ld]", PTR_ERR(rt));
                return;
        }
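The ar-peer.c hunk above reflects a changed routing-lookup convention: ip_route_output_key() now returns the rtable directly, encoding failure as an ERR_PTR() rather than through an int return and an output parameter. A minimal kernel-side sketch of a caller under that assumption (the helper name and MTU use are illustrative only):

#include <linux/err.h>
#include <net/net_namespace.h>
#include <net/route.h>		/* ip_route_output_key(), ip_rt_put() */

/* Hypothetical helper: look up a route for @fl and report its path MTU. */
static int example_route_mtu(struct flowi *fl, unsigned int *mtu)
{
	struct rtable *rt;

	rt = ip_route_output_key(&init_net, fl);
	if (IS_ERR(rt))
		return PTR_ERR(rt);	/* negative errno encoded in the pointer */

	*mtu = dst_mtu(&rt->dst);
	ip_rt_put(rt);			/* release the reference taken by the lookup */
	return 0;
}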
 
index f04d4a484d5384e5b13bdcafdcae691da31eafdc..a7a5583d4f68471885953b4bcdfc0727e8f3fcbf 100644 (file)
@@ -126,6 +126,17 @@ config NET_SCH_RED
          To compile this code as a module, choose M here: the
          module will be called sch_red.
 
+config NET_SCH_SFB
+       tristate "Stochastic Fair Blue (SFB)"
+       ---help---
+         Say Y here if you want to use the Stochastic Fair Blue (SFB)
+         packet scheduling algorithm.
+
+         See the top of <file:net/sched/sch_sfb.c> for more details.
+
+         To compile this code as a module, choose M here: the
+         module will be called sch_sfb.
+
 config NET_SCH_SFQ
        tristate "Stochastic Fairness Queueing (SFQ)"
        ---help---
@@ -205,6 +216,29 @@ config NET_SCH_DRR
 
          If unsure, say N.
 
+config NET_SCH_MQPRIO
+       tristate "Multi-queue priority scheduler (MQPRIO)"
+       help
+         Say Y here if you want to use the Multi-queue Priority scheduler.
+         This scheduler allows QOS to be offloaded on NICs that have support
+         for offloading QOS schedulers.
+
+         To compile this driver as a module, choose M here: the module will
+         be called sch_mqprio.
+
+         If unsure, say N.
+
+config NET_SCH_CHOKE
+       tristate "CHOose and Keep responsive flow scheduler (CHOKE)"
+       help
+         Say Y here if you want to use the CHOKe packet scheduler (CHOose
+         and Keep for responsive flows, CHOose and Kill for unresponsive
+         flows). This is a variation of RED which tries to penalize flows
+         that monopolize the queue.
+
+         To compile this code as a module, choose M here: the
+         module will be called sch_choke.
+
 config NET_SCH_INGRESS
        tristate "Ingress Qdisc"
        depends on NET_CLS_ACT
@@ -243,7 +277,7 @@ config NET_CLS_TCINDEX
 
 config NET_CLS_ROUTE4
        tristate "Routing decision (ROUTE)"
-       select NET_CLS_ROUTE
+       select IP_ROUTE_CLASSID
        select NET_CLS
        ---help---
          If you say Y here, you will be able to classify packets
@@ -252,9 +286,6 @@ config NET_CLS_ROUTE4
          To compile this code as a module, choose M here: the
          module will be called cls_route.
 
-config NET_CLS_ROUTE
-       bool
-
 config NET_CLS_FW
        tristate "Netfilter mark (FW)"
        select NET_CLS
index 960f5dba63041559835978dd0c5d63c2d3229436..2e77b8dba22e13009044848067f9a2c91f93f9fd 100644 (file)
@@ -24,6 +24,7 @@ obj-$(CONFIG_NET_SCH_RED)     += sch_red.o
 obj-$(CONFIG_NET_SCH_GRED)     += sch_gred.o
 obj-$(CONFIG_NET_SCH_INGRESS)  += sch_ingress.o 
 obj-$(CONFIG_NET_SCH_DSMARK)   += sch_dsmark.o
+obj-$(CONFIG_NET_SCH_SFB)      += sch_sfb.o
 obj-$(CONFIG_NET_SCH_SFQ)      += sch_sfq.o
 obj-$(CONFIG_NET_SCH_TBF)      += sch_tbf.o
 obj-$(CONFIG_NET_SCH_TEQL)     += sch_teql.o
@@ -32,6 +33,9 @@ obj-$(CONFIG_NET_SCH_MULTIQ)  += sch_multiq.o
 obj-$(CONFIG_NET_SCH_ATM)      += sch_atm.o
 obj-$(CONFIG_NET_SCH_NETEM)    += sch_netem.o
 obj-$(CONFIG_NET_SCH_DRR)      += sch_drr.o
+obj-$(CONFIG_NET_SCH_MQPRIO)   += sch_mqprio.o
+obj-$(CONFIG_NET_SCH_CHOKE)    += sch_choke.o
+
 obj-$(CONFIG_NET_CLS_U32)      += cls_u32.o
 obj-$(CONFIG_NET_CLS_ROUTE4)   += cls_route.o
 obj-$(CONFIG_NET_CLS_FW)       += cls_fw.o
index 23b25f89e7e00d7c9219a3ec2e0a0b33e49fc552..15873e14cb546f5bc75c5baa6b3fb73a00409c34 100644 (file)
@@ -78,7 +78,7 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
                           struct tc_action *a, struct tcf_hashinfo *hinfo)
 {
        struct tcf_common *p;
-       int err = 0, index = -1,i = 0, s_i = 0, n_i = 0;
+       int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
        struct nlattr *nest;
 
        read_lock_bh(hinfo->lock);
@@ -126,7 +126,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
 {
        struct tcf_common *p, *s_p;
        struct nlattr *nest;
-       int i= 0, n_i = 0;
+       int i = 0, n_i = 0;
 
        nest = nla_nest_start(skb, a->order);
        if (nest == NULL)
@@ -138,7 +138,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
                while (p != NULL) {
                        s_p = p->tcfc_next;
                        if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
-                                module_put(a->ops->owner);
+                               module_put(a->ops->owner);
                        n_i++;
                        p = s_p;
                }
@@ -447,7 +447,8 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
-       if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) {
+       err = tcf_action_dump_old(skb, a, bind, ref);
+       if (err > 0) {
                nla_nest_end(skb, nest);
                return err;
        }
@@ -491,7 +492,7 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
        struct tc_action *a;
        struct tc_action_ops *a_o;
        char act_name[IFNAMSIZ];
-       struct nlattr *tb[TCA_ACT_MAX+1];
+       struct nlattr *tb[TCA_ACT_MAX + 1];
        struct nlattr *kind;
        int err;
 
@@ -549,9 +550,9 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
                goto err_free;
 
        /* module count goes up only when brand new policy is created
-          if it exists and is only bound to in a_o->init() then
-          ACT_P_CREATED is not returned (a zero is).
-       */
+        * if it exists and is only bound to in a_o->init() then
+        * ACT_P_CREATED is not returned (a zero is).
+        */
        if (err != ACT_P_CREATED)
                module_put(a_o->owner);
        a->ops = a_o;
@@ -569,7 +570,7 @@ err_out:
 struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est,
                                  char *name, int ovr, int bind)
 {
-       struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
+       struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
        struct tc_action *head = NULL, *act, *act_prev = NULL;
        int err;
        int i;
@@ -697,7 +698,7 @@ act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n,
 static struct tc_action *
 tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
 {
-       struct nlattr *tb[TCA_ACT_MAX+1];
+       struct nlattr *tb[TCA_ACT_MAX + 1];
        struct tc_action *a;
        int index;
        int err;
@@ -770,7 +771,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
        struct tcamsg *t;
        struct netlink_callback dcb;
        struct nlattr *nest;
-       struct nlattr *tb[TCA_ACT_MAX+1];
+       struct nlattr *tb[TCA_ACT_MAX + 1];
        struct nlattr *kind;
        struct tc_action *a = create_a(0);
        int err = -ENOMEM;
@@ -821,7 +822,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
        nlh->nlmsg_flags |= NLM_F_ROOT;
        module_put(a->ops->owner);
        kfree(a);
-       err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+       err = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+                            n->nlmsg_flags & NLM_F_ECHO);
        if (err > 0)
                return 0;
 
@@ -842,14 +844,14 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
              u32 pid, int event)
 {
        int i, ret;
-       struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
+       struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
        struct tc_action *head = NULL, *act, *act_prev = NULL;
 
        ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
        if (ret < 0)
                return ret;
 
-       if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
+       if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
                if (tb[1] != NULL)
                        return tca_action_flush(net, tb[1], n, pid);
                else
@@ -892,7 +894,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
                /* now do the delete */
                tcf_action_destroy(head, 0);
                ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
-                                    n->nlmsg_flags&NLM_F_ECHO);
+                                    n->nlmsg_flags & NLM_F_ECHO);
                if (ret > 0)
                        return 0;
                return ret;
@@ -936,7 +938,7 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        NETLINK_CB(skb).dst_group = RTNLGRP_TC;
 
-       err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags&NLM_F_ECHO);
+       err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags & NLM_F_ECHO);
        if (err > 0)
                err = 0;
        return err;
@@ -967,7 +969,7 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
 
        /* dump then free all the actions after update; inserted policy
         * stays intact
-        * */
+        */
        ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
        for (a = act; a; a = act) {
                act = a->next;
@@ -993,8 +995,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                return -EINVAL;
        }
 
-       /* n->nlmsg_flags&NLM_F_CREATE
-        * */
+       /* n->nlmsg_flags & NLM_F_CREATE */
        switch (n->nlmsg_type) {
        case RTM_NEWACTION:
                /* we are going to assume all other flags
@@ -1003,7 +1004,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                 * but since we want avoid ambiguity (eg when flags
                 * is zero) then just set this
                 */
-               if (n->nlmsg_flags&NLM_F_REPLACE)
+               if (n->nlmsg_flags & NLM_F_REPLACE)
                        ovr = 1;
 replay:
                ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr);
@@ -1028,7 +1029,7 @@ replay:
 static struct nlattr *
 find_dump_kind(const struct nlmsghdr *n)
 {
-       struct nlattr *tb1, *tb2[TCA_ACT_MAX+1];
+       struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
        struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
        struct nlattr *nla[TCAA_MAX + 1];
        struct nlattr *kind;
@@ -1071,9 +1072,8 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
        }
 
        a_o = tc_lookup_action(kind);
-       if (a_o == NULL) {
+       if (a_o == NULL)
                return 0;
-       }
 
        memset(&a, 0, sizeof(struct tc_action));
        a.ops = a_o;
index 83ddfc07e45da91831a9c765c8af3896bf9ee079..6cdf9abe475f065aa2b5d9c648131e80932bdeb0 100644 (file)
@@ -63,7 +63,7 @@ static int tcf_csum_init(struct nlattr *nla, struct nlattr *est,
        if (nla == NULL)
                return -EINVAL;
 
-       err = nla_parse_nested(tb, TCA_CSUM_MAX, nla,csum_policy);
+       err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
        if (err < 0)
                return err;
 
index c2ed90a4c0b428a984c7329e1af0f8d3957a0ae4..2b4ab4b05ce84843a30c1a378371bea829cdf3a5 100644 (file)
@@ -50,7 +50,7 @@ static int gact_determ(struct tcf_gact *gact)
 }
 
 typedef int (*g_rand)(struct tcf_gact *gact);
-static g_rand gact_rand[MAX_RAND]= { NULL, gact_net_rand, gact_determ };
+static g_rand gact_rand[MAX_RAND] = { NULL, gact_net_rand, gact_determ };
 #endif /* CONFIG_GACT_PROB */
 
 static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = {
@@ -89,7 +89,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
                pc = tcf_hash_create(parm->index, est, a, sizeof(*gact),
                                     bind, &gact_idx_gen, &gact_hash_info);
                if (IS_ERR(pc))
-                   return PTR_ERR(pc);
+                       return PTR_ERR(pc);
                ret = ACT_P_CREATED;
        } else {
                if (!ovr) {
@@ -205,9 +205,9 @@ MODULE_LICENSE("GPL");
 static int __init gact_init_module(void)
 {
 #ifdef CONFIG_GACT_PROB
-       printk(KERN_INFO "GACT probability on\n");
+       pr_info("GACT probability on\n");
 #else
-       printk(KERN_INFO "GACT probability NOT on\n");
+       pr_info("GACT probability NOT on\n");
 #endif
        return tcf_register_action(&act_gact_ops);
 }
index c2a7c20e81c184ceea4cabb66659c22798c9fa8f..9fc211a1b20e6c1d344bcd99d756824c76d2de91 100644 (file)
@@ -138,7 +138,7 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
                pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind,
                                     &ipt_idx_gen, &ipt_hash_info);
                if (IS_ERR(pc))
-                   return PTR_ERR(pc);
+                       return PTR_ERR(pc);
                ret = ACT_P_CREATED;
        } else {
                if (!ovr) {
@@ -162,7 +162,8 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
        if (unlikely(!t))
                goto err2;
 
-       if ((err = ipt_init_target(t, tname, hook)) < 0)
+       err = ipt_init_target(t, tname, hook);
+       if (err < 0)
                goto err3;
 
        spin_lock_bh(&ipt->tcf_lock);
@@ -212,8 +213,9 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
        bstats_update(&ipt->tcf_bstats, skb);
 
        /* yes, we have to worry about both in and out dev
-        worry later - danger - this API seems to have changed
-        from earlier kernels */
+        * worry later - danger - this API seems to have changed
+        * from earlier kernels
+        */
        par.in       = skb->dev;
        par.out      = NULL;
        par.hooknum  = ipt->tcfi_hook;
@@ -253,9 +255,9 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
        struct tc_cnt c;
 
        /* for simple targets kernel size == user size
-       ** user name = target name
-       ** for foolproof you need to not assume this
-       */
+        * user name = target name
+        * for foolproof you need to not assume this
+        */
 
        t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
        if (unlikely(!t))
index d765067e99dbd1485135b406b69abf512d8cbd26..961386e2f2c08e0dffdd496b9bc7af2cc6eb04d0 100644 (file)
@@ -41,13 +41,13 @@ static struct tcf_hashinfo mirred_hash_info = {
        .lock   =       &mirred_lock,
 };
 
-static inline int tcf_mirred_release(struct tcf_mirred *m, int bind)
+static int tcf_mirred_release(struct tcf_mirred *m, int bind)
 {
        if (m) {
                if (bind)
                        m->tcf_bindcnt--;
                m->tcf_refcnt--;
-               if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
+               if (!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
                        list_del(&m->tcfm_list);
                        if (m->tcfm_dev)
                                dev_put(m->tcfm_dev);
index 178a4bd7b7cbfd28accd393ef6b9700bc6f6d3f4..762b027650a904a7cfd8d0bb62222423adba0a36 100644 (file)
@@ -69,7 +69,7 @@ static int tcf_nat_init(struct nlattr *nla, struct nlattr *est,
                pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
                                     &nat_idx_gen, &nat_hash_info);
                if (IS_ERR(pc))
-                   return PTR_ERR(pc);
+                       return PTR_ERR(pc);
                p = to_tcf_nat(pc);
                ret = ACT_P_CREATED;
        } else {
index 445bef716f770f5fa625243d999d11228edbf5b2..50c7c06c019d6e217cb55623b8bbd282dfd2b439 100644 (file)
@@ -70,7 +70,7 @@ static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est,
                pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
                                     &pedit_idx_gen, &pedit_hash_info);
                if (IS_ERR(pc))
-                   return PTR_ERR(pc);
+                       return PTR_ERR(pc);
                p = to_pedit(pc);
                keys = kmalloc(ksize, GFP_KERNEL);
                if (keys == NULL) {
@@ -127,11 +127,9 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
        int i, munged = 0;
        unsigned int off;
 
-       if (skb_cloned(skb)) {
-               if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
-                       return p->tcf_action;
-               }
-       }
+       if (skb_cloned(skb) &&
+           pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+               return p->tcf_action;
 
        off = skb_network_offset(skb);
 
index e2f08b1e2e5897084e84fd378d7505d2310f7cae..8a1630774fd6bdd6536f199b9665517e67312381 100644 (file)
@@ -22,8 +22,8 @@
 #include <net/act_api.h>
 #include <net/netlink.h>
 
-#define L2T(p,L)   qdisc_l2t((p)->tcfp_R_tab, L)
-#define L2T_P(p,L) qdisc_l2t((p)->tcfp_P_tab, L)
+#define L2T(p, L)   qdisc_l2t((p)->tcfp_R_tab, L)
+#define L2T_P(p, L) qdisc_l2t((p)->tcfp_P_tab, L)
 
 #define POL_TAB_MASK     15
 static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
@@ -37,8 +37,7 @@ static struct tcf_hashinfo police_hash_info = {
 };
 
 /* old policer structure from before tc actions */
-struct tc_police_compat
-{
+struct tc_police_compat {
        u32                     index;
        int                     action;
        u32                     limit;
@@ -139,7 +138,7 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
 static int tcf_act_police_locate(struct nlattr *nla, struct nlattr *est,
                                 struct tc_action *a, int ovr, int bind)
 {
-       unsigned h;
+       unsigned int h;
        int ret = 0, err;
        struct nlattr *tb[TCA_POLICE_MAX + 1];
        struct tc_police *parm;
index 7287cff7af3e045047020143045fbb9213cd9957..a34a22de60b3c65e0ed3b4613fe77e329663fe83 100644 (file)
@@ -47,7 +47,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result
        /* print policy string followed by _ then packet count
         * Example if this was the 3rd packet and the string was "hello"
         * then it would look like "hello_3" (without quotes)
-        **/
+        */
        pr_info("simple: %s_%d\n",
               (char *)d->tcfd_defdata, d->tcf_bstats.packets);
        spin_unlock(&d->tcf_lock);
@@ -125,7 +125,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
                pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
                                     &simp_idx_gen, &simp_hash_info);
                if (IS_ERR(pc))
-                   return PTR_ERR(pc);
+                       return PTR_ERR(pc);
 
                d = to_defact(pc);
                ret = alloc_defdata(d, defdata);
@@ -149,7 +149,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
        return ret;
 }
 
-static inline int tcf_simp_cleanup(struct tc_action *a, int bind)
+static int tcf_simp_cleanup(struct tc_action *a, int bind)
 {
        struct tcf_defact *d = a->priv;
 
@@ -158,8 +158,8 @@ static inline int tcf_simp_cleanup(struct tc_action *a, int bind)
        return 0;
 }
 
-static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
-                               int bind, int ref)
+static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
+                        int bind, int ref)
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_defact *d = a->priv;
index 836f5fee9e5898c400eb651fa099b485d6f2f124..5f6f0c7c39059bb2f94bf69438bb324891a88a41 100644 (file)
@@ -113,7 +113,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
                pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
                                     &skbedit_idx_gen, &skbedit_hash_info);
                if (IS_ERR(pc))
-                   return PTR_ERR(pc);
+                       return PTR_ERR(pc);
 
                d = to_skbedit(pc);
                ret = ACT_P_CREATED;
@@ -144,7 +144,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
        return ret;
 }
 
-static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind)
+static int tcf_skbedit_cleanup(struct tc_action *a, int bind)
 {
        struct tcf_skbedit *d = a->priv;
 
@@ -153,8 +153,8 @@ static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind)
        return 0;
 }
 
-static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
-                               int bind, int ref)
+static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
+                           int bind, int ref)
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_skbedit *d = a->priv;
index 5fd0c28ef79a319ef1ab54474f6db98eb3b274cd..bb2c523f81587adb116a0f5dc659807d27eda855 100644 (file)
@@ -85,7 +85,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
        int rc = -ENOENT;
 
        write_lock(&cls_mod_lock);
-       for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next)
+       for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
                if (t == ops)
                        break;
 
@@ -111,7 +111,7 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
        u32 first = TC_H_MAKE(0xC0000000U, 0U);
 
        if (tp)
-               first = tp->prio-1;
+               first = tp->prio - 1;
 
        return first;
 }
@@ -149,7 +149,8 @@ replay:
 
        if (prio == 0) {
                /* If no priority is given, user wants we allocated it. */
-               if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
+               if (n->nlmsg_type != RTM_NEWTFILTER ||
+                   !(n->nlmsg_flags & NLM_F_CREATE))
                        return -ENOENT;
                prio = TC_H_MAKE(0x80000000U, 0U);
        }
@@ -176,7 +177,8 @@ replay:
        }
 
        /* Is it classful? */
-       if ((cops = q->ops->cl_ops) == NULL)
+       cops = q->ops->cl_ops;
+       if (!cops)
                return -EINVAL;
 
        if (cops->tcf_chain == NULL)
@@ -196,10 +198,11 @@ replay:
                goto errout;
 
        /* Check the chain for existence of proto-tcf with this priority */
-       for (back = chain; (tp=*back) != NULL; back = &tp->next) {
+       for (back = chain; (tp = *back) != NULL; back = &tp->next) {
                if (tp->prio >= prio) {
                        if (tp->prio == prio) {
-                               if (!nprio || (tp->protocol != protocol && protocol))
+                               if (!nprio ||
+                                   (tp->protocol != protocol && protocol))
                                        goto errout;
                        } else
                                tp = NULL;
@@ -216,7 +219,8 @@ replay:
                        goto errout;
 
                err = -ENOENT;
-               if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
+               if (n->nlmsg_type != RTM_NEWTFILTER ||
+                   !(n->nlmsg_flags & NLM_F_CREATE))
                        goto errout;
 
 
@@ -420,7 +424,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 
        if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
                return skb->len;
-       if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+       dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+       if (!dev)
                return skb->len;
 
        if (!tcm->tcm_parent)
@@ -429,7 +434,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
                q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
        if (!q)
                goto out;
-       if ((cops = q->ops->cl_ops) == NULL)
+       cops = q->ops->cl_ops;
+       if (!cops)
                goto errout;
        if (cops->tcf_chain == NULL)
                goto errout;
@@ -444,8 +450,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 
        s_t = cb->args[0];
 
-       for (tp=*chain, t=0; tp; tp = tp->next, t++) {
-               if (t < s_t) continue;
+       for (tp = *chain, t = 0; tp; tp = tp->next, t++) {
+               if (t < s_t)
+                       continue;
                if (TC_H_MAJ(tcm->tcm_info) &&
                    TC_H_MAJ(tcm->tcm_info) != tp->prio)
                        continue;
@@ -468,10 +475,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
                arg.skb = skb;
                arg.cb = cb;
                arg.w.stop = 0;
-               arg.w.skip = cb->args[1]-1;
+               arg.w.skip = cb->args[1] - 1;
                arg.w.count = 0;
                tp->ops->walk(tp, &arg.w);
-               cb->args[1] = arg.w.count+1;
+               cb->args[1] = arg.w.count + 1;
                if (arg.w.stop)
                        break;
        }
index f23d9155b1efc94cdf7a2d2a65b7abcfc6ec0a77..8be8872dd571c168c798807f0dc89a3dbb4d859c 100644 (file)
 #include <net/act_api.h>
 #include <net/pkt_cls.h>
 
-struct basic_head
-{
+struct basic_head {
        u32                     hgenerator;
        struct list_head        flist;
 };
 
-struct basic_filter
-{
+struct basic_filter {
        u32                     handle;
        struct tcf_exts         exts;
        struct tcf_ematch_tree  ematches;
@@ -92,8 +90,7 @@ static int basic_init(struct tcf_proto *tp)
        return 0;
 }
 
-static inline void basic_delete_filter(struct tcf_proto *tp,
-                                      struct basic_filter *f)
+static void basic_delete_filter(struct tcf_proto *tp, struct basic_filter *f)
 {
        tcf_unbind_filter(tp, &f->res);
        tcf_exts_destroy(tp, &f->exts);
@@ -135,9 +132,9 @@ static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = {
        [TCA_BASIC_EMATCHES]    = { .type = NLA_NESTED },
 };
 
-static inline int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f,
-                                 unsigned long base, struct nlattr **tb,
-                                 struct nlattr *est)
+static int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f,
+                          unsigned long base, struct nlattr **tb,
+                          struct nlattr *est)
 {
        int err = -EINVAL;
        struct tcf_exts e;
@@ -203,7 +200,7 @@ static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle,
                } while (--i > 0 && basic_get(tp, head->hgenerator));
 
                if (i <= 0) {
-                       printk(KERN_ERR "Insufficient number of handles\n");
+                       pr_err("Insufficient number of handles\n");
                        goto errout;
                }
 
index d49c40fb7e0960daa905353e714e9317dd38088b..32a335194ca51e8dfb356d7a8a13f930e0cf247e 100644 (file)
@@ -56,7 +56,8 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
 {
        struct cgroup_cls_state *cs;
 
-       if (!(cs = kzalloc(sizeof(*cs), GFP_KERNEL)))
+       cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+       if (!cs)
                return ERR_PTR(-ENOMEM);
 
        if (cgrp->parent)
@@ -94,8 +95,7 @@ static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
        return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
 }
 
-struct cls_cgroup_head
-{
+struct cls_cgroup_head {
        u32                     handle;
        struct tcf_exts         exts;
        struct tcf_ematch_tree  ematches;
@@ -166,7 +166,7 @@ static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
                             u32 handle, struct nlattr **tca,
                             unsigned long *arg)
 {
-       struct nlattr *tb[TCA_CGROUP_MAX+1];
+       struct nlattr *tb[TCA_CGROUP_MAX + 1];
        struct cls_cgroup_head *head = tp->root;
        struct tcf_ematch_tree t;
        struct tcf_exts e;
index 5b271a18bc3a54199510d8d7c6b8f2844afefcf9..8ec01391d98866dff1b0287ba392379d11b84d54 100644 (file)
@@ -121,7 +121,7 @@ static u32 flow_get_proto_src(struct sk_buff *skb)
                if (!pskb_network_may_pull(skb, sizeof(*iph)))
                        break;
                iph = ip_hdr(skb);
-               if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+               if (iph->frag_off & htons(IP_MF | IP_OFFSET))
                        break;
                poff = proto_ports_offset(iph->protocol);
                if (poff >= 0 &&
@@ -163,7 +163,7 @@ static u32 flow_get_proto_dst(struct sk_buff *skb)
                if (!pskb_network_may_pull(skb, sizeof(*iph)))
                        break;
                iph = ip_hdr(skb);
-               if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+               if (iph->frag_off & htons(IP_MF | IP_OFFSET))
                        break;
                poff = proto_ports_offset(iph->protocol);
                if (poff >= 0 &&
@@ -276,7 +276,7 @@ fallback:
 
 static u32 flow_get_rtclassid(const struct sk_buff *skb)
 {
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
        if (skb_dst(skb))
                return skb_dst(skb)->tclassid;
 #endif
index 93b0a7b6f9b474641595d4f093b79330bd0d05f9..26e7bc4ffb79fc32e18a6bf678f3ee80824fcf2c 100644 (file)
 
 #define HTSIZE (PAGE_SIZE/sizeof(struct fw_filter *))
 
-struct fw_head
-{
+struct fw_head {
        struct fw_filter *ht[HTSIZE];
        u32 mask;
 };
 
-struct fw_filter
-{
+struct fw_filter {
        struct fw_filter        *next;
        u32                     id;
        struct tcf_result       res;
@@ -53,7 +51,7 @@ static const struct tcf_ext_map fw_ext_map = {
        .police = TCA_FW_POLICE
 };
 
-static __inline__ int fw_hash(u32 handle)
+static inline int fw_hash(u32 handle)
 {
        if (HTSIZE == 4096)
                return ((handle >> 24) & 0xFFF) ^
@@ -82,14 +80,14 @@ static __inline__ int fw_hash(u32 handle)
 static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
                          struct tcf_result *res)
 {
-       struct fw_head *head = (struct fw_head*)tp->root;
+       struct fw_head *head = (struct fw_head *)tp->root;
        struct fw_filter *f;
        int r;
        u32 id = skb->mark;
 
        if (head != NULL) {
                id &= head->mask;
-               for (f=head->ht[fw_hash(id)]; f; f=f->next) {
+               for (f = head->ht[fw_hash(id)]; f; f = f->next) {
                        if (f->id == id) {
                                *res = f->res;
 #ifdef CONFIG_NET_CLS_IND
@@ -105,7 +103,8 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
                }
        } else {
                /* old method */
-               if (id && (TC_H_MAJ(id) == 0 || !(TC_H_MAJ(id^tp->q->handle)))) {
+               if (id && (TC_H_MAJ(id) == 0 ||
+                          !(TC_H_MAJ(id ^ tp->q->handle)))) {
                        res->classid = id;
                        res->class = 0;
                        return 0;
@@ -117,13 +116,13 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
 
 static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
 {
-       struct fw_head *head = (struct fw_head*)tp->root;
+       struct fw_head *head = (struct fw_head *)tp->root;
        struct fw_filter *f;
 
        if (head == NULL)
                return 0;
 
-       for (f=head->ht[fw_hash(handle)]; f; f=f->next) {
+       for (f = head->ht[fw_hash(handle)]; f; f = f->next) {
                if (f->id == handle)
                        return (unsigned long)f;
        }
@@ -139,8 +138,7 @@ static int fw_init(struct tcf_proto *tp)
        return 0;
 }
 
-static inline void
-fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
+static void fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
 {
        tcf_unbind_filter(tp, &f->res);
        tcf_exts_destroy(tp, &f->exts);
@@ -156,8 +154,8 @@ static void fw_destroy(struct tcf_proto *tp)
        if (head == NULL)
                return;
 
-       for (h=0; h<HTSIZE; h++) {
-               while ((f=head->ht[h]) != NULL) {
+       for (h = 0; h < HTSIZE; h++) {
+               while ((f = head->ht[h]) != NULL) {
                        head->ht[h] = f->next;
                        fw_delete_filter(tp, f);
                }
@@ -167,14 +165,14 @@ static void fw_destroy(struct tcf_proto *tp)
 
 static int fw_delete(struct tcf_proto *tp, unsigned long arg)
 {
-       struct fw_head *head = (struct fw_head*)tp->root;
-       struct fw_filter *f = (struct fw_filter*)arg;
+       struct fw_head *head = (struct fw_head *)tp->root;
+       struct fw_filter *f = (struct fw_filter *)arg;
        struct fw_filter **fp;
 
        if (head == NULL || f == NULL)
                goto out;
 
-       for (fp=&head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) {
+       for (fp = &head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) {
                if (*fp == f) {
                        tcf_tree_lock(tp);
                        *fp = f->next;
@@ -240,7 +238,7 @@ static int fw_change(struct tcf_proto *tp, unsigned long base,
                     struct nlattr **tca,
                     unsigned long *arg)
 {
-       struct fw_head *head = (struct fw_head*)tp->root;
+       struct fw_head *head = (struct fw_head *)tp->root;
        struct fw_filter *f = (struct fw_filter *) *arg;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_FW_MAX + 1];
@@ -302,7 +300,7 @@ errout:
 
 static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 {
-       struct fw_head *head = (struct fw_head*)tp->root;
+       struct fw_head *head = (struct fw_head *)tp->root;
        int h;
 
        if (head == NULL)
@@ -332,7 +330,7 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh,
                   struct sk_buff *skb, struct tcmsg *t)
 {
        struct fw_head *head = (struct fw_head *)tp->root;
-       struct fw_filter *f = (struct fw_filter*)fh;
+       struct fw_filter *f = (struct fw_filter *)fh;
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;
 
index 694dcd85dec83bda586f8a96843a1f8fac6a4259..a907905376dfb4913d16dcd2d11bcb456bf5504a 100644 (file)
 #include <net/pkt_cls.h>
 
 /*
-   1. For now we assume that route tags < 256.
-      It allows to use direct table lookups, instead of hash tables.
-   2. For now we assume that "from TAG" and "fromdev DEV" statements
-      are mutually  exclusive.
-   3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
+ * 1. For now we assume that route tags < 256.
+ *    It allows to use direct table lookups, instead of hash tables.
+ * 2. For now we assume that "from TAG" and "fromdev DEV" statements
+ *    are mutually  exclusive.
+ * 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
  */
 
-struct route4_fastmap
-{
+struct route4_fastmap {
        struct route4_filter    *filter;
        u32                     id;
        int                     iif;
 };
 
-struct route4_head
-{
+struct route4_head {
        struct route4_fastmap   fastmap[16];
-       struct route4_bucket    *table[256+1];
+       struct route4_bucket    *table[256 + 1];
 };
 
-struct route4_bucket
-{
+struct route4_bucket {
        /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
-       struct route4_filter    *ht[16+16+1];
+       struct route4_filter    *ht[16 + 16 + 1];
 };
 
-struct route4_filter
-{
+struct route4_filter {
        struct route4_filter    *next;
        u32                     id;
        int                     iif;
@@ -61,20 +57,20 @@ struct route4_filter
        struct route4_bucket    *bkt;
 };
 
-#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
+#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
 
 static const struct tcf_ext_map route_ext_map = {
        .police = TCA_ROUTE4_POLICE,
        .action = TCA_ROUTE4_ACT
 };
 
-static __inline__ int route4_fastmap_hash(u32 id, int iif)
+static inline int route4_fastmap_hash(u32 id, int iif)
 {
-       return id&0xF;
+       return id & 0xF;
 }
 
-static inline
-void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
+static void
+route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
 {
        spinlock_t *root_lock = qdisc_root_sleeping_lock(q);
 
@@ -83,32 +79,33 @@ void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
        spin_unlock_bh(root_lock);
 }
 
-static inline void
+static void
 route4_set_fastmap(struct route4_head *head, u32 id, int iif,
                   struct route4_filter *f)
 {
        int h = route4_fastmap_hash(id, iif);
+
        head->fastmap[h].id = id;
        head->fastmap[h].iif = iif;
        head->fastmap[h].filter = f;
 }
 
-static __inline__ int route4_hash_to(u32 id)
+static inline int route4_hash_to(u32 id)
 {
-       return id&0xFF;
+       return id & 0xFF;
 }
 
-static __inline__ int route4_hash_from(u32 id)
+static inline int route4_hash_from(u32 id)
 {
-       return (id>>16)&0xF;
+       return (id >> 16) & 0xF;
 }
 
-static __inline__ int route4_hash_iif(int iif)
+static inline int route4_hash_iif(int iif)
 {
-       return 16 + ((iif>>16)&0xF);
+       return 16 + ((iif >> 16) & 0xF);
 }
 
-static __inline__ int route4_hash_wild(void)
+static inline int route4_hash_wild(void)
 {
        return 32;
 }
@@ -131,21 +128,22 @@ static __inline__ int route4_hash_wild(void)
 static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
                           struct tcf_result *res)
 {
-       struct route4_head *head = (struct route4_head*)tp->root;
+       struct route4_head *head = (struct route4_head *)tp->root;
        struct dst_entry *dst;
        struct route4_bucket *b;
        struct route4_filter *f;
        u32 id, h;
        int iif, dont_cache = 0;
 
-       if ((dst = skb_dst(skb)) == NULL)
+       dst = skb_dst(skb);
+       if (!dst)
                goto failure;
 
        id = dst->tclassid;
        if (head == NULL)
                goto old_method;
 
-       iif = ((struct rtable*)dst)->fl.iif;
+       iif = ((struct rtable *)dst)->rt_iif;
 
        h = route4_fastmap_hash(id, iif);
        if (id == head->fastmap[h].id &&
@@ -161,7 +159,8 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
        h = route4_hash_to(id);
 
 restart:
-       if ((b = head->table[h]) != NULL) {
+       b = head->table[h];
+       if (b) {
                for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
                        if (f->id == id)
                                ROUTE4_APPLY_RESULT();
@@ -197,8 +196,9 @@ old_method:
 
 static inline u32 to_hash(u32 id)
 {
-       u32 h = id&0xFF;
-       if (id&0x8000)
+       u32 h = id & 0xFF;
+
+       if (id & 0x8000)
                h += 256;
        return h;
 }
@@ -211,17 +211,17 @@ static inline u32 from_hash(u32 id)
        if (!(id & 0x8000)) {
                if (id > 255)
                        return 256;
-               return id&0xF;
+               return id & 0xF;
        }
-       return 16 + (id&0xF);
+       return 16 + (id & 0xF);
 }
 
 static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
 {
-       struct route4_head *head = (struct route4_head*)tp->root;
+       struct route4_head *head = (struct route4_head *)tp->root;
        struct route4_bucket *b;
        struct route4_filter *f;
-       unsigned h1, h2;
+       unsigned int h1, h2;
 
        if (!head)
                return 0;
@@ -230,11 +230,12 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
        if (h1 > 256)
                return 0;
 
-       h2 = from_hash(handle>>16);
+       h2 = from_hash(handle >> 16);
        if (h2 > 32)
                return 0;
 
-       if ((b = head->table[h1]) != NULL) {
+       b = head->table[h1];
+       if (b) {
                for (f = b->ht[h2]; f; f = f->next)
                        if (f->handle == handle)
                                return (unsigned long)f;
@@ -251,7 +252,7 @@ static int route4_init(struct tcf_proto *tp)
        return 0;
 }
 
-static inline void
+static void
 route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
 {
        tcf_unbind_filter(tp, &f->res);
@@ -267,11 +268,12 @@ static void route4_destroy(struct tcf_proto *tp)
        if (head == NULL)
                return;
 
-       for (h1=0; h1<=256; h1++) {
+       for (h1 = 0; h1 <= 256; h1++) {
                struct route4_bucket *b;
 
-               if ((b = head->table[h1]) != NULL) {
-                       for (h2=0; h2<=32; h2++) {
+               b = head->table[h1];
+               if (b) {
+                       for (h2 = 0; h2 <= 32; h2++) {
                                struct route4_filter *f;
 
                                while ((f = b->ht[h2]) != NULL) {
@@ -287,9 +289,9 @@ static void route4_destroy(struct tcf_proto *tp)
 
 static int route4_delete(struct tcf_proto *tp, unsigned long arg)
 {
-       struct route4_head *head = (struct route4_head*)tp->root;
-       struct route4_filter **fp, *f = (struct route4_filter*)arg;
-       unsigned h = 0;
+       struct route4_head *head = (struct route4_head *)tp->root;
+       struct route4_filter **fp, *f = (struct route4_filter *)arg;
+       unsigned int h = 0;
        struct route4_bucket *b;
        int i;
 
@@ -299,7 +301,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
        h = f->handle;
        b = f->bkt;
 
-       for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
+       for (fp = &b->ht[from_hash(h >> 16)]; *fp; fp = &(*fp)->next) {
                if (*fp == f) {
                        tcf_tree_lock(tp);
                        *fp = f->next;
@@ -310,7 +312,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
 
                        /* Strip tree */
 
-                       for (i=0; i<=32; i++)
+                       for (i = 0; i <= 32; i++)
                                if (b->ht[i])
                                        return 0;
 
@@ -380,7 +382,8 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
        }
 
        h1 = to_hash(nhandle);
-       if ((b = head->table[h1]) == NULL) {
+       b = head->table[h1];
+       if (!b) {
                err = -ENOBUFS;
                b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
                if (b == NULL)
@@ -391,6 +394,7 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
                tcf_tree_unlock(tp);
        } else {
                unsigned int h2 = from_hash(nhandle >> 16);
+
                err = -EEXIST;
                for (fp = b->ht[h2]; fp; fp = fp->next)
                        if (fp->handle == f->handle)
@@ -444,7 +448,8 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
        if (err < 0)
                return err;
 
-       if ((f = (struct route4_filter*)*arg) != NULL) {
+       f = (struct route4_filter *)*arg;
+       if (f) {
                if (f->handle != handle && handle)
                        return -EINVAL;
 
@@ -481,7 +486,7 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
 
 reinsert:
        h = from_hash(f->handle >> 16);
-       for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
+       for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next)
                if (f->handle < f1->handle)
                        break;
 
@@ -492,7 +497,8 @@ reinsert:
        if (old_handle && f->handle != old_handle) {
                th = to_hash(old_handle);
                h = from_hash(old_handle >> 16);
-               if ((b = head->table[th]) != NULL) {
+               b = head->table[th];
+               if (b) {
                        for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
                                if (*fp == f) {
                                        *fp = f->next;
@@ -515,7 +521,7 @@ errout:
 static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 {
        struct route4_head *head = tp->root;
-       unsigned h, h1;
+       unsigned int h, h1;
 
        if (head == NULL)
                arg->stop = 1;
@@ -549,7 +555,7 @@ static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 static int route4_dump(struct tcf_proto *tp, unsigned long fh,
                       struct sk_buff *skb, struct tcmsg *t)
 {
-       struct route4_filter *f = (struct route4_filter*)fh;
+       struct route4_filter *f = (struct route4_filter *)fh;
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;
        u32 id;
@@ -563,15 +569,15 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh,
        if (nest == NULL)
                goto nla_put_failure;
 
-       if (!(f->handle&0x8000)) {
-               id = f->id&0xFF;
+       if (!(f->handle & 0x8000)) {
+               id = f->id & 0xFF;
                NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
        }
-       if (f->handle&0x80000000) {
-               if ((f->handle>>16) != 0xFFFF)
+       if (f->handle & 0x80000000) {
+               if ((f->handle >> 16) != 0xFFFF)
                        NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
        } else {
-               id = f->id>>16;
+               id = f->id >> 16;
                NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
        }
        if (f->res.classid)
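For readers following the handle arithmetic in the cls_route hunks above, the sketch below re-derives the two bucket indices outside the kernel. It is a standalone illustration only; the sample handle value is arbitrary.

/*
 * Standalone sketch (not kernel code) of the route4 handle-to-bucket
 * arithmetic visible above: to_hash() picks one of the 257 destination
 * buckets from the low 16 bits of the handle, and from_hash() picks one
 * of the 33 source/iif slots from bits 16..31 (matching the h1 <= 256
 * and h2 <= 32 checks in route4_get()).
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int to_hash(uint32_t id)
{
	unsigned int h = id & 0xFF;

	if (id & 0x8000)
		h += 256;
	return h;
}

static unsigned int from_hash(uint32_t id)
{
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;	/* out of range, rejected by route4_get() */
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}

int main(void)
{
	uint32_t handle = 0x80030042;	/* arbitrary example handle */

	printf("table slot h1 = %u, bucket slot h2 = %u\n",
	       to_hash(handle), from_hash(handle >> 16));
	return 0;
}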
index 425a1790b0480327b1ae9d2c4e1adec0a132a686..402c44b241a3927e0c8efce6e0cb91d19772e7fe 100644 (file)
    powerful classification engine.  */
 
 
-struct rsvp_head
-{
+struct rsvp_head {
        u32                     tmap[256/32];
        u32                     hgenerator;
        u8                      tgenerator;
        struct rsvp_session     *ht[256];
 };
 
-struct rsvp_session
-{
+struct rsvp_session {
        struct rsvp_session     *next;
        __be32                  dst[RSVP_DST_LEN];
        struct tc_rsvp_gpi      dpi;
        u8                      protocol;
        u8                      tunnelid;
        /* 16 (src,sport) hash slots, and one wildcard source slot */
-       struct rsvp_filter      *ht[16+1];
+       struct rsvp_filter      *ht[16 + 1];
 };
 
 
-struct rsvp_filter
-{
+struct rsvp_filter {
        struct rsvp_filter      *next;
        __be32                  src[RSVP_DST_LEN];
        struct tc_rsvp_gpi      spi;
@@ -100,17 +97,19 @@ struct rsvp_filter
        struct rsvp_session     *sess;
 };
 
-static __inline__ unsigned hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
+static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
 {
-       unsigned h = (__force __u32)dst[RSVP_DST_LEN-1];
+       unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];
+
        h ^= h>>16;
        h ^= h>>8;
        return (h ^ protocol ^ tunnelid) & 0xFF;
 }
 
-static __inline__ unsigned hash_src(__be32 *src)
+static inline unsigned int hash_src(__be32 *src)
 {
-       unsigned h = (__force __u32)src[RSVP_DST_LEN-1];
+       unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];
+
        h ^= h>>16;
        h ^= h>>8;
        h ^= h>>4;
@@ -134,10 +133,10 @@ static struct tcf_ext_map rsvp_ext_map = {
 static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
                         struct tcf_result *res)
 {
-       struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht;
+       struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
        struct rsvp_session *s;
        struct rsvp_filter *f;
-       unsigned h1, h2;
+       unsigned int h1, h2;
        __be32 *dst, *src;
        u8 protocol;
        u8 tunnelid = 0;
@@ -162,13 +161,13 @@ restart:
        src = &nhptr->saddr.s6_addr32[0];
        dst = &nhptr->daddr.s6_addr32[0];
        protocol = nhptr->nexthdr;
-       xprt = ((u8*)nhptr) + sizeof(struct ipv6hdr);
+       xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
 #else
        src = &nhptr->saddr;
        dst = &nhptr->daddr;
        protocol = nhptr->protocol;
-       xprt = ((u8*)nhptr) + (nhptr->ihl<<2);
-       if (nhptr->frag_off & htons(IP_MF|IP_OFFSET))
+       xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
+       if (nhptr->frag_off & htons(IP_MF | IP_OFFSET))
                return -1;
 #endif
 
@@ -176,10 +175,10 @@ restart:
        h2 = hash_src(src);
 
        for (s = sht[h1]; s; s = s->next) {
-               if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
+               if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
                    protocol == s->protocol &&
                    !(s->dpi.mask &
-                     (*(u32*)(xprt+s->dpi.offset)^s->dpi.key)) &&
+                     (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
 #if RSVP_DST_LEN == 4
                    dst[0] == s->dst[0] &&
                    dst[1] == s->dst[1] &&
@@ -188,8 +187,8 @@ restart:
                    tunnelid == s->tunnelid) {
 
                        for (f = s->ht[h2]; f; f = f->next) {
-                               if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN-1] &&
-                                   !(f->spi.mask & (*(u32*)(xprt+f->spi.offset)^f->spi.key))
+                               if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
+                                   !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
 #if RSVP_DST_LEN == 4
                                    &&
                                    src[0] == f->src[0] &&
@@ -205,7 +204,7 @@ matched:
                                                return 0;
 
                                        tunnelid = f->res.classid;
-                                       nhptr = (void*)(xprt + f->tunnelhdr - sizeof(*nhptr));
+                                       nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
                                        goto restart;
                                }
                        }
@@ -224,11 +223,11 @@ matched:
 
 static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle)
 {
-       struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht;
+       struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
        struct rsvp_session *s;
        struct rsvp_filter *f;
-       unsigned h1 = handle&0xFF;
-       unsigned h2 = (handle>>8)&0xFF;
+       unsigned int h1 = handle & 0xFF;
+       unsigned int h2 = (handle >> 8) & 0xFF;
 
        if (h2 > 16)
                return 0;
@@ -258,7 +257,7 @@ static int rsvp_init(struct tcf_proto *tp)
        return -ENOBUFS;
 }
 
-static inline void
+static void
 rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
 {
        tcf_unbind_filter(tp, &f->res);
@@ -277,13 +276,13 @@ static void rsvp_destroy(struct tcf_proto *tp)
 
        sht = data->ht;
 
-       for (h1=0; h1<256; h1++) {
+       for (h1 = 0; h1 < 256; h1++) {
                struct rsvp_session *s;
 
                while ((s = sht[h1]) != NULL) {
                        sht[h1] = s->next;
 
-                       for (h2=0; h2<=16; h2++) {
+                       for (h2 = 0; h2 <= 16; h2++) {
                                struct rsvp_filter *f;
 
                                while ((f = s->ht[h2]) != NULL) {
@@ -299,13 +298,13 @@ static void rsvp_destroy(struct tcf_proto *tp)
 
 static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
 {
-       struct rsvp_filter **fp, *f = (struct rsvp_filter*)arg;
-       unsigned h = f->handle;
+       struct rsvp_filter **fp, *f = (struct rsvp_filter *)arg;
+       unsigned int h = f->handle;
        struct rsvp_session **sp;
        struct rsvp_session *s = f->sess;
        int i;
 
-       for (fp = &s->ht[(h>>8)&0xFF]; *fp; fp = &(*fp)->next) {
+       for (fp = &s->ht[(h >> 8) & 0xFF]; *fp; fp = &(*fp)->next) {
                if (*fp == f) {
                        tcf_tree_lock(tp);
                        *fp = f->next;
@@ -314,12 +313,12 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
 
                        /* Strip tree */
 
-                       for (i=0; i<=16; i++)
+                       for (i = 0; i <= 16; i++)
                                if (s->ht[i])
                                        return 0;
 
                        /* OK, session has no flows */
-                       for (sp = &((struct rsvp_head*)tp->root)->ht[h&0xFF];
+                       for (sp = &((struct rsvp_head *)tp->root)->ht[h & 0xFF];
                             *sp; sp = &(*sp)->next) {
                                if (*sp == s) {
                                        tcf_tree_lock(tp);
@@ -337,13 +336,14 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
        return 0;
 }
 
-static unsigned gen_handle(struct tcf_proto *tp, unsigned salt)
+static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
 {
        struct rsvp_head *data = tp->root;
        int i = 0xFFFF;
 
        while (i-- > 0) {
                u32 h;
+
                if ((data->hgenerator += 0x10000) == 0)
                        data->hgenerator = 0x10000;
                h = data->hgenerator|salt;
@@ -355,10 +355,10 @@ static unsigned gen_handle(struct tcf_proto *tp, unsigned salt)
 
 static int tunnel_bts(struct rsvp_head *data)
 {
-       int n = data->tgenerator>>5;
-       u32 b = 1<<(data->tgenerator&0x1F);
+       int n = data->tgenerator >> 5;
+       u32 b = 1 << (data->tgenerator & 0x1F);
 
-       if (data->tmap[n]&b)
+       if (data->tmap[n] & b)
                return 0;
        data->tmap[n] |= b;
        return 1;
@@ -372,10 +372,10 @@ static void tunnel_recycle(struct rsvp_head *data)
 
        memset(tmap, 0, sizeof(tmap));
 
-       for (h1=0; h1<256; h1++) {
+       for (h1 = 0; h1 < 256; h1++) {
                struct rsvp_session *s;
                for (s = sht[h1]; s; s = s->next) {
-                       for (h2=0; h2<=16; h2++) {
+                       for (h2 = 0; h2 <= 16; h2++) {
                                struct rsvp_filter *f;
 
                                for (f = s->ht[h2]; f; f = f->next) {
@@ -395,8 +395,8 @@ static u32 gen_tunnel(struct rsvp_head *data)
 {
        int i, k;
 
-       for (k=0; k<2; k++) {
-               for (i=255; i>0; i--) {
+       for (k = 0; k < 2; k++) {
+               for (i = 255; i > 0; i--) {
                        if (++data->tgenerator == 0)
                                data->tgenerator = 1;
                        if (tunnel_bts(data))
@@ -428,7 +428,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
        struct nlattr *opt = tca[TCA_OPTIONS-1];
        struct nlattr *tb[TCA_RSVP_MAX + 1];
        struct tcf_exts e;
-       unsigned h1, h2;
+       unsigned int h1, h2;
        __be32 *dst;
        int err;
 
@@ -443,7 +443,8 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
        if (err < 0)
                return err;
 
-       if ((f = (struct rsvp_filter*)*arg) != NULL) {
+       f = (struct rsvp_filter *)*arg;
+       if (f) {
                /* Node exists: adjust only classid */
 
                if (f->handle != handle && handle)
@@ -500,7 +501,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
                        goto errout;
        }
 
-       for (sp = &data->ht[h1]; (s=*sp) != NULL; sp = &s->next) {
+       for (sp = &data->ht[h1]; (s = *sp) != NULL; sp = &s->next) {
                if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
                    pinfo && pinfo->protocol == s->protocol &&
                    memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
@@ -523,7 +524,7 @@ insert:
                        tcf_exts_change(tp, &f->exts, &e);
 
                        for (fp = &s->ht[h2]; *fp; fp = &(*fp)->next)
-                               if (((*fp)->spi.mask&f->spi.mask) != f->spi.mask)
+                               if (((*fp)->spi.mask & f->spi.mask) != f->spi.mask)
                                        break;
                        f->next = *fp;
                        wmb();
@@ -567,7 +568,7 @@ errout2:
 static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 {
        struct rsvp_head *head = tp->root;
-       unsigned h, h1;
+       unsigned int h, h1;
 
        if (arg->stop)
                return;
@@ -598,7 +599,7 @@ static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
                     struct sk_buff *skb, struct tcmsg *t)
 {
-       struct rsvp_filter *f = (struct rsvp_filter*)fh;
+       struct rsvp_filter *f = (struct rsvp_filter *)fh;
        struct rsvp_session *s;
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;
@@ -624,7 +625,7 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
        NLA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo);
        if (f->res.classid)
                NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid);
-       if (((f->handle>>8)&0xFF) != 16)
+       if (((f->handle >> 8) & 0xFF) != 16)
                NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src);
 
        if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0)
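The cls_rsvp hunks above touch the session hashing; the following standalone sketch reproduces the hash_dst() fold shown there, with arbitrary sample inputs. It is not kernel code.

/*
 * Standalone sketch (not kernel code) of hash_dst() as shown above: the
 * last 32-bit word of the destination address is XOR-folded down and
 * mixed with the protocol and tunnel id, then masked to a byte to pick
 * one of the 256 session buckets.  The sample inputs are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int hash_dst(uint32_t last_dst_word, uint8_t protocol,
			     uint8_t tunnelid)
{
	unsigned int h = last_dst_word;

	h ^= h >> 16;
	h ^= h >> 8;
	return (h ^ protocol ^ tunnelid) & 0xFF;
}

int main(void)
{
	/* arbitrary address word, IP protocol 46 (RSVP), no tunnel */
	printf("session bucket = %u\n", hash_dst(0x0A000001, 46, 0));
	return 0;
}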
index 20ef330bb918b552f91c3795d67a2c6f9d407c3b..36667fa64237902e52e11885745eb147be849394 100644 (file)
@@ -249,7 +249,7 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
                 * of the hashing index is below the threshold.
                 */
                if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD)
-                       cp.hash = (cp.mask >> cp.shift)+1;
+                       cp.hash = (cp.mask >> cp.shift) + 1;
                else
                        cp.hash = DEFAULT_HASH_SIZE;
        }
index b0c2a82178afa032ce1d09b0e9f400afb2b578f5..3b93fc0c89553829d2dea610d550b77cfb3be494 100644 (file)
@@ -42,8 +42,7 @@
 #include <net/act_api.h>
 #include <net/pkt_cls.h>
 
-struct tc_u_knode
-{
+struct tc_u_knode {
        struct tc_u_knode       *next;
        u32                     handle;
        struct tc_u_hnode       *ht_up;
@@ -63,19 +62,17 @@ struct tc_u_knode
        struct tc_u32_sel       sel;
 };
 
-struct tc_u_hnode
-{
+struct tc_u_hnode {
        struct tc_u_hnode       *next;
        u32                     handle;
        u32                     prio;
        struct tc_u_common      *tp_c;
        int                     refcnt;
-       unsigned                divisor;
+       unsigned int            divisor;
        struct tc_u_knode       *ht[1];
 };
 
-struct tc_u_common
-{
+struct tc_u_common {
        struct tc_u_hnode       *hlist;
        struct Qdisc            *q;
        int                     refcnt;
@@ -87,9 +84,11 @@ static const struct tcf_ext_map u32_ext_map = {
        .police = TCA_U32_POLICE
 };
 
-static __inline__ unsigned u32_hash_fold(__be32 key, struct tc_u32_sel *sel, u8 fshift)
+static inline unsigned int u32_hash_fold(__be32 key,
+                                        const struct tc_u32_sel *sel,
+                                        u8 fshift)
 {
-       unsigned h = ntohl(key & sel->hmask)>>fshift;
+       unsigned int h = ntohl(key & sel->hmask) >> fshift;
 
        return h;
 }
@@ -101,7 +100,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
                unsigned int      off;
        } stack[TC_U32_MAXDEPTH];
 
-       struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
+       struct tc_u_hnode *ht = (struct tc_u_hnode *)tp->root;
        unsigned int off = skb_network_offset(skb);
        struct tc_u_knode *n;
        int sdepth = 0;
@@ -120,7 +119,7 @@ next_knode:
                struct tc_u32_key *key = n->sel.keys;
 
 #ifdef CONFIG_CLS_U32_PERF
-               n->pf->rcnt +=1;
+               n->pf->rcnt += 1;
                j = 0;
 #endif
 
@@ -133,14 +132,14 @@ next_knode:
                }
 #endif
 
-               for (i = n->sel.nkeys; i>0; i--, key++) {
+               for (i = n->sel.nkeys; i > 0; i--, key++) {
                        int toff = off + key->off + (off2 & key->offmask);
-                       __be32 *data, _data;
+                       __be32 *data, hdata;
 
                        if (skb_headroom(skb) + toff > INT_MAX)
                                goto out;
 
-                       data = skb_header_pointer(skb, toff, 4, &_data);
+                       data = skb_header_pointer(skb, toff, 4, &hdata);
                        if (!data)
                                goto out;
                        if ((*data ^ key->val) & key->mask) {
@@ -148,13 +147,13 @@ next_knode:
                                goto next_knode;
                        }
 #ifdef CONFIG_CLS_U32_PERF
-                       n->pf->kcnts[j] +=1;
+                       n->pf->kcnts[j] += 1;
                        j++;
 #endif
                }
                if (n->ht_down == NULL) {
 check_terminal:
-                       if (n->sel.flags&TC_U32_TERMINAL) {
+                       if (n->sel.flags & TC_U32_TERMINAL) {
 
                                *res = n->res;
 #ifdef CONFIG_NET_CLS_IND
@@ -164,7 +163,7 @@ check_terminal:
                                }
 #endif
 #ifdef CONFIG_CLS_U32_PERF
-                               n->pf->rhit +=1;
+                               n->pf->rhit += 1;
 #endif
                                r = tcf_exts_exec(skb, &n->exts, res);
                                if (r < 0) {
@@ -188,26 +187,26 @@ check_terminal:
                ht = n->ht_down;
                sel = 0;
                if (ht->divisor) {
-                       __be32 *data, _data;
+                       __be32 *data, hdata;
 
                        data = skb_header_pointer(skb, off + n->sel.hoff, 4,
-                                                 &_data);
+                                                 &hdata);
                        if (!data)
                                goto out;
                        sel = ht->divisor & u32_hash_fold(*data, &n->sel,
                                                          n->fshift);
                }
-               if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
+               if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
                        goto next_ht;
 
-               if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
+               if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
                        off2 = n->sel.off + 3;
                        if (n->sel.flags & TC_U32_VAROFFSET) {
-                               __be16 *data, _data;
+                               __be16 *data, hdata;
 
                                data = skb_header_pointer(skb,
                                                          off + n->sel.offoff,
-                                                         2, &_data);
+                                                         2, &hdata);
                                if (!data)
                                        goto out;
                                off2 += ntohs(n->sel.offmask & *data) >>
@@ -215,7 +214,7 @@ check_terminal:
                        }
                        off2 &= ~3;
                }
-               if (n->sel.flags&TC_U32_EAT) {
+               if (n->sel.flags & TC_U32_EAT) {
                        off += off2;
                        off2 = 0;
                }
@@ -236,11 +235,11 @@ out:
 
 deadloop:
        if (net_ratelimit())
-               printk(KERN_WARNING "cls_u32: dead loop\n");
+               pr_warning("cls_u32: dead loop\n");
        return -1;
 }
 
-static __inline__ struct tc_u_hnode *
+static struct tc_u_hnode *
 u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
 {
        struct tc_u_hnode *ht;
@@ -252,10 +251,10 @@ u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
        return ht;
 }
 
-static __inline__ struct tc_u_knode *
+static struct tc_u_knode *
 u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
 {
-       unsigned sel;
+       unsigned int sel;
        struct tc_u_knode *n = NULL;
 
        sel = TC_U32_HASH(handle);
@@ -300,7 +299,7 @@ static u32 gen_new_htid(struct tc_u_common *tp_c)
        do {
                if (++tp_c->hgenerator == 0x7FF)
                        tp_c->hgenerator = 1;
-       } while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
+       } while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
 
        return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
 }
@@ -378,9 +377,9 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
 static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
 {
        struct tc_u_knode *n;
-       unsigned h;
+       unsigned int h;
 
-       for (h=0; h<=ht->divisor; h++) {
+       for (h = 0; h <= ht->divisor; h++) {
                while ((n = ht->ht[h]) != NULL) {
                        ht->ht[h] = n->next;
 
@@ -446,13 +445,13 @@ static void u32_destroy(struct tcf_proto *tp)
 
 static int u32_delete(struct tcf_proto *tp, unsigned long arg)
 {
-       struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;
+       struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;
 
        if (ht == NULL)
                return 0;
 
        if (TC_U32_KEY(ht->handle))
-               return u32_delete_key(tp, (struct tc_u_knode*)ht);
+               return u32_delete_key(tp, (struct tc_u_knode *)ht);
 
        if (tp->root == ht)
                return -EINVAL;
@@ -470,14 +469,14 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
 static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
 {
        struct tc_u_knode *n;
-       unsigned i = 0x7FF;
+       unsigned int i = 0x7FF;
 
-       for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
+       for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
                if (i < TC_U32_NODE(n->handle))
                        i = TC_U32_NODE(n->handle);
        i++;
 
-       return handle|(i>0xFFF ? 0xFFF : i);
+       return handle | (i > 0xFFF ? 0xFFF : i);
 }
 
 static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
@@ -566,7 +565,8 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
        if (err < 0)
                return err;
 
-       if ((n = (struct tc_u_knode*)*arg) != NULL) {
+       n = (struct tc_u_knode *)*arg;
+       if (n) {
                if (TC_U32_KEY(n->handle) == 0)
                        return -EINVAL;
 
@@ -574,7 +574,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
        }
 
        if (tb[TCA_U32_DIVISOR]) {
-               unsigned divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
+               unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
 
                if (--divisor > 0x100)
                        return -EINVAL;
@@ -585,7 +585,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
                        if (handle == 0)
                                return -ENOMEM;
                }
-               ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
+               ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
                if (ht == NULL)
                        return -ENOBUFS;
                ht->tp_c = tp_c;
@@ -683,7 +683,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *ht;
        struct tc_u_knode *n;
-       unsigned h;
+       unsigned int h;
 
        if (arg->stop)
                return;
@@ -717,7 +717,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 static int u32_dump(struct tcf_proto *tp, unsigned long fh,
                     struct sk_buff *skb, struct tcmsg *t)
 {
-       struct tc_u_knode *n = (struct tc_u_knode*)fh;
+       struct tc_u_knode *n = (struct tc_u_knode *)fh;
        struct nlattr *nest;
 
        if (n == NULL)
@@ -730,8 +730,9 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
                goto nla_put_failure;
 
        if (TC_U32_KEY(n->handle) == 0) {
-               struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
-               u32 divisor = ht->divisor+1;
+               struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
+               u32 divisor = ht->divisor + 1;
+
                NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor);
        } else {
                NLA_PUT(skb, TCA_U32_SEL,
@@ -755,7 +756,7 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
                        goto nla_put_failure;
 
 #ifdef CONFIG_NET_CLS_IND
-               if(strlen(n->indev))
+               if (strlen(n->indev))
                        NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev);
 #endif
 #ifdef CONFIG_CLS_U32_PERF
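The u32 classifier hunks above keep the masked key test ((*data ^ key->val) & key->mask); the sketch below is a minimal userspace restatement of that test with made-up example values.

/*
 * Standalone sketch (not kernel code) of the key test in u32_classify()
 * above: a packet word matches a key when the bits selected by the mask
 * equal the key value, i.e. ((word ^ val) & mask) is zero.  The values
 * below are arbitrary examples, not taken from a real filter.
 */
#include <stdint.h>
#include <stdio.h>

static int u32_key_matches(uint32_t word, uint32_t val, uint32_t mask)
{
	return ((word ^ val) & mask) == 0;
}

int main(void)
{
	uint32_t word = 0x45000054;	/* e.g. first word of an IPv4 header */
	uint32_t val  = 0x45000000;	/* expect version 4, IHL 5 */
	uint32_t mask = 0xFF000000;	/* compare only the first byte */

	printf("match: %d\n", u32_key_matches(word, val, mask));
	return 0;
}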
index bc450397487ac7928620d5d63c95274c856b994a..1c8360a2752ae39c8058d7515d4cdb6d1efb80a9 100644 (file)
@@ -33,40 +33,41 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
                return 0;
 
        switch (cmp->align) {
-               case TCF_EM_ALIGN_U8:
-                       val = *ptr;
-                       break;
+       case TCF_EM_ALIGN_U8:
+               val = *ptr;
+               break;
 
-               case TCF_EM_ALIGN_U16:
-                       val = get_unaligned_be16(ptr);
+       case TCF_EM_ALIGN_U16:
+               val = get_unaligned_be16(ptr);
 
-                       if (cmp_needs_transformation(cmp))
-                               val = be16_to_cpu(val);
-                       break;
+               if (cmp_needs_transformation(cmp))
+                       val = be16_to_cpu(val);
+               break;
 
-               case TCF_EM_ALIGN_U32:
-                       /* Worth checking boundries? The branching seems
-                        * to get worse. Visit again. */
-                       val = get_unaligned_be32(ptr);
+       case TCF_EM_ALIGN_U32:
+               /* Worth checking boundries? The branching seems
+                * to get worse. Visit again.
+                */
+               val = get_unaligned_be32(ptr);
 
-                       if (cmp_needs_transformation(cmp))
-                               val = be32_to_cpu(val);
-                       break;
+               if (cmp_needs_transformation(cmp))
+                       val = be32_to_cpu(val);
+               break;
 
-               default:
-                       return 0;
+       default:
+               return 0;
        }
 
        if (cmp->mask)
                val &= cmp->mask;
 
        switch (cmp->opnd) {
-               case TCF_EM_OPND_EQ:
-                       return val == cmp->val;
-               case TCF_EM_OPND_LT:
-                       return val < cmp->val;
-               case TCF_EM_OPND_GT:
-                       return val > cmp->val;
+       case TCF_EM_OPND_EQ:
+               return val == cmp->val;
+       case TCF_EM_OPND_LT:
+               return val < cmp->val;
+       case TCF_EM_OPND_GT:
+               return val > cmp->val;
        }
 
        return 0;
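As a plain restatement of the em_cmp operand switch reindented above, the following standalone sketch applies the optional mask and then one of the three comparisons. The enum names are local stand-ins, not the kernel constants.

/*
 * Standalone sketch (not kernel code) of the operand evaluation in
 * em_cmp_match() above: after the optional mask is applied, the ematch
 * reduces to an ==, < or > comparison against the configured value.
 */
#include <stdint.h>
#include <stdio.h>

enum cmp_opnd { OPND_EQ, OPND_LT, OPND_GT };	/* stand-ins for TCF_EM_OPND_* */

static int cmp_eval(uint32_t val, uint32_t mask, uint32_t ref,
		    enum cmp_opnd opnd)
{
	if (mask)
		val &= mask;

	switch (opnd) {
	case OPND_EQ:
		return val == ref;
	case OPND_LT:
		return val < ref;
	case OPND_GT:
		return val > ref;
	}
	return 0;
}

int main(void)
{
	/* does the low byte of 0x1234 exceed 0x30? */
	printf("%d\n", cmp_eval(0x1234, 0xFF, 0x30, OPND_GT));
	return 0;
}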
index 34da5e29ea1a8026388805cfba52e782ba01c9ec..a4de67eca824068ea9c57befc4196d7d36f2b2cb 100644 (file)
 #include <net/pkt_cls.h>
 #include <net/sock.h>
 
-struct meta_obj
-{
+struct meta_obj {
        unsigned long           value;
        unsigned int            len;
 };
 
-struct meta_value
-{
+struct meta_value {
        struct tcf_meta_val     hdr;
        unsigned long           val;
        unsigned int            len;
 };
 
-struct meta_match
-{
+struct meta_match {
        struct meta_value       lvalue;
        struct meta_value       rvalue;
 };
@@ -255,7 +252,7 @@ META_COLLECTOR(int_rtclassid)
        if (unlikely(skb_dst(skb) == NULL))
                *err = -1;
        else
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                dst->value = skb_dst(skb)->tclassid;
 #else
                dst->value = 0;
@@ -267,7 +264,7 @@ META_COLLECTOR(int_rtiif)
        if (unlikely(skb_rtable(skb) == NULL))
                *err = -1;
        else
-               dst->value = skb_rtable(skb)->fl.iif;
+               dst->value = skb_rtable(skb)->rt_iif;
 }
 
 /**************************************************************************
@@ -404,7 +401,7 @@ META_COLLECTOR(int_sk_sndbuf)
 META_COLLECTOR(int_sk_alloc)
 {
        SKIP_NONLOCAL(skb);
-       dst->value = skb->sk->sk_allocation;
+       dst->value = (__force int) skb->sk->sk_allocation;
 }
 
 META_COLLECTOR(int_sk_route_caps)
@@ -483,8 +480,7 @@ META_COLLECTOR(int_sk_write_pend)
  * Meta value collectors assignment table
  **************************************************************************/
 
-struct meta_ops
-{
+struct meta_ops {
        void            (*get)(struct sk_buff *, struct tcf_pkt_info *,
                               struct meta_value *, struct meta_obj *, int *);
 };
@@ -494,7 +490,7 @@ struct meta_ops
 
 /* Meta value operations table listing all meta value collectors and
  * assigns them to a type and meta id. */
-static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
+static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = {
        [TCF_META_TYPE_VAR] = {
                [META_ID(DEV)]                  = META_FUNC(var_dev),
                [META_ID(SK_BOUND_IF)]          = META_FUNC(var_sk_bound_if),
@@ -550,7 +546,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
        }
 };
 
-static inline struct meta_ops * meta_ops(struct meta_value *val)
+static inline struct meta_ops *meta_ops(struct meta_value *val)
 {
        return &__meta_ops[meta_type(val)][meta_id(val)];
 }
@@ -649,9 +645,8 @@ static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
 {
        if (v->len == sizeof(unsigned long))
                NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val);
-       else if (v->len == sizeof(u32)) {
+       else if (v->len == sizeof(u32))
                NLA_PUT_U32(skb, tlv, v->val);
-       }
 
        return 0;
 
@@ -663,8 +658,7 @@ nla_put_failure:
  * Type specific operations table
  **************************************************************************/
 
-struct meta_type_ops
-{
+struct meta_type_ops {
        void    (*destroy)(struct meta_value *);
        int     (*compare)(struct meta_obj *, struct meta_obj *);
        int     (*change)(struct meta_value *, struct nlattr *);
@@ -672,7 +666,7 @@ struct meta_type_ops
        int     (*dump)(struct sk_buff *, struct meta_value *, int);
 };
 
-static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX+1] = {
+static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
        [TCF_META_TYPE_VAR] = {
                .destroy = meta_var_destroy,
                .compare = meta_var_compare,
@@ -688,7 +682,7 @@ static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX+1] = {
        }
 };
 
-static inline struct meta_type_ops * meta_type_ops(struct meta_value *v)
+static inline struct meta_type_ops *meta_type_ops(struct meta_value *v)
 {
        return &__meta_type_ops[meta_type(v)];
 }
@@ -713,7 +707,7 @@ static int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info,
                return err;
 
        if (meta_type_ops(v)->apply_extras)
-           meta_type_ops(v)->apply_extras(v, dst);
+               meta_type_ops(v)->apply_extras(v, dst);
 
        return 0;
 }
@@ -732,12 +726,12 @@ static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m,
        r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value);
 
        switch (meta->lvalue.hdr.op) {
-               case TCF_EM_OPND_EQ:
-                       return !r;
-               case TCF_EM_OPND_LT:
-                       return r < 0;
-               case TCF_EM_OPND_GT:
-                       return r > 0;
+       case TCF_EM_OPND_EQ:
+               return !r;
+       case TCF_EM_OPND_LT:
+               return r < 0;
+       case TCF_EM_OPND_GT:
+               return r > 0;
        }
 
        return 0;
@@ -771,7 +765,7 @@ static inline int meta_change_data(struct meta_value *dst, struct nlattr *nla)
 
 static inline int meta_is_supported(struct meta_value *val)
 {
-       return (!meta_id(val) || meta_ops(val)->get);
+       return !meta_id(val) || meta_ops(val)->get;
 }
 
 static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = {
index 1a4176aee6e5c181749d522ebd2a258dbbf59762..a3bed07a008b2f746376ce0970cd4f15196adbdb 100644 (file)
@@ -18,8 +18,7 @@
 #include <linux/tc_ematch/tc_em_nbyte.h>
 #include <net/pkt_cls.h>
 
-struct nbyte_data
-{
+struct nbyte_data {
        struct tcf_em_nbyte     hdr;
        char                    pattern[0];
 };
index ea8f566e720c7bd9ed508647c080c5c554edba2b..15d353d2e4be0791859f533c8b816a584713fbcd 100644 (file)
@@ -19,8 +19,7 @@
 #include <linux/tc_ematch/tc_em_text.h>
 #include <net/pkt_cls.h>
 
-struct text_match
-{
+struct text_match {
        u16                     from_offset;
        u16                     to_offset;
        u8                      from_layer;
index 953f1479f7da2d4af4015440fc708ffda9d81670..797bdb88c010ce5b63ce24766d9e1870e6805107 100644 (file)
@@ -35,7 +35,7 @@ static int em_u32_match(struct sk_buff *skb, struct tcf_ematch *em,
        if (!tcf_valid_offset(skb, ptr, sizeof(u32)))
                return 0;
 
-       return !(((*(__be32*) ptr)  ^ key->val) & key->mask);
+       return !(((*(__be32 *) ptr)  ^ key->val) & key->mask);
 }
 
 static struct tcf_ematch_ops em_u32_ops = {
index 5e37da961f804cfdf17fa30d6434332e32dc1332..88d93eb92507be2f80fd90db12a63b11a7e87aeb 100644 (file)
@@ -93,7 +93,7 @@
 static LIST_HEAD(ematch_ops);
 static DEFINE_RWLOCK(ematch_mod_lock);
 
-static inline struct tcf_ematch_ops * tcf_em_lookup(u16 kind)
+static struct tcf_ematch_ops *tcf_em_lookup(u16 kind)
 {
        struct tcf_ematch_ops *e = NULL;
 
@@ -163,8 +163,8 @@ void tcf_em_unregister(struct tcf_ematch_ops *ops)
 }
 EXPORT_SYMBOL(tcf_em_unregister);
 
-static inline struct tcf_ematch * tcf_em_get_match(struct tcf_ematch_tree *tree,
-                                                  int index)
+static inline struct tcf_ematch *tcf_em_get_match(struct tcf_ematch_tree *tree,
+                                                 int index)
 {
        return &tree->matches[index];
 }
@@ -184,7 +184,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
 
        if (em_hdr->kind == TCF_EM_CONTAINER) {
                /* Special ematch called "container", carries an index
-                * referencing an external ematch sequence. */
+                * referencing an external ematch sequence.
+                */
                u32 ref;
 
                if (data_len < sizeof(ref))
@@ -195,7 +196,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
                        goto errout;
 
                /* We do not allow backward jumps to avoid loops and jumps
-                * to our own position are of course illegal. */
+                * to our own position are of course illegal.
+                */
                if (ref <= idx)
                        goto errout;
 
@@ -208,7 +210,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
                 * which automatically releases the reference again, therefore
                 * the module MUST not be given back under any circumstances
                 * here. Be aware, the destroy function assumes that the
-                * module is held if the ops field is non zero. */
+                * module is held if the ops field is non zero.
+                */
                em->ops = tcf_em_lookup(em_hdr->kind);
 
                if (em->ops == NULL) {
@@ -221,7 +224,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
                        if (em->ops) {
                                /* We dropped the RTNL mutex in order to
                                 * perform the module load. Tell the caller
-                                * to replay the request. */
+                                * to replay the request.
+                                */
                                module_put(em->ops->owner);
                                err = -EAGAIN;
                        }
@@ -230,7 +234,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
                }
 
                /* ematch module provides expected length of data, so we
-                * can do a basic sanity check. */
+                * can do a basic sanity check.
+                */
                if (em->ops->datalen && data_len < em->ops->datalen)
                        goto errout;
 
@@ -246,7 +251,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
                         * TCF_EM_SIMPLE may be specified stating that the
                         * data only consists of a u32 integer and the module
                         * does not expected a memory reference but rather
-                        * the value carried. */
+                        * the value carried.
+                        */
                        if (em_hdr->flags & TCF_EM_SIMPLE) {
                                if (data_len < sizeof(u32))
                                        goto errout;
@@ -334,7 +340,8 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla,
         * The array of rt attributes is parsed in the order as they are
         * provided, their type must be incremental from 1 to n. Even
         * if it does not serve any real purpose, a failure of sticking
-        * to this policy will result in parsing failure. */
+        * to this policy will result in parsing failure.
+        */
        for (idx = 0; nla_ok(rt_match, list_len); idx++) {
                err = -EINVAL;
 
@@ -359,7 +366,8 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla,
        /* Check if the number of matches provided by userspace actually
         * complies with the array of matches. The number was used for
         * the validation of references and a mismatch could lead to
-        * undefined references during the matching process. */
+        * undefined references during the matching process.
+        */
        if (idx != tree_hdr->nmatches) {
                err = -EINVAL;
                goto errout_abort;
@@ -449,7 +457,7 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv)
                        .flags = em->flags
                };
 
-               NLA_PUT(skb, i+1, sizeof(em_hdr), &em_hdr);
+               NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr);
 
                if (em->ops && em->ops->dump) {
                        if (em->ops->dump(skb, em) < 0)
@@ -478,6 +486,7 @@ static inline int tcf_em_match(struct sk_buff *skb, struct tcf_ematch *em,
                               struct tcf_pkt_info *info)
 {
        int r = em->ops->match(skb, em, info);
+
        return tcf_em_is_inverted(em) ? !r : r;
 }
 
@@ -527,8 +536,8 @@ pop_stack:
 
 stack_overflow:
        if (net_ratelimit())
-               printk(KERN_WARNING "tc ematch: local stack overflow,"
-                       " increase NET_EMATCH_STACK\n");
+               pr_warning("tc ematch: local stack overflow,"
+                          " increase NET_EMATCH_STACK\n");
        return -1;
 }
 EXPORT_SYMBOL(__tcf_em_tree_match);
index b22ca2d1cebca4e2495f1fd15502e7f39fe82342..7490f3f2db8bf50a8ac2ede5339687789d45ec44 100644 (file)
@@ -187,7 +187,7 @@ int unregister_qdisc(struct Qdisc_ops *qops)
        int err = -ENOENT;
 
        write_lock(&qdisc_mod_lock);
-       for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next)
+       for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
                if (q == qops)
                        break;
        if (q) {
@@ -321,7 +321,9 @@ void qdisc_put_rtab(struct qdisc_rate_table *tab)
        if (!tab || --tab->refcnt)
                return;
 
-       for (rtabp = &qdisc_rtab_list; (rtab=*rtabp) != NULL; rtabp = &rtab->next) {
+       for (rtabp = &qdisc_rtab_list;
+            (rtab = *rtabp) != NULL;
+            rtabp = &rtab->next) {
                if (rtab == tab) {
                        *rtabp = rtab->next;
                        kfree(rtab);
@@ -396,6 +398,11 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
        return stab;
 }
 
+static void stab_kfree_rcu(struct rcu_head *head)
+{
+       kfree(container_of(head, struct qdisc_size_table, rcu));
+}
+
 void qdisc_put_stab(struct qdisc_size_table *tab)
 {
        if (!tab)
@@ -405,7 +412,7 @@ void qdisc_put_stab(struct qdisc_size_table *tab)
 
        if (--tab->refcnt == 0) {
                list_del(&tab->list);
-               kfree(tab);
+               call_rcu_bh(&tab->rcu, stab_kfree_rcu);
        }
 
        spin_unlock(&qdisc_stab_lock);
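The stab hunks above defer freeing through call_rcu_bh() and recover the size table from its embedded rcu_head with container_of(); the sketch below illustrates only that container_of() recovery in userspace, with stub types and a direct callback standing in for the RCU machinery.

/*
 * Standalone sketch (not kernel code) of the container_of() step inside
 * stab_kfree_rcu() above: the callback only receives a pointer to the
 * rcu_head embedded in the table, and container_of() walks back to the
 * start of the enclosing structure so it can be freed.  The struct is a
 * stand-in, and a direct call replaces the deferred call_rcu_bh().
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head_stub {
	void (*func)(struct rcu_head_stub *);
};

struct size_table_stub {
	int			refcnt;
	struct rcu_head_stub	rcu;	/* embedded, as in qdisc_size_table */
};

static void stab_free_cb(struct rcu_head_stub *head)
{
	struct size_table_stub *stab =
		container_of(head, struct size_table_stub, rcu);

	printf("freeing table with refcnt=%d\n", stab->refcnt);
	free(stab);
}

int main(void)
{
	struct size_table_stub *stab = calloc(1, sizeof(*stab));

	if (!stab)
		return 1;
	/* the kernel would instead do call_rcu_bh(&stab->rcu, stab_kfree_rcu) */
	stab_free_cb(&stab->rcu);
	return 0;
}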
@@ -428,7 +435,7 @@ nla_put_failure:
        return -1;
 }
 
-void qdisc_calculate_pkt_len(struct sk_buff *skb, struct qdisc_size_table *stab)
+void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
 {
        int pkt_len, slot;
 
@@ -454,14 +461,13 @@ out:
                pkt_len = 1;
        qdisc_skb_cb(skb)->pkt_len = pkt_len;
 }
-EXPORT_SYMBOL(qdisc_calculate_pkt_len);
+EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
 
 void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc)
 {
        if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
-               printk(KERN_WARNING
-                      "%s: %s qdisc %X: is non-work-conserving?\n",
-                      txt, qdisc->ops->id, qdisc->handle >> 16);
+               pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
+                       txt, qdisc->ops->id, qdisc->handle >> 16);
                qdisc->flags |= TCQ_F_WARN_NONWC;
        }
 }
@@ -472,7 +478,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
        struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
                                                 timer);
 
-       wd->qdisc->flags &= ~TCQ_F_THROTTLED;
+       qdisc_unthrottled(wd->qdisc);
        __netif_schedule(qdisc_root(wd->qdisc));
 
        return HRTIMER_NORESTART;
@@ -494,7 +500,7 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
                     &qdisc_root_sleeping(wd->qdisc)->state))
                return;
 
-       wd->qdisc->flags |= TCQ_F_THROTTLED;
+       qdisc_throttled(wd->qdisc);
        time = ktime_set(0, 0);
        time = ktime_add_ns(time, PSCHED_TICKS2NS(expires));
        hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
@@ -504,7 +510,7 @@ EXPORT_SYMBOL(qdisc_watchdog_schedule);
 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 {
        hrtimer_cancel(&wd->timer);
-       wd->qdisc->flags &= ~TCQ_F_THROTTLED;
+       qdisc_unthrottled(wd->qdisc);
 }
 EXPORT_SYMBOL(qdisc_watchdog_cancel);
 
@@ -625,7 +631,7 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
                        autohandle = TC_H_MAKE(0x80000000U, 0);
        } while (qdisc_lookup(dev, autohandle) && --i > 0);
 
-       return i>0 ? autohandle : 0;
+       return i > 0 ? autohandle : 0;
 }
 
 void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
@@ -834,7 +840,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
                                err = PTR_ERR(stab);
                                goto err_out4;
                        }
-                       sch->stab = stab;
+                       rcu_assign_pointer(sch->stab, stab);
                }
                if (tca[TCA_RATE]) {
                        spinlock_t *root_lock;
@@ -874,7 +880,7 @@ err_out4:
         * Any broken qdiscs that would require a ops->reset() here?
         * The qdisc was never in action so it shouldn't be necessary.
         */
-       qdisc_put_stab(sch->stab);
+       qdisc_put_stab(rtnl_dereference(sch->stab));
        if (ops->destroy)
                ops->destroy(sch);
        goto err_out3;
@@ -882,7 +888,7 @@ err_out4:
 
 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
 {
-       struct qdisc_size_table *stab = NULL;
+       struct qdisc_size_table *ostab, *stab = NULL;
        int err = 0;
 
        if (tca[TCA_OPTIONS]) {
@@ -899,8 +905,9 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
                        return PTR_ERR(stab);
        }
 
-       qdisc_put_stab(sch->stab);
-       sch->stab = stab;
+       ostab = rtnl_dereference(sch->stab);
+       rcu_assign_pointer(sch->stab, stab);
+       qdisc_put_stab(ostab);
 
        if (tca[TCA_RATE]) {
                /* NB: ignores errors from replace_estimator
@@ -915,9 +922,8 @@ out:
        return 0;
 }
 
-struct check_loop_arg
-{
-       struct qdisc_walker     w;
+struct check_loop_arg {
+       struct qdisc_walker     w;
        struct Qdisc            *p;
        int                     depth;
 };
@@ -970,7 +976,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
        struct Qdisc *p = NULL;
        int err;
 
-       if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+       dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+       if (!dev)
                return -ENODEV;
 
        err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -980,12 +987,12 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
        if (clid) {
                if (clid != TC_H_ROOT) {
                        if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
-                               if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
+                               p = qdisc_lookup(dev, TC_H_MAJ(clid));
+                               if (!p)
                                        return -ENOENT;
                                q = qdisc_leaf(p, clid);
-                       } else { /* ingress */
-                               if (dev_ingress_queue(dev))
-                                       q = dev_ingress_queue(dev)->qdisc_sleeping;
+                       } else if (dev_ingress_queue(dev)) {
+                               q = dev_ingress_queue(dev)->qdisc_sleeping;
                        }
                } else {
                        q = dev->qdisc;
@@ -996,7 +1003,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
                        return -EINVAL;
        } else {
-               if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
+               q = qdisc_lookup(dev, tcm->tcm_handle);
+               if (!q)
                        return -ENOENT;
        }
 
@@ -1008,7 +1016,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                        return -EINVAL;
                if (q->handle == 0)
                        return -ENOENT;
-               if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0)
+               err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
+               if (err != 0)
                        return err;
        } else {
                qdisc_notify(net, skb, n, clid, NULL, q);
@@ -1017,7 +1026,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 }
 
 /*
-   Create/change qdisc.
+ * Create/change qdisc.
  */
 
 static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
@@ -1036,7 +1045,8 @@ replay:
        clid = tcm->tcm_parent;
        q = p = NULL;
 
-       if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+       dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+       if (!dev)
                return -ENODEV;
 
        err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1046,12 +1056,12 @@ replay:
        if (clid) {
                if (clid != TC_H_ROOT) {
                        if (clid != TC_H_INGRESS) {
-                               if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
+                               p = qdisc_lookup(dev, TC_H_MAJ(clid));
+                               if (!p)
                                        return -ENOENT;
                                q = qdisc_leaf(p, clid);
-                       } else { /* ingress */
-                               if (dev_ingress_queue_create(dev))
-                                       q = dev_ingress_queue(dev)->qdisc_sleeping;
+                       } else if (dev_ingress_queue_create(dev)) {
+                               q = dev_ingress_queue(dev)->qdisc_sleeping;
                        }
                } else {
                        q = dev->qdisc;
@@ -1063,13 +1073,14 @@ replay:
 
                if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
                        if (tcm->tcm_handle) {
-                               if (q && !(n->nlmsg_flags&NLM_F_REPLACE))
+                               if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
                                        return -EEXIST;
                                if (TC_H_MIN(tcm->tcm_handle))
                                        return -EINVAL;
-                               if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
+                               q = qdisc_lookup(dev, tcm->tcm_handle);
+                               if (!q)
                                        goto create_n_graft;
-                               if (n->nlmsg_flags&NLM_F_EXCL)
+                               if (n->nlmsg_flags & NLM_F_EXCL)
                                        return -EEXIST;
                                if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
                                        return -EINVAL;
@@ -1079,7 +1090,7 @@ replay:
                                atomic_inc(&q->refcnt);
                                goto graft;
                        } else {
-                               if (q == NULL)
+                               if (!q)
                                        goto create_n_graft;
 
                                /* This magic test requires explanation.
@@ -1101,9 +1112,9 @@ replay:
                                 *   For now we select create/graft, if
                                 *   user gave KIND, which does not match existing.
                                 */
-                               if ((n->nlmsg_flags&NLM_F_CREATE) &&
-                                   (n->nlmsg_flags&NLM_F_REPLACE) &&
-                                   ((n->nlmsg_flags&NLM_F_EXCL) ||
+                               if ((n->nlmsg_flags & NLM_F_CREATE) &&
+                                   (n->nlmsg_flags & NLM_F_REPLACE) &&
+                                   ((n->nlmsg_flags & NLM_F_EXCL) ||
                                     (tca[TCA_KIND] &&
                                      nla_strcmp(tca[TCA_KIND], q->ops->id))))
                                        goto create_n_graft;
@@ -1118,7 +1129,7 @@ replay:
        /* Change qdisc parameters */
        if (q == NULL)
                return -ENOENT;
-       if (n->nlmsg_flags&NLM_F_EXCL)
+       if (n->nlmsg_flags & NLM_F_EXCL)
                return -EEXIST;
        if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
                return -EINVAL;
@@ -1128,7 +1139,7 @@ replay:
        return err;
 
 create_n_graft:
-       if (!(n->nlmsg_flags&NLM_F_CREATE))
+       if (!(n->nlmsg_flags & NLM_F_CREATE))
                return -ENOENT;
        if (clid == TC_H_INGRESS) {
                if (dev_ingress_queue(dev))
@@ -1175,6 +1186,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
        struct nlmsghdr  *nlh;
        unsigned char *b = skb_tail_pointer(skb);
        struct gnet_dump d;
+       struct qdisc_size_table *stab;
 
        nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
        tcm = NLMSG_DATA(nlh);
@@ -1190,7 +1202,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
                goto nla_put_failure;
        q->qstats.qlen = q->q.qlen;
 
-       if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
+       stab = rtnl_dereference(q->stab);
+       if (stab && qdisc_dump_stab(skb, stab) < 0)
                goto nla_put_failure;
 
        if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
@@ -1234,16 +1247,19 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb,
                return -ENOBUFS;
 
        if (old && !tc_qdisc_dump_ignore(old)) {
-               if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
+               if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq,
+                                 0, RTM_DELQDISC) < 0)
                        goto err_out;
        }
        if (new && !tc_qdisc_dump_ignore(new)) {
-               if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
+               if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq,
+                                 old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
                        goto err_out;
        }
 
        if (skb->len)
-               return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+               return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+                                     n->nlmsg_flags & NLM_F_ECHO);
 
 err_out:
        kfree_skb(skb);
@@ -1275,7 +1291,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
                        q_idx++;
                        continue;
                }
-               if (!tc_qdisc_dump_ignore(q) && 
+               if (!tc_qdisc_dump_ignore(q) &&
                    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
                        goto done;
@@ -1356,7 +1372,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
        u32 qid = TC_H_MAJ(clid);
        int err;
 
-       if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+       dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+       if (!dev)
                return -ENODEV;
 
        err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1391,9 +1408,9 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                        qid = dev->qdisc->handle;
 
                /* Now qid is genuine qdisc handle consistent
-                  both with parent and child.
-
-                  TC_H_MAJ(pid) still may be unspecified, complete it now.
+                * both with parent and child.
+                *
+                * TC_H_MAJ(pid) still may be unspecified, complete it now.
                 */
                if (pid)
                        pid = TC_H_MAKE(qid, pid);
@@ -1403,7 +1420,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
        }
 
        /* OK. Locate qdisc */
-       if ((q = qdisc_lookup(dev, qid)) == NULL)
+       q = qdisc_lookup(dev, qid);
+       if (!q)
                return -ENOENT;
 
        /* An check that it supports classes */
@@ -1423,13 +1441,14 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 
        if (cl == 0) {
                err = -ENOENT;
-               if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags&NLM_F_CREATE))
+               if (n->nlmsg_type != RTM_NEWTCLASS ||
+                   !(n->nlmsg_flags & NLM_F_CREATE))
                        goto out;
        } else {
                switch (n->nlmsg_type) {
                case RTM_NEWTCLASS:
                        err = -EEXIST;
-                       if (n->nlmsg_flags&NLM_F_EXCL)
+                       if (n->nlmsg_flags & NLM_F_EXCL)
                                goto out;
                        break;
                case RTM_DELTCLASS:
@@ -1521,14 +1540,14 @@ static int tclass_notify(struct net *net, struct sk_buff *oskb,
                return -EINVAL;
        }
 
-       return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+       return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+                             n->nlmsg_flags & NLM_F_ECHO);
 }
 
-struct qdisc_dump_args
-{
-       struct qdisc_walker w;
-       struct sk_buff *skb;
-       struct netlink_callback *cb;
+struct qdisc_dump_args {
+       struct qdisc_walker     w;
+       struct sk_buff          *skb;
+       struct netlink_callback *cb;
 };
 
 static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
@@ -1590,7 +1609,7 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
 
 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
 {
-       struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh);
+       struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
        struct net *net = sock_net(skb->sk);
        struct netdev_queue *dev_queue;
        struct net_device *dev;
@@ -1598,7 +1617,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
 
        if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
                return 0;
-       if ((dev = dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+       dev = dev_get_by_index(net, tcm->tcm_ifindex);
+       if (!dev)
                return 0;
 
        s_t = cb->args[0];
@@ -1621,19 +1641,22 @@ done:
 }
 
 /* Main classifier routine: scans classifier chain attached
-   to this qdisc, (optionally) tests for protocol and asks
-   specific classifiers.
+ * to this qdisc, (optionally) tests for protocol and asks
+ * specific classifiers.
  */
 int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
                       struct tcf_result *res)
 {
        __be16 protocol = skb->protocol;
-       int err = 0;
+       int err;
 
        for (; tp; tp = tp->next) {
-               if ((tp->protocol == protocol ||
-                    tp->protocol == htons(ETH_P_ALL)) &&
-                   (err = tp->classify(skb, tp, res)) >= 0) {
+               if (tp->protocol != protocol &&
+                   tp->protocol != htons(ETH_P_ALL))
+                       continue;
+               err = tp->classify(skb, tp, res);
+
+               if (err >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
                        if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
                                skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
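
(The restructured loop above keeps the old behaviour: skip filters whose protocol matches neither the packet nor ETH_P_ALL, and let the first classifier that returns a non-negative verdict decide. A toy standalone sketch of that shape; the struct and verdict convention below are illustrative stand-ins, not the kernel's tcf_proto API.)

#include <stdio.h>

struct toy_filter {
	unsigned short protocol;        /* 0 behaves like ETH_P_ALL: match anything */
	int (*classify)(int pkt);       /* >= 0: verdict, < 0: no opinion */
	struct toy_filter *next;
};

static int walk_chain(struct toy_filter *tp, unsigned short proto, int pkt)
{
	for (; tp; tp = tp->next) {
		int err;

		if (tp->protocol && tp->protocol != proto)
			continue;       /* wrong protocol, try the next filter */
		err = tp->classify(pkt);
		if (err >= 0)
			return err;     /* first non-negative verdict wins */
	}
	return -1;                      /* nothing claimed the packet */
}

static int match_even(int pkt)
{
	return (pkt % 2 == 0) ? 1 : -1;
}

int main(void)
{
	struct toy_filter f = { .protocol = 0x0800, .classify = match_even, .next = NULL };

	printf("%d %d\n", walk_chain(&f, 0x0800, 4), walk_chain(&f, 0x0800, 5));
	return 0;
}
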
@@ -1649,12 +1672,12 @@ int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
                struct tcf_result *res)
 {
        int err = 0;
-       __be16 protocol;
 #ifdef CONFIG_NET_CLS_ACT
+       __be16 protocol;
        struct tcf_proto *otp = tp;
 reclassify:
-#endif
        protocol = skb->protocol;
+#endif
 
        err = tc_classify_compat(skb, tp, res);
 #ifdef CONFIG_NET_CLS_ACT
@@ -1664,11 +1687,11 @@ reclassify:
 
                if (verd++ >= MAX_REC_LOOP) {
                        if (net_ratelimit())
-                               printk(KERN_NOTICE
-                                      "%s: packet reclassify loop"
+                               pr_notice("%s: packet reclassify loop"
                                          " rule prio %u protocol %02x\n",
-                                      tp->q->ops->id,
-                                      tp->prio & 0xffff, ntohs(tp->protocol));
+                                         tp->q->ops->id,
+                                         tp->prio & 0xffff,
+                                         ntohs(tp->protocol));
                        return TC_ACT_SHOT;
                }
                skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
@@ -1761,7 +1784,7 @@ static int __init pktsched_init(void)
 
        err = register_pernet_subsys(&psched_net_ops);
        if (err) {
-               printk(KERN_ERR "pktsched_init: "
+               pr_err("pktsched_init: "
                       "cannot initialize per netns operations\n");
                return err;
        }
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 943d733409d08316e7bbf81749047c066b63d9bb..3f08158b8688e7128e9c363242eef43d99c3cfa9 100644 (file)
@@ -319,7 +319,7 @@ static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
         * creation), and one for the reference held when calling delete.
         */
        if (flow->ref < 2) {
-               printk(KERN_ERR "atm_tc_delete: flow->ref == %d\n", flow->ref);
+               pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
                return -EINVAL;
        }
        if (flow->ref > 2)
@@ -384,12 +384,12 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                        }
                }
                flow = NULL;
-       done:
-               ;               
+done:
+               ;
        }
-       if (!flow)
+       if (!flow) {
                flow = &p->link;
-       else {
+       } else {
                if (flow->vcc)
                        ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
                /*@@@ looks good ... but it's not supposed to work :-) */
@@ -576,8 +576,7 @@ static void atm_tc_destroy(struct Qdisc *sch)
 
        list_for_each_entry_safe(flow, tmp, &p->flows, list) {
                if (flow->ref > 1)
-                       printk(KERN_ERR "atm_destroy: %p->ref = %d\n", flow,
-                              flow->ref);
+                       pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
                atm_tc_put(sch, (unsigned long)flow);
        }
        tasklet_kill(&p->task);
@@ -616,9 +615,8 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
        }
        if (flow->excess)
                NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid);
-       else {
+       else
                NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0);
-       }
 
        nla_nest_end(skb, nest);
        return skb->len;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 5f63ec58942c10ac5088c6e6fca6e40463235a0b..24d94c097b35f34bfee4ff5aed2a724e8549176d 100644 (file)
@@ -72,8 +72,7 @@
 struct cbq_sched_data;
 
 
-struct cbq_class
-{
+struct cbq_class {
        struct Qdisc_class_common common;
        struct cbq_class        *next_alive;    /* next class with backlog in this priority band */
 
@@ -139,19 +138,18 @@ struct cbq_class
        int                     refcnt;
        int                     filters;
 
-       struct cbq_class        *defaults[TC_PRIO_MAX+1];
+       struct cbq_class        *defaults[TC_PRIO_MAX + 1];
 };
 
-struct cbq_sched_data
-{
+struct cbq_sched_data {
        struct Qdisc_class_hash clhash;                 /* Hash table of all classes */
-       int                     nclasses[TC_CBQ_MAXPRIO+1];
-       unsigned                quanta[TC_CBQ_MAXPRIO+1];
+       int                     nclasses[TC_CBQ_MAXPRIO + 1];
+       unsigned int            quanta[TC_CBQ_MAXPRIO + 1];
 
        struct cbq_class        link;
 
-       unsigned                activemask;
-       struct cbq_class        *active[TC_CBQ_MAXPRIO+1];      /* List of all classes
+       unsigned int            activemask;
+       struct cbq_class        *active[TC_CBQ_MAXPRIO + 1];    /* List of all classes
                                                                   with backlog */
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -162,7 +160,7 @@ struct cbq_sched_data
        int                     tx_len;
        psched_time_t           now;            /* Cached timestamp */
        psched_time_t           now_rt;         /* Cached real time */
-       unsigned                pmask;
+       unsigned int            pmask;
 
        struct hrtimer          delay_timer;
        struct qdisc_watchdog   watchdog;       /* Watchdog timer,
@@ -175,9 +173,9 @@ struct cbq_sched_data
 };
 
 
-#define L2T(cl,len)    qdisc_l2t((cl)->R_tab,len)
+#define L2T(cl, len)   qdisc_l2t((cl)->R_tab, len)
 
-static __inline__ struct cbq_class *
+static inline struct cbq_class *
 cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
 {
        struct Qdisc_class_common *clc;
@@ -193,25 +191,27 @@ cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
 static struct cbq_class *
 cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
 {
-       struct cbq_class *cl, *new;
+       struct cbq_class *cl;
 
-       for (cl = this->tparent; cl; cl = cl->tparent)
-               if ((new = cl->defaults[TC_PRIO_BESTEFFORT]) != NULL && new != this)
-                       return new;
+       for (cl = this->tparent; cl; cl = cl->tparent) {
+               struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];
 
+               if (new != NULL && new != this)
+                       return new;
+       }
        return NULL;
 }
 
 #endif
 
 /* Classify packet. The procedure is pretty complicated, but
-   it allows us to combine link sharing and priority scheduling
-   transparently.
-
-   Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
-   so that it resolves to split nodes. Then packets are classified
-   by logical priority, or a more specific classifier may be attached
-   to the split node.
+ * it allows us to combine link sharing and priority scheduling
+ * transparently.
+ *
+ * Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
+ * so that it resolves to split nodes. Then packets are classified
+ * by logical priority, or a more specific classifier may be attached
+ * to the split node.
  */
 
 static struct cbq_class *
@@ -227,7 +227,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
        /*
         *  Step 1. If skb->priority points to one of our classes, use it.
         */
-       if (TC_H_MAJ(prio^sch->handle) == 0 &&
+       if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
            (cl = cbq_class_lookup(q, prio)) != NULL)
                return cl;
 
@@ -243,10 +243,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
                    (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
                        goto fallback;
 
-               if ((cl = (void*)res.class) == NULL) {
+               cl = (void *)res.class;
+               if (!cl) {
                        if (TC_H_MAJ(res.classid))
                                cl = cbq_class_lookup(q, res.classid);
-                       else if ((cl = defmap[res.classid&TC_PRIO_MAX]) == NULL)
+                       else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
                                cl = defmap[TC_PRIO_BESTEFFORT];
 
                        if (cl == NULL || cl->level >= head->level)
@@ -282,7 +283,7 @@ fallback:
         * Step 4. No success...
         */
        if (TC_H_MAJ(prio) == 0 &&
-           !(cl = head->defaults[prio&TC_PRIO_MAX]) &&
+           !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
            !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
                return head;
 
@@ -290,12 +291,12 @@ fallback:
 }
 
 /*
-   A packet has just been enqueued on the empty class.
-   cbq_activate_class adds it to the tail of active class list
-   of its priority band.
+ * A packet has just been enqueued on the empty class.
+ * cbq_activate_class adds it to the tail of active class list
+ * of its priority band.
  */
 
-static __inline__ void cbq_activate_class(struct cbq_class *cl)
+static inline void cbq_activate_class(struct cbq_class *cl)
 {
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
        int prio = cl->cpriority;
@@ -314,9 +315,9 @@ static __inline__ void cbq_activate_class(struct cbq_class *cl)
 }
 
 /*
-   Unlink class from active chain.
-   Note that this same procedure is done directly in cbq_dequeue*
-   during round-robin procedure.
+ * Unlink class from active chain.
+ * Note that this same procedure is done directly in cbq_dequeue*
+ * during round-robin procedure.
  */
 
 static void cbq_deactivate_class(struct cbq_class *this)
@@ -350,7 +351,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
 {
        int toplevel = q->toplevel;
 
-       if (toplevel > cl->level && !(cl->q->flags&TCQ_F_THROTTLED)) {
+       if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) {
                psched_time_t now;
                psched_tdiff_t incr;
 
@@ -363,7 +364,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
                                q->toplevel = cl->level;
                                return;
                        }
-               } while ((cl=cl->borrow) != NULL && toplevel > cl->level);
+               } while ((cl = cl->borrow) != NULL && toplevel > cl->level);
        }
 }
 
@@ -417,11 +418,11 @@ static void cbq_ovl_classic(struct cbq_class *cl)
                delay += cl->offtime;
 
                /*
-                  Class goes to sleep, so that it will have no
-                  chance to work avgidle. Let's forgive it 8)
-
-                  BTW cbq-2.0 has a crap in this
-                  place, apparently they forgot to shift it by cl->ewma_log.
+                * Class goes to sleep, so that it will have no
+                * chance to work avgidle. Let's forgive it 8)
+                *
+                * BTW cbq-2.0 has a crap in this
+                * place, apparently they forgot to shift it by cl->ewma_log.
                 */
                if (cl->avgidle < 0)
                        delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
@@ -438,8 +439,8 @@ static void cbq_ovl_classic(struct cbq_class *cl)
                q->wd_expires = delay;
 
        /* Dirty work! We must schedule wakeups based on
-          real available rate, rather than leaf rate,
-          which may be tiny (even zero).
+        * real available rate, rather than leaf rate,
+        * which may be tiny (even zero).
         */
        if (q->toplevel == TC_CBQ_MAXLEVEL) {
                struct cbq_class *b;
@@ -459,7 +460,7 @@ static void cbq_ovl_classic(struct cbq_class *cl)
 }
 
 /* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when
-   they go overlimit
+ * they go overlimit
  */
 
 static void cbq_ovl_rclassic(struct cbq_class *cl)
@@ -594,7 +595,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
        struct Qdisc *sch = q->watchdog.qdisc;
        psched_time_t now;
        psched_tdiff_t delay = 0;
-       unsigned pmask;
+       unsigned int pmask;
 
        now = psched_get_time();
 
@@ -623,7 +624,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
                hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
        }
 
-       sch->flags &= ~TCQ_F_THROTTLED;
+       qdisc_unthrottled(sch);
        __netif_schedule(qdisc_root(sch));
        return HRTIMER_NORESTART;
 }
@@ -663,15 +664,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 #endif
 
 /*
-   It is mission critical procedure.
-
-   We "regenerate" toplevel cutoff, if transmitting class
-   has backlog and it is not regulated. It is not part of
-   original CBQ description, but looks more reasonable.
-   Probably, it is wrong. This question needs further investigation.
-*/
+ * It is mission critical procedure.
+ *
+ * We "regenerate" toplevel cutoff, if transmitting class
+ * has backlog and it is not regulated. It is not part of
+ * original CBQ description, but looks more reasonable.
+ * Probably, it is wrong. This question needs further investigation.
+ */
 
-static __inline__ void
+static inline void
 cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
                    struct cbq_class *borrowed)
 {
@@ -682,7 +683,7 @@ cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
                                        q->toplevel = borrowed->level;
                                        return;
                                }
-                       } while ((borrowed=borrowed->borrow) != NULL);
+                       } while ((borrowed = borrowed->borrow) != NULL);
                }
 #if 0
        /* It is not necessary now. Uncommenting it
@@ -710,10 +711,10 @@ cbq_update(struct cbq_sched_data *q)
                cl->bstats.bytes += len;
 
                /*
-                  (now - last) is total time between packet right edges.
-                  (last_pktlen/rate) is "virtual" busy time, so that
-
-                        idle = (now - last) - last_pktlen/rate
+                * (now - last) is total time between packet right edges.
+                * (last_pktlen/rate) is "virtual" busy time, so that
+                *
+                *      idle = (now - last) - last_pktlen/rate
                 */
 
                idle = q->now - cl->last;
@@ -723,9 +724,9 @@ cbq_update(struct cbq_sched_data *q)
                        idle -= L2T(cl, len);
 
                /* true_avgidle := (1-W)*true_avgidle + W*idle,
-                  where W=2^{-ewma_log}. But cl->avgidle is scaled:
-                  cl->avgidle == true_avgidle/W,
-                  hence:
+                * where W=2^{-ewma_log}. But cl->avgidle is scaled:
+                * cl->avgidle == true_avgidle/W,
+                * hence:
                 */
                        avgidle += idle - (avgidle>>cl->ewma_log);
                }
@@ -739,22 +740,22 @@ cbq_update(struct cbq_sched_data *q)
                        cl->avgidle = avgidle;
 
                        /* Calculate expected time, when this class
-                          will be allowed to send.
-                          It will occur, when:
-                          (1-W)*true_avgidle + W*delay = 0, i.e.
-                          idle = (1/W - 1)*(-true_avgidle)
-                          or
-                          idle = (1 - W)*(-cl->avgidle);
+                        * will be allowed to send.
+                        * It will occur, when:
+                        * (1-W)*true_avgidle + W*delay = 0, i.e.
+                        * idle = (1/W - 1)*(-true_avgidle)
+                        * or
+                        * idle = (1 - W)*(-cl->avgidle);
                         */
                        idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
 
                        /*
-                          That is not all.
-                          To maintain the rate allocated to the class,
-                          we add to undertime virtual clock,
-                          necessary to complete transmitted packet.
-                          (len/phys_bandwidth has been already passed
-                          to the moment of cbq_update)
+                        * That is not all.
+                        * To maintain the rate allocated to the class,
+                        * we add to undertime virtual clock,
+                        * necessary to complete transmitted packet.
+                        * (len/phys_bandwidth has been already passed
+                        * to the moment of cbq_update)
                         */
 
                        idle -= L2T(&q->link, len);
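
(Spelling out the arithmetic in the rewritten comments once: with W = 2^-ewma_log and cl->avgidle stored as true_avgidle/W, the update true_avgidle = (1-W)*true_avgidle + W*idle divides through by W into avgidle += idle - (avgidle >> ewma_log), the form the code uses. A standalone sketch with made-up numbers, only to show the scaling.)

#include <stdio.h>

/* Scaled EWMA: avgidle holds true_avgidle / W with W = 2^-ewma_log. */
static long ewma_scaled(long avgidle, long idle, unsigned int ewma_log)
{
	return avgidle + idle - (avgidle >> ewma_log);
}

int main(void)
{
	unsigned int ewma_log = 5;      /* W = 1/32 */
	long avgidle = 1024;            /* scaled value, i.e. true_avgidle * 32 */

	avgidle = ewma_scaled(avgidle, 640, ewma_log);  /* 1024 + 640 - 32 = 1632 */
	printf("scaled avgidle=%ld true avgidle=%ld\n", avgidle, avgidle >> ewma_log);
	return 0;
}
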
@@ -776,7 +777,7 @@ cbq_update(struct cbq_sched_data *q)
        cbq_update_toplevel(q, this, q->tx_borrowed);
 }
 
-static __inline__ struct cbq_class *
+static inline struct cbq_class *
 cbq_under_limit(struct cbq_class *cl)
 {
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
@@ -792,16 +793,17 @@ cbq_under_limit(struct cbq_class *cl)
 
        do {
                /* It is very suspicious place. Now overlimit
-                  action is generated for not bounded classes
-                  only if link is completely congested.
-                  Though it is in agree with ancestor-only paradigm,
-                  it looks very stupid. Particularly,
-                  it means that this chunk of code will either
-                  never be called or result in strong amplification
-                  of burstiness. Dangerous, silly, and, however,
-                  no another solution exists.
+                * action is generated for not bounded classes
+                * only if link is completely congested.
+                * Though it is in agree with ancestor-only paradigm,
+                * it looks very stupid. Particularly,
+                * it means that this chunk of code will either
+                * never be called or result in strong amplification
+                * of burstiness. Dangerous, silly, and, however,
+                * no another solution exists.
                 */
-               if ((cl = cl->borrow) == NULL) {
+               cl = cl->borrow;
+               if (!cl) {
                        this_cl->qstats.overlimits++;
                        this_cl->overlimit(this_cl);
                        return NULL;
@@ -814,7 +816,7 @@ cbq_under_limit(struct cbq_class *cl)
        return cl;
 }
 
-static __inline__ struct sk_buff *
+static inline struct sk_buff *
 cbq_dequeue_prio(struct Qdisc *sch, int prio)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
@@ -838,7 +840,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
 
                        if (cl->deficit <= 0) {
                                /* Class exhausted its allotment per
-                                  this round. Switch to the next one.
+                                * this round. Switch to the next one.
                                 */
                                deficit = 1;
                                cl->deficit += cl->quantum;
@@ -848,8 +850,8 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
                        skb = cl->q->dequeue(cl->q);
 
                        /* Class did not give us any skb :-(
-                          It could occur even if cl->q->q.qlen != 0
-                          f.e. if cl->q == "tbf"
+                        * It could occur even if cl->q->q.qlen != 0
+                        * f.e. if cl->q == "tbf"
                         */
                        if (skb == NULL)
                                goto skip_class;
@@ -878,7 +880,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
 skip_class:
                        if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
                                /* Class is empty or penalized.
-                                  Unlink it from active chain.
+                                * Unlink it from active chain.
                                 */
                                cl_prev->next_alive = cl->next_alive;
                                cl->next_alive = NULL;
@@ -917,14 +919,14 @@ next_class:
        return NULL;
 }
 
-static __inline__ struct sk_buff *
+static inline struct sk_buff *
 cbq_dequeue_1(struct Qdisc *sch)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
-       unsigned activemask;
+       unsigned int activemask;
 
-       activemask = q->activemask&0xFF;
+       activemask = q->activemask & 0xFF;
        while (activemask) {
                int prio = ffz(~activemask);
                activemask &= ~(1<<prio);
@@ -949,11 +951,11 @@ cbq_dequeue(struct Qdisc *sch)
        if (q->tx_class) {
                psched_tdiff_t incr2;
                /* Time integrator. We calculate EOS time
-                  by adding expected packet transmission time.
-                  If real time is greater, we warp artificial clock,
-                  so that:
-
-                  cbq_time = max(real_time, work);
+                * by adding expected packet transmission time.
+                * If real time is greater, we warp artificial clock,
+                * so that:
+                *
+                * cbq_time = max(real_time, work);
                 */
                incr2 = L2T(&q->link, q->tx_len);
                q->now += incr2;
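
(The "time integrator" comment above boils down to cbq_time = max(real_time, cbq_time + expected_tx_time). A tiny standalone sketch with invented tick values; the 150-tick transmission time merely stands in for L2T(&q->link, q->tx_len).)

#include <stdio.h>

int main(void)
{
	long long cbq_time = 1000;      /* artificial "work" clock */
	long long real_time = 1180;     /* hypothetical current real time */
	long long incr2 = 150;          /* expected tx time of the last packet */

	cbq_time += incr2;              /* 1150: estimated end of transmission */
	if (real_time > cbq_time)
		cbq_time = real_time;   /* warp forward: max(real_time, work) */

	printf("cbq_time=%lld\n", cbq_time);    /* 1180 */
	return 0;
}
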
@@ -971,27 +973,27 @@ cbq_dequeue(struct Qdisc *sch)
                if (skb) {
                        qdisc_bstats_update(sch, skb);
                        sch->q.qlen--;
-                       sch->flags &= ~TCQ_F_THROTTLED;
+                       qdisc_unthrottled(sch);
                        return skb;
                }
 
                /* All the classes are overlimit.
-
-                  It is possible, if:
-
-                  1. Scheduler is empty.
-                  2. Toplevel cutoff inhibited borrowing.
-                  3. Root class is overlimit.
-
-                  Reset 2d and 3d conditions and retry.
-
-                  Note, that NS and cbq-2.0 are buggy, peeking
-                  an arbitrary class is appropriate for ancestor-only
-                  sharing, but not for toplevel algorithm.
-
-                  Our version is better, but slower, because it requires
-                  two passes, but it is unavoidable with top-level sharing.
-               */
+                *
+                * It is possible, if:
+                *
+                * 1. Scheduler is empty.
+                * 2. Toplevel cutoff inhibited borrowing.
+                * 3. Root class is overlimit.
+                *
+                * Reset 2d and 3d conditions and retry.
+                *
+                * Note, that NS and cbq-2.0 are buggy, peeking
+                * an arbitrary class is appropriate for ancestor-only
+                * sharing, but not for toplevel algorithm.
+                *
+                * Our version is better, but slower, because it requires
+                * two passes, but it is unavoidable with top-level sharing.
+                */
 
                if (q->toplevel == TC_CBQ_MAXLEVEL &&
                    q->link.undertime == PSCHED_PASTPERFECT)
@@ -1002,7 +1004,8 @@ cbq_dequeue(struct Qdisc *sch)
        }
 
        /* No packets in scheduler or nobody wants to give them to us :-(
-          Sigh... start watchdog timer in the last case. */
+        * Sigh... start watchdog timer in the last case.
+        */
 
        if (sch->q.qlen) {
                sch->qstats.overlimits++;
@@ -1024,13 +1027,14 @@ static void cbq_adjust_levels(struct cbq_class *this)
                int level = 0;
                struct cbq_class *cl;
 
-               if ((cl = this->children) != NULL) {
+               cl = this->children;
+               if (cl) {
                        do {
                                if (cl->level > level)
                                        level = cl->level;
                        } while ((cl = cl->sibling) != this->children);
                }
-               this->level = level+1;
+               this->level = level + 1;
        } while ((this = this->tparent) != NULL);
 }
 
@@ -1046,14 +1050,15 @@ static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
        for (h = 0; h < q->clhash.hashsize; h++) {
                hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
                        /* BUGGGG... Beware! This expression suffer of
-                          arithmetic overflows!
+                        * arithmetic overflows!
                         */
                        if (cl->priority == prio) {
                                cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
                                        q->quanta[prio];
                        }
                        if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) {
-                               printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum);
+                               pr_warning("CBQ: class %08x has bad quantum==%ld, repaired.\n",
+                                          cl->common.classid, cl->quantum);
                                cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
                        }
                }
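
(The "BUGGGG" warning kept above refers to the intermediate product weight * allot * nclasses, which is evaluated in native integer width before the divide and can wrap on a 32-bit build. A standalone illustration with made-up values; widening to 64 bits is shown only to make the wrap visible, it is not something this patch changes.)

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	uint32_t weight = 100000, allot = 1514, nclasses = 32, quanta = 3200000;

	/* 100000 * 1514 * 32 = 4,844,800,000, which no longer fits in 32 bits */
	uint32_t wrapped = weight * allot * nclasses / quanta;
	uint64_t widened = (uint64_t)weight * allot * nclasses / quanta;

	printf("32-bit result: %" PRIu32 "  64-bit result: %" PRIu64 "\n",
	       wrapped, widened);       /* 171 vs 1514 */
	return 0;
}
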
@@ -1064,18 +1069,18 @@ static void cbq_sync_defmap(struct cbq_class *cl)
 {
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
        struct cbq_class *split = cl->split;
-       unsigned h;
+       unsigned int h;
        int i;
 
        if (split == NULL)
                return;
 
-       for (i=0; i<=TC_PRIO_MAX; i++) {
-               if (split->defaults[i] == cl && !(cl->defmap&(1<<i)))
+       for (i = 0; i <= TC_PRIO_MAX; i++) {
+               if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
                        split->defaults[i] = NULL;
        }
 
-       for (i=0; i<=TC_PRIO_MAX; i++) {
+       for (i = 0; i <= TC_PRIO_MAX; i++) {
                int level = split->level;
 
                if (split->defaults[i])
@@ -1088,7 +1093,7 @@ static void cbq_sync_defmap(struct cbq_class *cl)
                        hlist_for_each_entry(c, n, &q->clhash.hash[h],
                                             common.hnode) {
                                if (c->split == split && c->level < level &&
-                                   c->defmap&(1<<i)) {
+                                   c->defmap & (1<<i)) {
                                        split->defaults[i] = c;
                                        level = c->level;
                                }
@@ -1102,7 +1107,8 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma
        struct cbq_class *split = NULL;
 
        if (splitid == 0) {
-               if ((split = cl->split) == NULL)
+               split = cl->split;
+               if (!split)
                        return;
                splitid = split->common.classid;
        }
@@ -1120,9 +1126,9 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma
                cl->defmap = 0;
                cbq_sync_defmap(cl);
                cl->split = split;
-               cl->defmap = def&mask;
+               cl->defmap = def & mask;
        } else
-               cl->defmap = (cl->defmap&~mask)|(def&mask);
+               cl->defmap = (cl->defmap & ~mask) | (def & mask);
 
        cbq_sync_defmap(cl);
 }
@@ -1135,7 +1141,7 @@ static void cbq_unlink_class(struct cbq_class *this)
        qdisc_class_hash_remove(&q->clhash, &this->common);
 
        if (this->tparent) {
-               clp=&this->sibling;
+               clp = &this->sibling;
                cl = *clp;
                do {
                        if (cl == this) {
@@ -1174,7 +1180,7 @@ static void cbq_link_class(struct cbq_class *this)
        }
 }
 
-static unsigned int cbq_drop(struct Qdisc* sch)
+static unsigned int cbq_drop(struct Qdisc *sch)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl, *cl_head;
@@ -1182,7 +1188,8 @@ static unsigned int cbq_drop(struct Qdisc* sch)
        unsigned int len;
 
        for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
-               if ((cl_head = q->active[prio]) == NULL)
+               cl_head = q->active[prio];
+               if (!cl_head)
                        continue;
 
                cl = cl_head;
@@ -1199,13 +1206,13 @@ static unsigned int cbq_drop(struct Qdisc* sch)
 }
 
 static void
-cbq_reset(struct Qdisc* sch)
+cbq_reset(struct Qdisc *sch)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl;
        struct hlist_node *n;
        int prio;
-       unsigned h;
+       unsigned int h;
 
        q->activemask = 0;
        q->pmask = 0;
@@ -1237,21 +1244,21 @@ cbq_reset(struct Qdisc* sch)
 
 static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
 {
-       if (lss->change&TCF_CBQ_LSS_FLAGS) {
-               cl->share = (lss->flags&TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
-               cl->borrow = (lss->flags&TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
+       if (lss->change & TCF_CBQ_LSS_FLAGS) {
+               cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
+               cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
        }
-       if (lss->change&TCF_CBQ_LSS_EWMA)
+       if (lss->change & TCF_CBQ_LSS_EWMA)
                cl->ewma_log = lss->ewma_log;
-       if (lss->change&TCF_CBQ_LSS_AVPKT)
+       if (lss->change & TCF_CBQ_LSS_AVPKT)
                cl->avpkt = lss->avpkt;
-       if (lss->change&TCF_CBQ_LSS_MINIDLE)
+       if (lss->change & TCF_CBQ_LSS_MINIDLE)
                cl->minidle = -(long)lss->minidle;
-       if (lss->change&TCF_CBQ_LSS_MAXIDLE) {
+       if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
                cl->maxidle = lss->maxidle;
                cl->avgidle = lss->maxidle;
        }
-       if (lss->change&TCF_CBQ_LSS_OFFTIME)
+       if (lss->change & TCF_CBQ_LSS_OFFTIME)
                cl->offtime = lss->offtime;
        return 0;
 }
@@ -1279,10 +1286,10 @@ static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
        if (wrr->weight)
                cl->weight = wrr->weight;
        if (wrr->priority) {
-               cl->priority = wrr->priority-1;
+               cl->priority = wrr->priority - 1;
                cl->cpriority = cl->priority;
                if (cl->priority >= cl->priority2)
-                       cl->priority2 = TC_CBQ_MAXPRIO-1;
+                       cl->priority2 = TC_CBQ_MAXPRIO - 1;
        }
 
        cbq_addprio(q, cl);
@@ -1299,10 +1306,10 @@ static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
                cl->overlimit = cbq_ovl_delay;
                break;
        case TC_CBQ_OVL_LOWPRIO:
-               if (ovl->priority2-1 >= TC_CBQ_MAXPRIO ||
-                   ovl->priority2-1 <= cl->priority)
+               if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO ||
+                   ovl->priority2 - 1 <= cl->priority)
                        return -EINVAL;
-               cl->priority2 = ovl->priority2-1;
+               cl->priority2 = ovl->priority2 - 1;
                cl->overlimit = cbq_ovl_lowprio;
                break;
        case TC_CBQ_OVL_DROP:
@@ -1381,9 +1388,9 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
        if (!q->link.q)
                q->link.q = &noop_qdisc;
 
-       q->link.priority = TC_CBQ_MAXPRIO-1;
-       q->link.priority2 = TC_CBQ_MAXPRIO-1;
-       q->link.cpriority = TC_CBQ_MAXPRIO-1;
+       q->link.priority = TC_CBQ_MAXPRIO - 1;
+       q->link.priority2 = TC_CBQ_MAXPRIO - 1;
+       q->link.cpriority = TC_CBQ_MAXPRIO - 1;
        q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
        q->link.overlimit = cbq_ovl_classic;
        q->link.allot = psched_mtu(qdisc_dev(sch));
@@ -1414,7 +1421,7 @@ put_rtab:
        return err;
 }
 
-static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
 {
        unsigned char *b = skb_tail_pointer(skb);
 
@@ -1426,7 +1433,7 @@ nla_put_failure:
        return -1;
 }
 
-static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_lssopt opt;
@@ -1451,15 +1458,15 @@ nla_put_failure:
        return -1;
 }
 
-static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_wrropt opt;
 
        opt.flags = 0;
        opt.allot = cl->allot;
-       opt.priority = cl->priority+1;
-       opt.cpriority = cl->cpriority+1;
+       opt.priority = cl->priority + 1;
+       opt.cpriority = cl->cpriority + 1;
        opt.weight = cl->weight;
        NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt);
        return skb->len;
@@ -1469,13 +1476,13 @@ nla_put_failure:
        return -1;
 }
 
-static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_ovl opt;
 
        opt.strategy = cl->ovl_strategy;
-       opt.priority2 = cl->priority2+1;
+       opt.priority2 = cl->priority2 + 1;
        opt.pad = 0;
        opt.penalty = cl->penalty;
        NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
@@ -1486,7 +1493,7 @@ nla_put_failure:
        return -1;
 }
 
-static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_fopt opt;
@@ -1505,7 +1512,7 @@ nla_put_failure:
 }
 
 #ifdef CONFIG_NET_CLS_ACT
-static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_police opt;
@@ -1569,7 +1576,7 @@ static int
 cbq_dump_class(struct Qdisc *sch, unsigned long arg,
               struct sk_buff *skb, struct tcmsg *tcm)
 {
-       struct cbq_class *cl = (struct cbq_class*)arg;
+       struct cbq_class *cl = (struct cbq_class *)arg;
        struct nlattr *nest;
 
        if (cl->tparent)
@@ -1597,7 +1604,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        struct gnet_dump *d)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
-       struct cbq_class *cl = (struct cbq_class*)arg;
+       struct cbq_class *cl = (struct cbq_class *)arg;
 
        cl->qstats.qlen = cl->q->q.qlen;
        cl->xstats.avgidle = cl->avgidle;
@@ -1617,7 +1624,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                     struct Qdisc **old)
 {
-       struct cbq_class *cl = (struct cbq_class*)arg;
+       struct cbq_class *cl = (struct cbq_class *)arg;
 
        if (new == NULL) {
                new = qdisc_create_dflt(sch->dev_queue,
@@ -1640,10 +1647,9 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
        return 0;
 }
 
-static struct Qdisc *
-cbq_leaf(struct Qdisc *sch, unsigned long arg)
+static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
 {
-       struct cbq_class *cl = (struct cbq_class*)arg;
+       struct cbq_class *cl = (struct cbq_class *)arg;
 
        return cl->q;
 }
@@ -1682,13 +1688,12 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
                kfree(cl);
 }
 
-static void
-cbq_destroy(struct Qdisc* sch)
+static void cbq_destroy(struct Qdisc *sch)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct hlist_node *n, *next;
        struct cbq_class *cl;
-       unsigned h;
+       unsigned int h;
 
 #ifdef CONFIG_NET_CLS_ACT
        q->rx_class = NULL;
@@ -1712,7 +1717,7 @@ cbq_destroy(struct Qdisc* sch)
 
 static void cbq_put(struct Qdisc *sch, unsigned long arg)
 {
-       struct cbq_class *cl = (struct cbq_class*)arg;
+       struct cbq_class *cl = (struct cbq_class *)arg;
 
        if (--cl->refcnt == 0) {
 #ifdef CONFIG_NET_CLS_ACT
@@ -1735,7 +1740,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 {
        int err;
        struct cbq_sched_data *q = qdisc_priv(sch);
-       struct cbq_class *cl = (struct cbq_class*)*arg;
+       struct cbq_class *cl = (struct cbq_class *)*arg;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_CBQ_MAX + 1];
        struct cbq_class *parent;
@@ -1827,13 +1832,14 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
        if (classid) {
                err = -EINVAL;
-               if (TC_H_MAJ(classid^sch->handle) || cbq_class_lookup(q, classid))
+               if (TC_H_MAJ(classid ^ sch->handle) ||
+                   cbq_class_lookup(q, classid))
                        goto failure;
        } else {
                int i;
-               classid = TC_H_MAKE(sch->handle,0x8000);
+               classid = TC_H_MAKE(sch->handle, 0x8000);
 
-               for (i=0; i<0x8000; i++) {
+               for (i = 0; i < 0x8000; i++) {
                        if (++q->hgenerator >= 0x8000)
                                q->hgenerator = 1;
                        if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
@@ -1890,11 +1896,11 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
        cl->minidle = -0x7FFFFFFF;
        cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
        cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
-       if (cl->ewma_log==0)
+       if (cl->ewma_log == 0)
                cl->ewma_log = q->link.ewma_log;
-       if (cl->maxidle==0)
+       if (cl->maxidle == 0)
                cl->maxidle = q->link.maxidle;
-       if (cl->avpkt==0)
+       if (cl->avpkt == 0)
                cl->avpkt = q->link.avpkt;
        cl->overlimit = cbq_ovl_classic;
        if (tb[TCA_CBQ_OVL_STRATEGY])
@@ -1920,7 +1926,7 @@ failure:
 static int cbq_delete(struct Qdisc *sch, unsigned long arg)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
-       struct cbq_class *cl = (struct cbq_class*)arg;
+       struct cbq_class *cl = (struct cbq_class *)arg;
        unsigned int qlen;
 
        if (cl->filters || cl->children || cl == &q->link)
@@ -1978,7 +1984,7 @@ static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
                                     u32 classid)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
-       struct cbq_class *p = (struct cbq_class*)parent;
+       struct cbq_class *p = (struct cbq_class *)parent;
        struct cbq_class *cl = cbq_class_lookup(q, classid);
 
        if (cl) {
@@ -1992,7 +1998,7 @@ static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
 
 static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
 {
-       struct cbq_class *cl = (struct cbq_class*)arg;
+       struct cbq_class *cl = (struct cbq_class *)arg;
 
        cl->filters--;
 }
@@ -2002,7 +2008,7 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl;
        struct hlist_node *n;
-       unsigned h;
+       unsigned int h;
 
        if (arg->stop)
                return;
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
new file mode 100644 (file)
index 0000000..06afbae
--- /dev/null
@@ -0,0 +1,688 @@
+/*
+ * net/sched/sch_choke.c       CHOKE scheduler
+ *
+ * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
+ * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/reciprocal_div.h>
+#include <linux/vmalloc.h>
+#include <net/pkt_sched.h>
+#include <net/inet_ecn.h>
+#include <net/red.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+
+/*
+   CHOKe stateless AQM for fair bandwidth allocation
+   =================================================
+
+   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
+   unresponsive flows) is a variant of RED that penalizes misbehaving flows but
+   maintains no flow state. The difference from RED is an additional step
+   during the enqueuing process. If average queue size is over the
+   low threshold (qmin), a packet is chosen at random from the queue.
+   If both the new and chosen packet are from the same flow, both
+   are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it
+   needs to access packets in queue randomly. It has a minimal class
+   interface to allow overriding the builtin flow classifier with
+   filters.
+
+   Source:
+   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
+   Active Queue Management Scheme for Approximating Fair Bandwidth Allocation",
+   IEEE INFOCOM, 2000.
+
+   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
+   Characteristics", IEEE/ACM Transactions on Networking, 2004
+
+ */
+
+/* Upper bound on size of sk_buff table (packets) */
+#define CHOKE_MAX_QUEUE        (128*1024 - 1)
+
+struct choke_sched_data {
+/* Parameters */
+       u32              limit;
+       unsigned char    flags;
+
+       struct red_parms parms;
+
+/* Variables */
+       struct tcf_proto *filter_list;
+       struct {
+               u32     prob_drop;      /* Early probability drops */
+               u32     prob_mark;      /* Early probability marks */
+               u32     forced_drop;    /* Forced drops, qavg > max_thresh */
+               u32     forced_mark;    /* Forced marks, qavg > max_thresh */
+               u32     pdrop;          /* Drops due to queue limits */
+               u32     other;          /* Drops due to drop() calls */
+               u32     matched;        /* Drops to flow match */
+       } stats;
+
+       unsigned int     head;
+       unsigned int     tail;
+
+       unsigned int     tab_mask; /* size - 1 */
+
+       struct sk_buff **tab;
+};
+
+/* deliver a random number between 0 and N - 1 */
+static u32 random_N(unsigned int N)
+{
+       return reciprocal_divide(random32(), N);
+}
+
+/* number of elements in queue including holes */
+static unsigned int choke_len(const struct choke_sched_data *q)
+{
+       return (q->tail - q->head) & q->tab_mask;
+}
+
+/* Is ECN parameter configured */
+static int use_ecn(const struct choke_sched_data *q)
+{
+       return q->flags & TC_RED_ECN;
+}
+
+/* Should packets over max just be dropped (versus marked) */
+static int use_harddrop(const struct choke_sched_data *q)
+{
+       return q->flags & TC_RED_HARDDROP;
+}
+
+/* Move head pointer forward to skip over holes */
+static void choke_zap_head_holes(struct choke_sched_data *q)
+{
+       do {
+               q->head = (q->head + 1) & q->tab_mask;
+               if (q->head == q->tail)
+                       break;
+       } while (q->tab[q->head] == NULL);
+}
+
+/* Move tail pointer backwards to reuse holes */
+static void choke_zap_tail_holes(struct choke_sched_data *q)
+{
+       do {
+               q->tail = (q->tail - 1) & q->tab_mask;
+               if (q->head == q->tail)
+                       break;
+       } while (q->tab[q->tail] == NULL);
+}
+
+/* Drop packet from queue array by creating a "hole" */
+static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+       struct sk_buff *skb = q->tab[idx];
+
+       q->tab[idx] = NULL;
+
+       if (idx == q->head)
+               choke_zap_head_holes(q);
+       if (idx == q->tail)
+               choke_zap_tail_holes(q);
+
+       sch->qstats.backlog -= qdisc_pkt_len(skb);
+       qdisc_drop(skb, sch);
+       qdisc_tree_decrease_qlen(sch, 1);
+       --sch->q.qlen;
+}
+
+/*
+ * Compare flow of two packets
+ *  Returns true only if source and destination address and port match.
+ *          false for special cases
+ */
+static bool choke_match_flow(struct sk_buff *skb1,
+                            struct sk_buff *skb2)
+{
+       int off1, off2, poff;
+       const u32 *ports1, *ports2;
+       u8 ip_proto;
+       __u32 hash1;
+
+       if (skb1->protocol != skb2->protocol)
+               return false;
+
+       /* Use hash value as quick check
+        * Assumes that __skb_get_rxhash makes IP header and ports linear
+        */
+       hash1 = skb_get_rxhash(skb1);
+       if (!hash1 || hash1 != skb_get_rxhash(skb2))
+               return false;
+
+       /* Probably match, but be sure to avoid hash collisions */
+       off1 = skb_network_offset(skb1);
+       off2 = skb_network_offset(skb2);
+
+       switch (skb1->protocol) {
+       case __constant_htons(ETH_P_IP): {
+               const struct iphdr *ip1, *ip2;
+
+               ip1 = (const struct iphdr *) (skb1->data + off1);
+               ip2 = (const struct iphdr *) (skb2->data + off2);
+
+               ip_proto = ip1->protocol;
+               if (ip_proto != ip2->protocol ||
+                   ip1->saddr != ip2->saddr || ip1->daddr != ip2->daddr)
+                       return false;
+
+               if ((ip1->frag_off | ip2->frag_off) & htons(IP_MF | IP_OFFSET))
+                       ip_proto = 0;
+               off1 += ip1->ihl * 4;
+               off2 += ip2->ihl * 4;
+               break;
+       }
+
+       case __constant_htons(ETH_P_IPV6): {
+               const struct ipv6hdr *ip1, *ip2;
+
+               ip1 = (const struct ipv6hdr *) (skb1->data + off1);
+               ip2 = (const struct ipv6hdr *) (skb2->data + off2);
+
+               ip_proto = ip1->nexthdr;
+               if (ip_proto != ip2->nexthdr ||
+                   ipv6_addr_cmp(&ip1->saddr, &ip2->saddr) ||
+                   ipv6_addr_cmp(&ip1->daddr, &ip2->daddr))
+                       return false;
+               off1 += 40;
+               off2 += 40;
+               break;
+       }
+
+       default: /* Maybe compare MAC header here? */
+               return false;
+       }
+
+       poff = proto_ports_offset(ip_proto);
+       if (poff < 0)
+               return true;
+
+       off1 += poff;
+       off2 += poff;
+
+       ports1 = (__force u32 *)(skb1->data + off1);
+       ports2 = (__force u32 *)(skb2->data + off2);
+       return *ports1 == *ports2;
+}
+
+struct choke_skb_cb {
+       u16 classid;
+};
+
+static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
+{
+       BUILD_BUG_ON(sizeof(skb->cb) <
+               sizeof(struct qdisc_skb_cb) + sizeof(struct choke_skb_cb));
+       return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
+{
+       choke_skb_cb(skb)->classid = classid;
+}
+
+static u16 choke_get_classid(const struct sk_buff *skb)
+{
+       return choke_skb_cb(skb)->classid;
+}
+
+/*
+ * Classify flow using either:
+ *  1. pre-existing classification result in skb
+ *  2. fast internal classification
+ *  3. use TC filter based classification
+ */
+static bool choke_classify(struct sk_buff *skb,
+                          struct Qdisc *sch, int *qerr)
+
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+       struct tcf_result res;
+       int result;
+
+       result = tc_classify(skb, q->filter_list, &res);
+       if (result >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+               switch (result) {
+               case TC_ACT_STOLEN:
+               case TC_ACT_QUEUED:
+                       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+               case TC_ACT_SHOT:
+                       return false;
+               }
+#endif
+               choke_set_classid(skb, TC_H_MIN(res.classid));
+               return true;
+       }
+
+       return false;
+}
+
+/*
+ * Select a packet at random from queue
+ * HACK: since queue can have holes from previous deletion; retry several
+ *   times to find a random skb but then just give up and return the head
+ * Will return NULL if queue is empty (q->head == q->tail)
+ */
+static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
+                                        unsigned int *pidx)
+{
+       struct sk_buff *skb;
+       int retrys = 3;
+
+       do {
+               *pidx = (q->head + random_N(choke_len(q))) & q->tab_mask;
+               skb = q->tab[*pidx];
+               if (skb)
+                       return skb;
+       } while (--retrys > 0);
+
+       return q->tab[*pidx = q->head];
+}
+
+/*
+ * Compare new packet with random packet in queue
+ * returns true if matched and sets *pidx
+ */
+static bool choke_match_random(const struct choke_sched_data *q,
+                              struct sk_buff *nskb,
+                              unsigned int *pidx)
+{
+       struct sk_buff *oskb;
+
+       if (q->head == q->tail)
+               return false;
+
+       oskb = choke_peek_random(q, pidx);
+       if (q->filter_list)
+               return choke_get_classid(nskb) == choke_get_classid(oskb);
+
+       return choke_match_flow(oskb, nskb);
+}
+
+static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+       struct red_parms *p = &q->parms;
+       int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+
+       if (q->filter_list) {
+               /* If using external classifiers, get result and record it. */
+               if (!choke_classify(skb, sch, &ret))
+                       goto other_drop;        /* Packet was eaten by filter */
+       }
+
+       /* Compute average queue usage (see RED) */
+       p->qavg = red_calc_qavg(p, sch->q.qlen);
+       if (red_is_idling(p))
+               red_end_of_idle_period(p);
+
+       /* Is queue small? */
+       if (p->qavg <= p->qth_min)
+               p->qcount = -1;
+       else {
+               unsigned int idx;
+
+               /* Draw a packet at random from queue and compare flow */
+               if (choke_match_random(q, skb, &idx)) {
+                       q->stats.matched++;
+                       choke_drop_by_idx(sch, idx);
+                       goto congestion_drop;
+               }
+
+               /* Queue is large, always mark/drop */
+               if (p->qavg > p->qth_max) {
+                       p->qcount = -1;
+
+                       sch->qstats.overlimits++;
+                       if (use_harddrop(q) || !use_ecn(q) ||
+                           !INET_ECN_set_ce(skb)) {
+                               q->stats.forced_drop++;
+                               goto congestion_drop;
+                       }
+
+                       q->stats.forced_mark++;
+               } else if (++p->qcount) {
+                       if (red_mark_probability(p, p->qavg)) {
+                               p->qcount = 0;
+                               p->qR = red_random(p);
+
+                               sch->qstats.overlimits++;
+                               if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
+                                       q->stats.prob_drop++;
+                                       goto congestion_drop;
+                               }
+
+                               q->stats.prob_mark++;
+                       }
+               } else
+                       p->qR = red_random(p);
+       }
+
+       /* Admit new packet */
+       if (sch->q.qlen < q->limit) {
+               q->tab[q->tail] = skb;
+               q->tail = (q->tail + 1) & q->tab_mask;
+               ++sch->q.qlen;
+               sch->qstats.backlog += qdisc_pkt_len(skb);
+               return NET_XMIT_SUCCESS;
+       }
+
+       q->stats.pdrop++;
+       sch->qstats.drops++;
+       kfree_skb(skb);
+       return NET_XMIT_DROP;
+
+ congestion_drop:
+       qdisc_drop(skb, sch);
+       return NET_XMIT_CN;
+
+ other_drop:
+       if (ret & __NET_XMIT_BYPASS)
+               sch->qstats.drops++;
+       kfree_skb(skb);
+       return ret;
+}
+
+static struct sk_buff *choke_dequeue(struct Qdisc *sch)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+       struct sk_buff *skb;
+
+       if (q->head == q->tail) {
+               if (!red_is_idling(&q->parms))
+                       red_start_of_idle_period(&q->parms);
+               return NULL;
+       }
+
+       skb = q->tab[q->head];
+       q->tab[q->head] = NULL;
+       choke_zap_head_holes(q);
+       --sch->q.qlen;
+       sch->qstats.backlog -= qdisc_pkt_len(skb);
+       qdisc_bstats_update(sch, skb);
+
+       return skb;
+}
+
+static unsigned int choke_drop(struct Qdisc *sch)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+       unsigned int len;
+
+       len = qdisc_queue_drop(sch);
+       if (len > 0)
+               q->stats.other++;
+       else {
+               if (!red_is_idling(&q->parms))
+                       red_start_of_idle_period(&q->parms);
+       }
+
+       return len;
+}
+
+static void choke_reset(struct Qdisc *sch)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+
+       red_restart(&q->parms);
+}
+
+static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
+       [TCA_CHOKE_PARMS]       = { .len = sizeof(struct tc_red_qopt) },
+       [TCA_CHOKE_STAB]        = { .len = RED_STAB_SIZE },
+};
+
+
+static void choke_free(void *addr)
+{
+       if (addr) {
+               if (is_vmalloc_addr(addr))
+                       vfree(addr);
+               else
+                       kfree(addr);
+       }
+}
+
+static int choke_change(struct Qdisc *sch, struct nlattr *opt)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+       struct nlattr *tb[TCA_CHOKE_MAX + 1];
+       const struct tc_red_qopt *ctl;
+       int err;
+       struct sk_buff **old = NULL;
+       unsigned int mask;
+
+       if (opt == NULL)
+               return -EINVAL;
+
+       err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
+       if (err < 0)
+               return err;
+
+       if (tb[TCA_CHOKE_PARMS] == NULL ||
+           tb[TCA_CHOKE_STAB] == NULL)
+               return -EINVAL;
+
+       ctl = nla_data(tb[TCA_CHOKE_PARMS]);
+
+       if (ctl->limit > CHOKE_MAX_QUEUE)
+               return -EINVAL;
+
+       mask = roundup_pow_of_two(ctl->limit + 1) - 1;
+       if (mask != q->tab_mask) {
+               struct sk_buff **ntab;
+
+               ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
+               if (!ntab)
+                       ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
+               if (!ntab)
+                       return -ENOMEM;
+
+               sch_tree_lock(sch);
+               old = q->tab;
+               if (old) {
+                       unsigned int oqlen = sch->q.qlen, tail = 0;
+
+                       while (q->head != q->tail) {
+                               struct sk_buff *skb = q->tab[q->head];
+
+                               q->head = (q->head + 1) & q->tab_mask;
+                               if (!skb)
+                                       continue;
+                               if (tail < mask) {
+                                       ntab[tail++] = skb;
+                                       continue;
+                               }
+                               sch->qstats.backlog -= qdisc_pkt_len(skb);
+                               --sch->q.qlen;
+                               qdisc_drop(skb, sch);
+                       }
+                       qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
+                       q->head = 0;
+                       q->tail = tail;
+               }
+
+               q->tab_mask = mask;
+               q->tab = ntab;
+       } else
+               sch_tree_lock(sch);
+
+       q->flags = ctl->flags;
+       q->limit = ctl->limit;
+
+       red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
+                     ctl->Plog, ctl->Scell_log,
+                     nla_data(tb[TCA_CHOKE_STAB]));
+
+       if (q->head == q->tail)
+               red_end_of_idle_period(&q->parms);
+
+       sch_tree_unlock(sch);
+       choke_free(old);
+       return 0;
+}
+
+static int choke_init(struct Qdisc *sch, struct nlattr *opt)
+{
+       return choke_change(sch, opt);
+}
+
+static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+       struct nlattr *opts = NULL;
+       struct tc_red_qopt opt = {
+               .limit          = q->limit,
+               .flags          = q->flags,
+               .qth_min        = q->parms.qth_min >> q->parms.Wlog,
+               .qth_max        = q->parms.qth_max >> q->parms.Wlog,
+               .Wlog           = q->parms.Wlog,
+               .Plog           = q->parms.Plog,
+               .Scell_log      = q->parms.Scell_log,
+       };
+
+       opts = nla_nest_start(skb, TCA_OPTIONS);
+       if (opts == NULL)
+               goto nla_put_failure;
+
+       NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
+       return nla_nest_end(skb, opts);
+
+nla_put_failure:
+       nla_nest_cancel(skb, opts);
+       return -EMSGSIZE;
+}
+
+static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+       struct tc_choke_xstats st = {
+               .early  = q->stats.prob_drop + q->stats.forced_drop,
+               .marked = q->stats.prob_mark + q->stats.forced_mark,
+               .pdrop  = q->stats.pdrop,
+               .other  = q->stats.other,
+               .matched = q->stats.matched,
+       };
+
+       return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static void choke_destroy(struct Qdisc *sch)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+
+       tcf_destroy_chain(&q->filter_list);
+       choke_free(q->tab);
+}
+
+static struct Qdisc *choke_leaf(struct Qdisc *sch, unsigned long arg)
+{
+       return NULL;
+}
+
+static unsigned long choke_get(struct Qdisc *sch, u32 classid)
+{
+       return 0;
+}
+
+static void choke_put(struct Qdisc *q, unsigned long cl)
+{
+}
+
+static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent,
+                               u32 classid)
+{
+       return 0;
+}
+
+static struct tcf_proto **choke_find_tcf(struct Qdisc *sch, unsigned long cl)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+
+       if (cl)
+               return NULL;
+       return &q->filter_list;
+}
+
+static int choke_dump_class(struct Qdisc *sch, unsigned long cl,
+                         struct sk_buff *skb, struct tcmsg *tcm)
+{
+       tcm->tcm_handle |= TC_H_MIN(cl);
+       return 0;
+}
+
+static void choke_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+       if (!arg->stop) {
+               if (arg->fn(sch, 1, arg) < 0) {
+                       arg->stop = 1;
+                       return;
+               }
+               arg->count++;
+       }
+}
+
+static const struct Qdisc_class_ops choke_class_ops = {
+       .leaf           =       choke_leaf,
+       .get            =       choke_get,
+       .put            =       choke_put,
+       .tcf_chain      =       choke_find_tcf,
+       .bind_tcf       =       choke_bind,
+       .unbind_tcf     =       choke_put,
+       .dump           =       choke_dump_class,
+       .walk           =       choke_walk,
+};
+
+static struct sk_buff *choke_peek_head(struct Qdisc *sch)
+{
+       struct choke_sched_data *q = qdisc_priv(sch);
+
+       return (q->head != q->tail) ? q->tab[q->head] : NULL;
+}
+
+static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
+       .id             =       "choke",
+       .priv_size      =       sizeof(struct choke_sched_data),
+
+       .enqueue        =       choke_enqueue,
+       .dequeue        =       choke_dequeue,
+       .peek           =       choke_peek_head,
+       .drop           =       choke_drop,
+       .init           =       choke_init,
+       .destroy        =       choke_destroy,
+       .reset          =       choke_reset,
+       .change         =       choke_change,
+       .dump           =       choke_dump,
+       .dump_stats     =       choke_dump_stats,
+       .owner          =       THIS_MODULE,
+};
+
+static int __init choke_module_init(void)
+{
+       return register_qdisc(&choke_qdisc_ops);
+}
+
+static void __exit choke_module_exit(void)
+{
+       unregister_qdisc(&choke_qdisc_ops);
+}
+
+module_init(choke_module_init)
+module_exit(choke_module_exit)
+
+MODULE_LICENSE("GPL");
index 0f7bf3fdfea5a63683d86887e4d9fad99ff40a82..2c790204d042e3ac5ec4f2b322b8a0429628dbb5 100644 (file)
@@ -137,10 +137,10 @@ static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
                mask = nla_get_u8(tb[TCA_DSMARK_MASK]);
 
        if (tb[TCA_DSMARK_VALUE])
-               p->value[*arg-1] = nla_get_u8(tb[TCA_DSMARK_VALUE]);
+               p->value[*arg - 1] = nla_get_u8(tb[TCA_DSMARK_VALUE]);
 
        if (tb[TCA_DSMARK_MASK])
-               p->mask[*arg-1] = mask;
+               p->mask[*arg - 1] = mask;
 
        err = 0;
 
@@ -155,8 +155,8 @@ static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
        if (!dsmark_valid_index(p, arg))
                return -EINVAL;
 
-       p->mask[arg-1] = 0xff;
-       p->value[arg-1] = 0;
+       p->mask[arg - 1] = 0xff;
+       p->value[arg - 1] = 0;
 
        return 0;
 }
@@ -175,7 +175,7 @@ static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
                if (p->mask[i] == 0xff && !p->value[i])
                        goto ignore;
                if (walker->count >= walker->skip) {
-                       if (walker->fn(sch, i+1, walker) < 0) {
+                       if (walker->fn(sch, i + 1, walker) < 0) {
                                walker->stop = 1;
                                break;
                        }
@@ -304,9 +304,8 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
                 * and don't need yet another qdisc as a bypass.
                 */
                if (p->mask[index] != 0xff || p->value[index])
-                       printk(KERN_WARNING
-                              "dsmark_dequeue: unsupported protocol %d\n",
-                              ntohs(skb->protocol));
+                       pr_warning("dsmark_dequeue: unsupported protocol %d\n",
+                                  ntohs(skb->protocol));
                break;
        }
 
@@ -424,14 +423,14 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
        if (!dsmark_valid_index(p, cl))
                return -EINVAL;
 
-       tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl-1);
+       tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
        tcm->tcm_info = p->q->handle;
 
        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;
-       NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl-1]);
-       NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl-1]);
+       NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]);
+       NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]);
 
        return nla_nest_end(skb, opts);
 
index d468b479aa937f410a665045eb4018bfc08de255..66effe2da8e0e65775c563a4fc4771429db45411 100644 (file)
 
 /* 1 band FIFO pseudo-"scheduler" */
 
-struct fifo_sched_data
+static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-       u32 limit;
-};
-
-static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
-{
-       struct fifo_sched_data *q = qdisc_priv(sch);
-
-       if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= q->limit))
+       if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
                return qdisc_enqueue_tail(skb, sch);
 
        return qdisc_reshape_fail(skb, sch);
 }
 
-static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-       struct fifo_sched_data *q = qdisc_priv(sch);
-
-       if (likely(skb_queue_len(&sch->q) < q->limit))
+       if (likely(skb_queue_len(&sch->q) < sch->limit))
                return qdisc_enqueue_tail(skb, sch);
 
        return qdisc_reshape_fail(skb, sch);
 }
 
-static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-       struct fifo_sched_data *q = qdisc_priv(sch);
-
-       if (likely(skb_queue_len(&sch->q) < q->limit))
+       if (likely(skb_queue_len(&sch->q) < sch->limit))
                return qdisc_enqueue_tail(skb, sch);
 
        /* queue full, remove one skb to fulfill the limit */
@@ -61,31 +50,40 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
 {
-       struct fifo_sched_data *q = qdisc_priv(sch);
+       bool bypass;
+       bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
 
        if (opt == NULL) {
                u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
 
-               if (sch->ops == &bfifo_qdisc_ops)
+               if (is_bfifo)
                        limit *= psched_mtu(qdisc_dev(sch));
 
-               q->limit = limit;
+               sch->limit = limit;
        } else {
                struct tc_fifo_qopt *ctl = nla_data(opt);
 
                if (nla_len(opt) < sizeof(*ctl))
                        return -EINVAL;
 
-               q->limit = ctl->limit;
+               sch->limit = ctl->limit;
        }
 
+       if (is_bfifo)
+               bypass = sch->limit >= psched_mtu(qdisc_dev(sch));
+       else
+               bypass = sch->limit >= 1;
+
+       if (bypass)
+               sch->flags |= TCQ_F_CAN_BYPASS;
+       else
+               sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
 }
 
 static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
-       struct fifo_sched_data *q = qdisc_priv(sch);
-       struct tc_fifo_qopt opt = { .limit = q->limit };
+       struct tc_fifo_qopt opt = { .limit = sch->limit };
 
        NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
        return skb->len;
@@ -96,7 +94,7 @@ nla_put_failure:
 
 struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
        .id             =       "pfifo",
-       .priv_size      =       sizeof(struct fifo_sched_data),
+       .priv_size      =       0,
        .enqueue        =       pfifo_enqueue,
        .dequeue        =       qdisc_dequeue_head,
        .peek           =       qdisc_peek_head,
@@ -111,7 +109,7 @@ EXPORT_SYMBOL(pfifo_qdisc_ops);
 
 struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
        .id             =       "bfifo",
-       .priv_size      =       sizeof(struct fifo_sched_data),
+       .priv_size      =       0,
        .enqueue        =       bfifo_enqueue,
        .dequeue        =       qdisc_dequeue_head,
        .peek           =       qdisc_peek_head,
@@ -126,7 +124,7 @@ EXPORT_SYMBOL(bfifo_qdisc_ops);
 
 struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
        .id             =       "pfifo_head_drop",
-       .priv_size      =       sizeof(struct fifo_sched_data),
+       .priv_size      =       0,
        .enqueue        =       pfifo_tail_enqueue,
        .dequeue        =       qdisc_dequeue_head,
        .peek           =       qdisc_peek_head,
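
The sch_fifo.c hunks above remove the private fifo_sched_data and decide at init
time whether the qdisc can be bypassed: a byte-limited bfifo needs room for at
least one MTU, a packet-limited pfifo just needs room for one packet.  A hedged
sketch of that decision follows, with limit and mtu passed as plain parameters in
place of sch->limit and psched_mtu(), and an illustrative flag value.

#include <stdbool.h>
#include <stdio.h>

#define TCQ_F_CAN_BYPASS 0x1            /* illustrative value for the example */

/* Sketch of the bypass decision added to fifo_init(). */
static unsigned int set_bypass_flag(unsigned int flags, bool is_bfifo,
                                    unsigned int limit, unsigned int mtu)
{
        bool bypass = is_bfifo ? (limit >= mtu) /* byte-counting fifo   */
                               : (limit >= 1);  /* packet-counting fifo */

        if (bypass)
                flags |= TCQ_F_CAN_BYPASS;
        else
                flags &= ~TCQ_F_CAN_BYPASS;
        return flags;
}

int main(void)
{
        /* a bfifo whose byte limit is below one MTU cannot be bypassed */
        printf("flags = %#x\n", set_bypass_flag(0, true, 1000, 1500));
        /* a pfifo with room for at least one packet can be bypassed   */
        printf("flags = %#x\n", set_bypass_flag(0, false, 100, 1500));
        return 0;
}
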
index 1bc698039ae2e647d670d53b83b976858cdf2a09..c84b65920d1b7962abe5aeb4ff222a75be21c82c 100644 (file)
@@ -87,8 +87,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
                 */
                kfree_skb(skb);
                if (net_ratelimit())
-                       printk(KERN_WARNING "Dead loop on netdevice %s, "
-                              "fix it urgently!\n", dev_queue->dev->name);
+                       pr_warning("Dead loop on netdevice %s, fix it urgently!\n",
+                                  dev_queue->dev->name);
                ret = qdisc_qlen(q);
        } else {
                /*
@@ -137,8 +137,8 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
        } else {
                /* Driver returned NETDEV_TX_BUSY - requeue skb */
                if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
-                       printk(KERN_WARNING "BUG %s code %d qlen %d\n",
-                              dev->name, ret, q->q.qlen);
+                       pr_warning("BUG %s code %d qlen %d\n",
+                                  dev->name, ret, q->q.qlen);
 
                ret = dev_requeue_skb(skb, q);
        }
@@ -412,8 +412,9 @@ static struct Qdisc noqueue_qdisc = {
 };
 
 
-static const u8 prio2band[TC_PRIO_MAX+1] =
-       { 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 };
+static const u8 prio2band[TC_PRIO_MAX + 1] = {
+       1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
+};
 
 /* 3-band FIFO queue: old style, but should be a bit faster than
    generic prio+fifo combination.
@@ -445,7 +446,7 @@ static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
        return priv->q + band;
 }
 
-static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
+static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
 {
        if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
                int band = prio2band[skb->priority & TC_PRIO_MAX];
@@ -460,7 +461,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
        return qdisc_drop(skb, qdisc);
 }
 
-static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 {
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        int band = bitmap2band[priv->bitmap];
@@ -479,7 +480,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
        return NULL;
 }
 
-static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
 {
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        int band = bitmap2band[priv->bitmap];
@@ -493,7 +494,7 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
        return NULL;
 }
 
-static void pfifo_fast_reset(struct Qdisc* qdisc)
+static void pfifo_fast_reset(struct Qdisc *qdisc)
 {
        int prio;
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
@@ -510,7 +511,7 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
 {
        struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
 
-       memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
+       memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
        NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
        return skb->len;
 
@@ -526,6 +527,8 @@ static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
                skb_queue_head_init(band2list(priv, prio));
 
+       /* Can by-pass the queue discipline */
+       qdisc->flags |= TCQ_F_CAN_BYPASS;
        return 0;
 }
 
@@ -540,27 +543,32 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
        .dump           =       pfifo_fast_dump,
        .owner          =       THIS_MODULE,
 };
+EXPORT_SYMBOL(pfifo_fast_ops);
 
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          struct Qdisc_ops *ops)
 {
        void *p;
        struct Qdisc *sch;
-       unsigned int size;
+       unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
        int err = -ENOBUFS;
 
-       /* ensure that the Qdisc and the private data are 64-byte aligned */
-       size = QDISC_ALIGN(sizeof(*sch));
-       size += ops->priv_size + (QDISC_ALIGNTO - 1);
-
        p = kzalloc_node(size, GFP_KERNEL,
                         netdev_queue_numa_node_read(dev_queue));
 
        if (!p)
                goto errout;
        sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
-       sch->padded = (char *) sch - (char *) p;
-
+       /* if we got non-aligned memory, ask for more and do the alignment ourselves */
+       if (sch != p) {
+               kfree(p);
+               p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
+                                netdev_queue_numa_node_read(dev_queue));
+               if (!p)
+                       goto errout;
+               sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
+               sch->padded = (char *) sch - (char *) p;
+       }
        INIT_LIST_HEAD(&sch->list);
        skb_queue_head_init(&sch->q);
        spin_lock_init(&sch->busylock);
@@ -630,7 +638,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
 #ifdef CONFIG_NET_SCHED
        qdisc_list_del(qdisc);
 
-       qdisc_put_stab(qdisc->stab);
+       qdisc_put_stab(rtnl_dereference(qdisc->stab));
 #endif
        gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
        if (ops->reset)
@@ -674,25 +682,21 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 
        return oqdisc;
 }
+EXPORT_SYMBOL(dev_graft_qdisc);
 
 static void attach_one_default_qdisc(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_unused)
 {
-       struct Qdisc *qdisc;
+       struct Qdisc *qdisc = &noqueue_qdisc;
 
        if (dev->tx_queue_len) {
                qdisc = qdisc_create_dflt(dev_queue,
                                          &pfifo_fast_ops, TC_H_ROOT);
                if (!qdisc) {
-                       printk(KERN_INFO "%s: activation failed\n", dev->name);
+                       netdev_info(dev, "activation failed\n");
                        return;
                }
-
-               /* Can by-pass the queue discipline for default qdisc */
-               qdisc->flags |= TCQ_F_CAN_BYPASS;
-       } else {
-               qdisc =  &noqueue_qdisc;
        }
        dev_queue->qdisc_sleeping = qdisc;
 }
@@ -761,6 +765,7 @@ void dev_activate(struct net_device *dev)
                dev_watchdog_up(dev);
        }
 }
+EXPORT_SYMBOL(dev_activate);
 
 static void dev_deactivate_queue(struct net_device *dev,
                                 struct netdev_queue *dev_queue,
@@ -841,6 +846,7 @@ void dev_deactivate(struct net_device *dev)
        dev_deactivate_many(&single);
        list_del(&single);
 }
+EXPORT_SYMBOL(dev_deactivate);
 
 static void dev_init_scheduler_queue(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
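
The qdisc_alloc() change above asks for the exact size first and only falls back
to over-allocating by QDISC_ALIGNTO - 1 bytes when the allocator happens to hand
back an unaligned block, recording the slack in sch->padded.  The rounding itself
is ordinary align-up arithmetic on a power-of-two boundary; a small stand-alone
illustration follows, assuming the 64-byte alignment mentioned in the comment
being removed and a made-up pointer value.

#include <stdio.h>
#include <stdint.h>

#define ALIGNTO      64UL               /* assumed alignment for the example */
#define ALIGN_UP(x)  (((x) + ALIGNTO - 1UL) & ~(ALIGNTO - 1UL))

int main(void)
{
        uintptr_t p = 0x1008;           /* pretend allocator result */
        uintptr_t aligned = ALIGN_UP(p);

        printf("raw %#lx -> aligned %#lx, padded by %lu bytes\n",
               (unsigned long)p, (unsigned long)aligned,
               (unsigned long)(aligned - p));
        return 0;
}
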
index 51dcc2aa5c926b735fa683135bddaf3c66114355..b9493a09a870343fe90444bea4b1fac547d42e46 100644 (file)
@@ -32,8 +32,7 @@
 struct gred_sched_data;
 struct gred_sched;
 
-struct gred_sched_data
-{
+struct gred_sched_data {
        u32             limit;          /* HARD maximal queue length    */
        u32             DP;             /* the drop parameters */
        u32             bytesin;        /* bytes seen on virtualQ so far*/
@@ -50,8 +49,7 @@ enum {
        GRED_RIO_MODE,
 };
 
-struct gred_sched
-{
+struct gred_sched {
        struct gred_sched_data *tab[MAX_DPs];
        unsigned long   flags;
        u32             red_flags;
@@ -150,17 +148,18 @@ static inline int gred_use_harddrop(struct gred_sched *t)
        return t->red_flags & TC_RED_HARDDROP;
 }
 
-static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-       struct gred_sched_data *q=NULL;
-       struct gred_sched *t= qdisc_priv(sch);
+       struct gred_sched_data *q = NULL;
+       struct gred_sched *t = qdisc_priv(sch);
        unsigned long qavg = 0;
        u16 dp = tc_index_to_dp(skb);
 
-       if (dp >= t->DPs  || (q = t->tab[dp]) == NULL) {
+       if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
                dp = t->def;
 
-               if ((q = t->tab[dp]) == NULL) {
+               q = t->tab[dp];
+               if (!q) {
                        /* Pass through packets not assigned to a DP
                         * if no default DP has been configured. This
                         * allows for DP flows to be left untouched.
@@ -183,7 +182,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                for (i = 0; i < t->DPs; i++) {
                        if (t->tab[i] && t->tab[i]->prio < q->prio &&
                            !red_is_idling(&t->tab[i]->parms))
-                               qavg +=t->tab[i]->parms.qavg;
+                               qavg += t->tab[i]->parms.qavg;
                }
 
        }
@@ -203,28 +202,28 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                gred_store_wred_set(t, q);
 
        switch (red_action(&q->parms, q->parms.qavg + qavg)) {
-               case RED_DONT_MARK:
-                       break;
-
-               case RED_PROB_MARK:
-                       sch->qstats.overlimits++;
-                       if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
-                               q->stats.prob_drop++;
-                               goto congestion_drop;
-                       }
-
-                       q->stats.prob_mark++;
-                       break;
-
-               case RED_HARD_MARK:
-                       sch->qstats.overlimits++;
-                       if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
-                           !INET_ECN_set_ce(skb)) {
-                               q->stats.forced_drop++;
-                               goto congestion_drop;
-                       }
-                       q->stats.forced_mark++;
-                       break;
+       case RED_DONT_MARK:
+               break;
+
+       case RED_PROB_MARK:
+               sch->qstats.overlimits++;
+               if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
+                       q->stats.prob_drop++;
+                       goto congestion_drop;
+               }
+
+               q->stats.prob_mark++;
+               break;
+
+       case RED_HARD_MARK:
+               sch->qstats.overlimits++;
+               if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
+                   !INET_ECN_set_ce(skb)) {
+                       q->stats.forced_drop++;
+                       goto congestion_drop;
+               }
+               q->stats.forced_mark++;
+               break;
        }
 
        if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
@@ -241,7 +240,7 @@ congestion_drop:
        return NET_XMIT_CN;
 }
 
-static struct sk_buff *gred_dequeue(struct Qdisc* sch)
+static struct sk_buff *gred_dequeue(struct Qdisc *sch)
 {
        struct sk_buff *skb;
        struct gred_sched *t = qdisc_priv(sch);
@@ -254,9 +253,9 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch)
 
                if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
                        if (net_ratelimit())
-                               printk(KERN_WARNING "GRED: Unable to relocate "
-                                      "VQ 0x%x after dequeue, screwing up "
-                                      "backlog.\n", tc_index_to_dp(skb));
+                               pr_warning("GRED: Unable to relocate VQ 0x%x "
+                                          "after dequeue, screwing up "
+                                          "backlog.\n", tc_index_to_dp(skb));
                } else {
                        q->backlog -= qdisc_pkt_len(skb);
 
@@ -273,7 +272,7 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch)
        return NULL;
 }
 
-static unsigned int gred_drop(struct Qdisc* sch)
+static unsigned int gred_drop(struct Qdisc *sch)
 {
        struct sk_buff *skb;
        struct gred_sched *t = qdisc_priv(sch);
@@ -286,9 +285,9 @@ static unsigned int gred_drop(struct Qdisc* sch)
 
                if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
                        if (net_ratelimit())
-                               printk(KERN_WARNING "GRED: Unable to relocate "
-                                      "VQ 0x%x while dropping, screwing up "
-                                      "backlog.\n", tc_index_to_dp(skb));
+                               pr_warning("GRED: Unable to relocate VQ 0x%x "
+                                          "while dropping, screwing up "
+                                          "backlog.\n", tc_index_to_dp(skb));
                } else {
                        q->backlog -= len;
                        q->stats.other++;
@@ -308,7 +307,7 @@ static unsigned int gred_drop(struct Qdisc* sch)
 
 }
 
-static void gred_reset(struct Qdisc* sch)
+static void gred_reset(struct Qdisc *sch)
 {
        int i;
        struct gred_sched *t = qdisc_priv(sch);
@@ -369,8 +368,8 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
 
        for (i = table->DPs; i < MAX_DPs; i++) {
                if (table->tab[i]) {
-                       printk(KERN_WARNING "GRED: Warning: Destroying "
-                              "shadowed VQ 0x%x\n", i);
+                       pr_warning("GRED: Warning: Destroying "
+                                  "shadowed VQ 0x%x\n", i);
                        gred_destroy_vq(table->tab[i]);
                        table->tab[i] = NULL;
                }
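
The re-indented switch above is GRED's standard RED response: on a probabilistic
or hard hit, either ECN-mark the packet or drop it, and count which of the two
happened.  A compressed user-space sketch of that decision table follows; the
counters struct and the try_ecn_mark() helper are illustrative stand-ins for the
qdisc statistics and INET_ECN_set_ce(), not kernel symbols.

#include <stdbool.h>
#include <stdio.h>

enum red_action { RED_DONT_MARK, RED_PROB_MARK, RED_HARD_MARK };

struct counters { unsigned int prob_drop, prob_mark, forced_drop, forced_mark; };

/* Illustrative stand-in for INET_ECN_set_ce(): pretend marking succeeds
 * only when the flow negotiated ECN. */
static bool try_ecn_mark(bool ecn_capable)
{
        return ecn_capable;
}

/* Returns true when the packet should be congestion-dropped. */
static bool handle_red_action(enum red_action act, bool use_ecn,
                              bool use_harddrop, bool ecn_capable,
                              struct counters *st)
{
        switch (act) {
        case RED_DONT_MARK:
                return false;
        case RED_PROB_MARK:
                if (!use_ecn || !try_ecn_mark(ecn_capable)) {
                        st->prob_drop++;
                        return true;
                }
                st->prob_mark++;
                return false;
        case RED_HARD_MARK:
                if (use_harddrop || !use_ecn || !try_ecn_mark(ecn_capable)) {
                        st->forced_drop++;
                        return true;
                }
                st->forced_mark++;
                return false;
        }
        return false;
}

int main(void)
{
        struct counters st = { 0 };
        bool drop = handle_red_action(RED_PROB_MARK, true, false, true, &st);

        printf("drop=%d prob_mark=%u\n", drop, st.prob_mark);
        return 0;
}
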
index 14a799de1c3535ed99c9af448080265fa406fe88..6488e64256521f69512ee3be26589dd265e3f9bb 100644 (file)
@@ -81,8 +81,7 @@
  *   that are expensive on 32-bit architectures.
  */
 
-struct internal_sc
-{
+struct internal_sc {
        u64     sm1;    /* scaled slope of the 1st segment */
        u64     ism1;   /* scaled inverse-slope of the 1st segment */
        u64     dx;     /* the x-projection of the 1st segment */
@@ -92,8 +91,7 @@ struct internal_sc
 };
 
 /* runtime service curve */
-struct runtime_sc
-{
+struct runtime_sc {
        u64     x;      /* current starting position on x-axis */
        u64     y;      /* current starting position on y-axis */
        u64     sm1;    /* scaled slope of the 1st segment */
@@ -104,15 +102,13 @@ struct runtime_sc
        u64     ism2;   /* scaled inverse-slope of the 2nd segment */
 };
 
-enum hfsc_class_flags
-{
+enum hfsc_class_flags {
        HFSC_RSC = 0x1,
        HFSC_FSC = 0x2,
        HFSC_USC = 0x4
 };
 
-struct hfsc_class
-{
+struct hfsc_class {
        struct Qdisc_class_common cl_common;
        unsigned int    refcnt;         /* usage count */
 
@@ -140,8 +136,8 @@ struct hfsc_class
        u64     cl_cumul;               /* cumulative work in bytes done by
                                           real-time criteria */
 
-       u64     cl_d;                   /* deadline*/
-       u64     cl_e;                   /* eligible time */
+       u64     cl_d;                   /* deadline*/
+       u64     cl_e;                   /* eligible time */
        u64     cl_vt;                  /* virtual time */
        u64     cl_f;                   /* time when this class will fit for
                                           link-sharing, max(myf, cfmin) */
@@ -176,8 +172,7 @@ struct hfsc_class
        unsigned long   cl_nactive;     /* number of active children */
 };
 
-struct hfsc_sched
-{
+struct hfsc_sched {
        u16     defcls;                         /* default class id */
        struct hfsc_class root;                 /* root class */
        struct Qdisc_class_hash clhash;         /* class hash */
@@ -693,7 +688,7 @@ init_vf(struct hfsc_class *cl, unsigned int len)
                if (go_active) {
                        n = rb_last(&cl->cl_parent->vt_tree);
                        if (n != NULL) {
-                               max_cl = rb_entry(n, struct hfsc_class,vt_node);
+                               max_cl = rb_entry(n, struct hfsc_class, vt_node);
                                /*
                                 * set vt to the average of the min and max
                                 * classes.  if the parent's period didn't
@@ -1177,8 +1172,10 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
                        return NULL;
                }
 #endif
-               if ((cl = (struct hfsc_class *)res.class) == NULL) {
-                       if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
+               cl = (struct hfsc_class *)res.class;
+               if (!cl) {
+                       cl = hfsc_find_class(res.classid, sch);
+                       if (!cl)
                                break; /* filter selected invalid classid */
                        if (cl->level >= head->level)
                                break; /* filter may only point downwards */
@@ -1316,7 +1313,7 @@ hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
        return -1;
 }
 
-static inline int
+static int
 hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
 {
        if ((cl->cl_flags & HFSC_RSC) &&
@@ -1420,7 +1417,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
        struct hfsc_class *cl;
        u64 next_time = 0;
 
-       if ((cl = eltree_get_minel(q)) != NULL)
+       cl = eltree_get_minel(q);
+       if (cl)
                next_time = cl->cl_e;
        if (q->root.cl_cfmin != 0) {
                if (next_time == 0 || next_time > q->root.cl_cfmin)
@@ -1625,7 +1623,8 @@ hfsc_dequeue(struct Qdisc *sch)
         * find the class with the minimum deadline among
         * the eligible classes.
         */
-       if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
+       cl = eltree_get_mindl(q, cur_time);
+       if (cl) {
                realtime = 1;
        } else {
                /*
@@ -1664,7 +1663,7 @@ hfsc_dequeue(struct Qdisc *sch)
                set_passive(cl);
        }
 
-       sch->flags &= ~TCQ_F_THROTTLED;
+       qdisc_unthrottled(sch);
        qdisc_bstats_update(sch, skb);
        sch->q.qlen--;
 
index fc12fe6f559796d11aaa8b4e769b26a6a28643e9..e1429a85091fc271c85948fff6e0914739ef93ee 100644 (file)
@@ -99,9 +99,10 @@ struct htb_class {
                        struct rb_root feed[TC_HTB_NUMPRIO];    /* feed trees */
                        struct rb_node *ptr[TC_HTB_NUMPRIO];    /* current class ptr */
                        /* When class changes from state 1->2 and disconnects from
-                          parent's feed then we lost ptr value and start from the
-                          first child again. Here we store classid of the
-                          last valid ptr (used when ptr is NULL). */
+                        * parent's feed then we lost ptr value and start from the
+                        * first child again. Here we store classid of the
+                        * last valid ptr (used when ptr is NULL).
+                        */
                        u32 last_ptr_id[TC_HTB_NUMPRIO];
                } inner;
        } un;
@@ -185,7 +186,7 @@ static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
  * have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful,
  * we finish and return the direct queue.
  */
-#define HTB_DIRECT (struct htb_class*)-1
+#define HTB_DIRECT ((struct htb_class *)-1L)
 
 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
@@ -197,11 +198,13 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
        int result;
 
        /* allow to select class by setting skb->priority to valid classid;
-          note that nfmark can be used too by attaching filter fw with no
-          rules in it */
+        * note that nfmark can be used too by attaching filter fw with no
+        * rules in it
+        */
        if (skb->priority == sch->handle)
                return HTB_DIRECT;      /* X:0 (direct flow) selected */
-       if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
+       cl = htb_find(skb->priority, sch);
+       if (cl && cl->level == 0)
                return cl;
 
        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
@@ -216,10 +219,12 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
                        return NULL;
                }
 #endif
-               if ((cl = (void *)res.class) == NULL) {
+               cl = (void *)res.class;
+               if (!cl) {
                        if (res.classid == sch->handle)
                                return HTB_DIRECT;      /* X:0 (direct flow) */
-                       if ((cl = htb_find(res.classid, sch)) == NULL)
+                       cl = htb_find(res.classid, sch);
+                       if (!cl)
                                break;  /* filter selected invalid classid */
                }
                if (!cl->level)
@@ -378,7 +383,8 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
 
                        if (p->un.inner.feed[prio].rb_node)
                                /* parent already has its feed in use so that
-                                  reset bit in mask as parent is already ok */
+                                * reset bit in mask as parent is already ok
+                                */
                                mask &= ~(1 << prio);
 
                        htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
@@ -413,8 +419,9 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
 
                        if (p->un.inner.ptr[prio] == cl->node + prio) {
                                /* we are removing child which is pointed to from
-                                  parent feed - forget the pointer but remember
-                                  classid */
+                                * parent feed - forget the pointer but remember
+                                * classid
+                                */
                                p->un.inner.last_ptr_id[prio] = cl->common.classid;
                                p->un.inner.ptr[prio] = NULL;
                        }
@@ -663,8 +670,9 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
                                   unsigned long start)
 {
        /* don't run for longer than 2 jiffies; 2 is used instead of
-          1 to simplify things when jiffy is going to be incremented
-          too soon */
+        * 1 to simplify things when jiffy is going to be incremented
+        * too soon
+        */
        unsigned long stop_at = start + 2;
        while (time_before(jiffies, stop_at)) {
                struct htb_class *cl;
@@ -687,7 +695,7 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
 
        /* too much load - let's continue after a break for scheduling */
        if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
-               printk(KERN_WARNING "htb: too many events!\n");
+               pr_warning("htb: too many events!\n");
                q->warned |= HTB_WARN_TOOMANYEVENTS;
        }
 
@@ -695,7 +703,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
 }
 
 /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
-   is no such one exists. */
+ * is no such one exists.
+ */
 static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
                                              u32 id)
 {
@@ -739,12 +748,14 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
        for (i = 0; i < 65535; i++) {
                if (!*sp->pptr && *sp->pid) {
                        /* ptr was invalidated but id is valid - try to recover
-                          the original or next ptr */
+                        * the original or next ptr
+                        */
                        *sp->pptr =
                            htb_id_find_next_upper(prio, sp->root, *sp->pid);
                }
                *sp->pid = 0;   /* ptr is valid now so that remove this hint as it
-                                  can become out of date quickly */
+                                * can become out of date quickly
+                                */
                if (!*sp->pptr) {       /* we are at right end; rewind & go up */
                        *sp->pptr = sp->root;
                        while ((*sp->pptr)->rb_left)
@@ -772,7 +783,8 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
 }
 
 /* dequeues packet at given priority and level; call only if
-   you are sure that there is active class at prio/level */
+ * you are sure that there is active class at prio/level
+ */
 static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
                                        int level)
 {
@@ -789,9 +801,10 @@ next:
                        return NULL;
 
                /* class can be empty - it is unlikely but can be true if leaf
-                  qdisc drops packets in enqueue routine or if someone used
-                  graft operation on the leaf since last dequeue;
-                  simply deactivate and skip such class */
+                * qdisc drops packets in enqueue routine or if someone used
+                * graft operation on the leaf since last dequeue;
+                * simply deactivate and skip such class
+                */
                if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
                        struct htb_class *next;
                        htb_deactivate(q, cl);
@@ -831,7 +844,8 @@ next:
                                          ptr[0]) + prio);
                }
                /* this used to be after charge_class but this constellation
-                  gives us slightly better performance */
+                * gives us slightly better performance
+                */
                if (!cl->un.leaf.q->q.qlen)
                        htb_deactivate(q, cl);
                htb_charge_class(q, cl, level, skb);
@@ -852,7 +866,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
        if (skb != NULL) {
 ok:
                qdisc_bstats_update(sch, skb);
-               sch->flags &= ~TCQ_F_THROTTLED;
+               qdisc_unthrottled(sch);
                sch->q.qlen--;
                return skb;
        }
@@ -883,6 +897,7 @@ ok:
                m = ~q->row_mask[level];
                while (m != (int)(-1)) {
                        int prio = ffz(m);
+
                        m |= 1 << prio;
                        skb = htb_dequeue_tree(q, prio, level);
                        if (likely(skb != NULL))
@@ -987,13 +1002,12 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
                return err;
 
        if (tb[TCA_HTB_INIT] == NULL) {
-               printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
+               pr_err("HTB: hey probably you have bad tc tool ?\n");
                return -EINVAL;
        }
        gopt = nla_data(tb[TCA_HTB_INIT]);
        if (gopt->version != HTB_VER >> 16) {
-               printk(KERN_ERR
-                      "HTB: need tc/htb version %d (minor is %d), you have %d\n",
+               pr_err("HTB: need tc/htb version %d (minor is %d), you have %d\n",
                       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
                return -EINVAL;
        }
@@ -1206,9 +1220,10 @@ static void htb_destroy(struct Qdisc *sch)
        cancel_work_sync(&q->work);
        qdisc_watchdog_cancel(&q->watchdog);
        /* This line used to be after htb_destroy_class call below
-          and surprisingly it worked in 2.4. But it must precede it
-          because filter need its target class alive to be able to call
-          unbind_filter on it (without Oops). */
+        * and surprisingly it worked in 2.4. But it must precede it
+        * because filter need its target class alive to be able to call
+        * unbind_filter on it (without Oops).
+        */
        tcf_destroy_chain(&q->filter_list);
 
        for (i = 0; i < q->clhash.hashsize; i++) {
@@ -1342,11 +1357,12 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 
                /* check maximal depth */
                if (parent && parent->parent && parent->parent->level < 2) {
-                       printk(KERN_ERR "htb: tree is too deep\n");
+                       pr_err("htb: tree is too deep\n");
                        goto failure;
                }
                err = -ENOBUFS;
-               if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
+               cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+               if (!cl)
                        goto failure;
 
                err = gen_new_estimator(&cl->bstats, &cl->rate_est,
@@ -1366,8 +1382,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                        RB_CLEAR_NODE(&cl->node[prio]);
 
                /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
-                  so that can't be used inside of sch_tree_lock
-                  -- thanks to Karlis Peisenieks */
+                * so that can't be used inside of sch_tree_lock
+                * -- thanks to Karlis Peisenieks
+                */
                new_q = qdisc_create_dflt(sch->dev_queue,
                                          &pfifo_qdisc_ops, classid);
                sch_tree_lock(sch);
@@ -1419,17 +1436,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
        }
 
        /* it used to be a nasty bug here, we have to check that node
-          is really leaf before changing cl->un.leaf ! */
+        * is really leaf before changing cl->un.leaf !
+        */
        if (!cl->level) {
                cl->quantum = rtab->rate.rate / q->rate2quantum;
                if (!hopt->quantum && cl->quantum < 1000) {
-                       printk(KERN_WARNING
+                       pr_warning(
                               "HTB: quantum of class %X is small. Consider r2q change.\n",
                               cl->common.classid);
                        cl->quantum = 1000;
                }
                if (!hopt->quantum && cl->quantum > 200000) {
-                       printk(KERN_WARNING
+                       pr_warning(
                               "HTB: quantum of class %X is big. Consider r2q change.\n",
                               cl->common.classid);
                        cl->quantum = 200000;
@@ -1478,13 +1496,13 @@ static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
        struct htb_class *cl = htb_find(classid, sch);
 
        /*if (cl && !cl->level) return 0;
-          The line above used to be there to prevent attaching filters to
-          leaves. But at least tc_index filter uses this just to get class
-          for other reasons so that we have to allow for it.
-          ----
-          19.6.2002 As Werner explained it is ok - bind filter is just
-          another way to "lock" the class - unlike "get" this lock can
-          be broken by class during destroy IIUC.
+        * The line above used to be there to prevent attaching filters to
+        * leaves. But at least tc_index filter uses this just to get class
+        * for other reasons so that we have to allow for it.
+        * ----
+        * 19.6.2002 As Werner explained it is ok - bind filter is just
+        * another way to "lock" the class - unlike "get" this lock can
+        * be broken by class during destroy IIUC.
         */
        if (cl)
                cl->filter_cnt++;
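
The pr_warning() conversions just above sit inside HTB's quantum sanity check:
when the user supplies no quantum, it is derived as rate / r2q (rate in bytes per
second) and then clamped to the [1000, 200000] byte window, warning in either
direction.  A stand-alone sketch of that clamp follows; derive_quantum() is an
illustrative helper name, not a kernel function.

#include <stdio.h>

static unsigned int derive_quantum(unsigned int rate, unsigned int r2q,
                                   unsigned int classid)
{
        unsigned int quantum = rate / r2q;      /* rate in bytes per second */

        if (quantum < 1000) {
                printf("quantum of class %X is small, consider r2q change\n",
                       classid);
                quantum = 1000;
        } else if (quantum > 200000) {
                printf("quantum of class %X is big, consider r2q change\n",
                       classid);
                quantum = 200000;
        }
        return quantum;
}

int main(void)
{
        /* 100 Mbit/s is about 12.5 MB/s; r2q of 10 is the usual default */
        printf("quantum = %u\n", derive_quantum(12500000, 10, 0x10001));
        return 0;
}
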
index ecc302f4d2a1ce35dde04a15fd810617a76807ee..ec5cbc8489636738b7c4bc02c9f9e0096d0d7e92 100644 (file)
@@ -61,7 +61,6 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
                                                    TC_H_MIN(ntx + 1)));
                if (qdisc == NULL)
                        goto err;
-               qdisc->flags |= TCQ_F_CAN_BYPASS;
                priv->qdiscs[ntx] = qdisc;
        }
 
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
new file mode 100644 (file)
index 0000000..ea17cbe
--- /dev/null
@@ -0,0 +1,418 @@
+/*
+ * net/sched/sch_mqprio.c
+ *
+ * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <net/sch_generic.h>
+
+struct mqprio_sched {
+       struct Qdisc            **qdiscs;
+       int hw_owned;
+};
+
+static void mqprio_destroy(struct Qdisc *sch)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       struct mqprio_sched *priv = qdisc_priv(sch);
+       unsigned int ntx;
+
+       if (priv->qdiscs) {
+               for (ntx = 0;
+                    ntx < dev->num_tx_queues && priv->qdiscs[ntx];
+                    ntx++)
+                       qdisc_destroy(priv->qdiscs[ntx]);
+               kfree(priv->qdiscs);
+       }
+
+       if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc)
+               dev->netdev_ops->ndo_setup_tc(dev, 0);
+       else
+               netdev_set_num_tc(dev, 0);
+}
+
+static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
+{
+       int i, j;
+
+       /* Verify num_tc is not out of max range */
+       if (qopt->num_tc > TC_MAX_QUEUE)
+               return -EINVAL;
+
+       /* Verify priority mapping uses valid tcs */
+       for (i = 0; i < TC_BITMASK + 1; i++) {
+               if (qopt->prio_tc_map[i] >= qopt->num_tc)
+                       return -EINVAL;
+       }
+
+       /* net_device does not support requested operation */
+       if (qopt->hw && !dev->netdev_ops->ndo_setup_tc)
+               return -EINVAL;
+
+       /* if hw owned, qcount and qoffset are taken from the LLD so
+        * there is no reason to verify them here
+        */
+       if (qopt->hw)
+               return 0;
+
+       for (i = 0; i < qopt->num_tc; i++) {
+               unsigned int last = qopt->offset[i] + qopt->count[i];
+
+               /* Verify the queue count is in the tx range; a count ending
+                * exactly at real_num_tx_queues means the last queue is in
+                * use, which is allowed.
+                */
+               if (qopt->offset[i] >= dev->real_num_tx_queues ||
+                   !qopt->count[i] ||
+                   last > dev->real_num_tx_queues)
+                       return -EINVAL;
+
+               /* Verify that the offset and counts do not overlap */
+               for (j = i + 1; j < qopt->num_tc; j++) {
+                       if (last > qopt->offset[j])
+                               return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       struct mqprio_sched *priv = qdisc_priv(sch);
+       struct netdev_queue *dev_queue;
+       struct Qdisc *qdisc;
+       int i, err = -EOPNOTSUPP;
+       struct tc_mqprio_qopt *qopt = NULL;
+
+       BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
+       BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);
+
+       if (sch->parent != TC_H_ROOT)
+               return -EOPNOTSUPP;
+
+       if (!netif_is_multiqueue(dev))
+               return -EOPNOTSUPP;
+
+       if (nla_len(opt) < sizeof(*qopt))
+               return -EINVAL;
+
+       qopt = nla_data(opt);
+       if (mqprio_parse_opt(dev, qopt))
+               return -EINVAL;
+
+       /* pre-allocate qdisc, attachment can't fail */
+       priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
+                              GFP_KERNEL);
+       if (priv->qdiscs == NULL) {
+               err = -ENOMEM;
+               goto err;
+       }
+
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               dev_queue = netdev_get_tx_queue(dev, i);
+               qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops,
+                                         TC_H_MAKE(TC_H_MAJ(sch->handle),
+                                                   TC_H_MIN(i + 1)));
+               if (qdisc == NULL) {
+                       err = -ENOMEM;
+                       goto err;
+               }
+               priv->qdiscs[i] = qdisc;
+       }
+
+       /* If the mqprio options indicate that hardware should own
+        * the queue mapping then run ndo_setup_tc otherwise use the
+        * supplied and verified mapping
+        */
+       if (qopt->hw) {
+               priv->hw_owned = 1;
+               err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc);
+               if (err)
+                       goto err;
+       } else {
+               netdev_set_num_tc(dev, qopt->num_tc);
+               for (i = 0; i < qopt->num_tc; i++)
+                       netdev_set_tc_queue(dev, i,
+                                           qopt->count[i], qopt->offset[i]);
+       }
+
+       /* Always use supplied priority mappings */
+       for (i = 0; i < TC_BITMASK + 1; i++)
+               netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);
+
+       sch->flags |= TCQ_F_MQROOT;
+       return 0;
+
+err:
+       mqprio_destroy(sch);
+       return err;
+}
+
+static void mqprio_attach(struct Qdisc *sch)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       struct mqprio_sched *priv = qdisc_priv(sch);
+       struct Qdisc *qdisc;
+       unsigned int ntx;
+
+       /* Attach underlying qdisc */
+       for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+               qdisc = priv->qdiscs[ntx];
+               qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
+               if (qdisc)
+                       qdisc_destroy(qdisc);
+       }
+       kfree(priv->qdiscs);
+       priv->qdiscs = NULL;
+}
+
+static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
+                                            unsigned long cl)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       unsigned long ntx = cl - 1 - netdev_get_num_tc(dev);
+
+       if (ntx >= dev->num_tx_queues)
+               return NULL;
+       return netdev_get_tx_queue(dev, ntx);
+}
+
+static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
+                   struct Qdisc **old)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
+
+       if (!dev_queue)
+               return -EINVAL;
+
+       if (dev->flags & IFF_UP)
+               dev_deactivate(dev);
+
+       *old = dev_graft_qdisc(dev_queue, new);
+
+       if (dev->flags & IFF_UP)
+               dev_activate(dev);
+
+       return 0;
+}
+
+static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       struct mqprio_sched *priv = qdisc_priv(sch);
+       unsigned char *b = skb_tail_pointer(skb);
+       struct tc_mqprio_qopt opt = { 0 };
+       struct Qdisc *qdisc;
+       unsigned int i;
+
+       sch->q.qlen = 0;
+       memset(&sch->bstats, 0, sizeof(sch->bstats));
+       memset(&sch->qstats, 0, sizeof(sch->qstats));
+
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+               spin_lock_bh(qdisc_lock(qdisc));
+               sch->q.qlen             += qdisc->q.qlen;
+               sch->bstats.bytes       += qdisc->bstats.bytes;
+               sch->bstats.packets     += qdisc->bstats.packets;
+               sch->qstats.qlen        += qdisc->qstats.qlen;
+               sch->qstats.backlog     += qdisc->qstats.backlog;
+               sch->qstats.drops       += qdisc->qstats.drops;
+               sch->qstats.requeues    += qdisc->qstats.requeues;
+               sch->qstats.overlimits  += qdisc->qstats.overlimits;
+               spin_unlock_bh(qdisc_lock(qdisc));
+       }
+
+       opt.num_tc = netdev_get_num_tc(dev);
+       memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
+       opt.hw = priv->hw_owned;
+
+       for (i = 0; i < netdev_get_num_tc(dev); i++) {
+               opt.count[i] = dev->tc_to_txq[i].count;
+               opt.offset[i] = dev->tc_to_txq[i].offset;
+       }
+
+       NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+
+       return skb->len;
+nla_put_failure:
+       nlmsg_trim(skb, b);
+       return -1;
+}
+
+static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
+{
+       struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
+
+       if (!dev_queue)
+               return NULL;
+
+       return dev_queue->qdisc_sleeping;
+}
+
+static unsigned long mqprio_get(struct Qdisc *sch, u32 classid)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       unsigned int ntx = TC_H_MIN(classid);
+
+       if (ntx > dev->num_tx_queues + netdev_get_num_tc(dev))
+               return 0;
+       return ntx;
+}
+
+static void mqprio_put(struct Qdisc *sch, unsigned long cl)
+{
+}
+
+static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
+                        struct sk_buff *skb, struct tcmsg *tcm)
+{
+       struct net_device *dev = qdisc_dev(sch);
+
+       if (cl <= netdev_get_num_tc(dev)) {
+               tcm->tcm_parent = TC_H_ROOT;
+               tcm->tcm_info = 0;
+       } else {
+               int i;
+               struct netdev_queue *dev_queue;
+
+               dev_queue = mqprio_queue_get(sch, cl);
+               tcm->tcm_parent = 0;
+               for (i = 0; i < netdev_get_num_tc(dev); i++) {
+                       struct netdev_tc_txq tc = dev->tc_to_txq[i];
+                       int q_idx = cl - netdev_get_num_tc(dev);
+
+                       if (q_idx > tc.offset &&
+                           q_idx <= tc.offset + tc.count) {
+                               tcm->tcm_parent =
+                                       TC_H_MAKE(TC_H_MAJ(sch->handle),
+                                                 TC_H_MIN(i + 1));
+                               break;
+                       }
+               }
+               tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+       }
+       tcm->tcm_handle |= TC_H_MIN(cl);
+       return 0;
+}
+
+static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+                                  struct gnet_dump *d)
+       __releases(d->lock)
+       __acquires(d->lock)
+{
+       struct net_device *dev = qdisc_dev(sch);
+
+       if (cl <= netdev_get_num_tc(dev)) {
+               int i;
+               struct Qdisc *qdisc;
+               struct gnet_stats_queue qstats = {0};
+               struct gnet_stats_basic_packed bstats = {0};
+               struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1];
+
+               /* Drop the lock here; it will be reclaimed before touching
+                * statistics. This is required because the d->lock we
+                * hold here is the lock on dev_queue->qdisc_sleeping,
+                * which is also acquired below.
+                */
+               spin_unlock_bh(d->lock);
+
+               for (i = tc.offset; i < tc.offset + tc.count; i++) {
+                       qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+                       spin_lock_bh(qdisc_lock(qdisc));
+                       bstats.bytes      += qdisc->bstats.bytes;
+                       bstats.packets    += qdisc->bstats.packets;
+                       qstats.qlen       += qdisc->qstats.qlen;
+                       qstats.backlog    += qdisc->qstats.backlog;
+                       qstats.drops      += qdisc->qstats.drops;
+                       qstats.requeues   += qdisc->qstats.requeues;
+                       qstats.overlimits += qdisc->qstats.overlimits;
+                       spin_unlock_bh(qdisc_lock(qdisc));
+               }
+               /* Reclaim root sleeping lock before completing stats */
+               spin_lock_bh(d->lock);
+               if (gnet_stats_copy_basic(d, &bstats) < 0 ||
+                   gnet_stats_copy_queue(d, &qstats) < 0)
+                       return -1;
+       } else {
+               struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
+
+               sch = dev_queue->qdisc_sleeping;
+               sch->qstats.qlen = sch->q.qlen;
+               if (gnet_stats_copy_basic(d, &sch->bstats) < 0 ||
+                   gnet_stats_copy_queue(d, &sch->qstats) < 0)
+                       return -1;
+       }
+       return 0;
+}
+
+static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+       struct net_device *dev = qdisc_dev(sch);
+       unsigned long ntx;
+
+       if (arg->stop)
+               return;
+
+       /* Walk hierarchy with a virtual class per tc */
+       arg->count = arg->skip;
+       for (ntx = arg->skip;
+            ntx < dev->num_tx_queues + netdev_get_num_tc(dev);
+            ntx++) {
+               if (arg->fn(sch, ntx + 1, arg) < 0) {
+                       arg->stop = 1;
+                       break;
+               }
+               arg->count++;
+       }
+}
+
+static const struct Qdisc_class_ops mqprio_class_ops = {
+       .graft          = mqprio_graft,
+       .leaf           = mqprio_leaf,
+       .get            = mqprio_get,
+       .put            = mqprio_put,
+       .walk           = mqprio_walk,
+       .dump           = mqprio_dump_class,
+       .dump_stats     = mqprio_dump_class_stats,
+};
+
+static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
+       .cl_ops         = &mqprio_class_ops,
+       .id             = "mqprio",
+       .priv_size      = sizeof(struct mqprio_sched),
+       .init           = mqprio_init,
+       .destroy        = mqprio_destroy,
+       .attach         = mqprio_attach,
+       .dump           = mqprio_dump,
+       .owner          = THIS_MODULE,
+};
+
+static int __init mqprio_module_init(void)
+{
+       return register_qdisc(&mqprio_qdisc_ops);
+}
+
+static void __exit mqprio_module_exit(void)
+{
+       unregister_qdisc(&mqprio_qdisc_ops);
+}
+
+module_init(mqprio_module_init);
+module_exit(mqprio_module_exit);
+
+MODULE_LICENSE("GPL");
index 436a2e75b322db524764a2483cd385e905aa4534..edc1950e0e7722d77e8b67a2e298bb4d1038e479 100644 (file)
@@ -156,7 +156,7 @@ static unsigned int multiq_drop(struct Qdisc *sch)
        unsigned int len;
        struct Qdisc *qdisc;
 
-       for (band = q->bands-1; band >= 0; band--) {
+       for (band = q->bands - 1; band >= 0; band--) {
                qdisc = q->queues[band];
                if (qdisc->ops->drop) {
                        len = qdisc->ops->drop(qdisc);
@@ -265,7 +265,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
        for (i = 0; i < q->max_bands; i++)
                q->queues[i] = &noop_qdisc;
 
-       err = multiq_tune(sch,opt);
+       err = multiq_tune(sch, opt);
 
        if (err)
                kfree(q->queues);
@@ -346,7 +346,7 @@ static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
        struct multiq_sched_data *q = qdisc_priv(sch);
 
        tcm->tcm_handle |= TC_H_MIN(cl);
-       tcm->tcm_info = q->queues[cl-1]->handle;
+       tcm->tcm_info = q->queues[cl - 1]->handle;
        return 0;
 }
 
@@ -378,7 +378,7 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
                        arg->count++;
                        continue;
                }
-               if (arg->fn(sch, band+1, arg) < 0) {
+               if (arg->fn(sch, band + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
index 6a3006b38dc55e65c4629a967a326eed684d7fd1..edbbf7ad662387506fd3ba070dd628705566e804 100644 (file)
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/skbuff.h>
+#include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
 
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 
-#define VERSION "1.2"
+#define VERSION "1.3"
 
 /*     Network Emulation Queuing algorithm.
        ====================================
         layering other disciplines.  It does not need to do bandwidth
         control either since that can be handled by using token
         bucket or other rate control.
+
+     Correlated Loss Generator models
+
+       Added generation of correlated loss according to the
+       "Gilbert-Elliot" model, a 4-state markov model.
+
+       References:
+       [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
+       [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
+       and intuitive loss model for packet networks and its implementation
+       in the Netem module in the Linux kernel", available in [1]
+
+       Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
+                Fabio Ludovici <fabio.ludovici at yahoo.it>
 */
 
 struct netem_sched_data {
@@ -73,6 +88,26 @@ struct netem_sched_data {
                u32  size;
                s16 table[0];
        } *delay_dist;
+
+       enum  {
+               CLG_RANDOM,
+               CLG_4_STATES,
+               CLG_GILB_ELL,
+       } loss_model;
+
+       /* Correlated Loss Generation models */
+       struct clgstate {
+               /* state of the Markov chain */
+               u8 state;
+
+               /* 4-states and Gilbert-Elliot models */
+               u32 a1; /* p13 for 4-states or p for GE */
+               u32 a2; /* p31 for 4-states or r for GE */
+               u32 a3; /* p32 for 4-states or h for GE */
+               u32 a4; /* p14 for 4-states or 1-k for GE */
+               u32 a5; /* p23 used only in 4-states */
+       } clg;
+
 };
 
 /* Time stamp put into socket buffer control block */
@@ -115,6 +150,122 @@ static u32 get_crandom(struct crndstate *state)
        return answer;
 }
 
+/* loss_4state - 4-state model loss generator
+ * Generates losses according to the 4-state Markov chain adopted in
+ * the GI (General and Intuitive) loss model.
+ */
+static bool loss_4state(struct netem_sched_data *q)
+{
+       struct clgstate *clg = &q->clg;
+       u32 rnd = net_random();
+
+       /*
+        * Makes a comparison between rnd and the transition
+        * probabilities outgoing from the current state, then decides the
+        * next state and whether the next packet has to be transmitted or lost.
+        * The four states correspond to:
+        *   1 => successfully transmitted packets within a gap period
+        *   4 => isolated losses within a gap period
+        *   3 => lost packets within a burst period
+        *   2 => successfully transmitted packets within a burst period
+        */
+       switch (clg->state) {
+       case 1:
+               if (rnd < clg->a4) {
+                       clg->state = 4;
+                       return true;
+               } else if (clg->a4 < rnd && rnd < clg->a1) {
+                       clg->state = 3;
+                       return true;
+               } else if (clg->a1 < rnd)
+                       clg->state = 1;
+
+               break;
+       case 2:
+               if (rnd < clg->a5) {
+                       clg->state = 3;
+                       return true;
+               } else
+                       clg->state = 2;
+
+               break;
+       case 3:
+               if (rnd < clg->a3)
+                       clg->state = 2;
+               else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
+                       clg->state = 1;
+                       return true;
+               } else if (clg->a2 + clg->a3 < rnd) {
+                       clg->state = 3;
+                       return true;
+               }
+               break;
+       case 4:
+               clg->state = 1;
+               break;
+       }
+
+       return false;
+}
+
+/* loss_gilb_ell - Gilbert-Elliot model loss generator
+ * Generates losses according to the Gilbert-Elliot loss model or
+ * its special cases (Gilbert or Simple Gilbert).
+ *
+ * Makes a comparison between a random number and the transition
+ * probabilities outgoing from the current state, then decides the
+ * next state. A second random number is extracted and the comparison
+ * with the loss probability of the current state decides whether the
+ * next packet will be transmitted or lost.
+ */
+static bool loss_gilb_ell(struct netem_sched_data *q)
+{
+       struct clgstate *clg = &q->clg;
+
+       switch (clg->state) {
+       case 1:
+               if (net_random() < clg->a1)
+                       clg->state = 2;
+               if (net_random() < clg->a4)
+                       return true;
+       case 2:
+               if (net_random() < clg->a2)
+                       clg->state = 1;
+               if (clg->a3 > net_random())
+                       return true;
+       }
+
+       return false;
+}
+
+static bool loss_event(struct netem_sched_data *q)
+{
+       switch (q->loss_model) {
+       case CLG_RANDOM:
+               /* Random packet drop 0 => none, ~0 => all */
+               return q->loss && q->loss >= get_crandom(&q->loss_cor);
+
+       case CLG_4_STATES:
+               /* 4-state loss model algorithm (also used for the GI model).
+                * Extracts a value from the 4-state Markov loss generator;
+                * if it is 1, the packet is dropped and, if needed, the event
+                * is written to the kernel logs.
+                */
+               return loss_4state(q);
+
+       case CLG_GILB_ELL:
+               /* Gilbert-Elliot loss model algorithm.
+                * Extracts a value from the Gilbert-Elliot loss generator;
+                * if it is 1, the packet is dropped and, if needed, the event
+                * is written to the kernel logs.
+                */
+               return loss_gilb_ell(q);
+       }
+
+       return false;   /* not reached */
+}
+
+
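
To see what loss_event() is modelling, the following standalone user-space simulation (an illustrative sketch, not kernel code; it uses the simplified "Simple Gilbert" special case in which every packet sent in the bad state is lost and none in the good state) checks that the long-run loss rate of a two-state chain with transition probabilities p and r converges to p/(p+r).

#include <stdio.h>
#include <stdlib.h>

enum { GOOD, BAD };

int main(void)
{
	const double p = 0.01;		/* good -> bad transition probability */
	const double r = 0.20;		/* bad  -> good transition probability */
	const long npkts = 1000000;
	int state = GOOD;
	long lost = 0, i;

	srand(1);
	for (i = 0; i < npkts; i++) {
		double u = (double)rand() / RAND_MAX;

		if (state == GOOD) {
			if (u < p)
				state = BAD;
		} else {
			lost++;			/* every packet in BAD is lost */
			if (u < r)
				state = GOOD;
		}
	}
	printf("measured loss %.4f, expected p/(p+r) = %.4f\n",
	       (double)lost / npkts, p / (p + r));
	return 0;
}

The kernel code works with u32 fixed-point probabilities and net_random() rather than doubles, and the full 4-state and Gilbert-Elliot generators add per-state loss probabilities, but the underlying Markov-chain structure is the same.
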
 /* tabledist - return a pseudo-randomly distributed value with mean mu and
  * std deviation sigma.  Uses table lookup to approximate the desired
  * distribution, and a uniformly-distributed pseudo-random source.
@@ -161,14 +312,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        int ret;
        int count = 1;
 
-       pr_debug("netem_enqueue skb=%p\n", skb);
-
        /* Random duplication */
        if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
                ++count;
 
-       /* Random packet drop 0 => none, ~0 => all */
-       if (q->loss && q->loss >= get_crandom(&q->loss_cor))
+       /* Drop packet? */
+       if (loss_event(q))
                --count;
 
        if (count == 0) {
@@ -211,8 +360,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        }
 
        cb = netem_skb_cb(skb);
-       if (q->gap == 0 ||              /* not doing reordering */
-           q->counter < q->gap ||      /* inside last reordering gap */
+       if (q->gap == 0 ||              /* not doing reordering */
+           q->counter < q->gap ||      /* inside last reordering gap */
            q->reorder < get_crandom(&q->reorder_cor)) {
                psched_time_t now;
                psched_tdiff_t delay;
@@ -238,17 +387,18 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                ret = NET_XMIT_SUCCESS;
        }
 
-       if (likely(ret == NET_XMIT_SUCCESS)) {
-               sch->q.qlen++;
-       } else if (net_xmit_drop_count(ret)) {
-               sch->qstats.drops++;
+       if (ret != NET_XMIT_SUCCESS) {
+               if (net_xmit_drop_count(ret)) {
+                       sch->qstats.drops++;
+                       return ret;
+               }
        }
 
-       pr_debug("netem: enqueue ret %d\n", ret);
-       return ret;
+       sch->q.qlen++;
+       return NET_XMIT_SUCCESS;
 }
 
-static unsigned int netem_drop(struct Qdisc* sch)
+static unsigned int netem_drop(struct Qdisc *sch)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
        unsigned int len = 0;
@@ -265,7 +415,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
        struct netem_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
 
-       if (sch->flags & TCQ_F_THROTTLED)
+       if (qdisc_is_throttled(sch))
                return NULL;
 
        skb = q->qdisc->ops->peek(q->qdisc);
@@ -287,9 +437,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
                        if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
                                skb->tstamp.tv64 = 0;
 #endif
-                       pr_debug("netem_dequeue: return skb=%p\n", skb);
-                       qdisc_bstats_update(sch, skb);
+
                        sch->q.qlen--;
+                       qdisc_unthrottled(sch);
+                       qdisc_bstats_update(sch, skb);
                        return skb;
                }
 
@@ -308,6 +459,16 @@ static void netem_reset(struct Qdisc *sch)
        qdisc_watchdog_cancel(&q->watchdog);
 }
 
+static void dist_free(struct disttable *d)
+{
+       if (d) {
+               if (is_vmalloc_addr(d))
+                       vfree(d);
+               else
+                       kfree(d);
+       }
+}
+
 /*
  * Distribution data is a variable size payload containing
  * signed 16 bit values.
@@ -315,16 +476,20 @@ static void netem_reset(struct Qdisc *sch)
 static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
-       unsigned long n = nla_len(attr)/sizeof(__s16);
+       size_t n = nla_len(attr)/sizeof(__s16);
        const __s16 *data = nla_data(attr);
        spinlock_t *root_lock;
        struct disttable *d;
        int i;
+       size_t s;
 
-       if (n > 65536)
+       if (n > NETEM_DIST_MAX)
                return -EINVAL;
 
-       d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
+       s = sizeof(struct disttable) + n * sizeof(s16);
+       d = kmalloc(s, GFP_KERNEL);
+       if (!d)
+               d = vmalloc(s);
        if (!d)
                return -ENOMEM;
 
@@ -335,7 +500,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
        root_lock = qdisc_root_sleeping_lock(sch);
 
        spin_lock_bh(root_lock);
-       kfree(q->delay_dist);
+       dist_free(q->delay_dist);
        q->delay_dist = d;
        spin_unlock_bh(root_lock);
        return 0;
@@ -369,10 +534,66 @@ static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
        init_crandom(&q->corrupt_cor, r->correlation);
 }
 
+static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
+{
+       struct netem_sched_data *q = qdisc_priv(sch);
+       const struct nlattr *la;
+       int rem;
+
+       nla_for_each_nested(la, attr, rem) {
+               u16 type = nla_type(la);
+
+               switch (type) {
+               case NETEM_LOSS_GI: {
+                       const struct tc_netem_gimodel *gi = nla_data(la);
+
+                       if (nla_len(la) != sizeof(struct tc_netem_gimodel)) {
+                               pr_info("netem: incorrect gi model size\n");
+                               return -EINVAL;
+                       }
+
+                       q->loss_model = CLG_4_STATES;
+
+                       q->clg.state = 1;
+                       q->clg.a1 = gi->p13;
+                       q->clg.a2 = gi->p31;
+                       q->clg.a3 = gi->p32;
+                       q->clg.a4 = gi->p14;
+                       q->clg.a5 = gi->p23;
+                       break;
+               }
+
+               case NETEM_LOSS_GE: {
+                       const struct tc_netem_gemodel *ge = nla_data(la);
+
+                       if (nla_len(la) != sizeof(struct tc_netem_gemodel)) {
+                               pr_info("netem: incorrect gi model size\n");
+                               return -EINVAL;
+                       }
+
+                       q->loss_model = CLG_GILB_ELL;
+                       q->clg.state = 1;
+                       q->clg.a1 = ge->p;
+                       q->clg.a2 = ge->r;
+                       q->clg.a3 = ge->h;
+                       q->clg.a4 = ge->k1;
+                       break;
+               }
+
+               default:
+                       pr_info("netem: unknown loss type %u\n", type);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
 static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
        [TCA_NETEM_CORR]        = { .len = sizeof(struct tc_netem_corr) },
        [TCA_NETEM_REORDER]     = { .len = sizeof(struct tc_netem_reorder) },
        [TCA_NETEM_CORRUPT]     = { .len = sizeof(struct tc_netem_corrupt) },
+       [TCA_NETEM_LOSS]        = { .type = NLA_NESTED },
 };
 
 static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
@@ -380,11 +601,15 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
 {
        int nested_len = nla_len(nla) - NLA_ALIGN(len);
 
-       if (nested_len < 0)
+       if (nested_len < 0) {
+               pr_info("netem: invalid attributes len %d\n", nested_len);
                return -EINVAL;
+       }
+
        if (nested_len >= nla_attr_size(0))
                return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
                                 nested_len, policy);
+
        memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
        return 0;
 }
@@ -407,7 +632,7 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
 
        ret = fifo_set_limit(q->qdisc, qopt->limit);
        if (ret) {
-               pr_debug("netem: can't set fifo limit\n");
+               pr_info("netem: can't set fifo limit\n");
                return ret;
        }
 
@@ -440,7 +665,11 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
        if (tb[TCA_NETEM_CORRUPT])
                get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
 
-       return 0;
+       q->loss_model = CLG_RANDOM;
+       if (tb[TCA_NETEM_LOSS])
+               ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);
+
+       return ret;
 }
 
 /*
@@ -535,16 +764,17 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)
 
        qdisc_watchdog_init(&q->watchdog, sch);
 
+       q->loss_model = CLG_RANDOM;
        q->qdisc = qdisc_create_dflt(sch->dev_queue, &tfifo_qdisc_ops,
                                     TC_H_MAKE(sch->handle, 1));
        if (!q->qdisc) {
-               pr_debug("netem: qdisc create failed\n");
+               pr_notice("netem: qdisc create tfifo qdisc failed\n");
                return -ENOMEM;
        }
 
        ret = netem_change(sch, opt);
        if (ret) {
-               pr_debug("netem: change failed\n");
+               pr_info("netem: change failed\n");
                qdisc_destroy(q->qdisc);
        }
        return ret;
@@ -556,14 +786,61 @@ static void netem_destroy(struct Qdisc *sch)
 
        qdisc_watchdog_cancel(&q->watchdog);
        qdisc_destroy(q->qdisc);
-       kfree(q->delay_dist);
+       dist_free(q->delay_dist);
+}
+
+static int dump_loss_model(const struct netem_sched_data *q,
+                          struct sk_buff *skb)
+{
+       struct nlattr *nest;
+
+       nest = nla_nest_start(skb, TCA_NETEM_LOSS);
+       if (nest == NULL)
+               goto nla_put_failure;
+
+       switch (q->loss_model) {
+       case CLG_RANDOM:
+               /* legacy loss model */
+               nla_nest_cancel(skb, nest);
+               return 0;       /* no data */
+
+       case CLG_4_STATES: {
+               struct tc_netem_gimodel gi = {
+                       .p13 = q->clg.a1,
+                       .p31 = q->clg.a2,
+                       .p32 = q->clg.a3,
+                       .p14 = q->clg.a4,
+                       .p23 = q->clg.a5,
+               };
+
+               NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi);
+               break;
+       }
+       case CLG_GILB_ELL: {
+               struct tc_netem_gemodel ge = {
+                       .p = q->clg.a1,
+                       .r = q->clg.a2,
+                       .h = q->clg.a3,
+                       .k1 = q->clg.a4,
+               };
+
+               NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge);
+               break;
+       }
+       }
+
+       nla_nest_end(skb, nest);
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, nest);
+       return -1;
 }
 
 static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
        const struct netem_sched_data *q = qdisc_priv(sch);
-       unsigned char *b = skb_tail_pointer(skb);
-       struct nlattr *nla = (struct nlattr *) b;
+       struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
        struct tc_netem_qopt qopt;
        struct tc_netem_corr cor;
        struct tc_netem_reorder reorder;
@@ -590,17 +867,87 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
        corrupt.correlation = q->corrupt_cor.rho;
        NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
 
-       nla->nla_len = skb_tail_pointer(skb) - b;
+       if (dump_loss_model(q, skb) != 0)
+               goto nla_put_failure;
 
-       return skb->len;
+       return nla_nest_end(skb, nla);
 
 nla_put_failure:
-       nlmsg_trim(skb, b);
+       nlmsg_trim(skb, nla);
        return -1;
 }
 
+static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
+                         struct sk_buff *skb, struct tcmsg *tcm)
+{
+       struct netem_sched_data *q = qdisc_priv(sch);
+
+       if (cl != 1)    /* only one class */
+               return -ENOENT;
+
+       tcm->tcm_handle |= TC_H_MIN(1);
+       tcm->tcm_info = q->qdisc->handle;
+
+       return 0;
+}
+
+static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+                    struct Qdisc **old)
+{
+       struct netem_sched_data *q = qdisc_priv(sch);
+
+       if (new == NULL)
+               new = &noop_qdisc;
+
+       sch_tree_lock(sch);
+       *old = q->qdisc;
+       q->qdisc = new;
+       qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+       qdisc_reset(*old);
+       sch_tree_unlock(sch);
+
+       return 0;
+}
+
+static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
+{
+       struct netem_sched_data *q = qdisc_priv(sch);
+       return q->qdisc;
+}
+
+static unsigned long netem_get(struct Qdisc *sch, u32 classid)
+{
+       return 1;
+}
+
+static void netem_put(struct Qdisc *sch, unsigned long arg)
+{
+}
+
+static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
+{
+       if (!walker->stop) {
+               if (walker->count >= walker->skip)
+                       if (walker->fn(sch, 1, walker) < 0) {
+                               walker->stop = 1;
+                               return;
+                       }
+               walker->count++;
+       }
+}
+
+static const struct Qdisc_class_ops netem_class_ops = {
+       .graft          =       netem_graft,
+       .leaf           =       netem_leaf,
+       .get            =       netem_get,
+       .put            =       netem_put,
+       .walk           =       netem_walk,
+       .dump           =       netem_dump_class,
+};
+
 static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
        .id             =       "netem",
+       .cl_ops         =       &netem_class_ops,
        .priv_size      =       sizeof(struct netem_sched_data),
        .enqueue        =       netem_enqueue,
        .dequeue        =       netem_dequeue,
index fbd710d619bf372535d21ef38c750c7e6e6c9cf0..2a318f2dc3e532a90149a640d60848d10160a52b 100644 (file)
@@ -22,8 +22,7 @@
 #include <net/pkt_sched.h>
 
 
-struct prio_sched_data
-{
+struct prio_sched_data {
        int bands;
        struct tcf_proto *filter_list;
        u8  prio2band[TC_PRIO_MAX+1];
@@ -54,7 +53,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
                if (!q->filter_list || err < 0) {
                        if (TC_H_MAJ(band))
                                band = 0;
-                       return q->queues[q->prio2band[band&TC_PRIO_MAX]];
+                       return q->queues[q->prio2band[band & TC_PRIO_MAX]];
                }
                band = res.classid;
        }
@@ -106,7 +105,7 @@ static struct sk_buff *prio_peek(struct Qdisc *sch)
        return NULL;
 }
 
-static struct sk_buff *prio_dequeue(struct Qdisc* sch)
+static struct sk_buff *prio_dequeue(struct Qdisc *sch)
 {
        struct prio_sched_data *q = qdisc_priv(sch);
        int prio;
@@ -124,7 +123,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc* sch)
 
 }
 
-static unsigned int prio_drop(struct Qdisc* sch)
+static unsigned int prio_drop(struct Qdisc *sch)
 {
        struct prio_sched_data *q = qdisc_priv(sch);
        int prio;
@@ -143,24 +142,24 @@ static unsigned int prio_drop(struct Qdisc* sch)
 
 
 static void
-prio_reset(struct Qdisc* sch)
+prio_reset(struct Qdisc *sch)
 {
        int prio;
        struct prio_sched_data *q = qdisc_priv(sch);
 
-       for (prio=0; prio<q->bands; prio++)
+       for (prio = 0; prio < q->bands; prio++)
                qdisc_reset(q->queues[prio]);
        sch->q.qlen = 0;
 }
 
 static void
-prio_destroy(struct Qdisc* sch)
+prio_destroy(struct Qdisc *sch)
 {
        int prio;
        struct prio_sched_data *q = qdisc_priv(sch);
 
        tcf_destroy_chain(&q->filter_list);
-       for (prio=0; prio<q->bands; prio++)
+       for (prio = 0; prio < q->bands; prio++)
                qdisc_destroy(q->queues[prio]);
 }
 
@@ -177,7 +176,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
        if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
                return -EINVAL;
 
-       for (i=0; i<=TC_PRIO_MAX; i++) {
+       for (i = 0; i <= TC_PRIO_MAX; i++) {
                if (qopt->priomap[i] >= qopt->bands)
                        return -EINVAL;
        }
@@ -186,7 +185,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
        q->bands = qopt->bands;
        memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
 
-       for (i=q->bands; i<TCQ_PRIO_BANDS; i++) {
+       for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
                struct Qdisc *child = q->queues[i];
                q->queues[i] = &noop_qdisc;
                if (child != &noop_qdisc) {
@@ -196,9 +195,10 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
        }
        sch_tree_unlock(sch);
 
-       for (i=0; i<q->bands; i++) {
+       for (i = 0; i < q->bands; i++) {
                if (q->queues[i] == &noop_qdisc) {
                        struct Qdisc *child, *old;
+
                        child = qdisc_create_dflt(sch->dev_queue,
                                                  &pfifo_qdisc_ops,
                                                  TC_H_MAKE(sch->handle, i + 1));
@@ -224,7 +224,7 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt)
        struct prio_sched_data *q = qdisc_priv(sch);
        int i;
 
-       for (i=0; i<TCQ_PRIO_BANDS; i++)
+       for (i = 0; i < TCQ_PRIO_BANDS; i++)
                q->queues[i] = &noop_qdisc;
 
        if (opt == NULL) {
@@ -232,7 +232,7 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt)
        } else {
                int err;
 
-               if ((err= prio_tune(sch, opt)) != 0)
+               if ((err = prio_tune(sch, opt)) != 0)
                        return err;
        }
        return 0;
@@ -245,7 +245,7 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
        struct tc_prio_qopt opt;
 
        opt.bands = q->bands;
-       memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1);
+       memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
 
        NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
 
@@ -342,7 +342,7 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
                        arg->count++;
                        continue;
                }
-               if (arg->fn(sch, prio+1, arg) < 0) {
+               if (arg->fn(sch, prio + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
@@ -350,7 +350,7 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
        }
 }
 
-static struct tcf_proto ** prio_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto **prio_find_tcf(struct Qdisc *sch, unsigned long cl)
 {
        struct prio_sched_data *q = qdisc_priv(sch);
 
index 9f98dbd32d4c8176d6c940dd098f71fa0c00cb4d..6649463da1b68e6e59e6709c7e1d0b527a60ab63 100644 (file)
@@ -36,8 +36,7 @@
        if RED works correctly.
  */
 
-struct red_sched_data
-{
+struct red_sched_data {
        u32                     limit;          /* HARD maximal queue length */
        unsigned char           flags;
        struct red_parms        parms;
@@ -55,7 +54,7 @@ static inline int red_use_harddrop(struct red_sched_data *q)
        return q->flags & TC_RED_HARDDROP;
 }
 
-static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct red_sched_data *q = qdisc_priv(sch);
        struct Qdisc *child = q->qdisc;
@@ -67,29 +66,29 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                red_end_of_idle_period(&q->parms);
 
        switch (red_action(&q->parms, q->parms.qavg)) {
-               case RED_DONT_MARK:
-                       break;
-
-               case RED_PROB_MARK:
-                       sch->qstats.overlimits++;
-                       if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
-                               q->stats.prob_drop++;
-                               goto congestion_drop;
-                       }
-
-                       q->stats.prob_mark++;
-                       break;
-
-               case RED_HARD_MARK:
-                       sch->qstats.overlimits++;
-                       if (red_use_harddrop(q) || !red_use_ecn(q) ||
-                           !INET_ECN_set_ce(skb)) {
-                               q->stats.forced_drop++;
-                               goto congestion_drop;
-                       }
-
-                       q->stats.forced_mark++;
-                       break;
+       case RED_DONT_MARK:
+               break;
+
+       case RED_PROB_MARK:
+               sch->qstats.overlimits++;
+               if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
+                       q->stats.prob_drop++;
+                       goto congestion_drop;
+               }
+
+               q->stats.prob_mark++;
+               break;
+
+       case RED_HARD_MARK:
+               sch->qstats.overlimits++;
+               if (red_use_harddrop(q) || !red_use_ecn(q) ||
+                   !INET_ECN_set_ce(skb)) {
+                       q->stats.forced_drop++;
+                       goto congestion_drop;
+               }
+
+               q->stats.forced_mark++;
+               break;
        }
 
        ret = qdisc_enqueue(skb, child);
@@ -106,7 +105,7 @@ congestion_drop:
        return NET_XMIT_CN;
 }
 
-static struct sk_buff * red_dequeue(struct Qdisc* sch)
+static struct sk_buff *red_dequeue(struct Qdisc *sch)
 {
        struct sk_buff *skb;
        struct red_sched_data *q = qdisc_priv(sch);
@@ -123,7 +122,7 @@ static struct sk_buff * red_dequeue(struct Qdisc* sch)
        return skb;
 }
 
-static struct sk_buff * red_peek(struct Qdisc* sch)
+static struct sk_buff *red_peek(struct Qdisc *sch)
 {
        struct red_sched_data *q = qdisc_priv(sch);
        struct Qdisc *child = q->qdisc;
@@ -131,7 +130,7 @@ static struct sk_buff * red_peek(struct Qdisc* sch)
        return child->ops->peek(child);
 }
 
-static unsigned int red_drop(struct Qdisc* sch)
+static unsigned int red_drop(struct Qdisc *sch)
 {
        struct red_sched_data *q = qdisc_priv(sch);
        struct Qdisc *child = q->qdisc;
@@ -150,7 +149,7 @@ static unsigned int red_drop(struct Qdisc* sch)
        return 0;
 }
 
-static void red_reset(struct Qdisc* sch)
+static void red_reset(struct Qdisc *sch)
 {
        struct red_sched_data *q = qdisc_priv(sch);
 
@@ -217,7 +216,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
        return 0;
 }
 
-static int red_init(struct Qdisc* sch, struct nlattr *opt)
+static int red_init(struct Qdisc *sch, struct nlattr *opt)
 {
        struct red_sched_data *q = qdisc_priv(sch);
 
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
new file mode 100644 (file)
index 0000000..0a833d0
--- /dev/null
@@ -0,0 +1,709 @@
+/*
+ * net/sched/sch_sfb.c   Stochastic Fair Blue
+ *
+ * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
+ * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
+ * A New Class of Active Queue Management Algorithms.
+ * U. Michigan CSE-TR-387-99, April 1999.
+ *
+ * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
+#include <linux/jhash.h>
+#include <net/ip.h>
+#include <net/pkt_sched.h>
+#include <net/inet_ecn.h>
+
+/*
+ * SFB uses two B[l][n]: L x N arrays of bins (L levels, N bins per level).
+ * This implementation uses L = 8 and N = 16.
+ * This permits us to split one 32bit hash (provided per packet by rxhash or
+ * an external classifier) into 8 subhashes of 4 bits each.
+ */
+#define SFB_BUCKET_SHIFT 4
+#define SFB_NUMBUCKETS (1 << SFB_BUCKET_SHIFT) /* N bins per Level */
+#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
+#define SFB_LEVELS     (32 / SFB_BUCKET_SHIFT) /* L */
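
The bucket addressing described in the comment above can be sketched in a few lines of standalone C (an illustration, not part of the patch; the constants are duplicated locally): the 32-bit flow hash is consumed 4 bits per level, yielding one bin index for each of the 8 levels.

#include <stdio.h>
#include <stdint.h>

#define BUCKET_SHIFT	4
#define NUMBUCKETS	(1 << BUCKET_SHIFT)
#define BUCKET_MASK	(NUMBUCKETS - 1)
#define LEVELS		(32 / BUCKET_SHIFT)

int main(void)
{
	uint32_t hash = 0xdeadbeef;	/* would come from rxhash or a classifier */
	int level;

	for (level = 0; level < LEVELS; level++) {
		printf("level %d -> bin %u\n", level,
		       (unsigned int)(hash & BUCKET_MASK));
		hash >>= BUCKET_SHIFT;
	}
	return 0;
}
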
+
+/* SFB algo uses a virtual queue, named "bin" */
+struct sfb_bucket {
+       u16             qlen; /* length of virtual queue */
+       u16             p_mark; /* marking probability */
+};
+
+/* We use double buffering right before a hash change
+ * (Section 4.4 of the SFB reference: moving hash functions)
+ */
+struct sfb_bins {
+       u32               perturbation; /* jhash perturbation */
+       struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
+};
+
+struct sfb_sched_data {
+       struct Qdisc    *qdisc;
+       struct tcf_proto *filter_list;
+       unsigned long   rehash_interval;
+       unsigned long   warmup_time;    /* double buffering warmup time in jiffies */
+       u32             max;
+       u32             bin_size;       /* maximum queue length per bin */
+       u32             increment;      /* d1 */
+       u32             decrement;      /* d2 */
+       u32             limit;          /* HARD maximal queue length */
+       u32             penalty_rate;
+       u32             penalty_burst;
+       u32             tokens_avail;
+       unsigned long   rehash_time;
+       unsigned long   token_time;
+
+       u8              slot;           /* current active bins (0 or 1) */
+       bool            double_buffering;
+       struct sfb_bins bins[2];
+
+       struct {
+               u32     earlydrop;
+               u32     penaltydrop;
+               u32     bucketdrop;
+               u32     queuedrop;
+               u32     childdrop;      /* drops in child qdisc */
+               u32     marked;         /* ECN mark */
+       } stats;
+};
+
+/*
+ * Each queued skb might be hashed into one or two bins.
+ * We store the two hash values in skb_cb.
+ * (A zero value means double buffering was not used.)
+ */
+struct sfb_skb_cb {
+       u32 hashes[2];
+};
+
+static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
+{
+       BUILD_BUG_ON(sizeof(skb->cb) <
+               sizeof(struct qdisc_skb_cb) + sizeof(struct sfb_skb_cb));
+       return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+/*
+ * If using the 'internal' SFB flow classifier, the hash comes from skb rxhash.
+ * If using an external classifier, the hash comes from the classid.
+ */
+static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
+{
+       return sfb_skb_cb(skb)->hashes[slot];
+}
+
+/* Probabilities are coded as Q0.16 fixed-point values,
+ * with 0xFFFF representing 65535/65536 (almost 1.0).
+ * Addition and subtraction are saturating in [0, 65535].
+ */
+static u32 prob_plus(u32 p1, u32 p2)
+{
+       u32 res = p1 + p2;
+
+       return min_t(u32, res, SFB_MAX_PROB);
+}
+
+static u32 prob_minus(u32 p1, u32 p2)
+{
+       return p1 > p2 ? p1 - p2 : 0;
+}
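
A quick standalone check of the saturating Q0.16 arithmetic implemented by prob_plus()/prob_minus() above (an illustrative sketch; MAX_PROB stands in for SFB_MAX_PROB and is assumed to be 0xFFFF here):

#include <assert.h>
#include <stdint.h>

#define MAX_PROB 0xFFFF			/* assumed value of SFB_MAX_PROB */

static uint32_t q16_plus(uint32_t p1, uint32_t p2)
{
	uint32_t res = p1 + p2;

	return res < MAX_PROB ? res : MAX_PROB;	/* saturate towards 1.0 */
}

static uint32_t q16_minus(uint32_t p1, uint32_t p2)
{
	return p1 > p2 ? p1 - p2 : 0;		/* saturate towards 0.0 */
}

int main(void)
{
	assert(q16_plus(0xF000, 0x2000) == MAX_PROB);	/* clamps at ~1.0 */
	assert(q16_minus(0x1000, 0x2000) == 0);		/* clamps at 0.0 */
	assert(q16_plus(0x4000, 0x4000) == 0x8000);	/* 0.25 + 0.25 = 0.5 */
	return 0;
}
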
+
+static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
+{
+       int i;
+       struct sfb_bucket *b = &q->bins[slot].bins[0][0];
+
+       for (i = 0; i < SFB_LEVELS; i++) {
+               u32 hash = sfbhash & SFB_BUCKET_MASK;
+
+               sfbhash >>= SFB_BUCKET_SHIFT;
+               if (b[hash].qlen < 0xFFFF)
+                       b[hash].qlen++;
+               b += SFB_NUMBUCKETS; /* next level */
+       }
+}
+
+static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
+{
+       u32 sfbhash;
+
+       sfbhash = sfb_hash(skb, 0);
+       if (sfbhash)
+               increment_one_qlen(sfbhash, 0, q);
+
+       sfbhash = sfb_hash(skb, 1);
+       if (sfbhash)
+               increment_one_qlen(sfbhash, 1, q);
+}
+
+static void decrement_one_qlen(u32 sfbhash, u32 slot,
+                              struct sfb_sched_data *q)
+{
+       int i;
+       struct sfb_bucket *b = &q->bins[slot].bins[0][0];
+
+       for (i = 0; i < SFB_LEVELS; i++) {
+               u32 hash = sfbhash & SFB_BUCKET_MASK;
+
+               sfbhash >>= SFB_BUCKET_SHIFT;
+               if (b[hash].qlen > 0)
+                       b[hash].qlen--;
+               b += SFB_NUMBUCKETS; /* next level */
+       }
+}
+
+static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
+{
+       u32 sfbhash;
+
+       sfbhash = sfb_hash(skb, 0);
+       if (sfbhash)
+               decrement_one_qlen(sfbhash, 0, q);
+
+       sfbhash = sfb_hash(skb, 1);
+       if (sfbhash)
+               decrement_one_qlen(sfbhash, 1, q);
+}
+
+static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
+{
+       b->p_mark = prob_minus(b->p_mark, q->decrement);
+}
+
+static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
+{
+       b->p_mark = prob_plus(b->p_mark, q->increment);
+}
+
+static void sfb_zero_all_buckets(struct sfb_sched_data *q)
+{
+       memset(&q->bins, 0, sizeof(q->bins));
+}
+
+/*
+ * compute max qlen, max p_mark, and avg p_mark
+ */
+static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
+{
+       int i;
+       u32 qlen = 0, prob = 0, totalpm = 0;
+       const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];
+
+       for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
+               if (qlen < b->qlen)
+                       qlen = b->qlen;
+               totalpm += b->p_mark;
+               if (prob < b->p_mark)
+                       prob = b->p_mark;
+               b++;
+       }
+       *prob_r = prob;
+       *avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
+       return qlen;
+}
+
+
+static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
+{
+       q->bins[slot].perturbation = net_random();
+}
+
+static void sfb_swap_slot(struct sfb_sched_data *q)
+{
+       sfb_init_perturbation(q->slot, q);
+       q->slot ^= 1;
+       q->double_buffering = false;
+}
+
+/* Non-elastic flows are allowed to use part of the bandwidth, expressed
+ * in "penalty_rate" packets per second, with a "penalty_burst" packet burst.
+ */
+static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
+{
+       if (q->penalty_rate == 0 || q->penalty_burst == 0)
+               return true;
+
+       if (q->tokens_avail < 1) {
+               unsigned long age = min(10UL * HZ, jiffies - q->token_time);
+
+               q->tokens_avail = (age * q->penalty_rate) / HZ;
+               if (q->tokens_avail > q->penalty_burst)
+                       q->tokens_avail = q->penalty_burst;
+               q->token_time = jiffies;
+               if (q->tokens_avail < 1)
+                       return true;
+       }
+
+       q->tokens_avail--;
+       return false;
+}
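
sfb_rate_limit() above is essentially a small token bucket refilled on demand. A user-space sketch of the same idea follows (names, the seconds-based clock and the example values are assumptions of the illustration; the kernel code works in jiffies):

#include <stdbool.h>
#include <time.h>

struct limiter {
	unsigned int rate;	/* packets per second allowed */
	unsigned int burst;	/* maximum stored tokens */
	unsigned int tokens;
	time_t last;		/* time of the last refill */
};

/* Return true when the packet exceeds the penalty budget (i.e. drop it). */
static bool over_budget(struct limiter *l, time_t now)
{
	if (l->rate == 0 || l->burst == 0)
		return true;

	if (l->tokens < 1) {
		unsigned long age = (unsigned long)(now - l->last);

		if (age > 10)
			age = 10;		/* cap the refill window */
		l->tokens = age * l->rate;
		if (l->tokens > l->burst)
			l->tokens = l->burst;
		l->last = now;
		if (l->tokens < 1)
			return true;
	}
	l->tokens--;
	return false;
}

int main(void)
{
	struct limiter l = { .rate = 10, .burst = 20, .tokens = 20,
			     .last = time(NULL) };

	return over_budget(&l, time(NULL)) ? 1 : 0;
}
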
+
+static bool sfb_classify(struct sk_buff *skb, struct sfb_sched_data *q,
+                        int *qerr, u32 *salt)
+{
+       struct tcf_result res;
+       int result;
+
+       result = tc_classify(skb, q->filter_list, &res);
+       if (result >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+               switch (result) {
+               case TC_ACT_STOLEN:
+               case TC_ACT_QUEUED:
+                       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+               case TC_ACT_SHOT:
+                       return false;
+               }
+#endif
+               *salt = TC_H_MIN(res.classid);
+               return true;
+       }
+       return false;
+}
+
+static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+
+       struct sfb_sched_data *q = qdisc_priv(sch);
+       struct Qdisc *child = q->qdisc;
+       int i;
+       u32 p_min = ~0;
+       u32 minqlen = ~0;
+       u32 r, slot, salt, sfbhash;
+       int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+
+       if (q->rehash_interval > 0) {
+               unsigned long limit = q->rehash_time + q->rehash_interval;
+
+               if (unlikely(time_after(jiffies, limit))) {
+                       sfb_swap_slot(q);
+                       q->rehash_time = jiffies;
+               } else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
+                                   time_after(jiffies, limit - q->warmup_time))) {
+                       q->double_buffering = true;
+               }
+       }
+
+       if (q->filter_list) {
+               /* If using external classifiers, get result and record it. */
+               if (!sfb_classify(skb, q, &ret, &salt))
+                       goto other_drop;
+       } else {
+               salt = skb_get_rxhash(skb);
+       }
+
+       slot = q->slot;
+
+       sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
+       if (!sfbhash)
+               sfbhash = 1;
+       sfb_skb_cb(skb)->hashes[slot] = sfbhash;
+
+       for (i = 0; i < SFB_LEVELS; i++) {
+               u32 hash = sfbhash & SFB_BUCKET_MASK;
+               struct sfb_bucket *b = &q->bins[slot].bins[i][hash];
+
+               sfbhash >>= SFB_BUCKET_SHIFT;
+               if (b->qlen == 0)
+                       decrement_prob(b, q);
+               else if (b->qlen >= q->bin_size)
+                       increment_prob(b, q);
+               if (minqlen > b->qlen)
+                       minqlen = b->qlen;
+               if (p_min > b->p_mark)
+                       p_min = b->p_mark;
+       }
+
+       slot ^= 1;
+       sfb_skb_cb(skb)->hashes[slot] = 0;
+
+       if (unlikely(minqlen >= q->max || sch->q.qlen >= q->limit)) {
+               sch->qstats.overlimits++;
+               if (minqlen >= q->max)
+                       q->stats.bucketdrop++;
+               else
+                       q->stats.queuedrop++;
+               goto drop;
+       }
+
+       if (unlikely(p_min >= SFB_MAX_PROB)) {
+               /* Inelastic flow */
+               if (q->double_buffering) {
+                       sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
+                       if (!sfbhash)
+                               sfbhash = 1;
+                       sfb_skb_cb(skb)->hashes[slot] = sfbhash;
+
+                       for (i = 0; i < SFB_LEVELS; i++) {
+                               u32 hash = sfbhash & SFB_BUCKET_MASK;
+                               struct sfb_bucket *b = &q->bins[slot].bins[i][hash];
+
+                               sfbhash >>= SFB_BUCKET_SHIFT;
+                               if (b->qlen == 0)
+                                       decrement_prob(b, q);
+                               else if (b->qlen >= q->bin_size)
+                                       increment_prob(b, q);
+                       }
+               }
+               if (sfb_rate_limit(skb, q)) {
+                       sch->qstats.overlimits++;
+                       q->stats.penaltydrop++;
+                       goto drop;
+               }
+               goto enqueue;
+       }
+
+       r = net_random() & SFB_MAX_PROB;
+
+       if (unlikely(r < p_min)) {
+               if (unlikely(p_min > SFB_MAX_PROB / 2)) {
+                       /* If we're marking that many packets, then either
+                        * this flow is unresponsive, or we're badly congested.
+                        * In either case, we want to start dropping packets.
+                        */
+                       if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
+                               q->stats.earlydrop++;
+                               goto drop;
+                       }
+               }
+               if (INET_ECN_set_ce(skb)) {
+                       q->stats.marked++;
+               } else {
+                       q->stats.earlydrop++;
+                       goto drop;
+               }
+       }
+
+enqueue:
+       ret = qdisc_enqueue(skb, child);
+       if (likely(ret == NET_XMIT_SUCCESS)) {
+               sch->q.qlen++;
+               increment_qlen(skb, q);
+       } else if (net_xmit_drop_count(ret)) {
+               q->stats.childdrop++;
+               sch->qstats.drops++;
+       }
+       return ret;
+
+drop:
+       qdisc_drop(skb, sch);
+       return NET_XMIT_CN;
+other_drop:
+       if (ret & __NET_XMIT_BYPASS)
+               sch->qstats.drops++;
+       kfree_skb(skb);
+       return ret;
+}
+
+static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+       struct Qdisc *child = q->qdisc;
+       struct sk_buff *skb;
+
+       skb = child->dequeue(q->qdisc);
+
+       if (skb) {
+               qdisc_bstats_update(sch, skb);
+               sch->q.qlen--;
+               decrement_qlen(skb, q);
+       }
+
+       return skb;
+}
+
+static struct sk_buff *sfb_peek(struct Qdisc *sch)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+       struct Qdisc *child = q->qdisc;
+
+       return child->ops->peek(child);
+}
+
+/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */
+
+static void sfb_reset(struct Qdisc *sch)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+
+       qdisc_reset(q->qdisc);
+       sch->q.qlen = 0;
+       q->slot = 0;
+       q->double_buffering = false;
+       sfb_zero_all_buckets(q);
+       sfb_init_perturbation(0, q);
+}
+
+static void sfb_destroy(struct Qdisc *sch)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+
+       tcf_destroy_chain(&q->filter_list);
+       qdisc_destroy(q->qdisc);
+}
+
+static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
+       [TCA_SFB_PARMS] = { .len = sizeof(struct tc_sfb_qopt) },
+};
+
+static const struct tc_sfb_qopt sfb_default_ops = {
+       .rehash_interval = 600 * MSEC_PER_SEC,
+       .warmup_time = 60 * MSEC_PER_SEC,
+       .limit = 0,
+       .max = 25,
+       .bin_size = 20,
+       .increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
+       .decrement = (SFB_MAX_PROB + 3000) / 6000,
+       .penalty_rate = 10,
+       .penalty_burst = 20,
+};
+
+static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+       struct Qdisc *child;
+       struct nlattr *tb[TCA_SFB_MAX + 1];
+       const struct tc_sfb_qopt *ctl = &sfb_default_ops;
+       u32 limit;
+       int err;
+
+       if (opt) {
+               err = nla_parse_nested(tb, TCA_SFB_MAX, opt, sfb_policy);
+               if (err < 0)
+                       return -EINVAL;
+
+               if (tb[TCA_SFB_PARMS] == NULL)
+                       return -EINVAL;
+
+               ctl = nla_data(tb[TCA_SFB_PARMS]);
+       }
+
+       limit = ctl->limit;
+       if (limit == 0)
+               limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);
+
+       child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
+       if (IS_ERR(child))
+               return PTR_ERR(child);
+
+       sch_tree_lock(sch);
+
+       qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
+       qdisc_destroy(q->qdisc);
+       q->qdisc = child;
+
+       q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
+       q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
+       q->rehash_time = jiffies;
+       q->limit = limit;
+       q->increment = ctl->increment;
+       q->decrement = ctl->decrement;
+       q->max = ctl->max;
+       q->bin_size = ctl->bin_size;
+       q->penalty_rate = ctl->penalty_rate;
+       q->penalty_burst = ctl->penalty_burst;
+       q->tokens_avail = ctl->penalty_burst;
+       q->token_time = jiffies;
+
+       q->slot = 0;
+       q->double_buffering = false;
+       sfb_zero_all_buckets(q);
+       sfb_init_perturbation(0, q);
+       sfb_init_perturbation(1, q);
+
+       sch_tree_unlock(sch);
+
+       return 0;
+}
+
+static int sfb_init(struct Qdisc *sch, struct nlattr *opt)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+
+       q->qdisc = &noop_qdisc;
+       return sfb_change(sch, opt);
+}
+
+static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+       struct nlattr *opts;
+       struct tc_sfb_qopt opt = {
+               .rehash_interval = jiffies_to_msecs(q->rehash_interval),
+               .warmup_time = jiffies_to_msecs(q->warmup_time),
+               .limit = q->limit,
+               .max = q->max,
+               .bin_size = q->bin_size,
+               .increment = q->increment,
+               .decrement = q->decrement,
+               .penalty_rate = q->penalty_rate,
+               .penalty_burst = q->penalty_burst,
+       };
+
+       sch->qstats.backlog = q->qdisc->qstats.backlog;
+       opts = nla_nest_start(skb, TCA_OPTIONS);
+       NLA_PUT(skb, TCA_SFB_PARMS, sizeof(opt), &opt);
+       return nla_nest_end(skb, opts);
+
+nla_put_failure:
+       nla_nest_cancel(skb, opts);
+       return -EMSGSIZE;
+}
+
+static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+       struct tc_sfb_xstats st = {
+               .earlydrop = q->stats.earlydrop,
+               .penaltydrop = q->stats.penaltydrop,
+               .bucketdrop = q->stats.bucketdrop,
+               .queuedrop = q->stats.queuedrop,
+               .childdrop = q->stats.childdrop,
+               .marked = q->stats.marked,
+       };
+
+       st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);
+
+       return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
+                         struct sk_buff *skb, struct tcmsg *tcm)
+{
+       return -ENOSYS;
+}
+
+static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+                    struct Qdisc **old)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+
+       if (new == NULL)
+               new = &noop_qdisc;
+
+       sch_tree_lock(sch);
+       *old = q->qdisc;
+       q->qdisc = new;
+       qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+       qdisc_reset(*old);
+       sch_tree_unlock(sch);
+       return 0;
+}
+
+static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+
+       return q->qdisc;
+}
+
+static unsigned long sfb_get(struct Qdisc *sch, u32 classid)
+{
+       return 1;
+}
+
+static void sfb_put(struct Qdisc *sch, unsigned long arg)
+{
+}
+
+static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+                           struct nlattr **tca, unsigned long *arg)
+{
+       return -ENOSYS;
+}
+
+static int sfb_delete(struct Qdisc *sch, unsigned long cl)
+{
+       return -ENOSYS;
+}
+
+static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
+{
+       if (!walker->stop) {
+               if (walker->count >= walker->skip)
+                       if (walker->fn(sch, 1, walker) < 0) {
+                               walker->stop = 1;
+                               return;
+                       }
+               walker->count++;
+       }
+}
+
+static struct tcf_proto **sfb_find_tcf(struct Qdisc *sch, unsigned long cl)
+{
+       struct sfb_sched_data *q = qdisc_priv(sch);
+
+       if (cl)
+               return NULL;
+       return &q->filter_list;
+}
+
+static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
+                             u32 classid)
+{
+       return 0;
+}
+
+
+static const struct Qdisc_class_ops sfb_class_ops = {
+       .graft          =       sfb_graft,
+       .leaf           =       sfb_leaf,
+       .get            =       sfb_get,
+       .put            =       sfb_put,
+       .change         =       sfb_change_class,
+       .delete         =       sfb_delete,
+       .walk           =       sfb_walk,
+       .tcf_chain      =       sfb_find_tcf,
+       .bind_tcf       =       sfb_bind,
+       .unbind_tcf     =       sfb_put,
+       .dump           =       sfb_dump_class,
+};
+
+static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
+       .id             =       "sfb",
+       .priv_size      =       sizeof(struct sfb_sched_data),
+       .cl_ops         =       &sfb_class_ops,
+       .enqueue        =       sfb_enqueue,
+       .dequeue        =       sfb_dequeue,
+       .peek           =       sfb_peek,
+       .init           =       sfb_init,
+       .reset          =       sfb_reset,
+       .destroy        =       sfb_destroy,
+       .change         =       sfb_change,
+       .dump           =       sfb_dump,
+       .dump_stats     =       sfb_dump_stats,
+       .owner          =       THIS_MODULE,
+};
+
+static int __init sfb_module_init(void)
+{
+       return register_qdisc(&sfb_qdisc_ops);
+}
+
+static void __exit sfb_module_exit(void)
+{
+       unregister_qdisc(&sfb_qdisc_ops);
+}
+
+module_init(sfb_module_init)
+module_exit(sfb_module_exit)
+
+MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
+MODULE_AUTHOR("Juliusz Chroboczek");
+MODULE_AUTHOR("Eric Dumazet");
+MODULE_LICENSE("GPL");
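
For context on the algorithm named in MODULE_DESCRIPTION(): SFB builds on Blue, where each bin keeps a marking probability that is raised by a configured increment whenever the bin's queue overflows and lowered by a decrement when the bin drains, while penalty_rate/penalty_burst bound flows the bins identify as non-responsive. A minimal userspace sketch of that update rule follows; the names (blue_bin, blue_on_overflow, blue_on_idle) are hypothetical, and floating point is used only for readability since the qdisc itself works in fixed-point integers.

#include <stdio.h>

struct blue_bin {
	double p;	/* current marking probability, 0.0 .. 1.0 */
};

/* Congestion signal: the bin's queue overflowed, mark more aggressively. */
static void blue_on_overflow(struct blue_bin *b, double increment)
{
	b->p += increment;
	if (b->p > 1.0)
		b->p = 1.0;
}

/* The bin went idle: back off so spare capacity can be probed again. */
static void blue_on_idle(struct blue_bin *b, double decrement)
{
	b->p -= decrement;
	if (b->p < 0.0)
		b->p = 0.0;
}

int main(void)
{
	struct blue_bin bin = { .p = 0.0 };

	blue_on_overflow(&bin, 0.00050);	/* example step sizes */
	blue_on_idle(&bin, 0.00005);
	printf("p = %f\n", bin.p);		/* 0.000450 */
	return 0;
}

Typically the increment is chosen larger than the decrement, so marking ramps up quickly under congestion and decays slowly afterwards.
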
index edea8cefec6c9502e0a841e4bb4bd3ef11003495..c2e628dfaaccb0f2dacffa9bbc5dec34bbb5f291 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/skbuff.h>
 #include <linux/jhash.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <net/ip.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
@@ -76,7 +77,8 @@
 #define SFQ_DEPTH              128 /* max number of packets per flow */
 #define SFQ_SLOTS              128 /* max number of flows */
 #define SFQ_EMPTY_SLOT         255
-#define SFQ_HASH_DIVISOR       1024
+#define SFQ_DEFAULT_HASH_DIVISOR 1024
+
 /* We use 16 bits to store allot, and want to handle packets up to 64K
  * Scale allot by 8 (1<<3) so that no overflow occurs.
  */
@@ -92,8 +94,7 @@ typedef unsigned char sfq_index;
  * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
  * are 'pointers' to dep[] array
  */
-struct sfq_head
-{
+struct sfq_head {
        sfq_index       next;
        sfq_index       prev;
 };
@@ -108,13 +109,12 @@ struct sfq_slot {
        short           allot; /* credit for this slot */
 };
 
-struct sfq_sched_data
-{
+struct sfq_sched_data {
 /* Parameters */
        int             perturb_period;
-       unsigned        quantum;        /* Allotment per round: MUST BE >= MTU */
+       unsigned int    quantum;        /* Allotment per round: MUST BE >= MTU */
        int             limit;
-
+       unsigned int    divisor;        /* number of slots in hash table */
 /* Variables */
        struct tcf_proto *filter_list;
        struct timer_list perturb_timer;
@@ -122,7 +122,7 @@ struct sfq_sched_data
        sfq_index       cur_depth;      /* depth of longest slot */
        unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
        struct sfq_slot *tail;          /* current slot in round */
-       sfq_index       ht[SFQ_HASH_DIVISOR];   /* Hash table */
+       sfq_index       *ht;            /* Hash table (divisor slots) */
        struct sfq_slot slots[SFQ_SLOTS];
        struct sfq_head dep[SFQ_DEPTH]; /* Linked list of slots, indexed by depth */
 };
@@ -137,12 +137,12 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index
        return &q->dep[val - SFQ_SLOTS];
 }
 
-static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
+static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
 {
-       return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
+       return jhash_2words(h, h1, q->perturbation) & (q->divisor - 1);
 }
 
-static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
+static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
 {
        u32 h, h2;
 
@@ -157,13 +157,13 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
                iph = ip_hdr(skb);
                h = (__force u32)iph->daddr;
                h2 = (__force u32)iph->saddr ^ iph->protocol;
-               if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+               if (iph->frag_off & htons(IP_MF | IP_OFFSET))
                        break;
                poff = proto_ports_offset(iph->protocol);
                if (poff >= 0 &&
                    pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
                        iph = ip_hdr(skb);
-                       h2 ^= *(u32*)((void *)iph + iph->ihl * 4 + poff);
+                       h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff);
                }
                break;
        }
@@ -181,7 +181,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
                if (poff >= 0 &&
                    pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
                        iph = ipv6_hdr(skb);
-                       h2 ^= *(u32*)((void *)iph + sizeof(*iph) + poff);
+                       h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff);
                }
                break;
        }
@@ -203,7 +203,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
 
        if (TC_H_MAJ(skb->priority) == sch->handle &&
            TC_H_MIN(skb->priority) > 0 &&
-           TC_H_MIN(skb->priority) <= SFQ_HASH_DIVISOR)
+           TC_H_MIN(skb->priority) <= q->divisor)
                return TC_H_MIN(skb->priority);
 
        if (!q->filter_list)
@@ -221,7 +221,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
                        return 0;
                }
 #endif
-               if (TC_H_MIN(res.classid) <= SFQ_HASH_DIVISOR)
+               if (TC_H_MIN(res.classid) <= q->divisor)
                        return TC_H_MIN(res.classid);
        }
        return 0;
@@ -491,13 +491,18 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
        if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
                return -EINVAL;
 
+       if (ctl->divisor &&
+           (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
+               return -EINVAL;
+
        sch_tree_lock(sch);
        q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
        q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
        q->perturb_period = ctl->perturb_period * HZ;
        if (ctl->limit)
                q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
-
+       if (ctl->divisor)
+               q->divisor = ctl->divisor;
        qlen = sch->q.qlen;
        while (sch->q.qlen > q->limit)
                sfq_drop(sch);
@@ -515,15 +520,13 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
 static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
 {
        struct sfq_sched_data *q = qdisc_priv(sch);
+       size_t sz;
        int i;
 
        q->perturb_timer.function = sfq_perturbation;
        q->perturb_timer.data = (unsigned long)sch;
        init_timer_deferrable(&q->perturb_timer);
 
-       for (i = 0; i < SFQ_HASH_DIVISOR; i++)
-               q->ht[i] = SFQ_EMPTY_SLOT;
-
        for (i = 0; i < SFQ_DEPTH; i++) {
                q->dep[i].next = i + SFQ_SLOTS;
                q->dep[i].prev = i + SFQ_SLOTS;
@@ -532,6 +535,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
        q->limit = SFQ_DEPTH - 1;
        q->cur_depth = 0;
        q->tail = NULL;
+       q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
        if (opt == NULL) {
                q->quantum = psched_mtu(qdisc_dev(sch));
                q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
@@ -543,10 +547,23 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
                        return err;
        }
 
+       sz = sizeof(q->ht[0]) * q->divisor;
+       q->ht = kmalloc(sz, GFP_KERNEL);
+       if (!q->ht && sz > PAGE_SIZE)
+               q->ht = vmalloc(sz);
+       if (!q->ht)
+               return -ENOMEM;
+       for (i = 0; i < q->divisor; i++)
+               q->ht[i] = SFQ_EMPTY_SLOT;
+
        for (i = 0; i < SFQ_SLOTS; i++) {
                slot_queue_init(&q->slots[i]);
                sfq_link(q, i);
        }
+       if (q->limit >= 1)
+               sch->flags |= TCQ_F_CAN_BYPASS;
+       else
+               sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
 }
 
@@ -557,6 +574,10 @@ static void sfq_destroy(struct Qdisc *sch)
        tcf_destroy_chain(&q->filter_list);
        q->perturb_period = 0;
        del_timer_sync(&q->perturb_timer);
+       if (is_vmalloc_addr(q->ht))
+               vfree(q->ht);
+       else
+               kfree(q->ht);
 }
 
 static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -569,7 +590,7 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
        opt.perturb_period = q->perturb_period / HZ;
 
        opt.limit = q->limit;
-       opt.divisor = SFQ_HASH_DIVISOR;
+       opt.divisor = q->divisor;
        opt.flows = q->limit;
 
        NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
@@ -594,6 +615,8 @@ static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
 static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
                              u32 classid)
 {
+       /* we cannot bypass queue discipline anymore */
+       sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
 }
 
@@ -647,7 +670,7 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
        if (arg->stop)
                return;
 
-       for (i = 0; i < SFQ_HASH_DIVISOR; i++) {
+       for (i = 0; i < q->divisor; i++) {
                if (q->ht[i] == SFQ_EMPTY_SLOT ||
                    arg->count < arg->skip) {
                        arg->count++;
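
A note on the hunks above: the hash table is now q->divisor entries long, the divisor must be a power of two because sfq_fold_hash() uses it as a mask (q->divisor - 1), and since a table of up to 65536 entries may be too large for kmalloc() to satisfy reliably, sfq_init() falls back to vmalloc() while sfq_destroy() frees through whichever allocator actually provided the memory. A minimal sketch of that allocate/free pairing, using hypothetical helper names (sfq_alloc()/sfq_free() are not part of this patch):

#include <linux/mm.h>		/* is_vmalloc_addr() */
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Prefer a physically contiguous allocation, fall back for large tables. */
static void *sfq_alloc(size_t sz)
{
	void *ptr = kmalloc(sz, GFP_KERNEL);

	if (!ptr && sz > PAGE_SIZE)
		ptr = vmalloc(sz);
	return ptr;
}

/* Free with the allocator that succeeded. */
static void sfq_free(void *addr)
{
	if (!addr)
		return;
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

Keying the free on is_vmalloc_addr() keeps the teardown path independent of which allocation path was taken.
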
index e93165820c3f02ec4edc072b26b04dcdd4645ce0..1dcfb5223a861fec4e5e4e41200b86f070363baf 100644 (file)
@@ -97,8 +97,7 @@
        changed the limit is not effective anymore.
 */
 
-struct tbf_sched_data
-{
+struct tbf_sched_data {
 /* Parameters */
        u32             limit;          /* Maximal length of backlog: bytes */
        u32             buffer;         /* Token bucket depth/rate: MUST BE >= MTU/B */
@@ -115,10 +114,10 @@ struct tbf_sched_data
        struct qdisc_watchdog watchdog; /* Watchdog timer */
 };
 
-#define L2T(q,L)   qdisc_l2t((q)->R_tab,L)
-#define L2T_P(q,L) qdisc_l2t((q)->P_tab,L)
+#define L2T(q, L)   qdisc_l2t((q)->R_tab, L)
+#define L2T_P(q, L) qdisc_l2t((q)->P_tab, L)
 
-static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
        int ret;
@@ -137,7 +136,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
        return NET_XMIT_SUCCESS;
 }
 
-static unsigned int tbf_drop(struct Qdisc* sch)
+static unsigned int tbf_drop(struct Qdisc *sch)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
        unsigned int len = 0;
@@ -149,7 +148,7 @@ static unsigned int tbf_drop(struct Qdisc* sch)
        return len;
 }
 
-static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
+static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
@@ -185,7 +184,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
                        q->tokens = toks;
                        q->ptokens = ptoks;
                        sch->q.qlen--;
-                       sch->flags &= ~TCQ_F_THROTTLED;
+                       qdisc_unthrottled(sch);
                        qdisc_bstats_update(sch, skb);
                        return skb;
                }
@@ -209,7 +208,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
        return NULL;
 }
 
-static void tbf_reset(struct Qdisc* sch)
+static void tbf_reset(struct Qdisc *sch)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
 
@@ -227,7 +226,7 @@ static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
        [TCA_TBF_PTAB]  = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
 };
 
-static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
+static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
 {
        int err;
        struct tbf_sched_data *q = qdisc_priv(sch);
@@ -236,7 +235,7 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
        struct qdisc_rate_table *rtab = NULL;
        struct qdisc_rate_table *ptab = NULL;
        struct Qdisc *child = NULL;
-       int max_size,n;
+       int max_size, n;
 
        err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy);
        if (err < 0)
@@ -259,15 +258,18 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
        }
 
        for (n = 0; n < 256; n++)
-               if (rtab->data[n] > qopt->buffer) break;
-       max_size = (n << qopt->rate.cell_log)-1;
+               if (rtab->data[n] > qopt->buffer)
+                       break;
+       max_size = (n << qopt->rate.cell_log) - 1;
        if (ptab) {
                int size;
 
                for (n = 0; n < 256; n++)
-                       if (ptab->data[n] > qopt->mtu) break;
-               size = (n << qopt->peakrate.cell_log)-1;
-               if (size < max_size) max_size = size;
+                       if (ptab->data[n] > qopt->mtu)
+                               break;
+               size = (n << qopt->peakrate.cell_log) - 1;
+               if (size < max_size)
+                       max_size = size;
        }
        if (max_size < 0)
                goto done;
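
For orientation while reading the reformatted loop above: rtab->data[n] is the token cost (transmission time at the configured rate) of a packet falling in cell n of the rate table, so the loop stops at the first cell whose cost exceeds the burst buffer, and max_size = (n << cell_log) - 1 is the largest packet length that still maps to a cheaper cell. As a made-up example, with 8-byte cells (cell_log = 3) and the loop stopping at n = 190, max_size = (190 << 3) - 1 = 1519 bytes, enough for a full Ethernet frame; the peak-rate table, when present, only tightens this bound.
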
@@ -310,7 +312,7 @@ done:
        return err;
 }
 
-static int tbf_init(struct Qdisc* sch, struct nlattr *opt)
+static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
 
@@ -422,8 +424,7 @@ static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
        }
 }
 
-static const struct Qdisc_class_ops tbf_class_ops =
-{
+static const struct Qdisc_class_ops tbf_class_ops = {
        .graft          =       tbf_graft,
        .leaf           =       tbf_leaf,
        .get            =       tbf_get,
index d84e7329660fb5f21e5e87d11fae435e27907a8b..45cd30098e34800ddb0ab5caf880f81340531636 100644 (file)
@@ -53,8 +53,7 @@
       which will not break load balancing, though native slave
       traffic will have the highest priority.  */
 
-struct teql_master
-{
+struct teql_master {
        struct Qdisc_ops qops;
        struct net_device *dev;
        struct Qdisc *slaves;
@@ -65,22 +64,21 @@ struct teql_master
        unsigned long   tx_dropped;
 };
 
-struct teql_sched_data
-{
+struct teql_sched_data {
        struct Qdisc *next;
        struct teql_master *m;
        struct neighbour *ncache;
        struct sk_buff_head q;
 };
 
-#define NEXT_SLAVE(q) (((struct teql_sched_data*)qdisc_priv(q))->next)
+#define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next)
 
-#define FMASK (IFF_BROADCAST|IFF_POINTOPOINT)
+#define FMASK (IFF_BROADCAST | IFF_POINTOPOINT)
 
 /* "teql*" qdisc routines */
 
 static int
-teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct net_device *dev = qdisc_dev(sch);
        struct teql_sched_data *q = qdisc_priv(sch);
@@ -96,7 +94,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 }
 
 static struct sk_buff *
-teql_dequeue(struct Qdisc* sch)
+teql_dequeue(struct Qdisc *sch)
 {
        struct teql_sched_data *dat = qdisc_priv(sch);
        struct netdev_queue *dat_queue;
@@ -118,13 +116,13 @@ teql_dequeue(struct Qdisc* sch)
 }
 
 static struct sk_buff *
-teql_peek(struct Qdisc* sch)
+teql_peek(struct Qdisc *sch)
 {
        /* teql is meant to be used as root qdisc */
        return NULL;
 }
 
-static __inline__ void
+static inline void
 teql_neigh_release(struct neighbour *n)
 {
        if (n)
@@ -132,7 +130,7 @@ teql_neigh_release(struct neighbour *n)
 }
 
 static void
-teql_reset(struct Qdisc* sch)
+teql_reset(struct Qdisc *sch)
 {
        struct teql_sched_data *dat = qdisc_priv(sch);
 
@@ -142,13 +140,14 @@ teql_reset(struct Qdisc* sch)
 }
 
 static void
-teql_destroy(struct Qdisc* sch)
+teql_destroy(struct Qdisc *sch)
 {
        struct Qdisc *q, *prev;
        struct teql_sched_data *dat = qdisc_priv(sch);
        struct teql_master *master = dat->m;
 
-       if ((prev = master->slaves) != NULL) {
+       prev = master->slaves;
+       if (prev) {
                do {
                        q = NEXT_SLAVE(prev);
                        if (q == sch) {
@@ -180,7 +179,7 @@ teql_destroy(struct Qdisc* sch)
 static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
 {
        struct net_device *dev = qdisc_dev(sch);
-       struct teql_master *m = (struct teql_master*)sch->ops;
+       struct teql_master *m = (struct teql_master *)sch->ops;
        struct teql_sched_data *q = qdisc_priv(sch);
 
        if (dev->hard_header_len > m->dev->hard_header_len)
@@ -291,7 +290,8 @@ restart:
        nores = 0;
        busy = 0;
 
-       if ((q = start) == NULL)
+       q = start;
+       if (!q)
                goto drop;
 
        do {
@@ -356,10 +356,10 @@ drop:
 
 static int teql_master_open(struct net_device *dev)
 {
-       struct Qdisc * q;
+       struct Qdisc *q;
        struct teql_master *m = netdev_priv(dev);
        int mtu = 0xFFFE;
-       unsigned flags = IFF_NOARP|IFF_MULTICAST;
+       unsigned int flags = IFF_NOARP | IFF_MULTICAST;
 
        if (m->slaves == NULL)
                return -EUNATCH;
@@ -427,7 +427,7 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu)
                do {
                        if (new_mtu > qdisc_dev(q)->mtu)
                                return -EINVAL;
-               } while ((q=NEXT_SLAVE(q)) != m->slaves);
+               } while ((q = NEXT_SLAVE(q)) != m->slaves);
        }
 
        dev->mtu = new_mtu;
index 5f1fb8bd862dea391a2930f34d59d2129743ebaf..6b04287913cda3dcdc3573357d4856880eefb0bb 100644 (file)
@@ -1089,7 +1089,6 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
                             base.inqueue.immediate);
        struct sctp_endpoint *ep;
        struct sctp_chunk *chunk;
-       struct sock *sk;
        struct sctp_inq *inqueue;
        int state;
        sctp_subtype_t subtype;
@@ -1097,7 +1096,6 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
 
        /* The association should be held so we should be safe. */
        ep = asoc->ep;
-       sk = asoc->base.sk;
 
        inqueue = &asoc->base.inqueue;
        sctp_association_hold(asoc);
index ea2192444ce66413261d20ce8ec236279496d141..826661be73e70f9e05824cc4f65f9e3d1e1cd712 100644 (file)
@@ -948,14 +948,11 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
        union sctp_addr addr;
        union sctp_addr *paddr = &addr;
        struct sctphdr *sh = sctp_hdr(skb);
-       sctp_chunkhdr_t *ch;
        union sctp_params params;
        sctp_init_chunk_t *init;
        struct sctp_transport *transport;
        struct sctp_af *af;
 
-       ch = (sctp_chunkhdr_t *) skb->data;
-
        /*
         * This code will NOT touch anything inside the chunk--it is
         * strictly READ-ONLY.
index 8c6d379b4bb682634c27c3fef9f4bfc17786e507..26dc005113a0972feb1f7f0a8eaa8503ee09504c 100644 (file)
@@ -545,13 +545,11 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
        struct sctp_transport *transport = pkt->transport;
        sctp_xmit_t status;
        struct sctp_chunk *chunk, *chunk1;
-       struct sctp_association *asoc;
        int fast_rtx;
        int error = 0;
        int timer = 0;
        int done = 0;
 
-       asoc = q->asoc;
        lqueue = &q->retransmit;
        fast_rtx = q->fast_rtx;
 
index e58f9476f29c571cb516acd53516bdc46707a416..4e55e6c49ec95c0beed46b09e2ae261e4a3db669 100644 (file)
@@ -491,9 +491,9 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
        SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ",
                          __func__, &fl.fl4_dst, &fl.fl4_src);
 
-       if (!ip_route_output_key(&init_net, &rt, &fl)) {
+       rt = ip_route_output_key(&init_net, &fl);
+       if (!IS_ERR(rt))
                dst = &rt->dst;
-       }
 
        /* If there is no association or if a source address is passed, no
         * more validation is required.
@@ -535,7 +535,8 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
                    (AF_INET == laddr->a.sa.sa_family)) {
                        fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
                        fl.fl_ip_sport = laddr->a.v4.sin_port;
-                       if (!ip_route_output_key(&init_net, &rt, &fl)) {
+                       rt = ip_route_output_key(&init_net, &fl);
+                       if (!IS_ERR(rt)) {
                                dst = &rt->dst;
                                goto out_unlock;
                        }
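
Context for the two hunks above, not part of the patch itself: ip_route_output_key() now returns either a valid struct rtable * or an errno encoded with ERR_PTR(), instead of an int result plus a struct rtable ** output argument, so callers branch on IS_ERR(). A small sketch of the convention with a hypothetical wrapper (lookup_dst() is not an in-tree function):

#include <linux/err.h>
#include <net/net_namespace.h>
#include <net/route.h>

/* Hypothetical helper: resolve a flow key to a dst entry, or NULL. */
static struct dst_entry *lookup_dst(struct flowi *fl)
{
	struct rtable *rt = ip_route_output_key(&init_net, fl);

	if (IS_ERR(rt))		/* PTR_ERR(rt) holds the errno, e.g. -ENETUNREACH */
		return NULL;
	return &rt->dst;
}
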
index b23428f3c0dde3657187645e47c191a33a5dd7de..de98665db52440b61b9ca5b688f3812ec96a6d79 100644 (file)
@@ -3375,7 +3375,6 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
                                    struct sctp_fwdtsn_skip *skiplist)
 {
        struct sctp_chunk *retval = NULL;
-       struct sctp_fwdtsn_chunk *ftsn_chunk;
        struct sctp_fwdtsn_hdr ftsn_hdr;
        struct sctp_fwdtsn_skip skip;
        size_t hint;
@@ -3388,8 +3387,6 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
        if (!retval)
                return NULL;
 
-       ftsn_chunk = (struct sctp_fwdtsn_chunk *)retval->subh.fwdtsn_hdr;
-
        ftsn_hdr.new_cum_tsn = htonl(new_cum_tsn);
        retval->subh.fwdtsn_hdr =
                sctp_addto_chunk(retval, sizeof(ftsn_hdr), &ftsn_hdr);
index 8e02550ff3e88d9153a0d610dbdadb864ae06da7..3951a10605bc483a4eac1787ffb08cef97e7d8a2 100644 (file)
@@ -2928,7 +2928,6 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
                                             unsigned int optlen)
 {
        struct sctp_sock        *sp;
-       struct sctp_endpoint    *ep;
        struct sctp_association *asoc = NULL;
        struct sctp_setpeerprim prim;
        struct sctp_chunk       *chunk;
@@ -2936,7 +2935,6 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
        int                     err;
 
        sp = sctp_sk(sk);
-       ep = sp->ep;
 
        if (!sctp_addip_enable)
                return -EPERM;
@@ -6102,15 +6100,16 @@ static void __sctp_write_space(struct sctp_association *asoc)
                        wake_up_interruptible(&asoc->wait);
 
                if (sctp_writeable(sk)) {
-                       if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-                               wake_up_interruptible(sk_sleep(sk));
+                       wait_queue_head_t *wq = sk_sleep(sk);
+
+                       if (wq && waitqueue_active(wq))
+                               wake_up_interruptible(wq);
 
                        /* Note that we try to include the Async I/O support
                         * here by modeling from the current TCP/UDP code.
                         * We have not tested with it yet.
                         */
-                       if (sock->wq->fasync_list &&
-                           !(sk->sk_shutdown & SEND_SHUTDOWN))
+                       if (!(sk->sk_shutdown & SEND_SHUTDOWN))
                                sock_wake_async(sock,
                                                SOCK_WAKE_SPACE, POLL_OUT);
                }
index 747d5412c463ee1492f37485bb7bf4e4181e1b4e..f1e40cebc981ae7962bb9cd20ae84823b70d43cf 100644 (file)
@@ -344,7 +344,7 @@ __u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map,
 
        /* Refresh the gap ack information. */
        if (sctp_tsnmap_has_gap(map)) {
-               __u16 start, end;
+               __u16 start = 0, end = 0;
                sctp_tsnmap_iter_init(map, &iter);
                while (sctp_tsnmap_next_gap_ack(map, &iter,
                                                &start,
index c7f7e49609cbf4e1732a99b51686f73f03616f45..17678189d0540df2895b2e867f5537d8192b9cdb 100644 (file)
@@ -105,11 +105,8 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                        gfp_t gfp)
 {
        struct sk_buff_head temp;
-       sctp_data_chunk_t *hdr;
        struct sctp_ulpevent *event;
 
-       hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;
-
        /* Create an event from the incoming chunk. */
        event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
        if (!event)
@@ -743,11 +740,9 @@ static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_stream *in;
-       __u16 sid, csid;
-       __u16 ssn, cssn;
+       __u16 sid, csid, cssn;
 
        sid = event->stream;
-       ssn = event->ssn;
        in  = &ulpq->asoc->ssnmap->in;
 
        event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
index ac2219f90d5dd45bed66ab827b26a5f96cf7ca09..937d0fcf74bc67d010d6a7ed1cf47b3a7307adbb 100644 (file)
@@ -240,17 +240,19 @@ static struct kmem_cache *sock_inode_cachep __read_mostly;
 static struct inode *sock_alloc_inode(struct super_block *sb)
 {
        struct socket_alloc *ei;
+       struct socket_wq *wq;
 
        ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
-       ei->socket.wq = kmalloc(sizeof(struct socket_wq), GFP_KERNEL);
-       if (!ei->socket.wq) {
+       wq = kmalloc(sizeof(*wq), GFP_KERNEL);
+       if (!wq) {
                kmem_cache_free(sock_inode_cachep, ei);
                return NULL;
        }
-       init_waitqueue_head(&ei->socket.wq->wait);
-       ei->socket.wq->fasync_list = NULL;
+       init_waitqueue_head(&wq->wait);
+       wq->fasync_list = NULL;
+       RCU_INIT_POINTER(ei->socket.wq, wq);
 
        ei->socket.state = SS_UNCONNECTED;
        ei->socket.flags = 0;
@@ -273,9 +275,11 @@ static void wq_free_rcu(struct rcu_head *head)
 static void sock_destroy_inode(struct inode *inode)
 {
        struct socket_alloc *ei;
+       struct socket_wq *wq;
 
        ei = container_of(inode, struct socket_alloc, vfs_inode);
-       call_rcu(&ei->socket.wq->rcu, wq_free_rcu);
+       wq = rcu_dereference_protected(ei->socket.wq, 1);
+       call_rcu(&wq->rcu, wq_free_rcu);
        kmem_cache_free(sock_inode_cachep, ei);
 }
 
@@ -524,7 +528,7 @@ void sock_release(struct socket *sock)
                module_put(owner);
        }
 
-       if (sock->wq->fasync_list)
+       if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
                printk(KERN_ERR "sock_release: fasync list not empty!\n");
 
        percpu_sub(sockets_in_use, 1);
@@ -1108,15 +1112,16 @@ static int sock_fasync(int fd, struct file *filp, int on)
 {
        struct socket *sock = filp->private_data;
        struct sock *sk = sock->sk;
+       struct socket_wq *wq;
 
        if (sk == NULL)
                return -EINVAL;
 
        lock_sock(sk);
+       wq = rcu_dereference_protected(sock->wq, sock_owned_by_user(sk));
+       fasync_helper(fd, filp, on, &wq->fasync_list);
 
-       fasync_helper(fd, filp, on, &sock->wq->fasync_list);
-
-       if (!sock->wq->fasync_list)
+       if (!wq->fasync_list)
                sock_reset_flag(sk, SOCK_FASYNC);
        else
                sock_set_flag(sk, SOCK_FASYNC);
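
The wait-queue hunks above are part of turning socket->wq into an RCU-managed pointer: RCU_INIT_POINTER() publishes the freshly allocated structure, rcu_dereference_protected() marks reads that are already serialized by the stated condition, and call_rcu() (see sock_destroy_inode() above) defers the free past any concurrent readers. A generic sketch of that life cycle follows; struct foo, foo_publish() and foo_free_rcu() are hypothetical names, not the socket code.

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	int value;
	struct rcu_head rcu;
};

static struct foo __rcu *foo_ptr;
static DEFINE_SPINLOCK(foo_lock);	/* update-side lock */

static void foo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

/* Swap in a new object; readers use rcu_read_lock()/rcu_dereference(). */
static int foo_publish(int value)
{
	struct foo *new = kmalloc(sizeof(*new), GFP_KERNEL);
	struct foo *old;

	if (!new)
		return -ENOMEM;
	new->value = value;

	spin_lock(&foo_lock);
	old = rcu_dereference_protected(foo_ptr, lockdep_is_held(&foo_lock));
	rcu_assign_pointer(foo_ptr, new);
	spin_unlock(&foo_lock);

	if (old)
		call_rcu(&old->rcu, foo_free_rcu);	/* freed after a grace period */
	return 0;
}
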
@@ -2643,7 +2648,8 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
 
                old_fs = get_fs();
                set_fs(KERNEL_DS);
-               err = dev_ioctl(net, cmd, &kifr);
+               err = dev_ioctl(net, cmd,
+                               (struct ifreq __user __force *) &kifr);
                set_fs(old_fs);
 
                return err;
@@ -2752,7 +2758,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
 
        old_fs = get_fs();
        set_fs(KERNEL_DS);
-       err = dev_ioctl(net, cmd, (void __user *)&ifr);
+       err = dev_ioctl(net, cmd, (void  __user __force *)&ifr);
        set_fs(old_fs);
 
        if (cmd == SIOCGIFMAP && !err) {
@@ -2857,7 +2863,8 @@ static int routing_ioctl(struct net *net, struct socket *sock,
                ret |= __get_user(rtdev, &(ur4->rt_dev));
                if (rtdev) {
                        ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
-                       r4.rt_dev = devname; devname[15] = 0;
+                       r4.rt_dev = (char __user __force *)devname;
+                       devname[15] = 0;
                } else
                        r4.rt_dev = NULL;
 
index d802e941d365e9b53bf7c6be04bd1555bc0d1144..b7d435c3f19ec537275cb8b413d6c929c4dc8fa6 100644 (file)
@@ -420,6 +420,7 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
 static void svc_udp_data_ready(struct sock *sk, int count)
 {
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
+       wait_queue_head_t *wq = sk_sleep(sk);
 
        if (svsk) {
                dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
@@ -428,8 +429,8 @@ static void svc_udp_data_ready(struct sock *sk, int count)
                set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
                svc_xprt_enqueue(&svsk->sk_xprt);
        }
-       if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-               wake_up_interruptible(sk_sleep(sk));
+       if (wq && waitqueue_active(wq))
+               wake_up_interruptible(wq);
 }
 
 /*
@@ -438,6 +439,7 @@ static void svc_udp_data_ready(struct sock *sk, int count)
 static void svc_write_space(struct sock *sk)
 {
        struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);
+       wait_queue_head_t *wq = sk_sleep(sk);
 
        if (svsk) {
                dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
@@ -445,10 +447,10 @@ static void svc_write_space(struct sock *sk)
                svc_xprt_enqueue(&svsk->sk_xprt);
        }
 
-       if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) {
+       if (wq && waitqueue_active(wq)) {
                dprintk("RPC svc_write_space: someone sleeping on %p\n",
                       svsk);
-               wake_up_interruptible(sk_sleep(sk));
+               wake_up_interruptible(wq);
        }
 }
 
@@ -739,6 +741,7 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
 static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
 {
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
+       wait_queue_head_t *wq;
 
        dprintk("svc: socket %p TCP (listen) state change %d\n",
                sk, sk->sk_state);
@@ -761,8 +764,9 @@ static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
                        printk("svc: socket %p: no user data\n", sk);
        }
 
-       if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-               wake_up_interruptible_all(sk_sleep(sk));
+       wq = sk_sleep(sk);
+       if (wq && waitqueue_active(wq))
+               wake_up_interruptible_all(wq);
 }
 
 /*
@@ -771,6 +775,7 @@ static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
 static void svc_tcp_state_change(struct sock *sk)
 {
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
+       wait_queue_head_t *wq = sk_sleep(sk);
 
        dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
                sk, sk->sk_state, sk->sk_user_data);
@@ -781,13 +786,14 @@ static void svc_tcp_state_change(struct sock *sk)
                set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
                svc_xprt_enqueue(&svsk->sk_xprt);
        }
-       if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-               wake_up_interruptible_all(sk_sleep(sk));
+       if (wq && waitqueue_active(wq))
+               wake_up_interruptible_all(wq);
 }
 
 static void svc_tcp_data_ready(struct sock *sk, int count)
 {
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
+       wait_queue_head_t *wq = sk_sleep(sk);
 
        dprintk("svc: socket %p TCP data ready (svsk %p)\n",
                sk, sk->sk_user_data);
@@ -795,8 +801,8 @@ static void svc_tcp_data_ready(struct sock *sk, int count)
                set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
                svc_xprt_enqueue(&svsk->sk_xprt);
        }
-       if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-               wake_up_interruptible(sk_sleep(sk));
+       if (wq && waitqueue_active(wq))
+               wake_up_interruptible(wq);
 }
 
 /*
@@ -1531,6 +1537,7 @@ static void svc_sock_detach(struct svc_xprt *xprt)
 {
        struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
        struct sock *sk = svsk->sk_sk;
+       wait_queue_head_t *wq;
 
        dprintk("svc: svc_sock_detach(%p)\n", svsk);
 
@@ -1539,8 +1546,9 @@ static void svc_sock_detach(struct svc_xprt *xprt)
        sk->sk_data_ready = svsk->sk_odata;
        sk->sk_write_space = svsk->sk_owspace;
 
-       if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-               wake_up_interruptible(sk_sleep(sk));
+       wq = sk_sleep(sk);
+       if (wq && waitqueue_active(wq))
+               wake_up_interruptible(wq);
 }
 
 /*
index 70ab5ef487666b3e73b31eb5ba371e0814098afa..7dc1dc7151ea876a4baef3afa547ad1f098523c6 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2004-2006, Ericsson AB
  * Copyright (c) 2004, Intel Corporation.
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -61,8 +61,8 @@
  */
 
 struct bcbearer_pair {
-       struct bearer *primary;
-       struct bearer *secondary;
+       struct tipc_bearer *primary;
+       struct tipc_bearer *secondary;
 };
 
 /**
@@ -81,7 +81,7 @@ struct bcbearer_pair {
  */
 
 struct bcbearer {
-       struct bearer bearer;
+       struct tipc_bearer bearer;
        struct media media;
        struct bcbearer_pair bpairs[MAX_BEARERS];
        struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
@@ -93,6 +93,7 @@ struct bcbearer {
  * struct bclink - link used for broadcast messages
  * @link: (non-standard) broadcast link structure
  * @node: (non-standard) node structure representing b'cast link's peer node
+ * @retransmit_to: node that most recently requested a retransmit
  *
  * Handles sequence numbering, fragmentation, bundling, etc.
  */
@@ -100,6 +101,7 @@ struct bcbearer {
 struct bclink {
        struct link link;
        struct tipc_node node;
+       struct tipc_node *retransmit_to;
 };
 
 
@@ -183,6 +185,17 @@ static int bclink_ack_allowed(u32 n)
 }
 
 
+/**
+ * tipc_bclink_retransmit_to - get most recent node to request retransmission
+ *
+ * Called with bc_lock locked
+ */
+
+struct tipc_node *tipc_bclink_retransmit_to(void)
+{
+       return bclink->retransmit_to;
+}
+
 /**
  * bclink_retransmit_pkt - retransmit broadcast packets
  * @after: sequence number of last packet to *not* retransmit
@@ -285,6 +298,7 @@ static void bclink_send_nack(struct tipc_node *n_ptr)
                msg = buf_msg(buf);
                tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
                         INT_H_SIZE, n_ptr->addr);
+               msg_set_non_seq(msg, 1);
                msg_set_mc_netid(msg, tipc_net_id);
                msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
                msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
@@ -405,8 +419,6 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
        else
                bclink_set_last_sent();
 
-       if (bcl->out_queue_size > bcl->stats.max_queue_sz)
-               bcl->stats.max_queue_sz = bcl->out_queue_size;
        bcl->stats.queue_sz_counts++;
        bcl->stats.accu_queue_sz += bcl->out_queue_size;
 
@@ -444,10 +456,9 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
                        tipc_node_unlock(node);
                        spin_lock_bh(&bc_lock);
                        bcl->stats.recv_nacks++;
-                       bcl->owner->next = node;   /* remember requestor */
+                       bclink->retransmit_to = node;
                        bclink_retransmit_pkt(msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
-                       bcl->owner->next = NULL;
                        spin_unlock_bh(&bc_lock);
                } else {
                        tipc_bclink_peek_nack(msg_destnode(msg),
@@ -574,8 +585,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
        bcbearer->remains = tipc_bcast_nmap;
 
        for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
-               struct bearer *p = bcbearer->bpairs[bp_index].primary;
-               struct bearer *s = bcbearer->bpairs[bp_index].secondary;
+               struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
+               struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
 
                if (!p)
                        break;  /* no more bearers to try */
@@ -584,11 +595,11 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
                if (bcbearer->remains_new.count == bcbearer->remains.count)
                        continue;       /* bearer pair doesn't add anything */
 
-               if (p->publ.blocked ||
-                   p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
+               if (p->blocked ||
+                   p->media->send_msg(buf, p, &p->media->bcast_addr)) {
                        /* unable to send on primary bearer */
-                       if (!s || s->publ.blocked ||
-                           s->media->send_msg(buf, &s->publ,
+                       if (!s || s->blocked ||
+                           s->media->send_msg(buf, s,
                                               &s->media->bcast_addr)) {
                                /* unable to send on either bearer */
                                continue;
@@ -633,7 +644,7 @@ void tipc_bcbearer_sort(void)
        memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
 
        for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
-               struct bearer *b = &tipc_bearers[b_index];
+               struct tipc_bearer *b = &tipc_bearers[b_index];
 
                if (!b->active || !b->nodes.count)
                        continue;
@@ -682,12 +693,12 @@ void tipc_bcbearer_sort(void)
 
 void tipc_bcbearer_push(void)
 {
-       struct bearer *b_ptr;
+       struct tipc_bearer *b_ptr;
 
        spin_lock_bh(&bc_lock);
        b_ptr = &bcbearer->bearer;
-       if (b_ptr->publ.blocked) {
-               b_ptr->publ.blocked = 0;
+       if (b_ptr->blocked) {
+               b_ptr->blocked = 0;
                tipc_bearer_lock_push(b_ptr);
        }
        spin_unlock_bh(&bc_lock);
index 51f8c5326ce62a5029b24963bead9ef2f2e3f33d..500c97f1c8596b07386689e7448c4fb9bdc23a7a 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/bcast.h: Include file for TIPC broadcast code
  *
  * Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -90,6 +90,7 @@ void tipc_port_list_free(struct port_list *pl_ptr);
 
 int  tipc_bclink_init(void);
 void tipc_bclink_stop(void);
+struct tipc_node *tipc_bclink_retransmit_to(void);
 void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked);
 int  tipc_bclink_send_msg(struct sk_buff *buf);
 void tipc_bclink_recv_pkt(struct sk_buff *buf);
index 837b7a467885735b3308e612ae07f0c6b3cbafdb..f2839b0f6b65c4e23bce2e8dcd49b55444ac9c55 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/bearer.c: TIPC bearer code
  *
  * Copyright (c) 1996-2006, Ericsson AB
- * Copyright (c) 2004-2006, Wind River Systems
+ * Copyright (c) 2004-2006, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -44,7 +44,7 @@
 static struct media media_list[MAX_MEDIA];
 static u32 media_count;
 
-struct bearer tipc_bearers[MAX_BEARERS];
+struct tipc_bearer tipc_bearers[MAX_BEARERS];
 
 /**
  * media_name_valid - validate media name
@@ -278,13 +278,13 @@ static int bearer_name_validate(const char *name,
  * bearer_find - locates bearer object with matching bearer name
  */
 
-static struct bearer *bearer_find(const char *name)
+static struct tipc_bearer *bearer_find(const char *name)
 {
-       struct bearer *b_ptr;
+       struct tipc_bearer *b_ptr;
        u32 i;
 
        for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
-               if (b_ptr->active && (!strcmp(b_ptr->publ.name, name)))
+               if (b_ptr->active && (!strcmp(b_ptr->name, name)))
                        return b_ptr;
        }
        return NULL;
@@ -294,16 +294,16 @@ static struct bearer *bearer_find(const char *name)
  * tipc_bearer_find_interface - locates bearer object with matching interface name
  */
 
-struct bearer *tipc_bearer_find_interface(const char *if_name)
+struct tipc_bearer *tipc_bearer_find_interface(const char *if_name)
 {
-       struct bearer *b_ptr;
+       struct tipc_bearer *b_ptr;
        char *b_if_name;
        u32 i;
 
        for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
                if (!b_ptr->active)
                        continue;
-               b_if_name = strchr(b_ptr->publ.name, ':') + 1;
+               b_if_name = strchr(b_ptr->name, ':') + 1;
                if (!strcmp(b_if_name, if_name))
                        return b_ptr;
        }
@@ -318,7 +318,7 @@ struct sk_buff *tipc_bearer_get_names(void)
 {
        struct sk_buff *buf;
        struct media *m_ptr;
-       struct bearer *b_ptr;
+       struct tipc_bearer *b_ptr;
        int i, j;
 
        buf = tipc_cfg_reply_alloc(MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME));
@@ -331,8 +331,8 @@ struct sk_buff *tipc_bearer_get_names(void)
                        b_ptr = &tipc_bearers[j];
                        if (b_ptr->active && (b_ptr->media == m_ptr)) {
                                tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME,
-                                                   b_ptr->publ.name,
-                                                   strlen(b_ptr->publ.name) + 1);
+                                                   b_ptr->name,
+                                                   strlen(b_ptr->name) + 1);
                        }
                }
        }
@@ -340,14 +340,14 @@ struct sk_buff *tipc_bearer_get_names(void)
        return buf;
 }
 
-void tipc_bearer_add_dest(struct bearer *b_ptr, u32 dest)
+void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest)
 {
        tipc_nmap_add(&b_ptr->nodes, dest);
        tipc_disc_update_link_req(b_ptr->link_req);
        tipc_bcbearer_sort();
 }
 
-void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest)
+void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest)
 {
        tipc_nmap_remove(&b_ptr->nodes, dest);
        tipc_disc_update_link_req(b_ptr->link_req);
@@ -362,12 +362,12 @@ void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest)
  * bearer.lock must be taken before calling
  * Returns binary true(1) or false(0)
  */
-static int bearer_push(struct bearer *b_ptr)
+static int bearer_push(struct tipc_bearer *b_ptr)
 {
        u32 res = 0;
        struct link *ln, *tln;
 
-       if (b_ptr->publ.blocked)
+       if (b_ptr->blocked)
                return 0;
 
        while (!list_empty(&b_ptr->cong_links) && (res != PUSH_FAILED)) {
@@ -382,13 +382,13 @@ static int bearer_push(struct bearer *b_ptr)
        return list_empty(&b_ptr->cong_links);
 }
 
-void tipc_bearer_lock_push(struct bearer *b_ptr)
+void tipc_bearer_lock_push(struct tipc_bearer *b_ptr)
 {
        int res;
 
-       spin_lock_bh(&b_ptr->publ.lock);
+       spin_lock_bh(&b_ptr->lock);
        res = bearer_push(b_ptr);
-       spin_unlock_bh(&b_ptr->publ.lock);
+       spin_unlock_bh(&b_ptr->lock);
        if (res)
                tipc_bcbearer_push();
 }
@@ -398,16 +398,14 @@ void tipc_bearer_lock_push(struct bearer *b_ptr)
  * Interrupt enabling new requests after bearer congestion or blocking:
  * See bearer_send().
  */
-void tipc_continue(struct tipc_bearer *tb_ptr)
+void tipc_continue(struct tipc_bearer *b_ptr)
 {
-       struct bearer *b_ptr = (struct bearer *)tb_ptr;
-
-       spin_lock_bh(&b_ptr->publ.lock);
+       spin_lock_bh(&b_ptr->lock);
        b_ptr->continue_count++;
        if (!list_empty(&b_ptr->cong_links))
                tipc_k_signal((Handler)tipc_bearer_lock_push, (unsigned long)b_ptr);
-       b_ptr->publ.blocked = 0;
-       spin_unlock_bh(&b_ptr->publ.lock);
+       b_ptr->blocked = 0;
+       spin_unlock_bh(&b_ptr->lock);
 }
 
 /*
@@ -418,7 +416,7 @@ void tipc_continue(struct tipc_bearer *tb_ptr)
  * bearer.lock is busy
  */
 
-static void tipc_bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_ptr)
+static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr, struct link *l_ptr)
 {
        list_move_tail(&l_ptr->link_list, &b_ptr->cong_links);
 }
@@ -431,11 +429,11 @@ static void tipc_bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_p
  * bearer.lock is free
  */
 
-void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr)
+void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct link *l_ptr)
 {
-       spin_lock_bh(&b_ptr->publ.lock);
+       spin_lock_bh(&b_ptr->lock);
        tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
-       spin_unlock_bh(&b_ptr->publ.lock);
+       spin_unlock_bh(&b_ptr->lock);
 }
 
 
@@ -444,18 +442,18 @@ void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr)
  * and if there is, try to resolve it before returning.
  * 'tipc_net_lock' is read_locked when this function is called
  */
-int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
+int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, struct link *l_ptr)
 {
        int res = 1;
 
        if (list_empty(&b_ptr->cong_links))
                return 1;
-       spin_lock_bh(&b_ptr->publ.lock);
+       spin_lock_bh(&b_ptr->lock);
        if (!bearer_push(b_ptr)) {
                tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
                res = 0;
        }
-       spin_unlock_bh(&b_ptr->publ.lock);
+       spin_unlock_bh(&b_ptr->lock);
        return res;
 }
 
@@ -463,9 +461,9 @@ int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
  * tipc_bearer_congested - determines if bearer is currently congested
  */
 
-int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr)
+int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct link *l_ptr)
 {
-       if (unlikely(b_ptr->publ.blocked))
+       if (unlikely(b_ptr->blocked))
                return 1;
        if (likely(list_empty(&b_ptr->cong_links)))
                return 0;
@@ -478,7 +476,7 @@ int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr)
 
 int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
 {
-       struct bearer *b_ptr;
+       struct tipc_bearer *b_ptr;
        struct media *m_ptr;
        struct bearer_name b_name;
        char addr_string[16];
@@ -528,7 +526,7 @@ restart:
                        bearer_id = i;
                        continue;
                }
-               if (!strcmp(name, tipc_bearers[i].publ.name)) {
+               if (!strcmp(name, tipc_bearers[i].name)) {
                        warn("Bearer <%s> rejected, already enabled\n", name);
                        goto failed;
                }
@@ -551,8 +549,8 @@ restart:
        }
 
        b_ptr = &tipc_bearers[bearer_id];
-       strcpy(b_ptr->publ.name, name);
-       res = m_ptr->enable_bearer(&b_ptr->publ);
+       strcpy(b_ptr->name, name);
+       res = m_ptr->enable_bearer(b_ptr);
        if (res) {
                warn("Bearer <%s> rejected, enable failure (%d)\n", name, -res);
                goto failed;
@@ -568,9 +566,9 @@ restart:
        INIT_LIST_HEAD(&b_ptr->links);
        if (m_ptr->bcast) {
                b_ptr->link_req = tipc_disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
-                                                         bcast_scope, 2);
+                                                         bcast_scope);
        }
-       spin_lock_init(&b_ptr->publ.lock);
+       spin_lock_init(&b_ptr->lock);
        write_unlock_bh(&tipc_net_lock);
        info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
             name, tipc_addr_string_fill(addr_string, bcast_scope), priority);
@@ -587,7 +585,7 @@ failed:
 
 int tipc_block_bearer(const char *name)
 {
-       struct bearer *b_ptr = NULL;
+       struct tipc_bearer *b_ptr = NULL;
        struct link *l_ptr;
        struct link *temp_l_ptr;
 
@@ -600,8 +598,8 @@ int tipc_block_bearer(const char *name)
        }
 
        info("Blocking bearer <%s>\n", name);
-       spin_lock_bh(&b_ptr->publ.lock);
-       b_ptr->publ.blocked = 1;
+       spin_lock_bh(&b_ptr->lock);
+       b_ptr->blocked = 1;
        list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
                struct tipc_node *n_ptr = l_ptr->owner;
 
@@ -609,7 +607,7 @@ int tipc_block_bearer(const char *name)
                tipc_link_reset(l_ptr);
                spin_unlock_bh(&n_ptr->lock);
        }
-       spin_unlock_bh(&b_ptr->publ.lock);
+       spin_unlock_bh(&b_ptr->lock);
        read_unlock_bh(&tipc_net_lock);
        return 0;
 }
@@ -620,27 +618,27 @@ int tipc_block_bearer(const char *name)
  * Note: This routine assumes caller holds tipc_net_lock.
  */
 
-static void bearer_disable(struct bearer *b_ptr)
+static void bearer_disable(struct tipc_bearer *b_ptr)
 {
        struct link *l_ptr;
        struct link *temp_l_ptr;
 
-       info("Disabling bearer <%s>\n", b_ptr->publ.name);
+       info("Disabling bearer <%s>\n", b_ptr->name);
        tipc_disc_stop_link_req(b_ptr->link_req);
-       spin_lock_bh(&b_ptr->publ.lock);
+       spin_lock_bh(&b_ptr->lock);
        b_ptr->link_req = NULL;
-       b_ptr->publ.blocked = 1;
-       b_ptr->media->disable_bearer(&b_ptr->publ);
+       b_ptr->blocked = 1;
+       b_ptr->media->disable_bearer(b_ptr);
        list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
                tipc_link_delete(l_ptr);
        }
-       spin_unlock_bh(&b_ptr->publ.lock);
-       memset(b_ptr, 0, sizeof(struct bearer));
+       spin_unlock_bh(&b_ptr->lock);
+       memset(b_ptr, 0, sizeof(struct tipc_bearer));
 }
 
 int tipc_disable_bearer(const char *name)
 {
-       struct bearer *b_ptr;
+       struct tipc_bearer *b_ptr;
        int res;
 
        write_lock_bh(&tipc_net_lock);
index 85f451d5aacf565e603592d6aed78267054d19fa..255dea64f7bd02cb0fb70700cd71885e7262e057 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/bearer.h: Include file for TIPC bearer code
  *
  * Copyright (c) 1996-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -61,26 +61,7 @@ struct tipc_media_addr {
        } dev_addr;
 };
 
-/**
- * struct tipc_bearer - TIPC bearer info available to media code
- * @usr_handle: pointer to additional media-specific information about bearer
- * @mtu: max packet size bearer can support
- * @blocked: non-zero if bearer is blocked
- * @lock: spinlock for controlling access to bearer
- * @addr: media-specific address associated with bearer
- * @name: bearer name (format = media:interface)
- *
- * Note: TIPC initializes "name" and "lock" fields; media code is responsible
- * for initialization all other fields when a bearer is enabled.
- */
-struct tipc_bearer {
-       void *usr_handle;
-       u32 mtu;
-       int blocked;
-       spinlock_t lock;
-       struct tipc_media_addr addr;
-       char name[TIPC_MAX_BEARER_NAME];
-};
+struct tipc_bearer;
 
 /**
  * struct media - TIPC media information available to internal users
@@ -115,8 +96,13 @@ struct media {
 };
 
 /**
- * struct bearer - TIPC bearer information available to internal users
- * @publ: bearer information available to privileged users
+ * struct tipc_bearer - TIPC bearer structure
+ * @usr_handle: pointer to additional media-specific information about bearer
+ * @mtu: max packet size bearer can support
+ * @blocked: non-zero if bearer is blocked
+ * @lock: spinlock for controlling access to bearer
+ * @addr: media-specific address associated with bearer
+ * @name: bearer name (format = media:interface)
  * @media: ptr to media structure associated with bearer
  * @priority: default link priority for bearer
  * @detect_scope: network address mask used during automatic link creation
@@ -128,10 +114,18 @@ struct media {
  * @active: non-zero if bearer structure represents a bearer
  * @net_plane: network plane ('A' through 'H') currently associated with bearer
  * @nodes: indicates which nodes in cluster can be reached through bearer
+ *
+ * Note: media-specific code is responsible for initialization of the fields
+ * indicated below when a bearer is enabled; TIPC's generic bearer code takes
+ * care of initializing all other fields.
  */
-
-struct bearer {
-       struct tipc_bearer publ;
+struct tipc_bearer {
+       void *usr_handle;                       /* initialized by media */
+       u32 mtu;                                /* initialized by media */
+       int blocked;                            /* initialized by media */
+       struct tipc_media_addr addr;            /* initialized by media */
+       char name[TIPC_MAX_BEARER_NAME];
+       spinlock_t lock;
        struct media *media;
        u32 priority;
        u32 detect_scope;
@@ -152,7 +146,7 @@ struct bearer_name {
 
 struct link;
 
-extern struct bearer tipc_bearers[];
+extern struct tipc_bearer tipc_bearers[];
 
 /*
  * TIPC routines available to supported media types
@@ -186,14 +180,14 @@ void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
 struct sk_buff *tipc_media_get_names(void);
 
 struct sk_buff *tipc_bearer_get_names(void);
-void tipc_bearer_add_dest(struct bearer *b_ptr, u32 dest);
-void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest);
-void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr);
-struct bearer *tipc_bearer_find_interface(const char *if_name);
-int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr);
-int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr);
+void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest);
+void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest);
+void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct link *l_ptr);
+struct tipc_bearer *tipc_bearer_find_interface(const char *if_name);
+int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, struct link *l_ptr);
+int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct link *l_ptr);
 void tipc_bearer_stop(void);
-void tipc_bearer_lock_push(struct bearer *b_ptr);
+void tipc_bearer_lock_push(struct tipc_bearer *b_ptr);
 
 
 /**
@@ -214,10 +208,11 @@ void tipc_bearer_lock_push(struct bearer *b_ptr);
  * and let TIPC's link code deal with the undelivered message.
  */
 
-static inline int tipc_bearer_send(struct bearer *b_ptr, struct sk_buff *buf,
+static inline int tipc_bearer_send(struct tipc_bearer *b_ptr,
+                                  struct sk_buff *buf,
                                   struct tipc_media_addr *dest)
 {
-       return !b_ptr->media->send_msg(buf, &b_ptr->publ, dest);
+       return !b_ptr->media->send_msg(buf, b_ptr, dest);
 }
 
 #endif /* _TIPC_BEARER_H */
index e071579e08503c35aa22b6feed83fa8200fb08e2..2da1fc75ad6576a392bb6aba7bdf9fe7197fb6e3 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/core.c: TIPC module code
  *
  * Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005-2006, Wind River Systems
+ * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -57,7 +57,6 @@
 
 int tipc_mode = TIPC_NOT_RUNNING;
 int tipc_random;
-atomic_t tipc_user_count = ATOMIC_INIT(0);
 
 const char tipc_alphabet[] =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.";
index 997158546e257d912791d172359483a96fd38b69..37544d9f73e18e381ec4cef5503a561c0effff0f 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/core.h: Include file for TIPC global declarations
  *
  * Copyright (c) 2005-2006, Ericsson AB
- * Copyright (c) 2005-2007, Wind River Systems
+ * Copyright (c) 2005-2007, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -161,7 +161,6 @@ extern int tipc_remote_management;
 extern int tipc_mode;
 extern int tipc_random;
 extern const char tipc_alphabet[];
-extern atomic_t tipc_user_count;
 
 
 /*
index fa026bd91a68e88f7e8e1125dd232cf27e628316..09ce2318b89e5f0dab2cee89dbcbf89b15582122 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/discover.c
  *
  * Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005-2006, Wind River Systems
+ * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -57,7 +57,7 @@
  * @timer_intv: current interval between requests (in ms)
  */
 struct link_req {
-       struct bearer *bearer;
+       struct tipc_bearer *bearer;
        struct tipc_media_addr dest;
        struct sk_buff *buf;
        struct timer_list timer;
@@ -67,15 +67,13 @@ struct link_req {
 /**
  * tipc_disc_init_msg - initialize a link setup message
  * @type: message type (request or response)
- * @req_links: number of links associated with message
  * @dest_domain: network domain of node(s) which should respond to message
  * @b_ptr: ptr to bearer issuing message
  */
 
 static struct sk_buff *tipc_disc_init_msg(u32 type,
-                                         u32 req_links,
                                          u32 dest_domain,
-                                         struct bearer *b_ptr)
+                                         struct tipc_bearer *b_ptr)
 {
        struct sk_buff *buf = tipc_buf_acquire(DSC_H_SIZE);
        struct tipc_msg *msg;
@@ -84,10 +82,9 @@ static struct sk_buff *tipc_disc_init_msg(u32 type,
                msg = buf_msg(buf);
                tipc_msg_init(msg, LINK_CONFIG, type, DSC_H_SIZE, dest_domain);
                msg_set_non_seq(msg, 1);
-               msg_set_req_links(msg, req_links);
                msg_set_dest_domain(msg, dest_domain);
                msg_set_bc_netid(msg, tipc_net_id);
-               msg_set_media_addr(msg, &b_ptr->publ.addr);
+               msg_set_media_addr(msg, &b_ptr->addr);
        }
        return buf;
 }
@@ -99,7 +96,7 @@ static struct sk_buff *tipc_disc_init_msg(u32 type,
  * @media_addr: media address advertised by duplicated node
  */
 
-static void disc_dupl_alert(struct bearer *b_ptr, u32 node_addr,
+static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
                            struct tipc_media_addr *media_addr)
 {
        char node_addr_str[16];
@@ -111,7 +108,7 @@ static void disc_dupl_alert(struct bearer *b_ptr, u32 node_addr,
        tipc_media_addr_printf(&pb, media_addr);
        tipc_printbuf_validate(&pb);
        warn("Duplicate %s using %s seen on <%s>\n",
-            node_addr_str, media_addr_str, b_ptr->publ.name);
+            node_addr_str, media_addr_str, b_ptr->name);
 }
 
 /**
@@ -120,7 +117,7 @@ static void disc_dupl_alert(struct bearer *b_ptr, u32 node_addr,
  * @b_ptr: bearer that message arrived on
  */
 
-void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
+void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
 {
        struct link *link;
        struct tipc_media_addr media_addr;
@@ -140,7 +137,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
        if (!tipc_addr_node_valid(orig))
                return;
        if (orig == tipc_own_addr) {
-               if (memcmp(&media_addr, &b_ptr->publ.addr, sizeof(media_addr)))
+               if (memcmp(&media_addr, &b_ptr->addr, sizeof(media_addr)))
                        disc_dupl_alert(b_ptr, tipc_own_addr, &media_addr);
                return;
        }
@@ -191,9 +188,9 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
                spin_unlock_bh(&n_ptr->lock);
                if ((type == DSC_RESP_MSG) || link_fully_up)
                        return;
-               rbuf = tipc_disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
+               rbuf = tipc_disc_init_msg(DSC_RESP_MSG, orig, b_ptr);
                if (rbuf != NULL) {
-                       b_ptr->media->send_msg(rbuf, &b_ptr->publ, &media_addr);
+                       b_ptr->media->send_msg(rbuf, b_ptr, &media_addr);
                        buf_discard(rbuf);
                }
        }
@@ -249,9 +246,9 @@ void tipc_disc_update_link_req(struct link_req *req)
 
 static void disc_timeout(struct link_req *req)
 {
-       spin_lock_bh(&req->bearer->publ.lock);
+       spin_lock_bh(&req->bearer->lock);
 
-       req->bearer->media->send_msg(req->buf, &req->bearer->publ, &req->dest);
+       req->bearer->media->send_msg(req->buf, req->bearer, &req->dest);
 
        if ((req->timer_intv == TIPC_LINK_REQ_SLOW) ||
            (req->timer_intv == TIPC_LINK_REQ_FAST)) {
@@ -266,7 +263,7 @@ static void disc_timeout(struct link_req *req)
        }
        k_start_timer(&req->timer, req->timer_intv);
 
-       spin_unlock_bh(&req->bearer->publ.lock);
+       spin_unlock_bh(&req->bearer->lock);
 }
 
 /**
@@ -274,15 +271,13 @@ static void disc_timeout(struct link_req *req)
  * @b_ptr: ptr to bearer issuing requests
  * @dest: destination address for request messages
  * @dest_domain: network domain of node(s) which should respond to message
- * @req_links: max number of desired links
  *
  * Returns pointer to link request structure, or NULL if unable to create.
  */
 
-struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
+struct link_req *tipc_disc_init_link_req(struct tipc_bearer *b_ptr,
                                         const struct tipc_media_addr *dest,
-                                        u32 dest_domain,
-                                        u32 req_links)
+                                        u32 dest_domain)
 {
        struct link_req *req;
 
@@ -290,7 +285,7 @@ struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
        if (!req)
                return NULL;
 
-       req->buf = tipc_disc_init_msg(DSC_REQ_MSG, req_links, dest_domain, b_ptr);
+       req->buf = tipc_disc_init_msg(DSC_REQ_MSG, dest_domain, b_ptr);
        if (!req->buf) {
                kfree(req);
                return NULL;
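
In tipc_disc_recv_msg() above, when a discovery message carries our own node address, the bearer's media address is compared against the advertised one and a mismatch triggers disc_dupl_alert(). A compact user-space sketch of that check; the types and names are illustrative only:

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for struct tipc_media_addr. */
struct mock_media_addr { unsigned char value[8]; };

/*
 * Returns 1 if a discovery message that claims our own node address was
 * sent from a different media address, i.e. another node is (mis)using
 * our identity -- the condition the duplicate alert in the diff reports.
 */
static int is_duplicate_node(unsigned int msg_orig, unsigned int own_addr,
                             const struct mock_media_addr *msg_addr,
                             const struct mock_media_addr *bearer_addr)
{
        if (msg_orig != own_addr)
                return 0;       /* message from some other node: not checked here */
        return memcmp(msg_addr, bearer_addr, sizeof(*msg_addr)) != 0;
}

int main(void)
{
        struct mock_media_addr mine = { { 1, 2, 3 } };
        struct mock_media_addr other = { { 9, 9, 9 } };

        printf("self-echo: %d\n", is_duplicate_node(0x1001, 0x1001, &mine, &mine));
        printf("duplicate: %d\n", is_duplicate_node(0x1001, 0x1001, &other, &mine));
        return 0;
}
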
index d2c3cffb79fc8303d5ef2c5d3c7ad95783089a72..e48a167e47b25d8afea6569e7bb31a6ec1057c6e 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/discover.h
  *
  * Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 
 struct link_req;
 
-struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
+struct link_req *tipc_disc_init_link_req(struct tipc_bearer *b_ptr,
                                         const struct tipc_media_addr *dest,
-                                        u32 dest_domain,
-                                        u32 req_links);
+                                        u32 dest_domain);
 void tipc_disc_update_link_req(struct link_req *req);
 void tipc_disc_stop_link_req(struct link_req *req);
 
-void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr);
+void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr);
 
 #endif
index 18702f58d111879112b7066cdd27d3aea9d40ea5..89fbb6d6e956cacfe202a0755eaf277a7ba292e3 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/link.c: TIPC link code
  *
  * Copyright (c) 1996-2007, Ericsson AB
- * Copyright (c) 2004-2007, Wind River Systems
+ * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -90,7 +90,7 @@ static void link_handle_out_of_seq_msg(struct link *l_ptr,
 static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
 static int  link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
 static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
-static int  link_send_sections_long(struct port *sender,
+static int  link_send_sections_long(struct tipc_port *sender,
                                    struct iovec const *msg_sect,
                                    u32 num_sect, u32 destnode);
 static void link_check_defragm_bufs(struct link *l_ptr);
@@ -113,7 +113,7 @@ static void link_init_max_pkt(struct link *l_ptr)
 {
        u32 max_pkt;
 
-       max_pkt = (l_ptr->b_ptr->publ.mtu & ~3);
+       max_pkt = (l_ptr->b_ptr->mtu & ~3);
        if (max_pkt > MAX_MSG_SIZE)
                max_pkt = MAX_MSG_SIZE;
 
@@ -246,9 +246,6 @@ static void link_timeout(struct link *l_ptr)
        l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
        l_ptr->stats.queue_sz_counts++;
 
-       if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
-               l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
-
        if (l_ptr->first_out) {
                struct tipc_msg *msg = buf_msg(l_ptr->first_out);
                u32 length = msg_size(msg);
@@ -303,7 +300,7 @@ static void link_set_timer(struct link *l_ptr, u32 time)
  * Returns pointer to link.
  */
 
-struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
+struct link *tipc_link_create(struct tipc_bearer *b_ptr, const u32 peer,
                              const struct tipc_media_addr *media_addr)
 {
        struct link *l_ptr;
@@ -317,7 +314,7 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
        }
 
        l_ptr->addr = peer;
-       if_name = strchr(b_ptr->publ.name, ':') + 1;
+       if_name = strchr(b_ptr->name, ':') + 1;
        sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
                tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
                tipc_node(tipc_own_addr),
@@ -391,7 +388,9 @@ void tipc_link_delete(struct link *l_ptr)
 
 static void link_start(struct link *l_ptr)
 {
+       tipc_node_lock(l_ptr->owner);
        link_state_event(l_ptr, STARTING_EVT);
+       tipc_node_unlock(l_ptr->owner);
 }
 
 /**
@@ -406,7 +405,7 @@ static void link_start(struct link *l_ptr)
 
 static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
 
        spin_lock_bh(&tipc_port_list_lock);
        p_ptr = tipc_port_lock(origport);
@@ -415,7 +414,7 @@ static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
                        goto exit;
                if (!list_empty(&p_ptr->wait_list))
                        goto exit;
-               p_ptr->publ.congested = 1;
+               p_ptr->congested = 1;
                p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
                list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
                l_ptr->stats.link_congs++;
@@ -428,8 +427,8 @@ exit:
 
 void tipc_link_wakeup_ports(struct link *l_ptr, int all)
 {
-       struct port *p_ptr;
-       struct port *temp_p_ptr;
+       struct tipc_port *p_ptr;
+       struct tipc_port *temp_p_ptr;
        int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
 
        if (all)
@@ -445,11 +444,11 @@ void tipc_link_wakeup_ports(struct link *l_ptr, int all)
                if (win <= 0)
                        break;
                list_del_init(&p_ptr->wait_list);
-               spin_lock_bh(p_ptr->publ.lock);
-               p_ptr->publ.congested = 0;
-               p_ptr->wakeup(&p_ptr->publ);
+               spin_lock_bh(p_ptr->lock);
+               p_ptr->congested = 0;
+               p_ptr->wakeup(p_ptr);
                win -= p_ptr->waiting_pkts;
-               spin_unlock_bh(p_ptr->publ.lock);
+               spin_unlock_bh(p_ptr->lock);
        }
 
 exit:
@@ -824,7 +823,10 @@ static void link_add_to_outqueue(struct link *l_ptr,
                l_ptr->last_out = buf;
        } else
                l_ptr->first_out = l_ptr->last_out = buf;
+
        l_ptr->out_queue_size++;
+       if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
+               l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
 }
 
 /*
@@ -867,9 +869,6 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
 
        /* Packet can be queued or sent: */
 
-       if (queue_size > l_ptr->stats.max_queue_sz)
-               l_ptr->stats.max_queue_sz = queue_size;
-
        if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
                   !link_congested(l_ptr))) {
                link_add_to_outqueue(l_ptr, buf, msg);
@@ -1027,12 +1026,12 @@ int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
  * except for total message length.
  * Returns user data length or errno.
  */
-int tipc_link_send_sections_fast(struct port *sender,
+int tipc_link_send_sections_fast(struct tipc_port *sender,
                                 struct iovec const *msg_sect,
                                 const u32 num_sect,
                                 u32 destaddr)
 {
-       struct tipc_msg *hdr = &sender->publ.phdr;
+       struct tipc_msg *hdr = &sender->phdr;
        struct link *l_ptr;
        struct sk_buff *buf;
        struct tipc_node *node;
@@ -1045,7 +1044,7 @@ again:
         * (Must not hold any locks while building message.)
         */
 
-       res = tipc_msg_build(hdr, msg_sect, num_sect, sender->publ.max_pkt,
+       res = tipc_msg_build(hdr, msg_sect, num_sect, sender->max_pkt,
                        !sender->user_port, &buf);
 
        read_lock_bh(&tipc_net_lock);
@@ -1056,7 +1055,7 @@ again:
                if (likely(l_ptr)) {
                        if (likely(buf)) {
                                res = link_send_buf_fast(l_ptr, buf,
-                                                        &sender->publ.max_pkt);
+                                                        &sender->max_pkt);
                                if (unlikely(res < 0))
                                        buf_discard(buf);
 exit:
@@ -1075,7 +1074,7 @@ exit:
                        if (link_congested(l_ptr) ||
                            !list_empty(&l_ptr->b_ptr->cong_links)) {
                                res = link_schedule_port(l_ptr,
-                                                        sender->publ.ref, res);
+                                                        sender->ref, res);
                                goto exit;
                        }
 
@@ -1084,12 +1083,12 @@ exit:
                         * then re-try fast path or fragment the message
                         */
 
-                       sender->publ.max_pkt = l_ptr->max_pkt;
+                       sender->max_pkt = l_ptr->max_pkt;
                        tipc_node_unlock(node);
                        read_unlock_bh(&tipc_net_lock);
 
 
-                       if ((msg_hdr_sz(hdr) + res) <= sender->publ.max_pkt)
+                       if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
                                goto again;
 
                        return link_send_sections_long(sender, msg_sect,
@@ -1123,14 +1122,14 @@ exit:
  *
  * Returns user data length or errno.
  */
-static int link_send_sections_long(struct port *sender,
+static int link_send_sections_long(struct tipc_port *sender,
                                   struct iovec const *msg_sect,
                                   u32 num_sect,
                                   u32 destaddr)
 {
        struct link *l_ptr;
        struct tipc_node *node;
-       struct tipc_msg *hdr = &sender->publ.phdr;
+       struct tipc_msg *hdr = &sender->phdr;
        u32 dsz = msg_data_sz(hdr);
        u32 max_pkt, fragm_sz, rest;
        struct tipc_msg fragm_hdr;
@@ -1142,7 +1141,7 @@ static int link_send_sections_long(struct port *sender,
 
 again:
        fragm_no = 1;
-       max_pkt = sender->publ.max_pkt - INT_H_SIZE;
+       max_pkt = sender->max_pkt - INT_H_SIZE;
                /* leave room for tunnel header in case of link changeover */
        fragm_sz = max_pkt - INT_H_SIZE;
                /* leave room for fragmentation header in each fragment */
@@ -1157,7 +1156,7 @@ again:
 
        tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
                 INT_H_SIZE, msg_destnode(hdr));
-       msg_set_link_selector(&fragm_hdr, sender->publ.ref);
+       msg_set_link_selector(&fragm_hdr, sender->ref);
        msg_set_size(&fragm_hdr, max_pkt);
        msg_set_fragm_no(&fragm_hdr, 1);
 
@@ -1238,13 +1237,13 @@ error:
        node = tipc_node_find(destaddr);
        if (likely(node)) {
                tipc_node_lock(node);
-               l_ptr = node->active_links[sender->publ.ref & 1];
+               l_ptr = node->active_links[sender->ref & 1];
                if (!l_ptr) {
                        tipc_node_unlock(node);
                        goto reject;
                }
                if (l_ptr->max_pkt < max_pkt) {
-                       sender->publ.max_pkt = l_ptr->max_pkt;
+                       sender->max_pkt = l_ptr->max_pkt;
                        tipc_node_unlock(node);
                        for (; buf_chain; buf_chain = buf) {
                                buf = buf_chain->next;
@@ -1441,7 +1440,7 @@ static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
                info("Outstanding acks: %lu\n",
                     (unsigned long) TIPC_SKB_CB(buf)->handle);
 
-               n_ptr = l_ptr->owner->next;
+               n_ptr = tipc_bclink_retransmit_to();
                tipc_node_lock(n_ptr);
 
                tipc_addr_string_fill(addr_string, n_ptr->addr);
@@ -1595,11 +1594,10 @@ static int link_recv_buf_validate(struct sk_buff *buf)
  * structure (i.e. cannot be NULL), but bearer can be inactive.
  */
 
-void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
+void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
 {
        read_lock_bh(&tipc_net_lock);
        while (head) {
-               struct bearer *b_ptr = (struct bearer *)tb_ptr;
                struct tipc_node *n_ptr;
                struct link *l_ptr;
                struct sk_buff *crs;
@@ -1950,6 +1948,7 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
                msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
                msg_set_seq_gap(msg, 0);
                msg_set_next_sent(msg, 1);
+               msg_set_probe(msg, 0);
                msg_set_link_tolerance(msg, l_ptr->tolerance);
                msg_set_linkprio(msg, l_ptr->priority);
                msg_set_max_pkt(msg, l_ptr->max_pkt_target);
@@ -2618,6 +2617,9 @@ static void link_check_defragm_bufs(struct link *l_ptr)
 
 static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
 {
+       if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
+               return;
+
        l_ptr->tolerance = tolerance;
        l_ptr->continuity_interval =
                ((tolerance / 4) > 500) ? 500 : tolerance / 4;
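
The new bounds check above makes link_set_supervision_props() ignore tolerance values outside the permitted range instead of applying them blindly; the continuity interval is then derived as tolerance/4, capped at 500 ms. A small sketch of that derivation; the MIN/MAX limits below are placeholders, since their real values are not shown in this hunk:

#include <stdio.h>

/* Placeholder limits -- the real TIPC_MIN/MAX_LINK_TOL values are not shown here. */
#define MOCK_MIN_LINK_TOL  50
#define MOCK_MAX_LINK_TOL  30000

struct mock_link {
        unsigned int tolerance;                 /* ms without traffic before link is suspect */
        unsigned int continuity_interval;       /* ms between supervision probes */
};

static void mock_set_supervision_props(struct mock_link *l, unsigned int tolerance)
{
        if (tolerance < MOCK_MIN_LINK_TOL || tolerance > MOCK_MAX_LINK_TOL)
                return;                         /* out-of-range request is ignored */

        l->tolerance = tolerance;
        l->continuity_interval = (tolerance / 4 > 500) ? 500 : tolerance / 4;
}

int main(void)
{
        struct mock_link l = { 0, 0 };

        mock_set_supervision_props(&l, 1500);   /* probes every 375 ms */
        printf("tol=%u interval=%u\n", l.tolerance, l.continuity_interval);
        mock_set_supervision_props(&l, 4000);   /* interval capped at 500 ms */
        printf("tol=%u interval=%u\n", l.tolerance, l.continuity_interval);
        mock_set_supervision_props(&l, 1);      /* rejected, values unchanged */
        printf("tol=%u interval=%u\n", l.tolerance, l.continuity_interval);
        return 0;
}
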
@@ -2658,7 +2660,7 @@ void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
 static struct link *link_find_link(const char *name, struct tipc_node **node)
 {
        struct link_name link_name_parts;
-       struct bearer *b_ptr;
+       struct tipc_bearer *b_ptr;
        struct link *l_ptr;
 
        if (!link_name_validate(name, &link_name_parts))
@@ -2961,7 +2963,7 @@ static void link_print(struct link *l_ptr, const char *str)
 
        tipc_printf(buf, str);
        tipc_printf(buf, "Link %x<%s>:",
-                   l_ptr->addr, l_ptr->b_ptr->publ.name);
+                   l_ptr->addr, l_ptr->b_ptr->name);
 
 #ifdef CONFIG_TIPC_DEBUG
        if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
@@ -2981,9 +2983,9 @@ static void link_print(struct link *l_ptr, const char *str)
                     != (l_ptr->out_queue_size - 1)) ||
                    (l_ptr->last_out->next != NULL)) {
                        tipc_printf(buf, "\nSend queue inconsistency\n");
-                       tipc_printf(buf, "first_out= %x ", l_ptr->first_out);
-                       tipc_printf(buf, "next_out= %x ", l_ptr->next_out);
-                       tipc_printf(buf, "last_out= %x ", l_ptr->last_out);
+                       tipc_printf(buf, "first_out= %p ", l_ptr->first_out);
+                       tipc_printf(buf, "next_out= %p ", l_ptr->next_out);
+                       tipc_printf(buf, "last_out= %p ", l_ptr->last_out);
                }
        } else
                tipc_printf(buf, "[]");
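
Earlier in this file the send-queue high-water mark (stats.max_queue_sz) moves out of tipc_link_send_buf() and into link_add_to_outqueue(), so every path that enqueues a buffer updates the statistic. A minimal sketch of that enqueue-time bookkeeping, simplified to the counters only; the real code's buffer-list handling is omitted:

#include <stdio.h>

/* Simplified outbound queue: only the counters that matter for the statistic. */
struct mock_outqueue {
        unsigned int size;              /* current number of queued buffers */
        unsigned int max_queue_sz;      /* high-water mark, updated on enqueue */
};

static void mock_add_to_outqueue(struct mock_outqueue *q)
{
        q->size++;
        if (q->size > q->max_queue_sz)
                q->max_queue_sz = q->size;
}

static void mock_remove_from_outqueue(struct mock_outqueue *q)
{
        if (q->size)
                q->size--;
}

int main(void)
{
        struct mock_outqueue q = { 0, 0 };
        int i;

        for (i = 0; i < 5; i++)
                mock_add_to_outqueue(&q);
        mock_remove_from_outqueue(&q);
        mock_add_to_outqueue(&q);

        printf("size=%u max=%u\n", q.size, q.max_queue_sz);     /* size=5 max=5 */
        return 0;
}
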
index 70967e637027925713571b9718c1c05a8bcedb0d..a7794e7ede291b7143f63b7707ad7a02982f73c0 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/link.h: Include file for TIPC link code
  *
  * Copyright (c) 1995-2006, Ericsson AB
- * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2004-2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -122,7 +122,7 @@ struct link {
        u32 checkpoint;
        u32 peer_session;
        u32 peer_bearer_id;
-       struct bearer *b_ptr;
+       struct tipc_bearer *b_ptr;
        u32 tolerance;
        u32 continuity_interval;
        u32 abort_limit;
@@ -196,24 +196,18 @@ struct link {
                u32 bearer_congs;
                u32 deferred_recv;
                u32 duplicates;
-
-               /* for statistical profiling of send queue size */
-
-               u32 max_queue_sz;
-               u32 accu_queue_sz;
-               u32 queue_sz_counts;
-
-               /* for statistical profiling of message lengths */
-
-               u32 msg_length_counts;
-               u32 msg_lengths_total;
-               u32 msg_length_profile[7];
+               u32 max_queue_sz;       /* send queue size high water mark */
+               u32 accu_queue_sz;      /* used for send queue size profiling */
+               u32 queue_sz_counts;    /* used for send queue size profiling */
+               u32 msg_length_counts;  /* used for message length profiling */
+               u32 msg_lengths_total;  /* used for message length profiling */
+               u32 msg_length_profile[7]; /* used for msg. length profiling */
        } stats;
 };
 
-struct port;
+struct tipc_port;
 
-struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
+struct link *tipc_link_create(struct tipc_bearer *b_ptr, const u32 peer,
                              const struct tipc_media_addr *media_addr);
 void tipc_link_delete(struct link *l_ptr);
 void tipc_link_changeover(struct link *l_ptr);
@@ -230,7 +224,7 @@ void tipc_link_reset(struct link *l_ptr);
 int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
 int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf);
 u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
-int tipc_link_send_sections_fast(struct port *sender,
+int tipc_link_send_sections_fast(struct tipc_port *sender,
                                 struct iovec const *msg_sect,
                                 const u32 num_sect,
                                 u32 destnode);
index bb6180c4fcbb9b5121e575492a82c1c66c9b2207..0787e12423b8a345cf2a4a925db2f34be6840ba7 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/msg.c: TIPC message header routines
  *
  * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -381,20 +381,15 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
                        tipc_printf(buf, ":OPRT(%u):", msg_origport(msg));
                        tipc_printf(buf, ":DPRT(%u):", msg_destport(msg));
                }
-               if (msg_routed(msg) && !msg_non_seq(msg))
-                       tipc_printf(buf, ":TSEQN(%u)", msg_transp_seqno(msg));
        }
        if (msg_user(msg) == NAME_DISTRIBUTOR) {
                tipc_printf(buf, ":ONOD(%x):", msg_orignode(msg));
                tipc_printf(buf, ":DNOD(%x):", msg_destnode(msg));
-               if (msg_routed(msg))
-                       tipc_printf(buf, ":CSEQN(%u)", msg_transp_seqno(msg));
        }
 
        if (msg_user(msg) ==  LINK_CONFIG) {
                u32 *raw = (u32 *)msg;
                struct tipc_media_addr *orig = (struct tipc_media_addr *)&raw[5];
-               tipc_printf(buf, ":REQL(%u):", msg_req_links(msg));
                tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg));
                tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg));
                tipc_media_addr_printf(buf, orig);
index 92c4c4fd7b3f508c611618e0b4d02929775e6e36..9d643a1b7d227180aa3213001db70a4bc8f2e838 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/msg.h: Include file for TIPC message header routines
  *
  * Copyright (c) 2000-2007, Ericsson AB
- * Copyright (c) 2005-2008, Wind River Systems
+ * Copyright (c) 2005-2008, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -438,11 +438,6 @@ static inline void msg_set_nametype(struct tipc_msg *m, u32 n)
        msg_set_word(m, 8, n);
 }
 
-static inline u32 msg_transp_seqno(struct tipc_msg *m)
-{
-       return msg_word(m, 8);
-}
-
 static inline void msg_set_timestamp(struct tipc_msg *m, u32 n)
 {
        msg_set_word(m, 8, n);
@@ -453,11 +448,6 @@ static inline u32 msg_timestamp(struct tipc_msg *m)
        return msg_word(m, 8);
 }
 
-static inline void msg_set_transp_seqno(struct tipc_msg *m, u32 n)
-{
-       msg_set_word(m, 8, n);
-}
-
 static inline u32 msg_nameinst(struct tipc_msg *m)
 {
        return msg_word(m, 9);
@@ -577,16 +567,6 @@ static inline void msg_set_seq_gap(struct tipc_msg *m, u32 n)
        msg_set_bits(m, 1, 16, 0x1fff, n);
 }
 
-static inline u32 msg_req_links(struct tipc_msg *m)
-{
-       return msg_bits(m, 1, 16, 0xfff);
-}
-
-static inline void msg_set_req_links(struct tipc_msg *m, u32 n)
-{
-       msg_set_bits(m, 1, 16, 0xfff, n);
-}
-
 
 /*
  * Word 2
index 3af53e327f497e7d66d50c39fff00a19715a86e8..e4dba1dfb6ea8ec0d54164f1767417213bc443b9 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/node.c: TIPC node management routines
  *
  * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005-2006, Wind River Systems
+ * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -238,7 +238,7 @@ struct tipc_node *tipc_node_attach_link(struct link *l_ptr)
                        return n_ptr;
                }
                err("Attempt to establish second link on <%s> to %s\n",
-                   l_ptr->b_ptr->publ.name,
+                   l_ptr->b_ptr->name,
                    tipc_addr_string_fill(addr_string, l_ptr->addr));
        }
        return NULL;
index 067bab2a0b982864d70cf365d6ac3fbfe522568f..6ff78f9c7d65a83623e870aa16522c3ca6219c3c 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/port.c: TIPC port code
  *
  * Copyright (c) 1992-2007, Ericsson AB
- * Copyright (c) 2004-2008, Wind River Systems
+ * Copyright (c) 2004-2008, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -54,33 +54,19 @@ static DEFINE_SPINLOCK(queue_lock);
 
 static LIST_HEAD(ports);
 static void port_handle_node_down(unsigned long ref);
-static struct sk_buff *port_build_self_abort_msg(struct port *, u32 err);
-static struct sk_buff *port_build_peer_abort_msg(struct port *, u32 err);
+static struct sk_buff *port_build_self_abort_msg(struct tipc_port *, u32 err);
+static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *, u32 err);
 static void port_timeout(unsigned long ref);
 
 
-static u32 port_peernode(struct port *p_ptr)
+static u32 port_peernode(struct tipc_port *p_ptr)
 {
-       return msg_destnode(&p_ptr->publ.phdr);
+       return msg_destnode(&p_ptr->phdr);
 }
 
-static u32 port_peerport(struct port *p_ptr)
+static u32 port_peerport(struct tipc_port *p_ptr)
 {
-       return msg_destport(&p_ptr->publ.phdr);
-}
-
-static u32 port_out_seqno(struct port *p_ptr)
-{
-       return msg_transp_seqno(&p_ptr->publ.phdr);
-}
-
-static void port_incr_out_seqno(struct port *p_ptr)
-{
-       struct tipc_msg *m = &p_ptr->publ.phdr;
-
-       if (likely(!msg_routed(m)))
-               return;
-       msg_set_transp_seqno(m, (msg_transp_seqno(m) + 1));
+       return msg_destport(&p_ptr->phdr);
 }
 
 /**
@@ -94,7 +80,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
        struct sk_buff *buf;
        struct sk_buff *ibuf = NULL;
        struct port_list dports = {0, NULL, };
-       struct port *oport = tipc_port_deref(ref);
+       struct tipc_port *oport = tipc_port_deref(ref);
        int ext_targets;
        int res;
 
@@ -103,7 +89,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
 
        /* Create multicast message */
 
-       hdr = &oport->publ.phdr;
+       hdr = &oport->phdr;
        msg_set_type(hdr, TIPC_MCAST_MSG);
        msg_set_nametype(hdr, seq->type);
        msg_set_namelower(hdr, seq->lower);
@@ -211,7 +197,7 @@ struct tipc_port *tipc_createport_raw(void *usr_handle,
                        void (*wakeup)(struct tipc_port *),
                        const u32 importance)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct tipc_msg *msg;
        u32 ref;
 
@@ -220,21 +206,19 @@ struct tipc_port *tipc_createport_raw(void *usr_handle,
                warn("Port creation failed, no memory\n");
                return NULL;
        }
-       ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock);
+       ref = tipc_ref_acquire(p_ptr, &p_ptr->lock);
        if (!ref) {
                warn("Port creation failed, reference table exhausted\n");
                kfree(p_ptr);
                return NULL;
        }
 
-       p_ptr->publ.usr_handle = usr_handle;
-       p_ptr->publ.max_pkt = MAX_PKT_DEFAULT;
-       p_ptr->publ.ref = ref;
-       msg = &p_ptr->publ.phdr;
+       p_ptr->usr_handle = usr_handle;
+       p_ptr->max_pkt = MAX_PKT_DEFAULT;
+       p_ptr->ref = ref;
+       msg = &p_ptr->phdr;
        tipc_msg_init(msg, importance, TIPC_NAMED_MSG, LONG_H_SIZE, 0);
        msg_set_origport(msg, ref);
-       p_ptr->last_in_seqno = 41;
-       p_ptr->sent = 1;
        INIT_LIST_HEAD(&p_ptr->wait_list);
        INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
        p_ptr->dispatcher = dispatcher;
@@ -246,12 +230,12 @@ struct tipc_port *tipc_createport_raw(void *usr_handle,
        INIT_LIST_HEAD(&p_ptr->port_list);
        list_add_tail(&p_ptr->port_list, &ports);
        spin_unlock_bh(&tipc_port_list_lock);
-       return &(p_ptr->publ);
+       return p_ptr;
 }
 
 int tipc_deleteport(u32 ref)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct sk_buff *buf = NULL;
 
        tipc_withdraw(ref, 0, NULL);
@@ -263,7 +247,7 @@ int tipc_deleteport(u32 ref)
        tipc_port_unlock(p_ptr);
 
        k_cancel_timer(&p_ptr->timer);
-       if (p_ptr->publ.connected) {
+       if (p_ptr->connected) {
                buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
                tipc_nodesub_unsubscribe(&p_ptr->subscription);
        }
@@ -279,14 +263,14 @@ int tipc_deleteport(u32 ref)
        return 0;
 }
 
-static int port_unreliable(struct port *p_ptr)
+static int port_unreliable(struct tipc_port *p_ptr)
 {
-       return msg_src_droppable(&p_ptr->publ.phdr);
+       return msg_src_droppable(&p_ptr->phdr);
 }
 
 int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
 
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
@@ -298,24 +282,24 @@ int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
 
 int tipc_set_portunreliable(u32 ref, unsigned int isunreliable)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
 
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
-       msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0));
+       msg_set_src_droppable(&p_ptr->phdr, (isunreliable != 0));
        tipc_port_unlock(p_ptr);
        return 0;
 }
 
-static int port_unreturnable(struct port *p_ptr)
+static int port_unreturnable(struct tipc_port *p_ptr)
 {
-       return msg_dest_droppable(&p_ptr->publ.phdr);
+       return msg_dest_droppable(&p_ptr->phdr);
 }
 
 int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
 
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
@@ -327,12 +311,12 @@ int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
 
 int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
 
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
-       msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0));
+       msg_set_dest_droppable(&p_ptr->phdr, (isunrejectable != 0));
        tipc_port_unlock(p_ptr);
        return 0;
 }
@@ -345,7 +329,7 @@ int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
 static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
                                            u32 origport, u32 orignode,
                                            u32 usr, u32 type, u32 err,
-                                           u32 seqno, u32 ack)
+                                           u32 ack)
 {
        struct sk_buff *buf;
        struct tipc_msg *msg;
@@ -358,7 +342,6 @@ static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
                msg_set_destport(msg, destport);
                msg_set_origport(msg, origport);
                msg_set_orignode(msg, orignode);
-               msg_set_transp_seqno(msg, seqno);
                msg_set_msgcnt(msg, ack);
        }
        return buf;
@@ -413,10 +396,10 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
        /* send self-abort message when rejecting on a connected port */
        if (msg_connected(msg)) {
                struct sk_buff *abuf = NULL;
-               struct port *p_ptr = tipc_port_lock(msg_destport(msg));
+               struct tipc_port *p_ptr = tipc_port_lock(msg_destport(msg));
 
                if (p_ptr) {
-                       if (p_ptr->publ.connected)
+                       if (p_ptr->connected)
                                abuf = port_build_self_abort_msg(p_ptr, err);
                        tipc_port_unlock(p_ptr);
                }
@@ -429,7 +412,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
        return data_sz;
 }
 
-int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
+int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
                              struct iovec const *msg_sect, u32 num_sect,
                              int err)
 {
@@ -446,13 +429,13 @@ int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
 
 static void port_timeout(unsigned long ref)
 {
-       struct port *p_ptr = tipc_port_lock(ref);
+       struct tipc_port *p_ptr = tipc_port_lock(ref);
        struct sk_buff *buf = NULL;
 
        if (!p_ptr)
                return;
 
-       if (!p_ptr->publ.connected) {
+       if (!p_ptr->connected) {
                tipc_port_unlock(p_ptr);
                return;
        }
@@ -463,14 +446,12 @@ static void port_timeout(unsigned long ref)
        } else {
                buf = port_build_proto_msg(port_peerport(p_ptr),
                                           port_peernode(p_ptr),
-                                          p_ptr->publ.ref,
+                                          p_ptr->ref,
                                           tipc_own_addr,
                                           CONN_MANAGER,
                                           CONN_PROBE,
                                           TIPC_OK,
-                                          port_out_seqno(p_ptr),
                                           0);
-               port_incr_out_seqno(p_ptr);
                p_ptr->probing_state = PROBING;
                k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
        }
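
The port_timeout() hunk above is the connection keep-alive: if the port is still in PROBING state when the timer fires, the peer never answered the previous probe and the connection is aborted; otherwise a CONN_PROBE is sent, the state returns to PROBING, and the timer is restarted. A reply, handled later in tipc_port_recv_proto_msg(), sets the state back to CONFIRMED. A tiny state-machine sketch of that cycle, with illustrative names only:

#include <stdio.h>

enum mock_probe_state { MOCK_CONFIRMED, MOCK_PROBING };

struct mock_port {
        enum mock_probe_state probing_state;
        int aborted;
};

/* What the timer callback decides when it fires. */
static void mock_port_timeout(struct mock_port *p)
{
        if (p->probing_state == MOCK_PROBING) {
                p->aborted = 1;                 /* peer never answered: give up */
                return;
        }
        /* peer answered last time: probe again and wait for the next reply */
        p->probing_state = MOCK_PROBING;
}

/* What a CONN_PROBE_REPLY (or any traffic from the peer) does. */
static void mock_port_peer_reply(struct mock_port *p)
{
        p->probing_state = MOCK_CONFIRMED;
}

int main(void)
{
        struct mock_port p = { MOCK_CONFIRMED, 0 };

        mock_port_timeout(&p);          /* sends probe, now PROBING */
        mock_port_peer_reply(&p);       /* reply arrives, back to CONFIRMED */
        mock_port_timeout(&p);          /* probe again */
        mock_port_timeout(&p);          /* no reply this time: abort */
        printf("aborted=%d\n", p.aborted);
        return 0;
}
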
@@ -481,7 +462,7 @@ static void port_timeout(unsigned long ref)
 
 static void port_handle_node_down(unsigned long ref)
 {
-       struct port *p_ptr = tipc_port_lock(ref);
+       struct tipc_port *p_ptr = tipc_port_lock(ref);
        struct sk_buff *buf = NULL;
 
        if (!p_ptr)
@@ -492,73 +473,71 @@ static void port_handle_node_down(unsigned long ref)
 }
 
 
-static struct sk_buff *port_build_self_abort_msg(struct port *p_ptr, u32 err)
+static struct sk_buff *port_build_self_abort_msg(struct tipc_port *p_ptr, u32 err)
 {
-       u32 imp = msg_importance(&p_ptr->publ.phdr);
+       u32 imp = msg_importance(&p_ptr->phdr);
 
-       if (!p_ptr->publ.connected)
+       if (!p_ptr->connected)
                return NULL;
        if (imp < TIPC_CRITICAL_IMPORTANCE)
                imp++;
-       return port_build_proto_msg(p_ptr->publ.ref,
+       return port_build_proto_msg(p_ptr->ref,
                                    tipc_own_addr,
                                    port_peerport(p_ptr),
                                    port_peernode(p_ptr),
                                    imp,
                                    TIPC_CONN_MSG,
                                    err,
-                                   p_ptr->last_in_seqno + 1,
                                    0);
 }
 
 
-static struct sk_buff *port_build_peer_abort_msg(struct port *p_ptr, u32 err)
+static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *p_ptr, u32 err)
 {
-       u32 imp = msg_importance(&p_ptr->publ.phdr);
+       u32 imp = msg_importance(&p_ptr->phdr);
 
-       if (!p_ptr->publ.connected)
+       if (!p_ptr->connected)
                return NULL;
        if (imp < TIPC_CRITICAL_IMPORTANCE)
                imp++;
        return port_build_proto_msg(port_peerport(p_ptr),
                                    port_peernode(p_ptr),
-                                   p_ptr->publ.ref,
+                                   p_ptr->ref,
                                    tipc_own_addr,
                                    imp,
                                    TIPC_CONN_MSG,
                                    err,
-                                   port_out_seqno(p_ptr),
                                    0);
 }
 
 void tipc_port_recv_proto_msg(struct sk_buff *buf)
 {
        struct tipc_msg *msg = buf_msg(buf);
-       struct port *p_ptr = tipc_port_lock(msg_destport(msg));
+       struct tipc_port *p_ptr = tipc_port_lock(msg_destport(msg));
        u32 err = TIPC_OK;
        struct sk_buff *r_buf = NULL;
        struct sk_buff *abort_buf = NULL;
 
        if (!p_ptr) {
                err = TIPC_ERR_NO_PORT;
-       } else if (p_ptr->publ.connected) {
+       } else if (p_ptr->connected) {
                if ((port_peernode(p_ptr) != msg_orignode(msg)) ||
                    (port_peerport(p_ptr) != msg_origport(msg))) {
                        err = TIPC_ERR_NO_PORT;
                } else if (msg_type(msg) == CONN_ACK) {
                        int wakeup = tipc_port_congested(p_ptr) &&
-                                    p_ptr->publ.congested &&
+                                    p_ptr->congested &&
                                     p_ptr->wakeup;
                        p_ptr->acked += msg_msgcnt(msg);
                        if (tipc_port_congested(p_ptr))
                                goto exit;
-                       p_ptr->publ.congested = 0;
+                       p_ptr->congested = 0;
                        if (!wakeup)
                                goto exit;
-                       p_ptr->wakeup(&p_ptr->publ);
+                       p_ptr->wakeup(p_ptr);
                        goto exit;
                }
-       } else if (p_ptr->publ.published) {
+       } else if (p_ptr->published) {
                err = TIPC_ERR_NO_PORT;
        }
        if (err) {
@@ -569,7 +548,6 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
                                             TIPC_HIGH_IMPORTANCE,
                                             TIPC_CONN_MSG,
                                             err,
-                                            0,
                                             0);
                goto exit;
        }
@@ -583,11 +561,9 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
                                             CONN_MANAGER,
                                             CONN_PROBE_REPLY,
                                             TIPC_OK,
-                                            port_out_seqno(p_ptr),
                                             0);
        }
        p_ptr->probing_state = CONFIRMED;
-       port_incr_out_seqno(p_ptr);
 exit:
        if (p_ptr)
                tipc_port_unlock(p_ptr);
@@ -596,29 +572,29 @@ exit:
        buf_discard(buf);
 }
 
-static void port_print(struct port *p_ptr, struct print_buf *buf, int full_id)
+static void port_print(struct tipc_port *p_ptr, struct print_buf *buf, int full_id)
 {
        struct publication *publ;
 
        if (full_id)
                tipc_printf(buf, "<%u.%u.%u:%u>:",
                            tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
-                           tipc_node(tipc_own_addr), p_ptr->publ.ref);
+                           tipc_node(tipc_own_addr), p_ptr->ref);
        else
-               tipc_printf(buf, "%-10u:", p_ptr->publ.ref);
+               tipc_printf(buf, "%-10u:", p_ptr->ref);
 
-       if (p_ptr->publ.connected) {
+       if (p_ptr->connected) {
                u32 dport = port_peerport(p_ptr);
                u32 destnode = port_peernode(p_ptr);
 
                tipc_printf(buf, " connected to <%u.%u.%u:%u>",
                            tipc_zone(destnode), tipc_cluster(destnode),
                            tipc_node(destnode), dport);
-               if (p_ptr->publ.conn_type != 0)
+               if (p_ptr->conn_type != 0)
                        tipc_printf(buf, " via {%u,%u}",
-                                   p_ptr->publ.conn_type,
-                                   p_ptr->publ.conn_instance);
-       } else if (p_ptr->publ.published) {
+                                   p_ptr->conn_type,
+                                   p_ptr->conn_instance);
+       } else if (p_ptr->published) {
                tipc_printf(buf, " bound to");
                list_for_each_entry(publ, &p_ptr->publications, pport_list) {
                        if (publ->lower == publ->upper)
@@ -639,7 +615,7 @@ struct sk_buff *tipc_port_get_ports(void)
        struct sk_buff *buf;
        struct tlv_desc *rep_tlv;
        struct print_buf pb;
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        int str_len;
 
        buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY));
@@ -650,9 +626,9 @@ struct sk_buff *tipc_port_get_ports(void)
        tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY);
        spin_lock_bh(&tipc_port_list_lock);
        list_for_each_entry(p_ptr, &ports, port_list) {
-               spin_lock_bh(p_ptr->publ.lock);
+               spin_lock_bh(p_ptr->lock);
                port_print(p_ptr, &pb, 0);
-               spin_unlock_bh(p_ptr->publ.lock);
+               spin_unlock_bh(p_ptr->lock);
        }
        spin_unlock_bh(&tipc_port_list_lock);
        str_len = tipc_printbuf_validate(&pb);
@@ -665,12 +641,12 @@ struct sk_buff *tipc_port_get_ports(void)
 
 void tipc_port_reinit(void)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct tipc_msg *msg;
 
        spin_lock_bh(&tipc_port_list_lock);
        list_for_each_entry(p_ptr, &ports, port_list) {
-               msg = &p_ptr->publ.phdr;
+               msg = &p_ptr->phdr;
                if (msg_orignode(msg) == tipc_own_addr)
                        break;
                msg_set_prevnode(msg, tipc_own_addr);
@@ -695,7 +671,7 @@ static void port_dispatcher_sigh(void *dummy)
        spin_unlock_bh(&queue_lock);
 
        while (buf) {
-               struct port *p_ptr;
+               struct tipc_port *p_ptr;
                struct user_port *up_ptr;
                struct tipc_portid orig;
                struct tipc_name_seq dseq;
@@ -720,8 +696,8 @@ static void port_dispatcher_sigh(void *dummy)
                orig.node = msg_orignode(msg);
                up_ptr = p_ptr->user_port;
                usr_handle = up_ptr->usr_handle;
-               connected = p_ptr->publ.connected;
-               published = p_ptr->publ.published;
+               connected = p_ptr->connected;
+               published = p_ptr->published;
 
                if (unlikely(msg_errcode(msg)))
                        goto err;
@@ -732,6 +708,7 @@ static void port_dispatcher_sigh(void *dummy)
                                tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
                                u32 peer_port = port_peerport(p_ptr);
                                u32 peer_node = port_peernode(p_ptr);
+                               u32 dsz;
 
                                tipc_port_unlock(p_ptr);
                                if (unlikely(!cb))
@@ -742,13 +719,14 @@ static void port_dispatcher_sigh(void *dummy)
                                } else if ((msg_origport(msg) != peer_port) ||
                                           (msg_orignode(msg) != peer_node))
                                        goto reject;
-                               if (unlikely(++p_ptr->publ.conn_unacked >=
-                                            TIPC_FLOW_CONTROL_WIN))
+                               dsz = msg_data_sz(msg);
+                               if (unlikely(dsz &&
+                                            (++p_ptr->conn_unacked >=
+                                             TIPC_FLOW_CONTROL_WIN)))
                                        tipc_acknowledge(dref,
-                                                        p_ptr->publ.conn_unacked);
+                                                        p_ptr->conn_unacked);
                                skb_pull(buf, msg_hdr_sz(msg));
-                               cb(usr_handle, dref, &buf, msg_data(msg),
-                                  msg_data_sz(msg));
+                               cb(usr_handle, dref, &buf, msg_data(msg), dsz);
                                break;
                        }
                case TIPC_DIRECT_MSG:{
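
The hunk above changes receive-side flow control so that only data-bearing messages (dsz != 0) count against the window: conn_unacked is incremented per such message, and once it reaches TIPC_FLOW_CONTROL_WIN a CONN_ACK is sent, which tipc_acknowledge() later uses to subtract the acknowledged count. A compact sketch of that counting scheme; the window size and names are illustrative, and the acknowledge path is compressed into a simple reset:

#include <stdio.h>

#define MOCK_FLOW_CONTROL_WIN 50        /* illustrative; the real value is not shown here */

struct mock_conn {
        unsigned int conn_unacked;      /* data messages received but not yet acked */
        unsigned int acks_sent;
};

/* Called per received message; empty messages do not consume window space. */
static void mock_recv_data(struct mock_conn *c, unsigned int data_sz)
{
        if (!data_sz)
                return;
        if (++c->conn_unacked >= MOCK_FLOW_CONTROL_WIN) {
                c->acks_sent++;                 /* send CONN_ACK carrying conn_unacked */
                c->conn_unacked = 0;            /* acked count is subtracted again */
        }
}

int main(void)
{
        struct mock_conn c = { 0, 0 };
        int i;

        for (i = 0; i < 120; i++)
                mock_recv_data(&c, 100);        /* 120 data messages */
        for (i = 0; i < 10; i++)
                mock_recv_data(&c, 0);          /* empty messages: no effect */

        printf("acks=%u unacked=%u\n", c.acks_sent, c.conn_unacked);    /* acks=2 unacked=20 */
        return 0;
}
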
@@ -872,7 +850,7 @@ static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
 
 static void port_wakeup_sh(unsigned long ref)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct user_port *up_ptr;
        tipc_continue_event cb = NULL;
        void *uh = NULL;
@@ -898,14 +876,14 @@ static void port_wakeup(struct tipc_port *p_ptr)
 
 void tipc_acknowledge(u32 ref, u32 ack)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct sk_buff *buf = NULL;
 
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return;
-       if (p_ptr->publ.connected) {
-               p_ptr->publ.conn_unacked -= ack;
+       if (p_ptr->connected) {
+               p_ptr->conn_unacked -= ack;
                buf = port_build_proto_msg(port_peerport(p_ptr),
                                           port_peernode(p_ptr),
                                           ref,
@@ -913,7 +891,6 @@ void tipc_acknowledge(u32 ref, u32 ack)
                                           CONN_MANAGER,
                                           CONN_ACK,
                                           TIPC_OK,
-                                          port_out_seqno(p_ptr),
                                           ack);
        }
        tipc_port_unlock(p_ptr);
@@ -936,14 +913,14 @@ int tipc_createport(void *usr_handle,
                    u32 *portref)
 {
        struct user_port *up_ptr;
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
 
        up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
        if (!up_ptr) {
                warn("Port creation failed, no memory\n");
                return -ENOMEM;
        }
-       p_ptr = (struct port *)tipc_createport_raw(NULL, port_dispatcher,
+       p_ptr = (struct tipc_port *)tipc_createport_raw(NULL, port_dispatcher,
                                                   port_wakeup, importance);
        if (!p_ptr) {
                kfree(up_ptr);
@@ -952,7 +929,7 @@ int tipc_createport(void *usr_handle,
 
        p_ptr->user_port = up_ptr;
        up_ptr->usr_handle = usr_handle;
-       up_ptr->ref = p_ptr->publ.ref;
+       up_ptr->ref = p_ptr->ref;
        up_ptr->err_cb = error_cb;
        up_ptr->named_err_cb = named_error_cb;
        up_ptr->conn_err_cb = conn_error_cb;
@@ -960,26 +937,26 @@ int tipc_createport(void *usr_handle,
        up_ptr->named_msg_cb = named_msg_cb;
        up_ptr->conn_msg_cb = conn_msg_cb;
        up_ptr->continue_event_cb = continue_event_cb;
-       *portref = p_ptr->publ.ref;
+       *portref = p_ptr->ref;
        tipc_port_unlock(p_ptr);
        return 0;
 }
 
 int tipc_portimportance(u32 ref, unsigned int *importance)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
 
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
-       *importance = (unsigned int)msg_importance(&p_ptr->publ.phdr);
+       *importance = (unsigned int)msg_importance(&p_ptr->phdr);
        tipc_port_unlock(p_ptr);
        return 0;
 }
 
 int tipc_set_portimportance(u32 ref, unsigned int imp)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
 
        if (imp > TIPC_CRITICAL_IMPORTANCE)
                return -EINVAL;
@@ -987,7 +964,7 @@ int tipc_set_portimportance(u32 ref, unsigned int imp)
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
-       msg_set_importance(&p_ptr->publ.phdr, (u32)imp);
+       msg_set_importance(&p_ptr->phdr, (u32)imp);
        tipc_port_unlock(p_ptr);
        return 0;
 }
@@ -995,7 +972,7 @@ int tipc_set_portimportance(u32 ref, unsigned int imp)
 
 int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct publication *publ;
        u32 key;
        int res = -EINVAL;
@@ -1004,7 +981,7 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
        if (!p_ptr)
                return -EINVAL;
 
-       if (p_ptr->publ.connected)
+       if (p_ptr->connected)
                goto exit;
        if (seq->lower > seq->upper)
                goto exit;
@@ -1016,11 +993,11 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
                goto exit;
        }
        publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
-                                   scope, p_ptr->publ.ref, key);
+                                   scope, p_ptr->ref, key);
        if (publ) {
                list_add(&publ->pport_list, &p_ptr->publications);
                p_ptr->pub_count++;
-               p_ptr->publ.published = 1;
+               p_ptr->published = 1;
                res = 0;
        }
 exit:
@@ -1030,7 +1007,7 @@ exit:
 
 int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct publication *publ;
        struct publication *tpubl;
        int res = -EINVAL;
@@ -1063,37 +1040,36 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
                }
        }
        if (list_empty(&p_ptr->publications))
-               p_ptr->publ.published = 0;
+               p_ptr->published = 0;
        tipc_port_unlock(p_ptr);
        return res;
 }
 
 int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct tipc_msg *msg;
        int res = -EINVAL;
 
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
-       if (p_ptr->publ.published || p_ptr->publ.connected)
+       if (p_ptr->published || p_ptr->connected)
                goto exit;
        if (!peer->ref)
                goto exit;
 
-       msg = &p_ptr->publ.phdr;
+       msg = &p_ptr->phdr;
        msg_set_destnode(msg, peer->node);
        msg_set_destport(msg, peer->ref);
        msg_set_orignode(msg, tipc_own_addr);
-       msg_set_origport(msg, p_ptr->publ.ref);
-       msg_set_transp_seqno(msg, 42);
+       msg_set_origport(msg, p_ptr->ref);
        msg_set_type(msg, TIPC_CONN_MSG);
        msg_set_hdr_sz(msg, SHORT_H_SIZE);
 
        p_ptr->probing_interval = PROBING_INTERVAL;
        p_ptr->probing_state = CONFIRMED;
-       p_ptr->publ.connected = 1;
+       p_ptr->connected = 1;
        k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
 
        tipc_nodesub_subscribe(&p_ptr->subscription, peer->node,
@@ -1102,7 +1078,7 @@ int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
        res = 0;
 exit:
        tipc_port_unlock(p_ptr);
-       p_ptr->publ.max_pkt = tipc_link_get_max_pkt(peer->node, ref);
+       p_ptr->max_pkt = tipc_link_get_max_pkt(peer->node, ref);
        return res;
 }
 
@@ -1120,7 +1096,7 @@ int tipc_disconnect_port(struct tipc_port *tp_ptr)
                tp_ptr->connected = 0;
                /* let timer expire on its own to avoid deadlock! */
                tipc_nodesub_unsubscribe(
-                       &((struct port *)tp_ptr)->subscription);
+                       &((struct tipc_port *)tp_ptr)->subscription);
                res = 0;
        } else {
                res = -ENOTCONN;
@@ -1135,7 +1111,7 @@ int tipc_disconnect_port(struct tipc_port *tp_ptr)
 
 int tipc_disconnect(u32 ref)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        int res;
 
        p_ptr = tipc_port_lock(ref);
@@ -1151,15 +1127,15 @@ int tipc_disconnect(u32 ref)
  */
 int tipc_shutdown(u32 ref)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct sk_buff *buf = NULL;
 
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
 
-       if (p_ptr->publ.connected) {
-               u32 imp = msg_importance(&p_ptr->publ.phdr);
+       if (p_ptr->connected) {
+               u32 imp = msg_importance(&p_ptr->phdr);
                if (imp < TIPC_CRITICAL_IMPORTANCE)
                        imp++;
                buf = port_build_proto_msg(port_peerport(p_ptr),
@@ -1169,7 +1145,6 @@ int tipc_shutdown(u32 ref)
                                           imp,
                                           TIPC_CONN_MSG,
                                           TIPC_CONN_SHUTDOWN,
-                                          port_out_seqno(p_ptr),
                                           0);
        }
        tipc_port_unlock(p_ptr);
@@ -1182,13 +1157,13 @@ int tipc_shutdown(u32 ref)
  *                        message for this node.
  */
 
-static int tipc_port_recv_sections(struct port *sender, unsigned int num_sect,
+static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_sect,
                                   struct iovec const *msg_sect)
 {
        struct sk_buff *buf;
        int res;
 
-       res = tipc_msg_build(&sender->publ.phdr, msg_sect, num_sect,
+       res = tipc_msg_build(&sender->phdr, msg_sect, num_sect,
                        MAX_MSG_SIZE, !sender->user_port, &buf);
        if (likely(buf))
                tipc_port_recv_msg(buf);
@@ -1201,15 +1176,15 @@ static int tipc_port_recv_sections(struct port *sender, unsigned int num_sect,
 
 int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        u32 destnode;
        int res;
 
        p_ptr = tipc_port_deref(ref);
-       if (!p_ptr || !p_ptr->publ.connected)
+       if (!p_ptr || !p_ptr->connected)
                return -EINVAL;
 
-       p_ptr->publ.congested = 1;
+       p_ptr->congested = 1;
        if (!tipc_port_congested(p_ptr)) {
                destnode = port_peernode(p_ptr);
                if (likely(destnode != tipc_own_addr))
@@ -1219,14 +1194,14 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
                        res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
 
                if (likely(res != -ELINKCONG)) {
-                       port_incr_out_seqno(p_ptr);
-                       p_ptr->publ.congested = 0;
-                       p_ptr->sent++;
+                       p_ptr->congested = 0;
+                       if (res > 0)
+                               p_ptr->sent++;
                        return res;
                }
        }
        if (port_unreliable(p_ptr)) {
-               p_ptr->publ.congested = 0;
+               p_ptr->congested = 0;
                /* Just calculate msg length and return */
                return tipc_msg_calc_data_size(msg_sect, num_sect);
        }
@@ -1240,17 +1215,17 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
 int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
           unsigned int num_sect, struct iovec const *msg_sect)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct tipc_msg *msg;
        u32 destnode = domain;
        u32 destport;
        int res;
 
        p_ptr = tipc_port_deref(ref);
-       if (!p_ptr || p_ptr->publ.connected)
+       if (!p_ptr || p_ptr->connected)
                return -EINVAL;
 
-       msg = &p_ptr->publ.phdr;
+       msg = &p_ptr->phdr;
        msg_set_type(msg, TIPC_NAMED_MSG);
        msg_set_orignode(msg, tipc_own_addr);
        msg_set_origport(msg, ref);
@@ -1263,13 +1238,17 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
        msg_set_destport(msg, destport);
 
        if (likely(destport)) {
-               p_ptr->sent++;
                if (likely(destnode == tipc_own_addr))
-                       return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
-               res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
-                                                  destnode);
-               if (likely(res != -ELINKCONG))
+                       res = tipc_port_recv_sections(p_ptr, num_sect,
+                                                     msg_sect);
+               else
+                       res = tipc_link_send_sections_fast(p_ptr, msg_sect,
+                                                          num_sect, destnode);
+               if (likely(res != -ELINKCONG)) {
+                       if (res > 0)
+                               p_ptr->sent++;
                        return res;
+               }
                if (port_unreliable(p_ptr)) {
                        /* Just calculate msg length and return */
                        return tipc_msg_calc_data_size(msg_sect, num_sect);
@@ -1287,27 +1266,32 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
 int tipc_send2port(u32 ref, struct tipc_portid const *dest,
           unsigned int num_sect, struct iovec const *msg_sect)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct tipc_msg *msg;
        int res;
 
        p_ptr = tipc_port_deref(ref);
-       if (!p_ptr || p_ptr->publ.connected)
+       if (!p_ptr || p_ptr->connected)
                return -EINVAL;
 
-       msg = &p_ptr->publ.phdr;
+       msg = &p_ptr->phdr;
        msg_set_type(msg, TIPC_DIRECT_MSG);
        msg_set_orignode(msg, tipc_own_addr);
        msg_set_origport(msg, ref);
        msg_set_destnode(msg, dest->node);
        msg_set_destport(msg, dest->ref);
        msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
-       p_ptr->sent++;
+
        if (dest->node == tipc_own_addr)
-               return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
-       res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, dest->node);
-       if (likely(res != -ELINKCONG))
+               res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
+       else
+               res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
+                                                  dest->node);
+       if (likely(res != -ELINKCONG)) {
+               if (res > 0)
+                       p_ptr->sent++;
                return res;
+       }
        if (port_unreliable(p_ptr)) {
                /* Just calculate msg length and return */
                return tipc_msg_calc_data_size(msg_sect, num_sect);
@@ -1322,15 +1306,15 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
 int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
               struct sk_buff *buf, unsigned int dsz)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct tipc_msg *msg;
        int res;
 
-       p_ptr = (struct port *)tipc_ref_deref(ref);
-       if (!p_ptr || p_ptr->publ.connected)
+       p_ptr = (struct tipc_port *)tipc_ref_deref(ref);
+       if (!p_ptr || p_ptr->connected)
                return -EINVAL;
 
-       msg = &p_ptr->publ.phdr;
+       msg = &p_ptr->phdr;
        msg_set_type(msg, TIPC_DIRECT_MSG);
        msg_set_orignode(msg, tipc_own_addr);
        msg_set_origport(msg, ref);
@@ -1343,12 +1327,16 @@ int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
 
        skb_push(buf, DIR_MSG_H_SIZE);
        skb_copy_to_linear_data(buf, msg, DIR_MSG_H_SIZE);
-       p_ptr->sent++;
+
        if (dest->node == tipc_own_addr)
-               return tipc_port_recv_msg(buf);
-       res = tipc_send_buf_fast(buf, dest->node);
-       if (likely(res != -ELINKCONG))
+               res = tipc_port_recv_msg(buf);
+       else
+               res = tipc_send_buf_fast(buf, dest->node);
+       if (likely(res != -ELINKCONG)) {
+               if (res > 0)
+                       p_ptr->sent++;
                return res;
+       }
        if (port_unreliable(p_ptr))
                return dsz;
        return -ELINKCONG;
index 8e84b989949cd40b050191e2489ddd58bc61046c..87b9424ae0ecb92886fbb3f2ff7f1685ef7e7948 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/port.h: Include file for TIPC port code
  *
  * Copyright (c) 1994-2007, Ericsson AB
- * Copyright (c) 2004-2007, Wind River Systems
+ * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -95,7 +95,7 @@ struct user_port {
 };
 
 /**
- * struct tipc_port - TIPC port info available to socket API
+ * struct tipc_port - TIPC port structure
  * @usr_handle: pointer to additional user-defined information about port
  * @lock: pointer to spinlock for controlling access to port
  * @connected: non-zero if port is currently connected to a peer port
@@ -107,43 +107,33 @@ struct user_port {
  * @max_pkt: maximum packet size "hint" used when building messages sent by port
  * @ref: unique reference to port in TIPC object registry
  * @phdr: preformatted message header used when sending messages
- */
-struct tipc_port {
-       void *usr_handle;
-       spinlock_t *lock;
-       int connected;
-       u32 conn_type;
-       u32 conn_instance;
-       u32 conn_unacked;
-       int published;
-       u32 congested;
-       u32 max_pkt;
-       u32 ref;
-       struct tipc_msg phdr;
-};
-
-/**
- * struct port - TIPC port structure
- * @publ: TIPC port info available to privileged users
  * @port_list: adjacent ports in TIPC's global list of ports
  * @dispatcher: ptr to routine which handles received messages
  * @wakeup: ptr to routine to call when port is no longer congested
  * @user_port: ptr to user port associated with port (if any)
  * @wait_list: adjacent ports in list of ports waiting on link congestion
  * @waiting_pkts:
- * @sent:
- * @acked:
+ * @sent: # of non-empty messages sent by port
+ * @acked: # of non-empty message acknowledgements from connected port's peer
  * @publications: list of publications for port
  * @pub_count: total # of publications port has made during its lifetime
  * @probing_state:
  * @probing_interval:
- * @last_in_seqno:
  * @timer_ref:
  * @subscription: "node down" subscription used to terminate failed connections
  */
-
-struct port {
-       struct tipc_port publ;
+struct tipc_port {
+       void *usr_handle;
+       spinlock_t *lock;
+       int connected;
+       u32 conn_type;
+       u32 conn_instance;
+       u32 conn_unacked;
+       int published;
+       u32 congested;
+       u32 max_pkt;
+       u32 ref;
+       struct tipc_msg phdr;
        struct list_head port_list;
        u32 (*dispatcher)(struct tipc_port *, struct sk_buff *);
        void (*wakeup)(struct tipc_port *);
@@ -156,7 +146,6 @@ struct port {
        u32 pub_count;
        u32 probing_state;
        u32 probing_interval;
-       u32 last_in_seqno;
        struct timer_list timer;
        struct tipc_node_subscr subscription;
 };
@@ -230,7 +219,7 @@ int tipc_send_buf2port(u32 portref, struct tipc_portid const *dest,
 int tipc_multicast(u32 portref, struct tipc_name_seq const *seq,
                unsigned int section_count, struct iovec const *msg);
 
-int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
+int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
                              struct iovec const *msg_sect, u32 num_sect,
                              int err);
 struct sk_buff *tipc_port_get_ports(void);
@@ -242,9 +231,9 @@ void tipc_port_reinit(void);
  * tipc_port_lock - lock port instance referred to and return its pointer
  */
 
-static inline struct port *tipc_port_lock(u32 ref)
+static inline struct tipc_port *tipc_port_lock(u32 ref)
 {
-       return (struct port *)tipc_ref_lock(ref);
+       return (struct tipc_port *)tipc_ref_lock(ref);
 }
 
 /**
@@ -253,27 +242,27 @@ static inline struct port *tipc_port_lock(u32 ref)
  * Can use pointer instead of tipc_ref_unlock() since port is already locked.
  */
 
-static inline void tipc_port_unlock(struct port *p_ptr)
+static inline void tipc_port_unlock(struct tipc_port *p_ptr)
 {
-       spin_unlock_bh(p_ptr->publ.lock);
+       spin_unlock_bh(p_ptr->lock);
 }
 
-static inline struct port *tipc_port_deref(u32 ref)
+static inline struct tipc_port *tipc_port_deref(u32 ref)
 {
-       return (struct port *)tipc_ref_deref(ref);
+       return (struct tipc_port *)tipc_ref_deref(ref);
 }
 
-static inline u32 tipc_peer_port(struct port *p_ptr)
+static inline u32 tipc_peer_port(struct tipc_port *p_ptr)
 {
-       return msg_destport(&p_ptr->publ.phdr);
+       return msg_destport(&p_ptr->phdr);
 }
 
-static inline u32 tipc_peer_node(struct port *p_ptr)
+static inline u32 tipc_peer_node(struct tipc_port *p_ptr)
 {
-       return msg_destnode(&p_ptr->publ.phdr);
+       return msg_destnode(&p_ptr->phdr);
 }
 
-static inline int tipc_port_congested(struct port *p_ptr)
+static inline int tipc_port_congested(struct tipc_port *p_ptr)
 {
        return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2);
 }
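
With the counter rework in this patch, p_ptr->sent is bumped only when a send actually carried payload (res > 0), so tipc_port_congested() now compares non-empty messages in flight against twice the flow-control window. A minimal caller sketch under that assumption; my_try_send() and its bail-out policy are hypothetical, while the tipc_* calls are the ones shown in the hunks above:

/* Hypothetical helper, not part of the patch: bail out early when the
 * reworked flow control reports congestion rather than letting tipc_send()
 * return -ELINKCONG on a reliable port.
 */
static int my_try_send(u32 ref, unsigned int num_sect,
		       struct iovec const *msg_sect)
{
	struct tipc_port *p_ptr = tipc_port_deref(ref);

	/* sent - acked counts payload-carrying messages still in flight;
	 * once it reaches TIPC_FLOW_CONTROL_WIN * 2 the port is congested
	 * and the caller should wait for its wakeup() handler to run.
	 */
	if (p_ptr && tipc_port_congested(p_ptr))
		return -ELINKCONG;

	return tipc_send(ref, num_sect, msg_sect);
}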
@@ -284,7 +273,7 @@ static inline int tipc_port_congested(struct port *p_ptr)
 
 static inline int tipc_port_recv_msg(struct sk_buff *buf)
 {
-       struct port *p_ptr;
+       struct tipc_port *p_ptr;
        struct tipc_msg *msg = buf_msg(buf);
        u32 destport = msg_destport(msg);
        u32 dsz = msg_data_sz(msg);
@@ -299,7 +288,7 @@ static inline int tipc_port_recv_msg(struct sk_buff *buf)
        /* validate destination & pass to port, otherwise reject message */
        p_ptr = tipc_port_lock(destport);
        if (likely(p_ptr)) {
-               if (likely(p_ptr->publ.connected)) {
+               if (likely(p_ptr->connected)) {
                        if ((unlikely(msg_origport(msg) != tipc_peer_port(p_ptr))) ||
                            (unlikely(msg_orignode(msg) != tipc_peer_node(p_ptr))) ||
                            (unlikely(!msg_connected(msg)))) {
@@ -308,7 +297,7 @@ static inline int tipc_port_recv_msg(struct sk_buff *buf)
                                goto reject;
                        }
                }
-               err = p_ptr->dispatcher(&p_ptr->publ, buf);
+               err = p_ptr->dispatcher(p_ptr, buf);
                tipc_port_unlock(p_ptr);
                if (likely(!err))
                        return dsz;
index 2b02a3a8031318008b4dd67d7b37140c5129c2f4..125dcb0737b241d9036d7813765690f5764c6674 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/socket.c: TIPC socket API
  *
  * Copyright (c) 2001-2007, Ericsson AB
- * Copyright (c) 2004-2008, Wind River Systems
+ * Copyright (c) 2004-2008, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -241,7 +241,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
                        tipc_set_portunreliable(tp_ptr->ref, 1);
        }
 
-       atomic_inc(&tipc_user_count);
        return 0;
 }
 
@@ -321,7 +320,6 @@ static int release(struct socket *sock)
        sock_put(sk);
        sock->sk = NULL;
 
-       atomic_dec(&tipc_user_count);
        return res;
 }
 
@@ -495,6 +493,8 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
        if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
                return -EACCES;
 
+       if (!m->msg_iovlen || (m->msg_iov[0].iov_len < sizeof(hdr)))
+               return -EMSGSIZE;
        if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
                return -EFAULT;
        if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
index ca04479c3d423820e66b07c006d2fe0b9e6b3027..aae9eae13404d4c2c3eaa30035cbbbee8d025195 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/subscr.c: TIPC network topology service
  *
  * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005-2007, Wind River Systems
+ * Copyright (c) 2005-2007, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -160,7 +160,7 @@ void tipc_subscr_report_overlap(struct subscription *sub,
 
 static void subscr_timeout(struct subscription *sub)
 {
-       struct port *server_port;
+       struct tipc_port *server_port;
 
        /* Validate server port reference (in case subscriber is terminating) */
 
@@ -472,8 +472,6 @@ static void subscr_named_msg_event(void *usr_handle,
                                   struct tipc_portid const *orig,
                                   struct tipc_name_seq const *dest)
 {
-       static struct iovec msg_sect = {NULL, 0};
-
        struct subscriber *subscriber;
        u32 server_port_ref;
 
@@ -508,7 +506,7 @@ static void subscr_named_msg_event(void *usr_handle,
 
        /* Lock server port (& save lock address for future use) */
 
-       subscriber->lock = tipc_port_lock(subscriber->port_ref)->publ.lock;
+       subscriber->lock = tipc_port_lock(subscriber->port_ref)->lock;
 
        /* Add subscriber to topology server's subscriber list */
 
@@ -523,7 +521,7 @@ static void subscr_named_msg_event(void *usr_handle,
 
        /* Send an ACK- to complete connection handshaking */
 
-       tipc_send(server_port_ref, 1, &msg_sect);
+       tipc_send(server_port_ref, 0, NULL);
 
        /* Handle optional subscription request */
 
@@ -542,7 +540,6 @@ int tipc_subscr_start(void)
        spin_lock_init(&topsrv.lock);
        INIT_LIST_HEAD(&topsrv.subscriber_list);
 
-       spin_lock_bh(&topsrv.lock);
        res = tipc_createport(NULL,
                              TIPC_CRITICAL_IMPORTANCE,
                              NULL,
@@ -563,12 +560,10 @@ int tipc_subscr_start(void)
                goto failed;
        }
 
-       spin_unlock_bh(&topsrv.lock);
        return 0;
 
 failed:
        err("Failed to create subscription service\n");
-       spin_unlock_bh(&topsrv.lock);
        return res;
 }
 
index 437a99e560e1b5b2196b8734b0eabd0ae4ac75c7..de870184e457fcb45abc010cbd79e3a3c2cdbb25 100644 (file)
@@ -1171,7 +1171,7 @@ restart:
        newsk->sk_type          = sk->sk_type;
        init_peercred(newsk);
        newu = unix_sk(newsk);
-       newsk->sk_wq            = &newu->peer_wq;
+       RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
        otheru = unix_sk(other);
 
        /* copy address information from listening to new sock*/
@@ -1475,6 +1475,12 @@ restart:
                        goto out_free;
        }
 
+       if (sk_filter(other, skb) < 0) {
+               /* Toss the packet but do not return any error to the sender */
+               err = len;
+               goto out_free;
+       }
+
        unix_state_lock(other);
        err = -EPERM;
        if (!unix_may_send(sk, other))
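
With sk_filter() now consulted on the SOCK_DGRAM send path, a socket filter attached to the receiving AF_UNIX socket can silently discard datagrams while the sender still sees a successful send of len bytes. A rough userspace illustration under that assumption; attach_drop_all() is a hypothetical name, SO_ATTACH_FILTER and classic BPF are the standard mechanisms:

/* Hypothetical userspace sketch: attach a classic BPF program that returns 0
 * to the receiving socket; sk_filter() then fails for every datagram and the
 * kernel tosses it without reporting an error back to the sender.
 */
#include <linux/filter.h>
#include <sys/socket.h>

static int attach_drop_all(int sockfd)
{
	struct sock_filter insn = { BPF_RET | BPF_K, 0, 0, 0 };  /* return 0 => drop */
	struct sock_fprog prog = { .len = 1, .filter = &insn };

	return setsockopt(sockfd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}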
@@ -1561,7 +1567,6 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
        struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
        struct sock *sk = sock->sk;
        struct sock *other = NULL;
-       struct sockaddr_un *sunaddr = msg->msg_name;
        int err, size;
        struct sk_buff *skb;
        int sent = 0;
@@ -1584,7 +1589,6 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
                err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
                goto out_err;
        } else {
-               sunaddr = NULL;
                err = -ENOTCONN;
                other = unix_peer(sk);
                if (!other)
@@ -1987,36 +1991,38 @@ static int unix_shutdown(struct socket *sock, int mode)
 
        mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
 
-       if (mode) {
-               unix_state_lock(sk);
-               sk->sk_shutdown |= mode;
-               other = unix_peer(sk);
-               if (other)
-                       sock_hold(other);
-               unix_state_unlock(sk);
-               sk->sk_state_change(sk);
-
-               if (other &&
-                       (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
-
-                       int peer_mode = 0;
-
-                       if (mode&RCV_SHUTDOWN)
-                               peer_mode |= SEND_SHUTDOWN;
-                       if (mode&SEND_SHUTDOWN)
-                               peer_mode |= RCV_SHUTDOWN;
-                       unix_state_lock(other);
-                       other->sk_shutdown |= peer_mode;
-                       unix_state_unlock(other);
-                       other->sk_state_change(other);
-                       if (peer_mode == SHUTDOWN_MASK)
-                               sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
-                       else if (peer_mode & RCV_SHUTDOWN)
-                               sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
-               }
-               if (other)
-                       sock_put(other);
+       if (!mode)
+               return 0;
+
+       unix_state_lock(sk);
+       sk->sk_shutdown |= mode;
+       other = unix_peer(sk);
+       if (other)
+               sock_hold(other);
+       unix_state_unlock(sk);
+       sk->sk_state_change(sk);
+
+       if (other &&
+               (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
+
+               int peer_mode = 0;
+
+               if (mode&RCV_SHUTDOWN)
+                       peer_mode |= SEND_SHUTDOWN;
+               if (mode&SEND_SHUTDOWN)
+                       peer_mode |= RCV_SHUTDOWN;
+               unix_state_lock(other);
+               other->sk_shutdown |= peer_mode;
+               unix_state_unlock(other);
+               other->sk_state_change(other);
+               if (peer_mode == SHUTDOWN_MASK)
+                       sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
+               else if (peer_mode & RCV_SHUTDOWN)
+                       sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
        }
+       if (other)
+               sock_put(other);
+
        return 0;
 }
 
index 74944a2dd4369ed397bbc10f50bbbfe96a9b80bb..788a12c1eb5d61c9df0a98d6ca2f5ad8ce7bfeda 100644 (file)
@@ -59,8 +59,6 @@
 #include <asm/uaccess.h>        /* copy_to/from_user */
 #include <linux/init.h>         /* __initfunc et al. */
 
-#define KMEM_SAFETYZONE 8
-
 #define DEV_TO_SLAVE(dev)      (*((struct net_device **)netdev_priv(dev)))
 
 /*
index e9a5f8ca4c2718424bc0fafec5a9b7eabe6d65b1..fe01de29bfe8c740edc5e19944663e5d0fd7cd33 100644 (file)
@@ -718,13 +718,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
                        wdev->ps = false;
                /* allow mac80211 to determine the timeout */
                wdev->ps_timeout = -1;
-               if (rdev->ops->set_power_mgmt)
-                       if (rdev->ops->set_power_mgmt(wdev->wiphy, dev,
-                                                     wdev->ps,
-                                                     wdev->ps_timeout)) {
-                               /* assume this means it's off */
-                               wdev->ps = false;
-                       }
 
                if (!dev->ethtool_ops)
                        dev->ethtool_ops = &cfg80211_ethtool_ops;
@@ -813,6 +806,19 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
                rdev->opencount++;
                mutex_unlock(&rdev->devlist_mtx);
                cfg80211_unlock_rdev(rdev);
+
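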
+               /*
+                * Configure power management in the driver here so that it's
+                * correctly set also after interface type changes etc.
+                */
+               if (wdev->iftype == NL80211_IFTYPE_STATION &&
+                   rdev->ops->set_power_mgmt)
+                       if (rdev->ops->set_power_mgmt(wdev->wiphy, dev,
+                                                     wdev->ps,
+                                                     wdev->ps_timeout)) {
+                               /* assume this means it's off */
+                               wdev->ps = false;
+                       }
                break;
        case NETDEV_UNREGISTER:
                /*
index 9b62710891a2b3ebd188e013975537c33bd172e9..4ebce4284e9de25a680f2812bc7cb8b393304565 100644 (file)
@@ -1968,13 +1968,41 @@ static int parse_station_flags(struct genl_info *info,
        return 0;
 }
 
+static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
+                                int attr)
+{
+       struct nlattr *rate;
+       u16 bitrate;
+
+       rate = nla_nest_start(msg, attr);
+       if (!rate)
+               goto nla_put_failure;
+
+       /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
+       bitrate = cfg80211_calculate_bitrate(info);
+       if (bitrate > 0)
+               NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
+
+       if (info->flags & RATE_INFO_FLAGS_MCS)
+               NLA_PUT_U8(msg, NL80211_RATE_INFO_MCS, info->mcs);
+       if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH)
+               NLA_PUT_FLAG(msg, NL80211_RATE_INFO_40_MHZ_WIDTH);
+       if (info->flags & RATE_INFO_FLAGS_SHORT_GI)
+               NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI);
+
+       nla_nest_end(msg, rate);
+       return true;
+
+nla_put_failure:
+       return false;
+}
+
 static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
                                int flags, struct net_device *dev,
                                const u8 *mac_addr, struct station_info *sinfo)
 {
        void *hdr;
-       struct nlattr *sinfoattr, *txrate;
-       u16 bitrate;
+       struct nlattr *sinfoattr;
 
        hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION);
        if (!hdr)
@@ -2013,24 +2041,14 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
                NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG,
                           sinfo->signal_avg);
        if (sinfo->filled & STATION_INFO_TX_BITRATE) {
-               txrate = nla_nest_start(msg, NL80211_STA_INFO_TX_BITRATE);
-               if (!txrate)
+               if (!nl80211_put_sta_rate(msg, &sinfo->txrate,
+                                         NL80211_STA_INFO_TX_BITRATE))
+                       goto nla_put_failure;
+       }
+       if (sinfo->filled & STATION_INFO_RX_BITRATE) {
+               if (!nl80211_put_sta_rate(msg, &sinfo->rxrate,
+                                         NL80211_STA_INFO_RX_BITRATE))
                        goto nla_put_failure;
-
-               /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
-               bitrate = cfg80211_calculate_bitrate(&sinfo->txrate);
-               if (bitrate > 0)
-                       NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
-
-               if (sinfo->txrate.flags & RATE_INFO_FLAGS_MCS)
-                       NLA_PUT_U8(msg, NL80211_RATE_INFO_MCS,
-                                   sinfo->txrate.mcs);
-               if (sinfo->txrate.flags & RATE_INFO_FLAGS_40_MHZ_WIDTH)
-                       NLA_PUT_FLAG(msg, NL80211_RATE_INFO_40_MHZ_WIDTH);
-               if (sinfo->txrate.flags & RATE_INFO_FLAGS_SHORT_GI)
-                       NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI);
-
-               nla_nest_end(msg, txrate);
        }
        if (sinfo->filled & STATION_INFO_RX_PACKETS)
                NLA_PUT_U32(msg, NL80211_STA_INFO_RX_PACKETS,
@@ -2718,7 +2736,7 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
        hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
                             NL80211_CMD_GET_MESH_CONFIG);
        if (!hdr)
-               goto nla_put_failure;
+               goto out;
        pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_CONFIG);
        if (!pinfoattr)
                goto nla_put_failure;
@@ -2759,6 +2777,7 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
 
  nla_put_failure:
        genlmsg_cancel(msg, hdr);
+ out:
        nlmsg_free(msg);
        return -ENOBUFS;
 }
@@ -2954,7 +2973,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
        hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
                             NL80211_CMD_GET_REG);
        if (!hdr)
-               goto nla_put_failure;
+               goto put_failure;
 
        NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2,
                cfg80211_regdomain->alpha2);
@@ -3001,6 +3020,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
 
 nla_put_failure:
        genlmsg_cancel(msg, hdr);
+put_failure:
        nlmsg_free(msg);
        err = -EMSGSIZE;
 out:
index 37693b6ef23a68c21f9684b6083f1371dc7c5921..c565689f0b9f2069d3d3a7515773ac7a1ae0d0ca 100644 (file)
@@ -1801,9 +1801,9 @@ void regulatory_hint_disconnect(void)
 
 static bool freq_is_chan_12_13_14(u16 freq)
 {
-       if (freq == ieee80211_channel_to_frequency(12) ||
-           freq == ieee80211_channel_to_frequency(13) ||
-           freq == ieee80211_channel_to_frequency(14))
+       if (freq == ieee80211_channel_to_frequency(12, IEEE80211_BAND_2GHZ) ||
+           freq == ieee80211_channel_to_frequency(13, IEEE80211_BAND_2GHZ) ||
+           freq == ieee80211_channel_to_frequency(14, IEEE80211_BAND_2GHZ))
                return true;
        return false;
 }
index 7620ae2fcf18e391c18ee6cfea6f4ec95c73b074..6a750bc6bcfe3cf3fddc82202a7d7ddeabf28720 100644 (file)
@@ -29,29 +29,37 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
 }
 EXPORT_SYMBOL(ieee80211_get_response_rate);
 
-int ieee80211_channel_to_frequency(int chan)
+int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band)
 {
-       if (chan < 14)
-               return 2407 + chan * 5;
-
-       if (chan == 14)
-               return 2484;
-
-       /* FIXME: 802.11j 17.3.8.3.2 */
-       return (chan + 1000) * 5;
+       /* see 802.11 17.3.8.3.2 and Annex J
+        * there are overlapping channel numbers in 5GHz and 2GHz bands */
+       if (band == IEEE80211_BAND_5GHZ) {
+               if (chan >= 182 && chan <= 196)
+                       return 4000 + chan * 5;
+               else
+                       return 5000 + chan * 5;
+       } else { /* IEEE80211_BAND_2GHZ */
+               if (chan == 14)
+                       return 2484;
+               else if (chan < 14)
+                       return 2407 + chan * 5;
+               else
+                       return 0; /* not supported */
+       }
 }
 EXPORT_SYMBOL(ieee80211_channel_to_frequency);
 
 int ieee80211_frequency_to_channel(int freq)
 {
+       /* see 802.11 17.3.8.3.2 and Annex J */
        if (freq == 2484)
                return 14;
-
-       if (freq < 2484)
+       else if (freq < 2484)
                return (freq - 2407) / 5;
-
-       /* FIXME: 802.11j 17.3.8.3.2 */
-       return freq/5 - 1000;
+       else if (freq >= 4910 && freq <= 4980)
+               return (freq - 4000) / 5;
+       else
+               return (freq - 5000) / 5;
 }
 EXPORT_SYMBOL(ieee80211_frequency_to_channel);
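
A few values worked out from the two conversion routines above may help when reviewing callers that now have to pass the band; these just evaluate the formulas and add no behaviour of their own:

/*
 * ieee80211_channel_to_frequency(1,   IEEE80211_BAND_2GHZ) -> 2407 + 1*5   = 2412 MHz
 * ieee80211_channel_to_frequency(14,  IEEE80211_BAND_2GHZ) ->                2484 MHz
 * ieee80211_channel_to_frequency(15,  IEEE80211_BAND_2GHZ) ->                0 (not supported)
 * ieee80211_channel_to_frequency(36,  IEEE80211_BAND_5GHZ) -> 5000 + 36*5  = 5180 MHz
 * ieee80211_channel_to_frequency(184, IEEE80211_BAND_5GHZ) -> 4000 + 184*5 = 4920 MHz
 * ieee80211_frequency_to_channel(2412) -> (2412 - 2407) / 5 = 1
 * ieee80211_frequency_to_channel(4920) -> (4920 - 4000) / 5 = 184
 * ieee80211_frequency_to_channel(5180) -> (5180 - 5000) / 5 = 36
 */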
 
@@ -159,12 +167,15 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
 
        /*
         * Disallow pairwise keys with non-zero index unless it's WEP
-        * (because current deployments use pairwise WEP keys with
-        * non-zero indizes but 802.11i clearly specifies to use zero)
+        * or a vendor specific cipher (because current deployments use
+        * pairwise WEP keys with non-zero indices and for vendor specific
+        * ciphers this should be validated at the driver or hardware level
+        * - but 802.11i clearly specifies to use zero)
         */
        if (pairwise && key_idx &&
-           params->cipher != WLAN_CIPHER_SUITE_WEP40 &&
-           params->cipher != WLAN_CIPHER_SUITE_WEP104)
+           ((params->cipher == WLAN_CIPHER_SUITE_TKIP) ||
+            (params->cipher == WLAN_CIPHER_SUITE_CCMP) ||
+            (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC)))
                return -EINVAL;
 
        switch (params->cipher) {
index d112f038edf05d08433bfad22e36095577055cb0..0bf169bb770ed695b9829b234511ba0ce576cb58 100644 (file)
@@ -267,9 +267,12 @@ int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq)
         * -EINVAL for impossible things.
         */
        if (freq->e == 0) {
+               enum ieee80211_band band = IEEE80211_BAND_2GHZ;
                if (freq->m < 0)
                        return 0;
-               return ieee80211_channel_to_frequency(freq->m);
+               if (freq->m > 14)
+                       band = IEEE80211_BAND_5GHZ;
+               return ieee80211_channel_to_frequency(freq->m, band);
        } else {
                int i, div = 1000000;
                for (i = 0; i < freq->e; i++)
index 8b4d6e3246e50fd07657c39733d19697fd0b68c0..58064d9e565d80cc4fe542e9dec93f262cab8b7a 100644 (file)
@@ -618,21 +618,21 @@ static int xfrm_alg_name_match(const struct xfrm_algo_desc *entry,
                        (entry->compat && !strcmp(name, entry->compat)));
 }
 
-struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
+struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe)
 {
        return xfrm_find_algo(&xfrm_aalg_list, xfrm_alg_name_match, name,
                              probe);
 }
 EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);
 
-struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
+struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe)
 {
        return xfrm_find_algo(&xfrm_ealg_list, xfrm_alg_name_match, name,
                              probe);
 }
 EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);
 
-struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
+struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe)
 {
        return xfrm_find_algo(&xfrm_calg_list, xfrm_alg_name_match, name,
                              probe);
@@ -654,7 +654,7 @@ static int xfrm_aead_name_match(const struct xfrm_algo_desc *entry,
               !strcmp(name, entry->name);
 }
 
-struct xfrm_algo_desc *xfrm_aead_get_byname(char *name, int icv_len, int probe)
+struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len, int probe)
 {
        struct xfrm_aead_name data = {
                .name = name,
index 8e69533d2313a63a7a4718898ce12277ca92a432..7199d78b2aa14b67c9a7660f29636167d7984a68 100644 (file)
@@ -4,29 +4,32 @@
 #include <linux/xfrm.h>
 #include <linux/socket.h>
 
-static inline unsigned int __xfrm4_addr_hash(xfrm_address_t *addr)
+static inline unsigned int __xfrm4_addr_hash(const xfrm_address_t *addr)
 {
        return ntohl(addr->a4);
 }
 
-static inline unsigned int __xfrm6_addr_hash(xfrm_address_t *addr)
+static inline unsigned int __xfrm6_addr_hash(const xfrm_address_t *addr)
 {
        return ntohl(addr->a6[2] ^ addr->a6[3]);
 }
 
-static inline unsigned int __xfrm4_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr)
+static inline unsigned int __xfrm4_daddr_saddr_hash(const xfrm_address_t *daddr,
+                                                   const xfrm_address_t *saddr)
 {
        u32 sum = (__force u32)daddr->a4 + (__force u32)saddr->a4;
        return ntohl((__force __be32)sum);
 }
 
-static inline unsigned int __xfrm6_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr)
+static inline unsigned int __xfrm6_daddr_saddr_hash(const xfrm_address_t *daddr,
+                                                   const xfrm_address_t *saddr)
 {
        return ntohl(daddr->a6[2] ^ daddr->a6[3] ^
                     saddr->a6[2] ^ saddr->a6[3]);
 }
 
-static inline unsigned int __xfrm_dst_hash(xfrm_address_t *daddr, xfrm_address_t *saddr,
+static inline unsigned int __xfrm_dst_hash(const xfrm_address_t *daddr,
+                                          const xfrm_address_t *saddr,
                                           u32 reqid, unsigned short family,
                                           unsigned int hmask)
 {
@@ -42,8 +45,8 @@ static inline unsigned int __xfrm_dst_hash(xfrm_address_t *daddr, xfrm_address_t
        return (h ^ (h >> 16)) & hmask;
 }
 
-static inline unsigned __xfrm_src_hash(xfrm_address_t *daddr,
-                                      xfrm_address_t *saddr,
+static inline unsigned __xfrm_src_hash(const xfrm_address_t *daddr,
+                                      const xfrm_address_t *saddr,
                                       unsigned short family,
                                       unsigned int hmask)
 {
@@ -60,8 +63,8 @@ static inline unsigned __xfrm_src_hash(xfrm_address_t *daddr,
 }
 
 static inline unsigned int
-__xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family,
-               unsigned int hmask)
+__xfrm_spi_hash(const xfrm_address_t *daddr, __be32 spi, u8 proto,
+               unsigned short family, unsigned int hmask)
 {
        unsigned int h = (__force u32)spi ^ proto;
        switch (family) {
@@ -80,10 +83,11 @@ static inline unsigned int __idx_hash(u32 index, unsigned int hmask)
        return (index ^ (index >> 8)) & hmask;
 }
 
-static inline unsigned int __sel_hash(struct xfrm_selector *sel, unsigned short family, unsigned int hmask)
+static inline unsigned int __sel_hash(const struct xfrm_selector *sel,
+                                     unsigned short family, unsigned int hmask)
 {
-       xfrm_address_t *daddr = &sel->daddr;
-       xfrm_address_t *saddr = &sel->saddr;
+       const xfrm_address_t *daddr = &sel->daddr;
+       const xfrm_address_t *saddr = &sel->saddr;
        unsigned int h = 0;
 
        switch (family) {
@@ -107,7 +111,9 @@ static inline unsigned int __sel_hash(struct xfrm_selector *sel, unsigned short
        return h & hmask;
 }
 
-static inline unsigned int __addr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, unsigned int hmask)
+static inline unsigned int __addr_hash(const xfrm_address_t *daddr,
+                                      const xfrm_address_t *saddr,
+                                      unsigned short family, unsigned int hmask)
 {
        unsigned int h = 0;
 
index 6459588befc33fc58baa4f341b04f95e7a675bca..b1932a629ef8e4213635c8ac57e321f5e452d3b4 100644 (file)
@@ -51,14 +51,14 @@ static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
 static void xfrm_init_pmtu(struct dst_entry *dst);
 static int stale_bundle(struct dst_entry *dst);
 static int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *xdst,
-                         struct flowi *fl, int family, int strict);
+                         const struct flowi *fl, int family);
 
 
 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
                                                int dir);
 
 static inline int
-__xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
+__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
 {
        return  addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
                addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
@@ -69,7 +69,7 @@ __xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
 }
 
 static inline int
-__xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
+__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
 {
        return  addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
                addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
@@ -79,8 +79,8 @@ __xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
                (fl->oif == sel->ifindex || !sel->ifindex);
 }
 
-int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
-                   unsigned short family)
+int xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
+                       unsigned short family)
 {
        switch (family) {
        case AF_INET:
@@ -92,8 +92,8 @@ int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
 }
 
 static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
-                                                 xfrm_address_t *saddr,
-                                                 xfrm_address_t *daddr,
+                                                 const xfrm_address_t *saddr,
+                                                 const xfrm_address_t *daddr,
                                                  int family)
 {
        struct xfrm_policy_afinfo *afinfo;
@@ -311,7 +311,9 @@ static inline unsigned int idx_hash(struct net *net, u32 index)
        return __idx_hash(index, net->xfrm.policy_idx_hmask);
 }
 
-static struct hlist_head *policy_hash_bysel(struct net *net, struct xfrm_selector *sel, unsigned short family, int dir)
+static struct hlist_head *policy_hash_bysel(struct net *net,
+                                           const struct xfrm_selector *sel,
+                                           unsigned short family, int dir)
 {
        unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
        unsigned int hash = __sel_hash(sel, family, hmask);
@@ -321,7 +323,10 @@ static struct hlist_head *policy_hash_bysel(struct net *net, struct xfrm_selecto
                net->xfrm.policy_bydst[dir].table + hash);
 }
 
-static struct hlist_head *policy_hash_direct(struct net *net, xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, int dir)
+static struct hlist_head *policy_hash_direct(struct net *net,
+                                            const xfrm_address_t *daddr,
+                                            const xfrm_address_t *saddr,
+                                            unsigned short family, int dir)
 {
        unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
        unsigned int hash = __addr_hash(daddr, saddr, family, hmask);
@@ -864,10 +869,11 @@ EXPORT_SYMBOL(xfrm_policy_walk_done);
  *
  * Returns 0 if policy found, else an -errno.
  */
-static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
+static int xfrm_policy_match(const struct xfrm_policy *pol,
+                            const struct flowi *fl,
                             u8 type, u16 family, int dir)
 {
-       struct xfrm_selector *sel = &pol->selector;
+       const struct xfrm_selector *sel = &pol->selector;
        int match, ret = -ESRCH;
 
        if (pol->family != family ||
@@ -884,12 +890,12 @@ static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
 }
 
 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
-                                                    struct flowi *fl,
+                                                    const struct flowi *fl,
                                                     u16 family, u8 dir)
 {
        int err;
        struct xfrm_policy *pol, *ret;
-       xfrm_address_t *daddr, *saddr;
+       const xfrm_address_t *daddr, *saddr;
        struct hlist_node *entry;
        struct hlist_head *chain;
        u32 priority = ~0U;
@@ -941,7 +947,7 @@ fail:
 }
 
 static struct xfrm_policy *
-__xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir)
+__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
 {
 #ifdef CONFIG_XFRM_SUB_POLICY
        struct xfrm_policy *pol;
@@ -954,7 +960,7 @@ __xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir)
 }
 
 static struct flow_cache_object *
-xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family,
+xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
                   u8 dir, struct flow_cache_object *old_obj, void *ctx)
 {
        struct xfrm_policy *pol;
@@ -990,7 +996,8 @@ static inline int policy_to_flow_dir(int dir)
        }
 }
 
-static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
+static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
+                                                const struct flowi *fl)
 {
        struct xfrm_policy *pol;
 
@@ -1098,7 +1105,7 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
        return 0;
 }
 
-static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
+static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
 {
        struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
 
@@ -1157,9 +1164,8 @@ xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
 /* Resolve list of templates for the flow, given policy. */
 
 static int
-xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
-                     struct xfrm_state **xfrm,
-                     unsigned short family)
+xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
+                     struct xfrm_state **xfrm, unsigned short family)
 {
        struct net *net = xp_net(policy);
        int nx;
@@ -1214,9 +1220,8 @@ fail:
 }
 
 static int
-xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
-                 struct xfrm_state **xfrm,
-                 unsigned short family)
+xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
+                 struct xfrm_state **xfrm, unsigned short family)
 {
        struct xfrm_state *tp[XFRM_MAX_DEPTH];
        struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
@@ -1256,7 +1261,7 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
  * still valid.
  */
 
-static inline int xfrm_get_tos(struct flowi *fl, int family)
+static inline int xfrm_get_tos(const struct flowi *fl, int family)
 {
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        int tos;
@@ -1340,7 +1345,7 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
        default:
                BUG();
        }
-       xdst = dst_alloc(dst_ops);
+       xdst = dst_alloc(dst_ops, 0);
        xfrm_policy_put_afinfo(afinfo);
 
        if (likely(xdst))
@@ -1369,7 +1374,7 @@ static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
 }
 
 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
-                               struct flowi *fl)
+                               const struct flowi *fl)
 {
        struct xfrm_policy_afinfo *afinfo =
                xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
@@ -1392,7 +1397,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
 
 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
                                            struct xfrm_state **xfrm, int nx,
-                                           struct flowi *fl,
+                                           const struct flowi *fl,
                                            struct dst_entry *dst)
 {
        struct net *net = xp_net(policy);
@@ -1508,7 +1513,7 @@ free_dst:
 }
 
 static int inline
-xfrm_dst_alloc_copy(void **target, void *src, int size)
+xfrm_dst_alloc_copy(void **target, const void *src, int size)
 {
        if (!*target) {
                *target = kmalloc(size, GFP_ATOMIC);
@@ -1520,7 +1525,7 @@ xfrm_dst_alloc_copy(void **target, void *src, int size)
 }
 
 static int inline
-xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
+xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
 {
 #ifdef CONFIG_XFRM_SUB_POLICY
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
@@ -1532,7 +1537,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
 }
 
 static int inline
-xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
+xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
 {
 #ifdef CONFIG_XFRM_SUB_POLICY
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
@@ -1542,7 +1547,7 @@ xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
 #endif
 }
 
-static int xfrm_expand_policies(struct flowi *fl, u16 family,
+static int xfrm_expand_policies(const struct flowi *fl, u16 family,
                                struct xfrm_policy **pols,
                                int *num_pols, int *num_xfrms)
 {
@@ -1588,7 +1593,7 @@ static int xfrm_expand_policies(struct flowi *fl, u16 family,
 
 static struct xfrm_dst *
 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
-                              struct flowi *fl, u16 family,
+                              const struct flowi *fl, u16 family,
                               struct dst_entry *dst_orig)
 {
        struct net *net = xp_net(pols[0]);
@@ -1631,7 +1636,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
 }
 
 static struct flow_cache_object *
-xfrm_bundle_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir,
+xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
                   struct flow_cache_object *oldflo, void *ctx)
 {
        struct dst_entry *dst_orig = (struct dst_entry *)ctx;
@@ -1730,18 +1735,36 @@ error:
        return ERR_PTR(err);
 }
 
+static struct dst_entry *make_blackhole(struct net *net, u16 family,
+                                       struct dst_entry *dst_orig)
+{
+       struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
+       struct dst_entry *ret;
+
+       if (!afinfo) {
+               dst_release(dst_orig);
+               ret = ERR_PTR(-EINVAL);
+       } else {
+               ret = afinfo->blackhole_route(net, dst_orig);
+       }
+       xfrm_policy_put_afinfo(afinfo);
+
+       return ret;
+}
+
 /* Main function: finds/creates a bundle for given flow.
  *
  * At the moment we eat a raw IP route. Mostly to speed up lookups
  * on interfaces with disabled IPsec.
  */
-int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
-                 struct sock *sk, int flags)
+struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
+                             const struct flowi *fl,
+                             struct sock *sk, int flags)
 {
        struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
        struct flow_cache_object *flo;
        struct xfrm_dst *xdst;
-       struct dst_entry *dst, *dst_orig = *dst_p, *route;
+       struct dst_entry *dst, *route;
        u16 family = dst_orig->ops->family;
        u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
        int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
@@ -1823,9 +1846,10 @@ restart:
                        dst_release(dst);
                        xfrm_pols_put(pols, drop_pols);
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
-                       return -EREMOTE;
+
+                       return make_blackhole(net, family, dst_orig);
                }
-               if (flags & XFRM_LOOKUP_WAIT) {
+               if (fl->flags & FLOWI_FLAG_CAN_SLEEP) {
                        DECLARE_WAITQUEUE(wait, current);
 
                        add_wait_queue(&net->xfrm.km_waitq, &wait);
@@ -1867,47 +1891,33 @@ no_transform:
                goto error;
        } else if (num_xfrms > 0) {
                /* Flow transformed */
-               *dst_p = dst;
                dst_release(dst_orig);
        } else {
                /* Flow passes untransformed */
                dst_release(dst);
+               dst = dst_orig;
        }
 ok:
        xfrm_pols_put(pols, drop_pols);
-       return 0;
+       return dst;
 
 nopol:
-       if (!(flags & XFRM_LOOKUP_ICMP))
+       if (!(flags & XFRM_LOOKUP_ICMP)) {
+               dst = dst_orig;
                goto ok;
+       }
        err = -ENOENT;
 error:
        dst_release(dst);
 dropdst:
        dst_release(dst_orig);
-       *dst_p = NULL;
        xfrm_pols_put(pols, drop_pols);
-       return err;
-}
-EXPORT_SYMBOL(__xfrm_lookup);
-
-int xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
-               struct sock *sk, int flags)
-{
-       int err = __xfrm_lookup(net, dst_p, fl, sk, flags);
-
-       if (err == -EREMOTE) {
-               dst_release(*dst_p);
-               *dst_p = NULL;
-               err = -EAGAIN;
-       }
-
-       return err;
+       return ERR_PTR(err);
 }
 EXPORT_SYMBOL(xfrm_lookup);
 
 static inline int
-xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
+xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
 {
        struct xfrm_state *x;
 
@@ -1926,7 +1936,7 @@ xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
  */
 
 static inline int
-xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
+xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
              unsigned short family)
 {
        if (xfrm_state_kern(x))
@@ -1949,7 +1959,7 @@ xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
  * Otherwise "-2 - errored_index" is returned.
  */
 static inline int
-xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
+xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
               unsigned short family)
 {
        int idx = start;
@@ -1987,7 +1997,7 @@ int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
 }
 EXPORT_SYMBOL(__xfrm_decode_session);
 
-static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
+static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
 {
        for (; k < sp->len; k++) {
                if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
@@ -2162,7 +2172,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
        struct net *net = dev_net(skb->dev);
        struct flowi fl;
        struct dst_entry *dst;
-       int res;
+       int res = 0;
 
        if (xfrm_decode_session(skb, &fl, family) < 0) {
                XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
@@ -2170,9 +2180,12 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
        }
 
        skb_dst_force(skb);
-       dst = skb_dst(skb);
 
-       res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0;
+       dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
+       if (IS_ERR(dst)) {
+               res = 1;
+               dst = NULL;
+       }
        skb_dst_set(skb, dst);
        return res;
 }
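
The __xfrm_route_forward() hunk above already shows one converted caller; the sketch below spells out the general pattern of the new xfrm_lookup() signature (the wrapper name and its in/out parameter are illustrative, not part of the patch):

/* Hypothetical wrapper: xfrm_lookup() now returns the resulting route
 * directly (the original dst when no policy applies, an xfrm bundle when
 * transforms are needed, or a blackhole route while states are still being
 * acquired) and reports failure as an ERR_PTR() value instead of filling in
 * a struct dst_entry ** and returning 0/-errno.
 */
static int my_route_output(struct net *net, struct flowi *fl,
			   struct sock *sk, struct dst_entry **res)
{
	struct dst_entry *dst;

	dst = xfrm_lookup(net, *res, fl, sk, 0);
	if (IS_ERR(dst)) {
		*res = NULL;	/* the error path released the original dst */
		return PTR_ERR(dst);
	}

	*res = dst;
	return 0;
}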
@@ -2210,7 +2223,7 @@ static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
 
 static int stale_bundle(struct dst_entry *dst)
 {
-       return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
+       return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC);
 }
 
 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
@@ -2283,7 +2296,7 @@ static void xfrm_init_pmtu(struct dst_entry *dst)
  */
 
 static int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
-               struct flowi *fl, int family, int strict)
+                         const struct flowi *fl, int family)
 {
        struct dst_entry *dst = &first->u.dst;
        struct xfrm_dst *last;
@@ -2320,11 +2333,6 @@ static int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
                    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
                        return 0;
 
-               if (strict && fl &&
-                   !(dst->xfrm->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
-                   !xfrm_state_addr_flow_check(dst->xfrm, fl, family))
-                       return 0;
-
                mtu = dst_mtu(dst->child);
                if (xdst->child_mtu_cached != mtu) {
                        last = xdst;
@@ -2735,8 +2743,8 @@ EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
 #endif
 
 #ifdef CONFIG_XFRM_MIGRATE
-static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
-                                      struct xfrm_selector *sel_tgt)
+static int xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
+                                      const struct xfrm_selector *sel_tgt)
 {
        if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
                if (sel_tgt->family == sel_cmp->family &&
@@ -2756,7 +2764,7 @@ static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
        return 0;
 }
 
-static struct xfrm_policy * xfrm_migrate_policy_find(struct xfrm_selector *sel,
+static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel,
                                                     u8 dir, u8 type)
 {
        struct xfrm_policy *pol, *ret = NULL;
@@ -2792,7 +2800,7 @@ static struct xfrm_policy * xfrm_migrate_policy_find(struct xfrm_selector *sel,
        return ret;
 }
 
-static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
+static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
 {
        int match = 0;
 
@@ -2862,7 +2870,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
        return 0;
 }
 
-static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
+static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
 {
        int i, j;
 
@@ -2896,7 +2904,7 @@ static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
        return 0;
 }
 
-int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
+int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
                 struct xfrm_migrate *m, int num_migrate,
                 struct xfrm_kmaddress *k)
 {
index 220ebc05c7afc6b602785f6e402d44098cc0bd8d..81221d9cbf0632b2757a6cf0890b4619fea50cb5 100644 (file)
@@ -50,8 +50,8 @@ static void xfrm_audit_state_replay(struct xfrm_state *x,
 #endif /* CONFIG_AUDITSYSCALL */
 
 static inline unsigned int xfrm_dst_hash(struct net *net,
-                                        xfrm_address_t *daddr,
-                                        xfrm_address_t *saddr,
+                                        const xfrm_address_t *daddr,
+                                        const xfrm_address_t *saddr,
                                         u32 reqid,
                                         unsigned short family)
 {
@@ -59,15 +59,16 @@ static inline unsigned int xfrm_dst_hash(struct net *net,
 }
 
 static inline unsigned int xfrm_src_hash(struct net *net,
-                                        xfrm_address_t *daddr,
-                                        xfrm_address_t *saddr,
+                                        const xfrm_address_t *daddr,
+                                        const xfrm_address_t *saddr,
                                         unsigned short family)
 {
        return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
 }
 
 static inline unsigned int
-xfrm_spi_hash(struct net *net, xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
+xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
+             __be32 spi, u8 proto, unsigned short family)
 {
        return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
 }
@@ -656,9 +657,9 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
 EXPORT_SYMBOL(xfrm_sad_getinfo);
 
 static int
-xfrm_init_tempstate(struct xfrm_state *x, struct flowi *fl,
-                   struct xfrm_tmpl *tmpl,
-                   xfrm_address_t *daddr, xfrm_address_t *saddr,
+xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
+                   const struct xfrm_tmpl *tmpl,
+                   const xfrm_address_t *daddr, const xfrm_address_t *saddr,
                    unsigned short family)
 {
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
@@ -677,7 +678,10 @@ xfrm_init_tempstate(struct xfrm_state *x, struct flowi *fl,
        return 0;
 }
 
-static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark, xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
+static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
+                                             const xfrm_address_t *daddr,
+                                             __be32 spi, u8 proto,
+                                             unsigned short family)
 {
        unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
        struct xfrm_state *x;
@@ -699,7 +703,10 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark, xfrm_ad
        return NULL;
 }
 
-static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark, xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
+static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
+                                                    const xfrm_address_t *daddr,
+                                                    const xfrm_address_t *saddr,
+                                                    u8 proto, unsigned short family)
 {
        unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
        struct xfrm_state *x;
@@ -746,8 +753,7 @@ static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
 }
 
 static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
-                              struct flowi *fl, unsigned short family,
-                              xfrm_address_t *daddr, xfrm_address_t *saddr,
+                              const struct flowi *fl, unsigned short family,
                               struct xfrm_state **best, int *acq_in_progress,
                               int *error)
 {
@@ -784,8 +790,8 @@ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
 }
 
 struct xfrm_state *
-xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
-               struct flowi *fl, struct xfrm_tmpl *tmpl,
+xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+               const struct flowi *fl, struct xfrm_tmpl *tmpl,
                struct xfrm_policy *pol, int *err,
                unsigned short family)
 {
@@ -813,7 +819,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
                    tmpl->mode == x->props.mode &&
                    tmpl->id.proto == x->id.proto &&
                    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
-                       xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
+                       xfrm_state_look_at(pol, x, fl, encap_family,
                                           &best, &acquire_in_progress, &error);
        }
        if (best)
@@ -829,7 +835,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
                    tmpl->mode == x->props.mode &&
                    tmpl->id.proto == x->id.proto &&
                    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
-                       xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
+                       xfrm_state_look_at(pol, x, fl, encap_family,
                                           &best, &acquire_in_progress, &error);
        }
 
@@ -991,7 +997,11 @@ void xfrm_state_insert(struct xfrm_state *x)
 EXPORT_SYMBOL(xfrm_state_insert);
 
 /* xfrm_state_lock is held */
-static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m, unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
+static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
+                                         unsigned short family, u8 mode,
+                                         u32 reqid, u8 proto,
+                                         const xfrm_address_t *daddr,
+                                         const xfrm_address_t *saddr, int create)
 {
        unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
        struct hlist_node *entry;
@@ -1369,7 +1379,7 @@ int xfrm_state_check_expire(struct xfrm_state *x)
 EXPORT_SYMBOL(xfrm_state_check_expire);
 
 struct xfrm_state *
-xfrm_state_lookup(struct net *net, u32 mark, xfrm_address_t *daddr, __be32 spi,
+xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
                  u8 proto, unsigned short family)
 {
        struct xfrm_state *x;
@@ -1383,7 +1393,7 @@ EXPORT_SYMBOL(xfrm_state_lookup);
 
 struct xfrm_state *
 xfrm_state_lookup_byaddr(struct net *net, u32 mark,
-                        xfrm_address_t *daddr, xfrm_address_t *saddr,
+                        const xfrm_address_t *daddr, const xfrm_address_t *saddr,
                         u8 proto, unsigned short family)
 {
        struct xfrm_state *x;
@@ -1397,7 +1407,7 @@ EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
 
 struct xfrm_state *
 xfrm_find_acq(struct net *net, struct xfrm_mark *mark, u8 mode, u32 reqid, u8 proto,
-             xfrm_address_t *daddr, xfrm_address_t *saddr,
+             const xfrm_address_t *daddr, const xfrm_address_t *saddr,
              int create, unsigned short family)
 {
        struct xfrm_state *x;
@@ -1727,7 +1737,7 @@ void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
 static LIST_HEAD(xfrm_km_list);
 static DEFINE_RWLOCK(xfrm_km_lock);
 
-void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
+void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
 {
        struct xfrm_mgr *km;
 
@@ -1738,7 +1748,7 @@ void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
        read_unlock(&xfrm_km_lock);
 }
 
-void km_state_notify(struct xfrm_state *x, struct km_event *c)
+void km_state_notify(struct xfrm_state *x, const struct km_event *c)
 {
        struct xfrm_mgr *km;
        read_lock(&xfrm_km_lock);
@@ -1819,9 +1829,9 @@ void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
 EXPORT_SYMBOL(km_policy_expired);
 
 #ifdef CONFIG_XFRM_MIGRATE
-int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
-              struct xfrm_migrate *m, int num_migrate,
-              struct xfrm_kmaddress *k)
+int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+              const struct xfrm_migrate *m, int num_migrate,
+              const struct xfrm_kmaddress *k)
 {
        int err = -EINVAL;
        int ret;
index 61291965c5f60a0354ee499f664df340845bc9e7..468ab60d3dc0d7a9ef0afccf6ef4262531d1fcf3 100644 (file)
@@ -234,7 +234,7 @@ out:
 }
 
 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
-                          struct xfrm_algo_desc *(*get_byname)(char *, int),
+                          struct xfrm_algo_desc *(*get_byname)(const char *, int),
                           struct nlattr *rta)
 {
        struct xfrm_algo *p, *ualg;
@@ -497,9 +497,9 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct xfrm_state *x;
        int err;
        struct km_event c;
-       uid_t loginuid = NETLINK_CB(skb).loginuid;
-       u32 sessionid = NETLINK_CB(skb).sessionid;
-       u32 sid = NETLINK_CB(skb).sid;
+       uid_t loginuid = audit_get_loginuid(current);
+       u32 sessionid = audit_get_sessionid(current);
+       u32 sid;
 
        err = verify_newsa_info(p, attrs);
        if (err)
@@ -515,6 +515,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        else
                err = xfrm_state_update(x);
 
+       security_task_getsecid(current, &sid);
        xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid);
 
        if (err < 0) {
@@ -575,9 +576,9 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        int err = -ESRCH;
        struct km_event c;
        struct xfrm_usersa_id *p = nlmsg_data(nlh);
-       uid_t loginuid = NETLINK_CB(skb).loginuid;
-       u32 sessionid = NETLINK_CB(skb).sessionid;
-       u32 sid = NETLINK_CB(skb).sid;
+       uid_t loginuid = audit_get_loginuid(current);
+       u32 sessionid = audit_get_sessionid(current);
+       u32 sid;
 
        x = xfrm_user_state_lookup(net, p, attrs, &err);
        if (x == NULL)
@@ -602,6 +603,7 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        km_state_notify(x, &c);
 
 out:
+       security_task_getsecid(current, &sid);
        xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid);
        xfrm_state_put(x);
        return err;
@@ -1265,9 +1267,9 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct km_event c;
        int err;
        int excl;
-       uid_t loginuid = NETLINK_CB(skb).loginuid;
-       u32 sessionid = NETLINK_CB(skb).sessionid;
-       u32 sid = NETLINK_CB(skb).sid;
+       uid_t loginuid = audit_get_loginuid(current);
+       u32 sessionid = audit_get_sessionid(current);
+       u32 sid;
 
        err = verify_newpolicy_info(p);
        if (err)
@@ -1286,6 +1288,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
         * a type XFRM_MSG_UPDPOLICY - JHS */
        excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
        err = xfrm_policy_insert(p->dir, xp, excl);
+       security_task_getsecid(current, &sid);
        xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);
 
        if (err) {
@@ -1522,10 +1525,11 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
                                            NETLINK_CB(skb).pid);
                }
        } else {
-               uid_t loginuid = NETLINK_CB(skb).loginuid;
-               u32 sessionid = NETLINK_CB(skb).sessionid;
-               u32 sid = NETLINK_CB(skb).sid;
+               uid_t loginuid = audit_get_loginuid(current);
+               u32 sessionid = audit_get_sessionid(current);
+               u32 sid;
 
+               security_task_getsecid(current, &sid);
                xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid,
                                         sid);
 
@@ -1553,9 +1557,9 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct xfrm_audit audit_info;
        int err;
 
-       audit_info.loginuid = NETLINK_CB(skb).loginuid;
-       audit_info.sessionid = NETLINK_CB(skb).sessionid;
-       audit_info.secid = NETLINK_CB(skb).sid;
+       audit_info.loginuid = audit_get_loginuid(current);
+       audit_info.sessionid = audit_get_sessionid(current);
+       security_task_getsecid(current, &audit_info.secid);
        err = xfrm_state_flush(net, p->proto, &audit_info);
        if (err) {
                if (err == -ESRCH) /* empty table */
@@ -1582,7 +1586,7 @@ static inline size_t xfrm_aevent_msgsize(void)
               + nla_total_size(4); /* XFRM_AE_ETHR */
 }
 
-static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
+static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
 {
        struct xfrm_aevent_id *id;
        struct nlmsghdr *nlh;
@@ -1720,9 +1724,9 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (err)
                return err;
 
-       audit_info.loginuid = NETLINK_CB(skb).loginuid;
-       audit_info.sessionid = NETLINK_CB(skb).sessionid;
-       audit_info.secid = NETLINK_CB(skb).sid;
+       audit_info.loginuid = audit_get_loginuid(current);
+       audit_info.sessionid = audit_get_sessionid(current);
+       security_task_getsecid(current, &audit_info.secid);
        err = xfrm_policy_flush(net, type, &audit_info);
        if (err) {
                if (err == -ESRCH) /* empty table */
@@ -1789,9 +1793,11 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        err = 0;
        if (up->hard) {
-               uid_t loginuid = NETLINK_CB(skb).loginuid;
-               uid_t sessionid = NETLINK_CB(skb).sessionid;
-               u32 sid = NETLINK_CB(skb).sid;
+               uid_t loginuid = audit_get_loginuid(current);
+               u32 sessionid = audit_get_sessionid(current);
+               u32 sid;
+
+               security_task_getsecid(current, &sid);
                xfrm_policy_delete(xp, p->dir);
                xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid);
 
@@ -1830,9 +1836,11 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
        km_state_expired(x, ue->hard, current->pid);
 
        if (ue->hard) {
-               uid_t loginuid = NETLINK_CB(skb).loginuid;
-               uid_t sessionid = NETLINK_CB(skb).sessionid;
-               u32 sid = NETLINK_CB(skb).sid;
+               uid_t loginuid = audit_get_loginuid(current);
+               u32 sessionid = audit_get_sessionid(current);
+               u32 sid;
+
+               security_task_getsecid(current, &sid);
                __xfrm_state_delete(x);
                xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid);
        }
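
The xfrm_user hunks above stop reading loginuid, sessionid and secid out of the netlink skb's control block and instead take them from the calling task at the point the operation is audited. A minimal sketch of that repeated pattern, with a hypothetical helper name; each hunk open-codes the same three calls inline rather than using a helper like this:

#include <linux/audit.h>
#include <linux/sched.h>
#include <linux/security.h>

/* Hypothetical helper illustrating the pattern used at every
 * xfrm_user audit site in the hunks above. */
static void xfrm_fill_audit_from_current(uid_t *loginuid, u32 *sessionid,
                                         u32 *secid)
{
        *loginuid  = audit_get_loginuid(current);   /* audit login uid of the caller */
        *sessionid = audit_get_sessionid(current);  /* audit session id of the caller */
        security_task_getsecid(current, secid);     /* LSM secid of the caller */
}
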
@@ -1986,7 +1994,7 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
 #endif
 
 #ifdef CONFIG_XFRM_MIGRATE
-static int copy_to_user_migrate(struct xfrm_migrate *m, struct sk_buff *skb)
+static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
 {
        struct xfrm_user_migrate um;
 
@@ -2004,7 +2012,7 @@ static int copy_to_user_migrate(struct xfrm_migrate *m, struct sk_buff *skb)
        return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
 }
 
-static int copy_to_user_kmaddress(struct xfrm_kmaddress *k, struct sk_buff *skb)
+static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
 {
        struct xfrm_user_kmaddress uk;
 
@@ -2025,11 +2033,11 @@ static inline size_t xfrm_migrate_msgsize(int num_migrate, int with_kma)
              + userpolicy_type_attrsize();
 }
 
-static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m,
-                        int num_migrate, struct xfrm_kmaddress *k,
-                        struct xfrm_selector *sel, u8 dir, u8 type)
+static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
+                        int num_migrate, const struct xfrm_kmaddress *k,
+                        const struct xfrm_selector *sel, u8 dir, u8 type)
 {
-       struct xfrm_migrate *mp;
+       const struct xfrm_migrate *mp;
        struct xfrm_userpolicy_id *pol_id;
        struct nlmsghdr *nlh;
        int i;
@@ -2061,9 +2069,9 @@ nlmsg_failure:
        return -EMSGSIZE;
 }
 
-static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
-                            struct xfrm_migrate *m, int num_migrate,
-                            struct xfrm_kmaddress *k)
+static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+                            const struct xfrm_migrate *m, int num_migrate,
+                            const struct xfrm_kmaddress *k)
 {
        struct net *net = &init_net;
        struct sk_buff *skb;
@@ -2079,9 +2087,9 @@ static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
 }
 #else
-static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
-                            struct xfrm_migrate *m, int num_migrate,
-                            struct xfrm_kmaddress *k)
+static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+                            const struct xfrm_migrate *m, int num_migrate,
+                            const struct xfrm_kmaddress *k)
 {
        return -ENOPROTOOPT;
 }
@@ -2220,7 +2228,7 @@ static inline size_t xfrm_expire_msgsize(void)
               + nla_total_size(sizeof(struct xfrm_mark));
 }
 
-static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
+static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
 {
        struct xfrm_user_expire *ue;
        struct nlmsghdr *nlh;
@@ -2242,7 +2250,7 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
-static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
+static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
 {
        struct net *net = xs_net(x);
        struct sk_buff *skb;
@@ -2259,7 +2267,7 @@ static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
 }
 
-static int xfrm_aevent_state_notify(struct xfrm_state *x, struct km_event *c)
+static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
 {
        struct net *net = xs_net(x);
        struct sk_buff *skb;
@@ -2274,7 +2282,7 @@ static int xfrm_aevent_state_notify(struct xfrm_state *x, struct km_event *c)
        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
 }
 
-static int xfrm_notify_sa_flush(struct km_event *c)
+static int xfrm_notify_sa_flush(const struct km_event *c)
 {
        struct net *net = c->net;
        struct xfrm_usersa_flush *p;
@@ -2330,7 +2338,7 @@ static inline size_t xfrm_sa_len(struct xfrm_state *x)
        return l;
 }
 
-static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c)
+static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
 {
        struct net *net = xs_net(x);
        struct xfrm_usersa_info *p;
@@ -2387,7 +2395,7 @@ nla_put_failure:
        return -1;
 }
 
-static int xfrm_send_state_notify(struct xfrm_state *x, struct km_event *c)
+static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
 {
 
        switch (c->event) {
@@ -2546,7 +2554,7 @@ static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
 }
 
 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
-                          int dir, struct km_event *c)
+                          int dir, const struct km_event *c)
 {
        struct xfrm_user_polexpire *upe;
        struct nlmsghdr *nlh;
@@ -2576,7 +2584,7 @@ nlmsg_failure:
        return -EMSGSIZE;
 }
 
-static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
+static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
 {
        struct net *net = xp_net(xp);
        struct sk_buff *skb;
@@ -2591,7 +2599,7 @@ static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_eve
        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
 }
 
-static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
+static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
 {
        struct net *net = xp_net(xp);
        struct xfrm_userpolicy_info *p;
@@ -2656,7 +2664,7 @@ nlmsg_failure:
        return -1;
 }
 
-static int xfrm_notify_policy_flush(struct km_event *c)
+static int xfrm_notify_policy_flush(const struct km_event *c)
 {
        struct net *net = c->net;
        struct nlmsghdr *nlh;
@@ -2681,7 +2689,7 @@ nlmsg_failure:
        return -1;
 }
 
-static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
+static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
 {
 
        switch (c->event) {
index 2a5df2b7da83be0c3d69d9bcd33dd3bd4d935a15..b8eeaee5c99ee7cf5c868e9074a6d70f4eeeb856 100644 (file)
@@ -760,7 +760,7 @@ static int cap_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 sk_sid, u8 dir)
 
 static int cap_xfrm_state_pol_flow_match(struct xfrm_state *x,
                                         struct xfrm_policy *xp,
-                                        struct flowi *fl)
+                                        const struct flowi *fl)
 {
        return 1;
 }
index 64c2ed9c90158d1b7df59eff08595566d3dc1927..a83e607d91c343744d66624608661320776a0c87 100644 (file)
@@ -52,13 +52,12 @@ static void warn_setuid_and_fcaps_mixed(const char *fname)
 
 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
 {
-       NETLINK_CB(skb).eff_cap = current_cap();
        return 0;
 }
 
 int cap_netlink_recv(struct sk_buff *skb, int cap)
 {
-       if (!cap_raised(NETLINK_CB(skb).eff_cap, cap))
+       if (!cap_raised(current_cap(), cap))
                return -EPERM;
        return 0;
 }
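
With eff_cap no longer stashed in the netlink control block at send time, cap_netlink_recv() tests current_cap(), the capabilities of the task in whose context the message is being processed. A minimal sketch of a caller, assuming the security_netlink_recv() hook of this kernel generation and a hypothetical handler name:

#include <linux/capability.h>
#include <linux/netlink.h>
#include <linux/security.h>

/* handle_privileged_request() is hypothetical; only the capability check
 * is the point. The check now resolves against the current task's
 * capability set rather than a value cached in the skb. */
static int handle_privileged_request(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        if (security_netlink_recv(skb, CAP_NET_ADMIN))
                return -EPERM;
        /* ... privileged processing ... */
        return 0;
}
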
index 7b7308ace8c5b1b26bd79369d8c142ea6e14734c..8ef1f7dff277993e8195d719b4ea9314fd582110 100644 (file)
@@ -1233,7 +1233,8 @@ int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
 }
 
 int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
-                                      struct xfrm_policy *xp, struct flowi *fl)
+                                      struct xfrm_policy *xp,
+                                      const struct flowi *fl)
 {
        return security_ops->xfrm_state_pol_flow_match(x, xp, fl);
 }
index c8d699270687e2447976af4b63b51d73749531a6..cef42f5d69a2e48ff2199694b928afe937818ca3 100644 (file)
@@ -4669,6 +4669,7 @@ static int selinux_netlink_recv(struct sk_buff *skb, int capability)
 {
        int err;
        struct common_audit_data ad;
+       u32 sid;
 
        err = cap_netlink_recv(skb, capability);
        if (err)
@@ -4677,8 +4678,9 @@ static int selinux_netlink_recv(struct sk_buff *skb, int capability)
        COMMON_AUDIT_DATA_INIT(&ad, CAP);
        ad.u.cap = capability;
 
-       return avc_has_perm(NETLINK_CB(skb).sid, NETLINK_CB(skb).sid,
-                           SECCLASS_CAPABILITY, CAP_TO_MASK(capability), &ad);
+       security_task_getsecid(current, &sid);
+       return avc_has_perm(sid, sid, SECCLASS_CAPABILITY,
+                           CAP_TO_MASK(capability), &ad);
 }
 
 static int ipc_alloc_security(struct task_struct *task,
index 13128f9a3e5aef56b7453af34ce86aa0e99f0513..b43813c9e049c9491cd37283c5f45bf494a6efcd 100644 (file)
@@ -19,7 +19,7 @@ void selinux_xfrm_state_free(struct xfrm_state *x);
 int selinux_xfrm_state_delete(struct xfrm_state *x);
 int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
 int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
-                       struct xfrm_policy *xp, struct flowi *fl);
+                       struct xfrm_policy *xp, const struct flowi *fl);
 
 /*
  * Extract the security blob from the sock (it's actually on the socket)
index fff78d3b51a2ecddd1f18dd959ad99c80c090742..c43ab542246c2c1493355d5d3dd7f71584ab9512 100644 (file)
@@ -112,7 +112,7 @@ int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
  */
 
 int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp,
-                       struct flowi *fl)
+                       const struct flowi *fl)
 {
        u32 state_sid;
        int rc;